
Re-generate Image Analysis SDK with: tox run -e generate -c ..\..\..\eng\tox\tox.ini --root . (#33772)

* After running tox run -e generate -c ..\..\..\eng\tox\tox.ini --root .

* Run again, this time with added pyproject.toml

* Pick up new TypeSpec change, with prop name `list` instead of `values` (see the sketch after this list)

* Update samples & test to use 'list' prop name instead of 'values'

* No need to suppress mypy `attr-defined` error. Update README.md code snippets

* Use uniform style in sample code

* Hand fix emitter bug (GitHub issue filed). Remove mypy.ini (no longer needed). Fix code style I don't like

* Fix pylint issues
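
For context, the `values` → `list` rename is purely on the Python surface: the regenerated models declare the collection attribute as `list` but keep the wire name through `rest_field(name="values")`, so the REST payload is unchanged while callers switch from `result.tags.values` to `result.tags.list`. Below is a minimal sketch of the caller-side change, assuming the same `VISION_ENDPOINT`/`VISION_KEY` environment variables and `sample.jpg` file used throughout the samples.

```python
import os

from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

# Same client setup as the samples in this PR; endpoint and key come from env vars.
client = ImageAnalysisClient(
    endpoint=os.environ["VISION_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["VISION_KEY"]),
)

# Load the image to analyze into a 'bytes' object.
with open("sample.jpg", "rb") as f:
    image_data = f.read()

# Run a Tags-only analysis (synchronous call).
result = client.analyze(
    image_data=image_data,
    visual_features=[VisualFeatures.TAGS],
)

# Before this commit the collection was exposed as `result.tags.values`;
# after the TypeSpec change it is `result.tags.list` (the generated model maps
# it back to the "values" JSON property via rest_field(name="values")).
if result.tags is not None:
    for tag in result.tags.list:
        print(f"  '{tag.name}', Confidence {tag.confidence:.4f}")
```

This is also why `mypy.ini` could be removed: an attribute named `values` shadowed the `values()` method defined by the `_MyMutableMapping` model base class, which is what produced the suppressed assignment errors listed in that file; with the attribute renamed to `list`, the conflict goes away.
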
This commit is contained in:
Darren Cohen 2024-01-10 12:10:50 -08:00 committed by GitHub
Parent da79b36422
Commit 9fb905c405
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
28 changed files with 455 additions and 460 deletions

View file

@ -53,7 +53,7 @@ from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -65,8 +65,8 @@ except KeyError:
# Create an Image Analysis client for synchronous operations
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
```
@ -135,14 +135,14 @@ Notes:
```python
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Get a caption for the image. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.CAPTION ],
gender_neutral_caption = True # Optional (default is False)
image_data=image_data,
visual_features=[VisualFeatures.CAPTION],
gender_neutral_caption=True, # Optional (default is False)
)
# Print caption results to the console
@ -165,9 +165,9 @@ This example is similar to the above, except it calls the `analyze` method and p
```python
# Get a caption for the image. This will be a synchronous (blocking) call.
result = client.analyze(
image_url = "https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features = [ VisualFeatures.CAPTION ],
gender_neutral_caption = True # Optional (default is False)
image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features=[VisualFeatures.CAPTION],
gender_neutral_caption=True, # Optional (default is False)
)
# Print caption results to the console
@ -187,13 +187,13 @@ This example demonstrates how to extract printed or hand-written text for the im
```python
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Extract text (OCR) from an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.READ ]
image_data=image_data,
visual_features=[VisualFeatures.READ]
)
# Print text (OCR) analysis results to the console
@ -221,8 +221,8 @@ This example is similar to the above, except it calls the `analyze` method and p
```python
# Extract text (OCR) from an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_url = "https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features = [ VisualFeatures.READ ]
image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features=[VisualFeatures.READ]
)
# Print text (OCR) analysis results to the console
@ -277,21 +277,21 @@ The client uses the standard [Python logging library](https://docs.python.org/3/
import sys
import logging
# Acquire the logger for this client library. Use 'azure' to affect both
# Acquire the logger for this client library. Use 'azure' to affect both
# `azure.core` and `azure.ai.vision.imageanalysis` libraries.
logger = logging.getLogger('azure')
logger = logging.getLogger("azure")
# Set the desired logging level. logging.INFO or logging.DEBUG are good options.
logger.setLevel(logging.INFO)
# Direct logging output to stdout (the default):
handler = logging.StreamHandler(stream = sys.stdout)
handler = logging.StreamHandler(stream=sys.stdout)
# Or direct logging output to a file:
# handler = logging.FileHandler(filename = 'sample.log')
logger.addHandler(handler)
# Optional: change the default logging format. Here we add a timestamp.
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
handler.setFormatter(formatter)
```
@ -304,9 +304,9 @@ By default logs redact the values of URL query strings, the values of some HTTP
```python
# Create an Image Analysis client with non-redacted logging
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key),
logging_enable = True
endpoint=endpoint,
credential=AzureKeyCredential(key),
logging_enable=True
)
```

View file

@ -6,21 +6,17 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._client import ImageAnalysisClient
from ._patch import ImageAnalysisClient
from ._version import VERSION
__version__ = VERSION
try:
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
except ImportError:
_patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"ImageAnalysisClient",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()

View file

@ -147,8 +147,7 @@ class ImageAnalysisClientOperationsMixin(ImageAnalysisClientMixinABC):
:paramtype visual_features: list[str or ~azure.ai.vision.imageanalysis.models.VisualFeatures]
:keyword language: The desired language for result generation (a two-letter language code).
If this option is not specified, the default value 'en' is used (English).
See https://aka.ms/cv-languages for a list of supported languages.
At the moment, only tags can be generated in non-English languages. Default value is None.
See https://aka.ms/cv-languages for a list of supported languages. Default value is None.
:paramtype language: str
:keyword gender_neutral_caption: Boolean flag for enabling gender-neutral captioning for
Caption and Dense Captions features.
@ -468,8 +467,7 @@ class ImageAnalysisClientOperationsMixin(ImageAnalysisClientMixinABC):
:paramtype visual_features: list[str or ~azure.ai.vision.imageanalysis.models.VisualFeatures]
:keyword language: The desired language for result generation (a two-letter language code).
If this option is not specified, the default value 'en' is used (English).
See https://aka.ms/cv-languages for a list of supported languages.
At the moment, only tags can be generated in non-English languages. Default value is None.
See https://aka.ms/cv-languages for a list of supported languages. Default value is None.
:paramtype language: str
:keyword gender_neutral_caption: Boolean flag for enabling gender-neutral captioning for
Caption and Dense Captions features.

View file

@ -5,7 +5,8 @@
# --------------------------------------------------------------------------
from typing import List
__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
def patch_sdk():
"""Do not remove from this file.

View file

@ -21,6 +21,7 @@ from . import models as _models
from ._operations._operations import ImageAnalysisClientOperationsMixin
from ._client import ImageAnalysisClient as ImageAnalysisClientGenerated
class ImageAnalysisClient(ImageAnalysisClientGenerated):
"""ImageAnalysisClient.
@ -115,29 +116,32 @@ class ImageAnalysisClient(ImageAnalysisClientGenerated):
visual_features_impl: List[Union[str, _models.VisualFeatures]] = list(visual_features)
if image_url is not None:
return ImageAnalysisClientOperationsMixin._analyze_from_url( # pylint: disable=protected-access
return ImageAnalysisClientOperationsMixin._analyze_from_url( # pylint: disable=protected-access
self,
image_content = _models._models.ImageUrl(url = image_url), # pylint: disable=protected-access
visual_features = visual_features_impl,
language = language,
gender_neutral_caption = gender_neutral_caption,
smart_crops_aspect_ratios = smart_crops_aspect_ratios,
model_version = model_version,
**kwargs)
image_content=_models._models.ImageUrl(url=image_url), # pylint: disable=protected-access
visual_features=visual_features_impl,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
**kwargs
)
if image_data is not None:
return ImageAnalysisClientOperationsMixin._analyze_from_buffer( # pylint: disable=protected-access
return ImageAnalysisClientOperationsMixin._analyze_from_buffer( # pylint: disable=protected-access
self,
image_content = image_data,
visual_features = visual_features_impl,
language = language,
gender_neutral_caption = gender_neutral_caption,
smart_crops_aspect_ratios = smart_crops_aspect_ratios,
model_version = model_version,
**kwargs)
image_content=image_data,
visual_features=visual_features_impl,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
**kwargs
)
raise ValueError("Either image_data or image_url must be specified.")
__all__: List[str] = [
"ImageAnalysisClient"
] # Add all objects you want publicly available to users at this package level

View file

@ -6,18 +6,14 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._client import ImageAnalysisClient
from ._patch import ImageAnalysisClient
try:
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
except ImportError:
_patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"ImageAnalysisClient",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()

View file

@ -5,7 +5,8 @@
# --------------------------------------------------------------------------
from typing import List
__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
def patch_sdk():
"""Do not remove from this file.

View file

@ -21,6 +21,7 @@ from .. import models as _models
from ._operations._operations import ImageAnalysisClientOperationsMixin
from ._client import ImageAnalysisClient as ImageAnalysisClientGenerated
class ImageAnalysisClient(ImageAnalysisClientGenerated):
"""ImageAnalysisClient.
@ -115,29 +116,32 @@ class ImageAnalysisClient(ImageAnalysisClientGenerated):
visual_features_impl: List[Union[str, _models.VisualFeatures]] = list(visual_features)
if image_url is not None:
return await ImageAnalysisClientOperationsMixin._analyze_from_url( # pylint: disable=protected-access
return await ImageAnalysisClientOperationsMixin._analyze_from_url( # pylint: disable=protected-access
self,
image_content = _models._models.ImageUrl(url = image_url), # pylint: disable=protected-access
visual_features = visual_features_impl,
language = language,
gender_neutral_caption = gender_neutral_caption,
smart_crops_aspect_ratios = smart_crops_aspect_ratios,
model_version = model_version,
**kwargs)
image_content=_models._models.ImageUrl(url=image_url), # pylint: disable=protected-access
visual_features=visual_features_impl,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
**kwargs
)
if image_data is not None:
return await ImageAnalysisClientOperationsMixin._analyze_from_buffer( # pylint: disable=protected-access
return await ImageAnalysisClientOperationsMixin._analyze_from_buffer( # pylint: disable=protected-access
self,
image_content = image_data,
visual_features = visual_features_impl,
language = language,
gender_neutral_caption = gender_neutral_caption,
smart_crops_aspect_ratios = smart_crops_aspect_ratios,
model_version = model_version,
**kwargs)
image_content=image_data,
visual_features=visual_features_impl,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
**kwargs
)
raise ValueError("Either image_data or image_url must be specified.")
__all__: List[str] = [
"ImageAnalysisClient"
] # Add all objects you want publicly available to users at this package level

View file

@ -157,18 +157,18 @@ class DenseCaptionsResult(_model_base.Model):
All required parameters must be populated in order to send to server.
:ivar values: The list of image captions. Required.
:vartype values: list[~azure.ai.vision.imageanalysis.models.DenseCaption]
:ivar list: The list of image captions. Required.
:vartype list: list[~azure.ai.vision.imageanalysis.models.DenseCaption]
"""
values: List["_models.DenseCaption"] = rest_field()
list: List["_models.DenseCaption"] = rest_field(name="values")
"""The list of image captions. Required."""
@overload
def __init__(
self,
*,
values: List["_models.DenseCaption"],
list: List["_models.DenseCaption"],
):
...
@ -627,18 +627,18 @@ class ObjectsResult(_model_base.Model):
All required parameters must be populated in order to send to server.
:ivar values: A list of physical objects detected in an image and their location. Required.
:vartype values: list[~azure.ai.vision.imageanalysis.models.DetectedObject]
:ivar list: A list of physical objects detected in an image and their location. Required.
:vartype list: list[~azure.ai.vision.imageanalysis.models.DetectedObject]
"""
values: List["_models.DetectedObject"] = rest_field()
list: List["_models.DetectedObject"] = rest_field(name="values")
"""A list of physical object detected in an image and their location. Required."""
@overload
def __init__(
self,
*,
values: List["_models.DetectedObject"],
list: List["_models.DetectedObject"],
):
...
@ -658,18 +658,18 @@ class PeopleResult(_model_base.Model):
All required parameters must be populated in order to send to server.
:ivar values: A list of people detected in an image and their location. Required.
:vartype values: list[~azure.ai.vision.imageanalysis.models.DetectedPerson]
:ivar list: A list of people detected in an image and their location. Required.
:vartype list: list[~azure.ai.vision.imageanalysis.models.DetectedPerson]
"""
values: List["_models.DetectedPerson"] = rest_field()
list: List["_models.DetectedPerson"] = rest_field(name="values")
"""A list of people detected in an image and their location. Required."""
@overload
def __init__(
self,
*,
values: List["_models.DetectedPerson"],
list: List["_models.DetectedPerson"],
):
...
@ -725,18 +725,18 @@ class SmartCropsResult(_model_base.Model):
All required parameters must be populated in order to send to server.
:ivar values: A list of crop regions. Required.
:vartype values: list[~azure.ai.vision.imageanalysis.models.CropRegion]
:ivar list: A list of crop regions. Required.
:vartype list: list[~azure.ai.vision.imageanalysis.models.CropRegion]
"""
values: List["_models.CropRegion"] = rest_field()
list: List["_models.CropRegion"] = rest_field(name="values")
"""A list of crop regions. Required."""
@overload
def __init__(
self,
*,
values: List["_models.CropRegion"],
list: List["_models.CropRegion"],
):
...
@ -758,18 +758,18 @@ class TagsResult(_model_base.Model):
All required parameters must be populated in order to send to server.
:ivar values: A list of tags. Required.
:vartype values: list[~azure.ai.vision.imageanalysis.models.DetectedTag]
:ivar list: A list of tags. Required.
:vartype list: list[~azure.ai.vision.imageanalysis.models.DetectedTag]
"""
values: List["_models.DetectedTag"] = rest_field()
list: List["_models.DetectedTag"] = rest_field(name="values")
"""A list of tags. Required."""
@overload
def __init__(
self,
*,
values: List["_models.DetectedTag"],
list: List["_models.DetectedTag"],
):
...

View file

@ -1,13 +0,0 @@
# Per advice from Azure SDK team, suppress the assignment error in _models.py. This file was auto-generated from TypeSpec files, and produces these MyPy errors:
# azure\ai\vision\imageanalysis\models\_models.py:164: error: Incompatible types in assignment (expression has type "List[DenseCaption]", base class "_MyMutableMapping" defined the type as "Callable[[_MyMutableMapping], ValuesView[Any]]") [assignment]
# azure\ai\vision\imageanalysis\models\_models.py:634: error: Incompatible types in assignment (expression has type "List[DetectedObject]", base class "_MyMutableMapping" defined the type as "Callable[[_MyMutableMapping], ValuesView[Any]]") [assignment]
# azure\ai\vision\imageanalysis\models\_models.py:665: error: Incompatible types in assignment (expression has type "List[DetectedPerson]", base class "_MyMutableMapping" defined the type as "Callable[[_MyMutableMapping], ValuesView[Any]]") [assignment]
# azure\ai\vision\imageanalysis\models\_models.py:732: error: Incompatible types in assignment (expression has type "List[CropRegion]", base class "_MyMutableMapping" defined the type as "Callable[[_MyMutableMapping], ValuesView[Any]]") [assignment]
# azure\ai\vision\imageanalysis\models\_models.py:765: error: Incompatible types in assignment (expression has type "List[DetectedTag]", base class "_MyMutableMapping" defined the type as "Callable[[_MyMutableMapping], ValuesView[Any]]") [assignment]
# See GitHub issue: https://github.com/Azure/autorest.python/issues/2317
[mypy-azure.ai.vision.imageanalysis.models._models]
disable_error_code = assignment
# Note that we also have the line `# mypy: disable-error-code="attr-defined"` at the top of every sample code file
# under the "samples" folder. This is to suppress the MyPy error "ImageAnalysisClient has no attribute analyze [attr-defined]".
# See GitHub issue: https://github.com/Azure/autorest.python/issues/2321

View file

@ -0,0 +1,2 @@
[tool.generate]
autorest-post-process = true

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to generate a human-readable sentence that describes the content
@ -28,6 +27,7 @@ USAGE:
"""
import asyncio
async def sample_caption_image_file_async():
import os
from azure.ai.vision.imageanalysis.aio import ImageAnalysisClient
@ -44,19 +44,19 @@ async def sample_caption_image_file_async():
exit()
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Create an asynchronous Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# Get a caption for the image, asynchronously.
result = await client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.CAPTION ]
image_data=image_data,
visual_features=[VisualFeatures.CAPTION]
)
await client.close()
@ -74,5 +74,6 @@ async def sample_caption_image_file_async():
async def main():
await sample_caption_image_file_async()
if __name__ == '__main__':
if __name__ == "__main__":
asyncio.run(main())

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to extract printed or hand-written text from a
@ -31,6 +30,7 @@ USAGE:
"""
import asyncio
async def sample_ocr_image_file_async():
import os
from azure.ai.vision.imageanalysis.aio import ImageAnalysisClient
@ -48,14 +48,14 @@ async def sample_ocr_image_file_async():
# Create an asynchronous Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# Extract text (OCR) from an image URL, asynchronously.
result = await client.analyze(
image_url = "https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features = [ VisualFeatures.READ ]
image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features=[VisualFeatures.READ]
)
await client.close()
@ -76,5 +76,6 @@ async def sample_ocr_image_file_async():
async def main():
await sample_ocr_image_file_async()
if __name__ == '__main__':
if __name__ == "__main__":
asyncio.run(main())

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to analyze all supported visual features from the image file sample.jpg,
@ -26,6 +25,8 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_analyze_all_image_file():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
@ -36,21 +37,21 @@ def sample_analyze_all_image_file():
import sys
import logging
# Acquire the logger for this client library. Use 'azure' to affect both
# Acquire the logger for this client library. Use 'azure' to affect both
# `azure.core` and `azure.ai.vision.imageanalysis` libraries.
logger = logging.getLogger('azure')
logger = logging.getLogger("azure")
# Set the desired logging level. logging.INFO or logging.DEBUG are good options.
logger.setLevel(logging.INFO)
# Direct logging output to stdout (the default):
handler = logging.StreamHandler(stream = sys.stdout)
handler = logging.StreamHandler(stream=sys.stdout)
# Or direct logging output to a file:
# handler = logging.FileHandler(filename = 'sample.log')
logger.addHandler(handler)
# Optional: change the default logging format. Here we add a timestamp.
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
handler.setFormatter(formatter)
# [END logging]
@ -64,34 +65,34 @@ def sample_analyze_all_image_file():
exit()
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# [START create_client_with_logging]
# Create an Image Analysis client with non-redacted logging
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key),
logging_enable = True
endpoint=endpoint,
credential=AzureKeyCredential(key),
logging_enable=True
)
# [END create_client_with_logging]
# Analyze all visual features from an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [
image_data=image_data,
visual_features=[
VisualFeatures.TAGS,
VisualFeatures.OBJECTS,
VisualFeatures.CAPTION,
VisualFeatures.DENSE_CAPTIONS,
VisualFeatures.READ,
VisualFeatures.SMART_CROPS,
VisualFeatures.PEOPLE
], # Mandatory. Select one or more visual features to analyze.
smart_crops_aspect_ratios = [0.9, 1.33], # Optional. Relevant only if SMART_CROPS was specified above.
gender_neutral_caption = True, # Optional. Relevant only if CAPTION or DENSE_CAPTIONS were specified above.
language = "en", # Optional. Relevant only if TAGS is specified above. See https://aka.ms/cv-languages for supported languages.
model_version = "latest" # Optional. Analysis model version to use. Defaults to "latest".
VisualFeatures.PEOPLE,
], # Mandatory. Select one or more visual features to analyze.
smart_crops_aspect_ratios=[0.9, 1.33], # Optional. Relevant only if SMART_CROPS was specified above.
gender_neutral_caption=True, # Optional. Relevant only if CAPTION or DENSE_CAPTIONS were specified above.
language="en", # Optional. Relevant only if TAGS is specified above. See https://aka.ms/cv-languages for supported languages.
model_version="latest", # Optional. Analysis model version to use. Defaults to "latest".
)
# Print all analysis results to the console
@ -103,7 +104,7 @@ def sample_analyze_all_image_file():
if result.dense_captions is not None:
print(" Dense Captions:")
for caption in result.dense_captions.values:
for caption in result.dense_captions.list:
print(f" '{caption.text}', {caption.bounding_box}, Confidence: {caption.confidence:.4f}")
if result.read is not None:
@ -115,22 +116,22 @@ def sample_analyze_all_image_file():
if result.tags is not None:
print(" Tags:")
for tag in result.tags.values:
for tag in result.tags.list:
print(f" '{tag.name}', Confidence {tag.confidence:.4f}")
if result.objects is not None:
print(" Objects:")
for object in result.objects.values:
for object in result.objects.list:
print(f" '{object.tags[0].name}', {object.bounding_box}, Confidence: {object.tags[0].confidence:.4f}")
if result.people is not None:
print(" People:")
for person in result.people.values:
for person in result.people.list:
print(f" {person.bounding_box}, Confidence {person.confidence:.4f}")
if result.smart_crops is not None:
print(" Smart Cropping:")
for smart_crop in result.smart_crops.values:
for smart_crop in result.smart_crops.list:
print(f" Aspect ratio {smart_crop.aspect_ratio}: Smart crop {smart_crop.bounding_box}")
print(f" Image height: {result.metadata.height}")
@ -138,5 +139,5 @@ def sample_analyze_all_image_file():
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
if __name__ == "__main__":
sample_analyze_all_image_file()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to generate a human-readable sentence that describes the content
@ -26,6 +25,8 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_caption_image_file():
# [START create_client]
import os
@ -33,7 +34,7 @@ def sample_caption_image_file():
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -45,21 +46,21 @@ def sample_caption_image_file():
# Create an Image Analysis client for synchronous operations
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# [END create_client]
# [START caption]
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Get a caption for the image. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.CAPTION ],
gender_neutral_caption = True # Optional (default is False)
image_data=image_data,
visual_features=[VisualFeatures.CAPTION],
gender_neutral_caption=True, # Optional (default is False)
)
# Print caption results to the console
@ -73,5 +74,5 @@ def sample_caption_image_file():
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
if __name__ == "__main__":
sample_caption_image_file()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to generate a human-readable sentence that describes the content
@ -26,13 +25,15 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_caption_image_url():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -44,16 +45,16 @@ def sample_caption_image_url():
# Create an Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# [START caption]
# Get a caption for the image. This will be a synchronous (blocking) call.
result = client.analyze(
image_url = "https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features = [ VisualFeatures.CAPTION ],
gender_neutral_caption = True # Optional (default is False)
image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features=[VisualFeatures.CAPTION],
gender_neutral_caption=True, # Optional (default is False)
)
# Print caption results to the console
@ -67,5 +68,5 @@ def sample_caption_image_url():
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
if __name__ == "__main__":
sample_caption_image_url()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to generate up to 10 human-readable sentences (captions) that describe
@ -30,13 +29,15 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_dense_captions_image_file():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -48,20 +49,20 @@ def sample_dense_captions_image_file():
# Create an Image Analysis client.
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# Load image to analyze into a 'bytes' object.
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Extract multiple captions, each for a different area of the image.
# This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.DENSE_CAPTIONS ],
gender_neutral_caption = True # Optional (default is False)
image_data=image_data,
visual_features=[VisualFeatures.DENSE_CAPTIONS],
gender_neutral_caption=True, # Optional (default is False)
)
# Print dense caption results to the console. The first caption always
@ -69,12 +70,12 @@ def sample_dense_captions_image_file():
print("Image analysis results:")
print(" Dense Captions:")
if result.dense_captions is not None:
for caption in result.dense_captions.values:
for caption in result.dense_captions.list:
print(f" '{caption.text}', {caption.bounding_box}, Confidence: {caption.confidence:.4f}")
print(f" Image height: {result.metadata.height}")
print(f" Image width: {result.metadata.width}")
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
sample_dense_captions_image_file()
if __name__ == "__main__":
sample_dense_captions_image_file()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to detect physical objects in an image file sample.jpg, using a synchronous client.
@ -23,13 +22,15 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_objects_image_file():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -41,30 +42,30 @@ def sample_objects_image_file():
# Create an Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Detect objects in an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.OBJECTS ]
image_data=image_data,
visual_features=[VisualFeatures.OBJECTS]
)
# Print Objects analysis results to the console
print("Image analysis results:")
print(" Objects:")
if result.objects is not None:
for object in result.objects.values:
for object in result.objects.list:
print(f" '{object.tags[0].name}', {object.bounding_box}, Confidence: {object.tags[0].confidence:.4f}")
print(f" Image height: {result.metadata.height}")
print(f" Image width: {result.metadata.width}")
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
sample_objects_image_file()
if __name__ == "__main__":
sample_objects_image_file()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to extract printed or hand-written text from the image file sample.jpg
@ -29,13 +28,15 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_ocr_image_file():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -47,19 +48,19 @@ def sample_ocr_image_file():
# Create an Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# [START read]
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Extract text (OCR) from an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.READ ]
image_data=image_data,
visual_features=[VisualFeatures.READ]
)
# Print text (OCR) analysis results to the console
@ -76,5 +77,5 @@ def sample_ocr_image_file():
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
sample_ocr_image_file()
if __name__ == "__main__":
sample_ocr_image_file()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to extract printed or hand-written text from a
@ -29,13 +28,15 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_ocr_image_url():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -47,15 +48,15 @@ def sample_ocr_image_url():
# Create an Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# [START read]
# Extract text (OCR) from an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_url = "https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features = [ VisualFeatures.READ ]
image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features=[VisualFeatures.READ]
)
# Print text (OCR) analysis results to the console
@ -72,5 +73,5 @@ def sample_ocr_image_url():
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
sample_ocr_image_url()
if __name__ == "__main__":
sample_ocr_image_url()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to detect people in the image file sample.jpg using a synchronous client.
@ -22,13 +21,15 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_people_image_file():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -40,31 +41,30 @@ def sample_people_image_file():
# Create an Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Find people in an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.PEOPLE ]
image_data=image_data,
visual_features=[VisualFeatures.PEOPLE]
)
# Print People analysis results to the console
print("Image analysis results:")
print(" People:")
if result.people is not None:
for person in result.people.values:
for person in result.people.list:
print(f" {person.bounding_box}, Confidence {person.confidence:.4f}")
print(f" Image height: {result.metadata.height}")
print(f" Image width: {result.metadata.width}")
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
if __name__ == "__main__":
sample_people_image_file()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to find representative sub-regions of the image file sample.jpg,
@ -31,13 +30,15 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_smart_crops_image_file():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -49,31 +50,31 @@ def sample_smart_crops_image_file():
# Create an Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Do Smart Cropping analysis on an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.SMART_CROPS ],
smart_crops_aspect_ratios = [0.9, 1.33] # Optional. Specify one or more desired aspect ratios
image_data=image_data,
visual_features=[VisualFeatures.SMART_CROPS],
smart_crops_aspect_ratios=[0.9, 1.33],  # Optional. Specify one or more desired aspect ratios
)
# Print smart crop analysis results to the console
print("Image analysis results:")
print(" Smart Cropping:")
if result.smart_crops is not None:
for smart_crop in result.smart_crops.values:
for smart_crop in result.smart_crops.list:
print(f" Aspect ratio {smart_crop.aspect_ratio}: Smart crop {smart_crop.bounding_box}")
print(f" Image height: {result.metadata.height}")
print(f" Image width: {result.metadata.width}")
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
sample_smart_crops_image_file()
if __name__ == "__main__":
sample_smart_crops_image_file()

View file

@ -2,7 +2,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# mypy: disable-error-code="attr-defined"
"""
DESCRIPTION:
This sample demonstrates how to extract content tags in an image file sample.jpg, using a synchronous client.
@ -24,13 +23,15 @@ USAGE:
where `your-resource-name` is your unique Azure Computer Vision resource name.
2) VISION_KEY - Your Computer Vision key (a 32-character Hexadecimal number)
"""
def sample_tags_image_file():
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential
# Set the values of your computer vision endpoint and computer vision key
# Set the values of your computer vision endpoint and computer vision key
# as environment variables:
try:
endpoint = os.environ["VISION_ENDPOINT"]
@ -42,31 +43,31 @@ def sample_tags_image_file():
# Create an Image Analysis client
client = ImageAnalysisClient(
endpoint = endpoint,
credential = AzureKeyCredential(key)
endpoint=endpoint,
credential=AzureKeyCredential(key)
)
# Load image to analyze into a 'bytes' object
with open("sample.jpg", 'rb') as f:
with open("sample.jpg", "rb") as f:
image_data = f.read()
# Do 'Tags' analysis on an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
image_data = image_data,
visual_features = [ VisualFeatures.TAGS ],
language = "en" # Optional. See https://aka.ms/cv-languages for supported languages.
image_data=image_data,
visual_features=[VisualFeatures.TAGS],
language="en", # Optional. See https://aka.ms/cv-languages for supported languages.
)
# Print Tags analysis results to the console
print("Image analysis results:")
print(" Tags:")
if result.tags is not None:
for tag in result.tags.values:
for tag in result.tags.list:
print(f" '{tag.name}', Confidence {tag.confidence:.4f}")
print(f" Image height: {result.metadata.height}")
print(f" Image width: {result.metadata.width}")
print(f" Model version: {result.model_version}")
if __name__ == '__main__':
if __name__ == "__main__":
sample_tags_image_file()

View file

@ -42,7 +42,6 @@ setup(
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
@ -66,8 +65,8 @@ setup(
},
install_requires=[
"isodate<1.0.0,>=0.6.1",
"azure-core<2.0.0,>=1.28.0",
"azure-core<2.0.0,>=1.29.5",
"typing-extensions>=4.3.0; python_version<'3.8.0'",
],
python_requires=">=3.7",
python_requires=">=3.8",
)

View file

@ -21,7 +21,7 @@ LOGGING_ENABLED = True
if LOGGING_ENABLED:
# Create a logger for the 'azure' SDK
# See https://docs.python.org/3/library/logging.html
logger = logging.getLogger('azure')
logger = logging.getLogger("azure")
logger.setLevel(logging.INFO) # INFO or DEBUG
# Configure a console output
@ -31,8 +31,8 @@ if LOGGING_ENABLED:
ServicePreparer = functools.partial(
EnvironmentVariableLoader,
"vision",
vision_endpoint = "https://fake-resource-name.cognitiveservices.azure.com",
vision_key = "00000000000000000000000000000000",
vision_endpoint="https://fake-resource-name.cognitiveservices.azure.com",
vision_key="00000000000000000000000000000000",
)
@ -55,214 +55,212 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
key = kwargs.pop("vision_key")
self._create_client(endpoint, key, sync, get_connection_url)
def _create_client_for_authentication_failure(self, sync: bool, **kwargs):
endpoint = kwargs.pop("vision_endpoint")
key = "00000000000000000000000000000000"
self._create_client(endpoint, key, sync, False)
def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url: bool):
credential = AzureKeyCredential(key)
if sync:
self.client = sdk.ImageAnalysisClient(
endpoint = endpoint,
credential = credential,
logging_enable = LOGGING_ENABLED,
raw_request_hook = self._raw_request_check if get_connection_url else None)
endpoint=endpoint,
credential=credential,
logging_enable=LOGGING_ENABLED,
raw_request_hook=self._raw_request_check if get_connection_url else None,
)
assert self.client is not None
else:
self.async_client = async_sdk.ImageAnalysisClient(
endpoint = endpoint,
credential = credential,
logging_enable = LOGGING_ENABLED,
raw_request_hook = self._raw_request_check if get_connection_url else None)
endpoint=endpoint,
credential=credential,
logging_enable=LOGGING_ENABLED,
raw_request_hook=self._raw_request_check if get_connection_url else None,
)
assert self.async_client is not None
def _raw_request_check(self, request: PipelineRequest):
self.connection_url = request.http_request.url
print(f"Connection URL: {request.http_request.url}")
def _do_analysis(
self,
image_source: str,
visual_features: List[sdk.models.VisualFeatures],
language: Optional[str] = None,
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None,
model_version: Optional[str] = None,
query_params: Optional[dict] = None,
**kwargs):
self,
image_source: str,
visual_features: List[sdk.models.VisualFeatures],
language: Optional[str] = None,
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None,
model_version: Optional[str] = None,
query_params: Optional[dict] = None,
**kwargs,
):
image_content: Union[str, bytes]
if "http" in image_source:
result = self.client.analyze(
image_url = image_source,
visual_features = visual_features,
language = language,
gender_neutral_caption = gender_neutral_caption,
smart_crops_aspect_ratios = smart_crops_aspect_ratios,
model_version = model_version,
params = query_params)
image_url=image_source,
visual_features=visual_features,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
params=query_params,
)
else:
# Load image to analyze into a 'bytes' object
with open(image_source, 'rb') as f:
with open(image_source, "rb") as f:
image_data = bytes(f.read())
result = self.client.analyze(
image_data = image_data,
visual_features = visual_features,
language = language,
gender_neutral_caption = gender_neutral_caption,
smart_crops_aspect_ratios = smart_crops_aspect_ratios,
model_version = model_version,
params = query_params)
image_data=image_data,
visual_features=visual_features,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
params=query_params,
)
# Optional: console printout of all results
if ImageAnalysisTestBase.PRINT_ANALYSIS_RESULTS:
ImageAnalysisTestBase._print_analysis_results(result)
# Validate all results
ImageAnalysisTestBase._validate_result(result, visual_features, gender_neutral_caption, smart_crops_aspect_ratios)
ImageAnalysisTestBase._validate_result(
result, visual_features, gender_neutral_caption, smart_crops_aspect_ratios
)
# Validate that additional query parameters exist in the connection URL, if specified
if query_params is not None:
ImageAnalysisTestBase._validate_query_parameters(query_params, self.connection_url)
async def _do_async_analysis(
self,
image_source: str,
visual_features: List[sdk.models.VisualFeatures],
language: Optional[str] = None,
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None,
model_version: Optional[str] = None,
query_params: Optional[dict] = None,
**kwargs):
self,
image_source: str,
visual_features: List[sdk.models.VisualFeatures],
language: Optional[str] = None,
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None,
model_version: Optional[str] = None,
query_params: Optional[dict] = None,
**kwargs,
):
image_content: Union[str, bytes]
if "http" in image_source:
result = await self.async_client.analyze(
image_url = image_source,
visual_features = visual_features,
language = language,
gender_neutral_caption = gender_neutral_caption,
smart_crops_aspect_ratios = smart_crops_aspect_ratios,
model_version = model_version,
params = query_params)
image_url=image_source,
visual_features=visual_features,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
params=query_params,
)
else:
# Load image to analyze into a 'bytes' object
with open(image_source, 'rb') as f:
with open(image_source, "rb") as f:
image_data = bytes(f.read())
result = await self.async_client.analyze(
image_data = image_data,
visual_features = visual_features,
language = language,
gender_neutral_caption = gender_neutral_caption,
smart_crops_aspect_ratios = smart_crops_aspect_ratios,
model_version = model_version,
params = query_params)
image_data=image_data,
visual_features=visual_features,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
params=query_params,
)
# Optional: console printout of all results
if ImageAnalysisTestBase.PRINT_ANALYSIS_RESULTS:
ImageAnalysisTestBase._print_analysis_results(result)
# Validate all results
ImageAnalysisTestBase._validate_result(result, visual_features, gender_neutral_caption, smart_crops_aspect_ratios)
ImageAnalysisTestBase._validate_result(
result, visual_features, gender_neutral_caption, smart_crops_aspect_ratios
)
# Validate that additional query parameters exist in the connection URL, if specified
if query_params is not None:
ImageAnalysisTestBase._validate_query_parameters(query_params, self.connection_url)
def _do_analysis_with_error(
self,
image_source: str,
visual_features: List[sdk.models.VisualFeatures],
expected_status_code: int,
expected_message_contains: str,
**kwargs):
self,
image_source: str,
visual_features: List[sdk.models.VisualFeatures],
expected_status_code: int,
expected_message_contains: str,
**kwargs,
):
image_content: Union[str, bytes]
try:
if "http" in image_source:
result = self.client.analyze(
image_url = image_source,
visual_features = visual_features)
result = self.client.analyze(image_url=image_source, visual_features=visual_features)
else:
# Load image to analyze into a 'bytes' object
with open(image_source, 'rb') as f:
with open(image_source, "rb") as f:
image_data = bytes(f.read())
result = self.client.analyze(
image_data = image_data,
visual_features = visual_features)
result = self.client.analyze(image_data=image_data, visual_features=visual_features)
except AzureError as e:
print(e)
assert hasattr(e, 'status_code')
assert hasattr(e, "status_code")
assert e.status_code == expected_status_code
assert expected_message_contains in e.message
return
assert False # We should not get here
assert False # We should not get here
async def _do_async_analysis_with_error(
self,
image_source: str,
visual_features: List[sdk.models.VisualFeatures],
expected_status_code: int,
expected_message_contains: str,
**kwargs):
self,
image_source: str,
visual_features: List[sdk.models.VisualFeatures],
expected_status_code: int,
expected_message_contains: str,
**kwargs,
):
image_content: Union[str, bytes]
try:
if "http" in image_source:
result = await self.async_client.analyze(
image_url = image_source,
visual_features = visual_features)
result = await self.async_client.analyze(image_url=image_source, visual_features=visual_features)
else:
# Load image to analyze into a 'bytes' object
with open(image_source, 'rb') as f:
with open(image_source, "rb") as f:
image_data = bytes(f.read())
result = await self.async_client.analyze(
image_data = image_data,
visual_features = visual_features)
result = await self.async_client.analyze(image_data=image_data, visual_features=visual_features)
except AzureError as e:
print(e)
assert hasattr(e, 'status_code')
assert hasattr(e, "status_code")
assert e.status_code == expected_status_code
assert expected_message_contains in e.message
return
assert False # We should not get here
assert False # We should not get here
@staticmethod
def _validate_query_parameters(query_params: dict, connection_url: str):
assert len(query_params) > 0
query_string = ''
query_string = ""
for key, value in query_params.items():
query_string += '&' + key + '=' + value
query_string = '?' + query_string[1:]
query_string += "&" + key + "=" + value
query_string = "?" + query_string[1:]
assert query_string in connection_url
@staticmethod
def _validate_result(
result: sdk.models.ImageAnalysisResult,
expected_features: List[sdk.models.VisualFeatures],
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None):
result: sdk.models.ImageAnalysisResult,
expected_features: List[sdk.models.VisualFeatures],
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None,
):
ImageAnalysisTestBase._validate_metadata(result)
ImageAnalysisTestBase._validate_model_version(result)
@ -301,20 +299,17 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
else:
assert result.read is None
@staticmethod
def _validate_metadata(result: sdk.models.ImageAnalysisResult):
assert result.metadata is not None
assert result.metadata.height == 576
assert result.metadata.width == 864
@staticmethod
def _validate_model_version(result: sdk.models.ImageAnalysisResult):
assert result.model_version is not None
assert result.model_version == "2023-10-01"
@staticmethod
def _validate_caption(result: sdk.models.ImageAnalysisResult, gender_neutral_caption: Optional[bool] = None):
assert result.caption is not None
@ -327,14 +322,13 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
assert "laptop" in result.caption.text.lower()
assert 0.0 < result.caption.confidence < 1.0
@staticmethod
def _validate_dense_captions(result: sdk.models.ImageAnalysisResult):
assert result.dense_captions is not None
assert len(result.dense_captions.values) > 1
assert len(result.dense_captions.list) > 1
# First dense caption should apply to the whole image, and be identical to the caption found in CaptionResult
first_dense_caption = result.dense_captions.values[0]
first_dense_caption = result.dense_captions.list[0]
assert first_dense_caption is not None
assert first_dense_caption.text is not None
if result.caption is not None:
@ -349,7 +343,7 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
assert first_dense_caption.bounding_box.width == result.metadata.width
# Sanity checks on all dense captions
for dense_caption in result.dense_captions.values:
for dense_caption in result.dense_captions.list:
assert dense_caption is not None
assert dense_caption.text is not None
assert len(dense_caption.text) > 0
@ -362,24 +356,25 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
assert dense_caption.bounding_box.width <= result.metadata.width - dense_caption.bounding_box.x
# Make sure each dense caption is unique
for i, dense_caption in enumerate(result.dense_captions.values):
for other_dense_caption in result.dense_captions.values[i + 1:]:
for i, dense_caption in enumerate(result.dense_captions.list):
for other_dense_caption in result.dense_captions.list[i + 1 :]:
# Do not include the check below. It's okay to have two identical dense captions since they have different bounding boxes.
#assert other_dense_caption.text != dense_caption.text
assert not (other_dense_caption.bounding_box.x == dense_caption.bounding_box.x
# assert other_dense_caption.text != dense_caption.text
assert not (
other_dense_caption.bounding_box.x == dense_caption.bounding_box.x
and other_dense_caption.bounding_box.y == dense_caption.bounding_box.y
and other_dense_caption.bounding_box.height == dense_caption.bounding_box.height
and other_dense_caption.bounding_box.width == dense_caption.bounding_box.width)
and other_dense_caption.bounding_box.width == dense_caption.bounding_box.width
)
@staticmethod
def _validate_objects(result: sdk.models.ImageAnalysisResult):
objects = result.objects
assert objects is not None
assert len(objects.values) > 1
assert len(objects.list) > 1
found1 = False
for object in objects.values:
for object in objects.list:
assert object is not None
assert object.tags is not None
assert len(object.tags) == 1
@ -394,25 +389,26 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
assert found1
# Make sure each object box is unique
for i in range(len(objects.values)):
for j in range(i + 1, len(objects.values)):
box_i = objects.values[i].bounding_box
box_j= objects.values[j].bounding_box
assert not (box_i.x ==box_j.x
for i in range(len(objects.list)):
for j in range(i + 1, len(objects.list)):
box_i = objects.list[i].bounding_box
box_j = objects.list[j].bounding_box
assert not (
box_i.x == box_j.x
and box_i.y == box_j.y
and box_i.height == box_j.height
and box_i.width == box_j.width)
and box_i.width == box_j.width
)
@staticmethod
def _validate_tags(result: sdk.models.ImageAnalysisResult):
tags = result.tags
assert tags is not None
assert tags.values is not None
assert len(tags.values) > 1
assert tags.list is not None
assert len(tags.list) > 1
found1, found2 = False, False
for tag in tags.values:
for tag in tags.list:
assert tag.name is not None
assert len(tag.name) > 0
assert 0.0 < tag.confidence < 1.0
@ -425,17 +421,16 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
assert found2
# Make sure each tag is unique
for i in range(len(tags.values)):
for j in range(i + 1, len(tags.values)):
assert tags.values[j].name != tags.values[i].name
for i in range(len(tags.list)):
for j in range(i + 1, len(tags.list)):
assert tags.list[j].name != tags.list[i].name
@staticmethod
def _validate_people(result: sdk.models.ImageAnalysisResult):
assert result.people is not None
assert len(result.people.values) > 0
assert len(result.people.list) > 0
for person in result.people.values:
for person in result.people.list:
assert 0.0 < person.confidence < 1.0
assert person.bounding_box.x >= 0
assert person.bounding_box.y >= 0
@ -443,32 +438,31 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
assert person.bounding_box.width <= result.metadata.width - person.bounding_box.x
# Make sure each person is unique
for i, person in enumerate(result.people.values):
for other_person in result.people.values[i + 1:]:
for i, person in enumerate(result.people.list):
for other_person in result.people.list[i + 1 :]:
assert not (
other_person.bounding_box.x == person.bounding_box.x
and other_person.bounding_box.y == person.bounding_box.y
and other_person.bounding_box.height == person.bounding_box.height
and other_person.bounding_box.width == person.bounding_box.width)
and other_person.bounding_box.width == person.bounding_box.width
)
@staticmethod
def _validate_smart_crops(
result: sdk.models.ImageAnalysisResult,
smart_crops_aspect_ratios: Optional[List[float]] = None):
result: sdk.models.ImageAnalysisResult, smart_crops_aspect_ratios: Optional[List[float]] = None
):
assert result.smart_crops is not None
crop_regions = result.smart_crops.values
crop_regions = result.smart_crops.list
if smart_crops_aspect_ratios is None:
assert(len(crop_regions) == 1)
assert(crop_regions[0].aspect_ratio >= 0.5 and crop_regions[0].aspect_ratio <= 2.0)
assert len(crop_regions) == 1
assert crop_regions[0].aspect_ratio >= 0.5 and crop_regions[0].aspect_ratio <= 2.0
else:
assert len(crop_regions) == len(smart_crops_aspect_ratios)
for i, region in enumerate(crop_regions):
assert region.aspect_ratio == smart_crops_aspect_ratios[i]
assert(region.aspect_ratio >= 0.75 and region.aspect_ratio <= 1.8)
assert region.aspect_ratio >= 0.75 and region.aspect_ratio <= 1.8
for region in crop_regions:
assert region.bounding_box.x >= 0
@ -478,13 +472,13 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
# Make sure each bounding box is unique
for i, region in enumerate(crop_regions):
for other_region in crop_regions[i+1:]:
for other_region in crop_regions[i + 1 :]:
assert not (
other_region.bounding_box.x == region.bounding_box.x
and other_region.bounding_box.y == region.bounding_box.y
and other_region.bounding_box.height == region.bounding_box.height
and other_region.bounding_box.width == region.bounding_box.width)
and other_region.bounding_box.width == region.bounding_box.width
)
@staticmethod
def _validate_read(result: sdk.models.ImageAnalysisResult):
@ -541,7 +535,6 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
assert polygon[i].x > 0.0
assert polygon[i].y > 0.0
@staticmethod
def _print_analysis_results(result: sdk.models.ImageAnalysisResult):
@ -555,33 +548,38 @@ class ImageAnalysisTestBase(AzureRecordedTestCase):
if result.dense_captions is not None:
print(" Dense Captions:")
for caption in result.dense_captions.values:
for caption in result.dense_captions.list:
print(" '{}', {}, Confidence: {:.4f}".format(caption.text, caption.bounding_box, caption.confidence))
if result.objects is not None:
print(" Objects:")
for object in result.objects.values:
print(" '{}', {}, Confidence: {:.4f}".format(object.tags[0].name, object.bounding_box, object.tags[0].confidence))
for object in result.objects.list:
print(
" '{}', {}, Confidence: {:.4f}".format(
object.tags[0].name, object.bounding_box, object.tags[0].confidence
)
)
if result.tags is not None:
print(" Tags:")
for tag in result.tags.values:
for tag in result.tags.list:
print(" '{}', Confidence {:.4f}".format(tag.name, tag.confidence))
if result.people is not None:
print(" People:")
for person in result.people.values:
for person in result.people.list:
print(" {}, Confidence {:.4f}".format(person.bounding_box, person.confidence))
if result.smart_crops is not None:
print(" Smart Cropping:")
for smart_crop in result.smart_crops.values:
print(" Aspect ratio {}: Smart crop {}" .format(smart_crop.aspect_ratio, smart_crop.bounding_box))
for smart_crop in result.smart_crops.list:
print(" Aspect ratio {}: Smart crop {}".format(smart_crop.aspect_ratio, smart_crop.bounding_box))
if result.read is not None:
print(" Read:")
for line in result.read.blocks[0].lines:
print(f" Line: '{line.text}', Bounding box {line.bounding_polygon}")
for word in line.words:
print(f" Word: '{word.text}', Bounding polygon {word.bounding_polygon}, Confidence {word.confidence:.4f}")
print(
f" Word: '{word.text}', Bounding polygon {word.bounding_polygon}, Confidence {word.confidence:.4f}"
)
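The consumer-visible change running through this file is the rename of the result collections from `values` to `list`. A minimal sketch of iterating the renamed properties, assuming an `ImageAnalysisClient` instance named `client` and a local `sample.jpg`:

```python
from azure.ai.vision.imageanalysis.models import VisualFeatures

with open("sample.jpg", "rb") as f:
    image_data = f.read()

result = client.analyze(
    image_data=image_data,
    visual_features=[VisualFeatures.TAGS, VisualFeatures.OBJECTS],
)

# Collections are now exposed through a 'list' property (previously 'values')
if result.tags is not None:
    for tag in result.tags.list:
        print(f"Tag '{tag.name}', confidence {tag.confidence:.4f}")

if result.objects is not None:
    for detected_object in result.objects.list:
        print(f"Object '{detected_object.tags[0].name}' at {detected_object.bounding_box}")
```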

View file

@ -11,31 +11,32 @@ from devtools_testutils.aio import recorded_by_proxy_async
# The test class name needs to start with "Test" to get collected by pytest
class TestImageAnalysisAsyncClient(ImageAnalysisTestBase):
#**********************************************************************************
# **********************************************************************************
#
# HAPPY PATH TESTS
#
#**********************************************************************************
# **********************************************************************************
# Test all visual features from a local image, using default settings
@ServicePreparer()
@recorded_by_proxy_async
async def test_analyze_async_all_features_from_file(self, **kwargs):
self._create_client_for_standard_analysis(sync = False, **kwargs)
self._create_client_for_standard_analysis(sync=False, **kwargs)
await self._do_async_analysis(
image_source = self.IMAGE_FILE,
visual_features = [
image_source=self.IMAGE_FILE,
visual_features=[
sdk.models.VisualFeatures.TAGS,
sdk.models.VisualFeatures.OBJECTS,
sdk.models.VisualFeatures.CAPTION,
sdk.models.VisualFeatures.DENSE_CAPTIONS,
sdk.models.VisualFeatures.READ,
sdk.models.VisualFeatures.SMART_CROPS,
sdk.models.VisualFeatures.PEOPLE
sdk.models.VisualFeatures.PEOPLE,
],
**kwargs)
**kwargs
)
await self.async_client.close()
@ -44,50 +45,50 @@ class TestImageAnalysisAsyncClient(ImageAnalysisTestBase):
@recorded_by_proxy_async
async def test_analyze_async_single_feature_from_url(self, **kwargs):
self._create_client_for_standard_analysis(sync = False, **kwargs)
self._create_client_for_standard_analysis(sync=False, **kwargs)
await self._do_async_analysis(
image_source = self.IMAGE_URL,
visual_features = [ sdk.models.VisualFeatures.DENSE_CAPTIONS ],
gender_neutral_caption = True,
**kwargs)
image_source=self.IMAGE_URL,
visual_features=[sdk.models.VisualFeatures.DENSE_CAPTIONS],
gender_neutral_caption=True,
**kwargs
)
await self._do_async_analysis(
image_source = self.IMAGE_URL,
visual_features = [ sdk.models.VisualFeatures.SMART_CROPS ],
smart_crops_aspect_ratios = [0.9, 1.33],
**kwargs)
image_source=self.IMAGE_URL,
visual_features=[sdk.models.VisualFeatures.SMART_CROPS],
smart_crops_aspect_ratios=[0.9, 1.33],
**kwargs
)
await self._do_async_analysis(
image_source = self.IMAGE_URL,
visual_features = [ sdk.models.VisualFeatures.TAGS ],
language = "en",
**kwargs)
image_source=self.IMAGE_URL, visual_features=[sdk.models.VisualFeatures.TAGS], language="en", **kwargs
)
await self._do_async_analysis(
image_source = self.IMAGE_URL,
visual_features = [ sdk.models.VisualFeatures.PEOPLE ],
**kwargs)
image_source=self.IMAGE_URL, visual_features=[sdk.models.VisualFeatures.PEOPLE], **kwargs
)
await self.async_client.close()
#**********************************************************************************
# **********************************************************************************
#
# ERROR TESTS
#
#**********************************************************************************
# **********************************************************************************
@ServicePreparer()
@recorded_by_proxy_async
async def test_analyze_async_authentication_failure(self, **kwargs):
self._create_client_for_authentication_failure(sync = False, **kwargs)
self._create_client_for_authentication_failure(sync=False, **kwargs)
await self._do_async_analysis_with_error(
image_source = self.IMAGE_URL,
visual_features = [ sdk.models.VisualFeatures.TAGS ],
expected_status_code = 401,
expected_message_contains = "Access denied",
**kwargs)
image_source=self.IMAGE_URL,
visual_features=[sdk.models.VisualFeatures.TAGS],
expected_status_code=401,
expected_message_contains="Access denied",
**kwargs
)
await self.async_client.close()
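The async tests above mirror the synchronous API with an awaitable `analyze` and an explicit `close`. A minimal end-to-end sketch, assuming the async client is exposed under the package's `aio` namespace (the standard Azure SDK layout); the image URL is a placeholder for any publicly accessible image:

```python
import asyncio
import os
from azure.ai.vision.imageanalysis.aio import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

async def main():
    client = ImageAnalysisClient(
        endpoint=os.environ["VISION_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["VISION_KEY"]),
    )
    try:
        result = await client.analyze(
            image_url="https://example.com/sample.jpg",  # placeholder; use any accessible image URL
            visual_features=[VisualFeatures.CAPTION],
            gender_neutral_caption=True,
        )
        if result.caption is not None:
            print(f"Caption: '{result.caption.text}' ({result.caption.confidence:.4f})")
    finally:
        # Mirror the tests above and release the connection explicitly
        await client.close()

asyncio.run(main())
```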

View file

@ -12,81 +12,77 @@ from devtools_testutils import recorded_by_proxy
# The test class name needs to start with "Test" to get collected by pytest
class TestImageAnalysisClient(ImageAnalysisTestBase):
#**********************************************************************************
# **********************************************************************************
#
# HAPPY PATH TESTS
#
#**********************************************************************************
# **********************************************************************************
# Test all visual features from an image URL, with settings specified
@ServicePreparer()
@recorded_by_proxy
def test_analyze_sync_all_features_from_url(self, **kwargs):
self._create_client_for_standard_analysis(sync = True, **kwargs)
self._create_client_for_standard_analysis(sync=True, **kwargs)
self._do_analysis(
image_source = self.IMAGE_URL,
visual_features = [
image_source=self.IMAGE_URL,
visual_features=[
sdk.models.VisualFeatures.TAGS,
sdk.models.VisualFeatures.OBJECTS,
sdk.models.VisualFeatures.CAPTION,
sdk.models.VisualFeatures.DENSE_CAPTIONS,
sdk.models.VisualFeatures.READ,
sdk.models.VisualFeatures.SMART_CROPS,
sdk.models.VisualFeatures.PEOPLE
sdk.models.VisualFeatures.PEOPLE,
],
language = "en",
gender_neutral_caption = True,
smart_crops_aspect_ratios = [0.9, 1.33],
model_version = "latest",
**kwargs)
language="en",
gender_neutral_caption=True,
smart_crops_aspect_ratios=[0.9, 1.33],
model_version="latest",
**kwargs
)
self.client.close()
# Test some visual features, one after the other, from a file, using default settings
@ServicePreparer()
@recorded_by_proxy
def test_analyze_sync_single_feature_from_file(self, **kwargs):
self._create_client_for_standard_analysis(sync = True, get_connection_url = True, **kwargs)
self._create_client_for_standard_analysis(sync=True, get_connection_url=True, **kwargs)
self._do_analysis(
image_source = self.IMAGE_FILE,
visual_features = [ sdk.models.VisualFeatures.CAPTION ],
query_params = {'key1': 'value1', 'key2': 'value2'},
**kwargs)
image_source=self.IMAGE_FILE,
visual_features=[sdk.models.VisualFeatures.CAPTION],
query_params={"key1": "value1", "key2": "value2"},
**kwargs
)
self._do_analysis(
image_source = self.IMAGE_FILE,
visual_features = [ sdk.models.VisualFeatures.READ ],
**kwargs)
self._do_analysis(image_source=self.IMAGE_FILE, visual_features=[sdk.models.VisualFeatures.READ], **kwargs)
self._do_analysis(
image_source = self.IMAGE_FILE,
visual_features = [ sdk.models.VisualFeatures.TAGS ],
**kwargs)
self._do_analysis(image_source=self.IMAGE_FILE, visual_features=[sdk.models.VisualFeatures.TAGS], **kwargs)
self.client.close()
#**********************************************************************************
# **********************************************************************************
#
# ERROR TESTS
#
#**********************************************************************************
# **********************************************************************************
@ServicePreparer()
@recorded_by_proxy
def test_analyze_sync_image_url_does_not_exist(self, **kwargs):
self._create_client_for_standard_analysis(sync = True, **kwargs)
self._create_client_for_standard_analysis(sync=True, **kwargs)
self._do_analysis_with_error(
image_source = "https://www.this.is.a.bad.url.com/for/sure.jpg",
visual_features = [ sdk.models.VisualFeatures.CAPTION ],
expected_status_code = 400,
expected_message_contains = "image url is not accessible",
**kwargs)
image_source="https://www.this.is.a.bad.url.com/for/sure.jpg",
visual_features=[sdk.models.VisualFeatures.CAPTION],
expected_status_code=400,
expected_message_contains="image url is not accessible",
**kwargs
)
self.client.close()
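For reference, the full-featured synchronous request exercised by `test_analyze_sync_all_features_from_url` looks roughly like the following in application code; the optional settings shown are the same values the test passes, and the image URL is a placeholder:

```python
import os
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

client = ImageAnalysisClient(
    endpoint=os.environ["VISION_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["VISION_KEY"]),
)

result = client.analyze(
    image_url="https://example.com/sample.jpg",  # placeholder; use any accessible image URL
    visual_features=[
        VisualFeatures.TAGS,
        VisualFeatures.OBJECTS,
        VisualFeatures.CAPTION,
        VisualFeatures.DENSE_CAPTIONS,
        VisualFeatures.READ,
        VisualFeatures.SMART_CROPS,
        VisualFeatures.PEOPLE,
    ],
    language="en",                          # optional
    gender_neutral_caption=True,            # optional
    smart_crops_aspect_ratios=[0.9, 1.33],  # optional
    model_version="latest",                 # optional
)

client.close()
```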

View file

@ -1,5 +1,6 @@
additionalDirectories: []
repo: Azure/azure-rest-api-specs
directory: specification/ai/ImageAnalysis
commit: 49a78b92e9ef42da134e833e36123ca9bb20994f
commit: f2d0e7863a591e538366b251cfd85cc725153384