### YamlMime:JavaType
uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient"
fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient"
name: "ContentSafetyAsyncClient"
nameWithType: "ContentSafetyAsyncClient"
summary: "Initializes a new instance of the asynchronous Content<wbr>Safety<wbr>Client type."
inheritances:
- "<xref href=\"java.lang.Object?displayProperty=fullName\" data-throw-if-not-resolved=\"False\" />"
inheritedClassMethods:
- classRef: "java.lang.<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html\">Object</a>"
methodsRef:
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#clone--\">clone</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#equals-java.lang.Object-\">equals</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#finalize--\">finalize</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#getClass--\">getClass</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#hashCode--\">hashCode</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#notify--\">notify</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#notifyAll--\">notifyAll</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#toString--\">toString</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#wait--\">wait</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#wait-long-\">wait</a>"
- "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#wait-long-int-\">wait</a>"
syntax: "public final class **ContentSafetyAsyncClient**"
methods:
- uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(com.azure.ai.contentsafety.models.AnalyzeImageOptions)"
fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(AnalyzeImageOptions options)"
name: "analyzeImage(AnalyzeImageOptions options)"
nameWithType: "ContentSafetyAsyncClient.analyzeImage(AnalyzeImageOptions options)"
summary: "Analyze Image A synchronous API for the analysis of potentially harmful image content."
parameters:
- description: "The image analysis request."
name: "options"
type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageOptions?alt=com.azure.ai.contentsafety.models.AnalyzeImageOptions&text=AnalyzeImageOptions\" data-throw-if-not-resolved=\"False\" />"
syntax: "public Mono<AnalyzeImageResult> analyzeImage(AnalyzeImageOptions options)"
desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
returns:
description: "the image analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
- uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(com.azure.core.util.BinaryData)"
fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(BinaryData content)"
name: "analyzeImage(BinaryData content)"
nameWithType: "ContentSafetyAsyncClient.analyzeImage(BinaryData content)"
summary: "Analyze Image A synchronous API for the analysis of potentially harmful image content."
parameters:
- description: "The image analysis request."
name: "content"
type: "<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />"
syntax: "public Mono<AnalyzeImageResult> analyzeImage(BinaryData content)"
desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
returns:
description: "the image analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
- uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(java.lang.String)"
fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(String blobUrl)"
name: "analyzeImage(String blobUrl)"
nameWithType: "ContentSafetyAsyncClient.analyzeImage(String blobUrl)"
summary: "Analyze Image A synchronous API for the analysis of potentially harmful image content."
parameters:
- description: "The image analysis request."
name: "blobUrl"
type: "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/String.html\">String</a>"
syntax: "public Mono<AnalyzeImageResult> analyzeImage(String blobUrl)"
desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
returns:
description: "the image analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
- uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImageWithResponse(com.azure.core.util.BinaryData,com.azure.core.http.rest.RequestOptions)"
fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
name: "analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
nameWithType: "ContentSafetyAsyncClient.analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
summary: "Analyze Image A synchronous API for the analysis of potentially harmful image content."
parameters:
- description: "The image analysis request."
name: "options"
type: "<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />"
- description: "The options to configure the HTTP request before HTTP client sends it."
name: "requestOptions"
type: "<xref href=\"com.azure.core.http.rest.RequestOptions?alt=com.azure.core.http.rest.RequestOptions&text=RequestOptions\" data-throw-if-not-resolved=\"False\" />"
syntax: "public Mono<Response<BinaryData>> analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence.\n\n**Request Body Schema**\n\n```java\n{\n image (Required): {\n content: byte[] (Optional)\n blobUrl: String (Optional)\n }\n categories (Optional): [\n String(Hate/SelfHarm/Sexual/Violence) (Optional)\n ]\n outputType: String(FourSeverityLevels) (Optional)\n }\n```\n\n**Response Body Schema**\n\n```java\n{\n categoriesAnalysis (Required): [\n (Required){\n category: String(Hate/SelfHarm/Sexual/Violence) (Required)\n severity: Integer (Optional)\n }\n ]\n }\n```"
returns:
description: "the image analysis response along with <xref uid=\"com.azure.core.http.rest.Response\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Response\"></xref> on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.core.http.rest.Response?alt=com.azure.core.http.rest.Response&text=Response\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />&gt;&gt;"
- uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeText(com.azure.ai.contentsafety.models.AnalyzeTextOptions)"
fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeText(AnalyzeTextOptions options)"
name: "analyzeText(AnalyzeTextOptions options)"
nameWithType: "ContentSafetyAsyncClient.analyzeText(AnalyzeTextOptions options)"
summary: "Analyze Text A synchronous API for the analysis of potentially harmful text content."
parameters:
- description: "The text analysis request."
name: "options"
type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextOptions?alt=com.azure.ai.contentsafety.models.AnalyzeTextOptions&text=AnalyzeTextOptions\" data-throw-if-not-resolved=\"False\" />"
syntax: "public Mono<AnalyzeTextResult> analyzeText(AnalyzeTextOptions options)"
desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
returns:
description: "the text analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextResult?alt=com.azure.ai.contentsafety.models.AnalyzeTextResult&text=AnalyzeTextResult\" data-throw-if-not-resolved=\"False\" />&gt;"
- uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeText(java.lang.String)"
fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeText(String text)"
name: "analyzeText(String text)"
nameWithType: "ContentSafetyAsyncClient.analyzeText(String text)"
summary: "Analyze Text A synchronous API for the analysis of potentially harmful text content."
parameters:
- description: "The text analysis request."
name: "text"
type: "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/String.html\">String</a>"
syntax: "public Mono<AnalyzeTextResult> analyzeText(String text)"
desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
returns:
description: "the text analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextResult?alt=com.azure.ai.contentsafety.models.AnalyzeTextResult&text=AnalyzeTextResult\" data-throw-if-not-resolved=\"False\" />&gt;"
- uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeTextWithResponse(com.azure.core.util.BinaryData,com.azure.core.http.rest.RequestOptions)"
fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
name: "analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
nameWithType: "ContentSafetyAsyncClient.analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
summary: "Analyze Text A synchronous API for the analysis of potentially harmful text content."
parameters:
- description: "The text analysis request."
name: "options"
type: "<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />"
- description: "The options to configure the HTTP request before HTTP client sends it."
name: "requestOptions"
type: "<xref href=\"com.azure.core.http.rest.RequestOptions?alt=com.azure.core.http.rest.RequestOptions&text=RequestOptions\" data-throw-if-not-resolved=\"False\" />"
syntax: "public Mono<Response<BinaryData>> analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence.\n\n**Request Body Schema**\n\n```java\n{\n text: String (Required)\n categories (Optional): [\n String(Hate/SelfHarm/Sexual/Violence) (Optional)\n ]\n blocklistNames (Optional): [\n String (Optional)\n ]\n haltOnBlocklistHit: Boolean (Optional)\n outputType: String(FourSeverityLevels/EightSeverityLevels) (Optional)\n }\n```\n\n**Response Body Schema**\n\n```java\n{\n blocklistsMatch (Optional): [\n (Optional){\n blocklistName: String (Required)\n blocklistItemId: String (Required)\n blocklistItemText: String (Required)\n }\n ]\n categoriesAnalysis (Required): [\n (Required){\n category: String(Hate/SelfHarm/Sexual/Violence) (Required)\n severity: Integer (Optional)\n }\n ]\n }\n```"
returns:
description: "the text analysis response along with <xref uid=\"com.azure.core.http.rest.Response\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Response\"></xref> on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.core.http.rest.Response?alt=com.azure.core.http.rest.Response&text=Response\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />&gt;&gt;"
type: "class"
desc: "Initializes a new instance of the asynchronous ContentSafetyClient type."
metadata: {}
package: "com.azure.ai.contentsafety"
artifact: com.azure:azure-ai-contentsafety:1.0.6