### YamlMime:JavaType
uid: "com.azure.ai.contentsafety.ContentSafetyClient"
fullName: "com.azure.ai.contentsafety.ContentSafetyClient"
name: "ContentSafetyClient"
nameWithType: "ContentSafetyClient"
summary: "Initializes a new instance of the synchronous Content<wbr>Safety<wbr>Client type."
inheritances:
- "<xref href=\"java.lang.Object?displayProperty=fullName\" data-throw-if-not-resolved=\"False\" />"
inheritedClassMethods:
- classRef: "java.lang.<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html\">Object</a>"
  methodsRef:
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#clone--\">clone</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#equals-java.lang.Object-\">equals</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#finalize--\">finalize</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#getClass--\">getClass</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#hashCode--\">hashCode</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#notify--\">notify</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#notifyAll--\">notifyAll</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#toString--\">toString</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#wait--\">wait</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#wait-long-\">wait</a>"
  - "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#wait-long-int-\">wait</a>"
syntax: "public final class **ContentSafetyClient**"
methods:
- uid: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeImage(com.azure.ai.contentsafety.models.AnalyzeImageOptions)"
  fullName: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeImage(AnalyzeImageOptions options)"
  name: "analyzeImage(AnalyzeImageOptions options)"
  nameWithType: "ContentSafetyClient.analyzeImage(AnalyzeImageOptions options)"
  summary: "Analyze Image A synchronous API for the analysis of potentially harmful image content."
  parameters:
  - description: "The image analysis request."
    name: "options"
    type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageOptions?alt=com.azure.ai.contentsafety.models.AnalyzeImageOptions&text=AnalyzeImageOptions\" data-throw-if-not-resolved=\"False\" />"
  syntax: "public AnalyzeImageResult analyzeImage(AnalyzeImageOptions options)"
  desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
  returns:
    description: "the image analysis response."
    type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />"
- uid: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeImage(com.azure.core.util.BinaryData)"
  fullName: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeImage(BinaryData content)"
  name: "analyzeImage(BinaryData content)"
  nameWithType: "ContentSafetyClient.analyzeImage(BinaryData content)"
  summary: "Analyze Image A synchronous API for the analysis of potentially harmful image content."
  parameters:
  - description: "The image analysis request."
    name: "content"
    type: "<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />"
  syntax: "public AnalyzeImageResult analyzeImage(BinaryData content)"
  desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
  returns:
    description: "the image analysis response."
    type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />"
- uid: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeImage(java.lang.String)"
  fullName: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeImage(String blobUrl)"
  name: "analyzeImage(String blobUrl)"
  nameWithType: "ContentSafetyClient.analyzeImage(String blobUrl)"
  summary: "Analyze Image A synchronous API for the analysis of potentially harmful image content."
  parameters:
  - description: "The image analysis request."
    name: "blobUrl"
    type: "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/String.html\">String</a>"
  syntax: "public AnalyzeImageResult analyzeImage(String blobUrl)"
  desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
  returns:
    description: "the image analysis response."
    type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />"
- uid: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeImageWithResponse(com.azure.core.util.BinaryData,com.azure.core.http.rest.RequestOptions)"
  fullName: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
  name: "analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
  nameWithType: "ContentSafetyClient.analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
  summary: "Analyze Image A synchronous API for the analysis of potentially harmful image content."
  parameters:
  - description: "The image analysis request."
    name: "options"
    type: "<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />"
  - description: "The options to configure the HTTP request before HTTP client sends it."
    name: "requestOptions"
    type: "<xref href=\"com.azure.core.http.rest.RequestOptions?alt=com.azure.core.http.rest.RequestOptions&text=RequestOptions\" data-throw-if-not-resolved=\"False\" />"
  syntax: "public Response<BinaryData> analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
  desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence.\n\n**Request Body Schema**\n\n```java\n{\n image (Required): {\n content: byte[] (Optional)\n blobUrl: String (Optional)\n }\n categories (Optional): [\n String(Hate/SelfHarm/Sexual/Violence) (Optional)\n ]\n outputType: String(FourSeverityLevels) (Optional)\n }\n```\n\n**Response Body Schema**\n\n```java\n{\n categoriesAnalysis (Required): [\n (Required){\n category: String(Hate/SelfHarm/Sexual/Violence) (Required)\n severity: Integer (Optional)\n }\n ]\n }\n```"
  returns:
    description: "the image analysis response along with <xref uid=\"com.azure.core.http.rest.Response\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Response\"></xref>."
    type: "<xref href=\"com.azure.core.http.rest.Response?alt=com.azure.core.http.rest.Response&text=Response\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />>"
- uid: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeText(com.azure.ai.contentsafety.models.AnalyzeTextOptions)"
  fullName: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeText(AnalyzeTextOptions options)"
  name: "analyzeText(AnalyzeTextOptions options)"
  nameWithType: "ContentSafetyClient.analyzeText(AnalyzeTextOptions options)"
  summary: "Analyze Text A synchronous API for the analysis of potentially harmful text content."
  parameters:
  - description: "The text analysis request."
    name: "options"
    type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextOptions?alt=com.azure.ai.contentsafety.models.AnalyzeTextOptions&text=AnalyzeTextOptions\" data-throw-if-not-resolved=\"False\" />"
  syntax: "public AnalyzeTextResult analyzeText(AnalyzeTextOptions options)"
  desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
  returns:
    description: "the text analysis response."
    type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextResult?alt=com.azure.ai.contentsafety.models.AnalyzeTextResult&text=AnalyzeTextResult\" data-throw-if-not-resolved=\"False\" />"
- uid: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeText(java.lang.String)"
  fullName: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeText(String text)"
  name: "analyzeText(String text)"
  nameWithType: "ContentSafetyClient.analyzeText(String text)"
  summary: "Analyze Text A synchronous API for the analysis of potentially harmful text content."
  parameters:
  - description: "The text analysis request."
    name: "text"
    type: "<a href=\"https://docs.oracle.com/javase/8/docs/api/java/lang/String.html\">String</a>"
  syntax: "public AnalyzeTextResult analyzeText(String text)"
  desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
  returns:
    description: "the text analysis response."
    type: "<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextResult?alt=com.azure.ai.contentsafety.models.AnalyzeTextResult&text=AnalyzeTextResult\" data-throw-if-not-resolved=\"False\" />"
- uid: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeTextWithResponse(com.azure.core.util.BinaryData,com.azure.core.http.rest.RequestOptions)"
  fullName: "com.azure.ai.contentsafety.ContentSafetyClient.analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
  name: "analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
  nameWithType: "ContentSafetyClient.analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
  summary: "Analyze Text A synchronous API for the analysis of potentially harmful text content."
  parameters:
  - description: "The text analysis request."
    name: "options"
    type: "<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />"
  - description: "The options to configure the HTTP request before HTTP client sends it."
    name: "requestOptions"
    type: "<xref href=\"com.azure.core.http.rest.RequestOptions?alt=com.azure.core.http.rest.RequestOptions&text=RequestOptions\" data-throw-if-not-resolved=\"False\" />"
  syntax: "public Response<BinaryData> analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
  desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence.\n\n**Request Body Schema**\n\n```java\n{\n text: String (Required)\n categories (Optional): [\n String(Hate/SelfHarm/Sexual/Violence) (Optional)\n ]\n blocklistNames (Optional): [\n String (Optional)\n ]\n haltOnBlocklistHit: Boolean (Optional)\n outputType: String(FourSeverityLevels/EightSeverityLevels) (Optional)\n }\n```\n\n**Response Body Schema**\n\n```java\n{\n blocklistsMatch (Optional): [\n (Optional){\n blocklistName: String (Required)\n blocklistItemId: String (Required)\n blocklistItemText: String (Required)\n }\n ]\n categoriesAnalysis (Required): [\n (Required){\n category: String(Hate/SelfHarm/Sexual/Violence) (Required)\n severity: Integer (Optional)\n }\n ]\n }\n```"
  returns:
    description: "the text analysis response along with <xref uid=\"com.azure.core.http.rest.Response\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Response\"></xref>."
    type: "<xref href=\"com.azure.core.http.rest.Response?alt=com.azure.core.http.rest.Response&text=Response\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />>"
type: "class"
desc: "Initializes a new instance of the synchronous ContentSafetyClient type."
metadata: {}
package: "com.azure.ai.contentsafety"
artifact: com.azure:azure-ai-contentsafety:1.0.8