### Packages impacted by this PR
@azure/openai

### Issues associated with this PR
N/A

### Describe the problem that is addressed by this PR
Preparing for releasing v2.0 that targets API
[v2024-10-21](https://github.com/Azure/azure-rest-api-specs/blob/main/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2024-10-21/inference.json).

### What are the possible designs available to address the problem? If
there are more than one possible design, why was the one in this PR
chosen?
N/A

### Are there test cases added in this PR? _(If not, why?)_
N/A

### Provide a list of related PRs _(if any)_
N/A

### Command used to generate this PR _(Applicable only to SDK release
request PRs)_

### Checklists
- [x] Added impacted package name to the issue description
- [ ] Does this PR need any fixes in the SDK Generator? _(If so,
create an Issue in the
[Autorest/typescript](https://github.com/Azure/autorest.typescript)
repository and link it here)_
- [x] Added a changelog (if necessary)

---------

Co-authored-by: Krista Pratico <krpratic@microsoft.com>
This commit is contained in:
Deyaaeldeen Almahallawi 2024-11-16 00:34:56 +03:00 коммит произвёл GitHub
Родитель b3e5d58e1f
Коммит a179d22585
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: B5690EEEBB952194
88 изменённых файлов: 1866 добавлений и 735 удалений

Просмотреть файл

@ -1,14 +1,17 @@
# Release History
## 2.0.0-beta.4 (Unreleased)
## 2.0.0 (2024-11-18)
### Features Added
This release marks the first stable library version for `@azure/openai` and it exposes Azure-exclusive features supported in the latest Azure OpenAI Service stable `api-version` label of `2024-10-21`.
The following features are supported in this release:
- Azure OpenAI On Your Data, which enables you to run advanced AI models, without needing to train or fine-tune models, on your own enterprise data located in an Azure Search index or in a vector store in Azure Cosmos DB.
- Azure OpenAI content filtering which detects and prevents the output of harmful content.
### Breaking Changes
### Bugs Fixed
### Other Changes
- Models related to features still in preview have been removed.
## 2.0.0-beta.3 (2024-11-05)

Просмотреть файл

@ -69,7 +69,7 @@ Migrated code:
```typescript
import { AzureOpenAI } from "openai";
const deployment = "Your Azure OpenAI deployment";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const options = { azureADTokenProvider, deployment, apiVersion }
const client = new AzureOpenAI(options);
```

Просмотреть файл

@ -60,7 +60,7 @@ There are several ways to authenticate with the Azure OpenAI service and the rec
import { AzureOpenAI } from "openai";
const deployment = "Your deployment name";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
```
@ -126,7 +126,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-4-1106-preview";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
stream: true,
@ -182,7 +182,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -1,6 +1,6 @@
{
"name": "@azure/openai",
"version": "2.0.0-beta.4",
"version": "2.0.0",
"description": "A companion library to openai for Azure OpenAI.",
"sdk-type": "client",
"main": "./dist/commonjs/index.js",

Просмотреть файл

@ -15,23 +15,13 @@ import type { CompletionCreateParamsStreaming } from 'openai/resources/index';
import type { ErrorModel } from '@azure-rest/core-client';
// @public
export type AzureChatExtensionConfiguration = AzureChatExtensionConfigurationParent | AzureSearchChatExtensionConfiguration | AzureCosmosDBChatExtensionConfiguration | ElasticsearchChatExtensionConfiguration | PineconeChatExtensionConfiguration | MongoDBChatExtensionConfiguration;
export type AzureChatExtensionConfiguration = AzureChatExtensionConfigurationParent | AzureSearchChatExtensionConfiguration | AzureCosmosDBChatExtensionConfiguration;
// @public
export interface AzureChatExtensionConfigurationParent {
type: string;
}
// @public
export interface AzureChatExtensionDataSourceResponseCitation {
chunk_id?: string;
content: string;
filepath?: string;
rerank_score?: number;
title?: string;
url?: string;
}
// @public
export interface AzureChatExtensionDataSourceResponseCitationOutput {
chunk_id?: string;
@ -41,20 +31,8 @@ export interface AzureChatExtensionDataSourceResponseCitationOutput {
url?: string;
}
// @public
export interface AzureChatExtensionRetrievedDocument extends AzureChatExtensionDataSourceResponseCitation {
data_source_index: number;
filter_reason?: AzureChatExtensionRetrieveDocumentFilterReason;
original_search_score?: number;
search_queries: string[];
}
// @public
export type AzureChatExtensionRetrieveDocumentFilterReason = "score" | "rerank";
// @public
export interface AzureChatExtensionsMessageContextOutput {
all_retrieved_documents?: Array<AzureChatExtensionRetrievedDocument>;
citations?: Array<AzureChatExtensionDataSourceResponseCitationOutput>;
intent?: string;
}
@ -67,17 +45,13 @@ export interface AzureCosmosDBChatExtensionConfiguration extends AzureChatExtens
// @public
export interface AzureCosmosDBChatExtensionParameters {
allow_partial_result?: boolean;
authentication?: OnYourDataAuthenticationOptions;
authentication: OnYourDataAuthenticationOptions;
container_name: string;
database_name: string;
embedding_dependency: OnYourDataVectorizationSource;
fields_mapping: AzureCosmosDBFieldMappingOptions;
in_scope?: boolean;
include_contexts?: OnYourDataContextProperty[];
index_name: string;
max_search_queries?: number;
role_information?: string;
strictness?: number;
top_n_documents?: number;
}
@ -100,18 +74,14 @@ export interface AzureSearchChatExtensionConfiguration extends AzureChatExtensio
// @public
export interface AzureSearchChatExtensionParameters {
allow_partial_result?: boolean;
authentication?: OnYourDataAuthenticationOptions;
authentication: OnYourDataAuthenticationOptions;
embedding_dependency?: OnYourDataVectorizationSource;
endpoint: string;
fields_mapping?: AzureSearchIndexFieldMappingOptions;
filter?: string;
in_scope?: boolean;
include_contexts?: OnYourDataContextProperty[];
index_name: string;
max_search_queries?: number;
query_type?: string;
role_information?: string;
semantic_configuration?: string;
strictness?: number;
top_n_documents?: number;
@ -122,7 +92,6 @@ export interface AzureSearchIndexFieldMappingOptions {
content_fields?: string[];
content_fields_separator?: string;
filepath_field?: string;
image_vector_fields?: string[];
title_field?: string;
url_field?: string;
vector_fields?: string[];
@ -144,12 +113,6 @@ export { CompletionCreateParamsNonStreaming }
export { CompletionCreateParamsStreaming }
// @public
export interface ContentFilterBlocklistIdResultOutput {
filtered: boolean;
id: string;
}
// @public
export interface ContentFilterCitedDetectionResultOutput {
detected: boolean;
@ -158,12 +121,6 @@ export interface ContentFilterCitedDetectionResultOutput {
URL?: string;
}
// @public
export interface ContentFilterDetailedResults {
details: ContentFilterBlocklistIdResultOutput[];
filtered: boolean;
}
// @public
export interface ContentFilterDetectionResultOutput {
detected: boolean;
@ -172,10 +129,8 @@ export interface ContentFilterDetectionResultOutput {
// @public
export interface ContentFilterResultDetailsForPromptOutput {
custom_blocklists?: ContentFilterDetailedResults;
error?: ErrorModel;
hate?: ContentFilterResultOutput;
indirect_attack?: ContentFilterDetectionResultOutput;
jailbreak?: ContentFilterDetectionResultOutput;
profanity?: ContentFilterDetectionResultOutput;
self_harm?: ContentFilterResultOutput;
@ -191,7 +146,6 @@ export interface ContentFilterResultOutput {
// @public
export interface ContentFilterResultsForChoiceOutput {
custom_blocklists?: ContentFilterDetailedResults;
error?: ErrorModel;
hate?: ContentFilterResultOutput;
profanity?: ContentFilterDetectionResultOutput;
@ -208,39 +162,6 @@ export interface ContentFilterResultsForPromptOutput {
prompt_index: number;
}
// @public
export interface ElasticsearchChatExtensionConfiguration extends AzureChatExtensionConfigurationParent {
parameters: ElasticsearchChatExtensionParameters;
type: "elasticsearch";
}
// @public
export interface ElasticsearchChatExtensionParameters {
allow_partial_result?: boolean;
authentication?: OnYourDataAuthenticationOptions;
embedding_dependency?: OnYourDataVectorizationSource;
endpoint: string;
fields_mapping?: ElasticsearchIndexFieldMappingOptions;
in_scope?: boolean;
include_contexts?: OnYourDataContextProperty[];
index_name: string;
max_search_queries?: number;
query_type?: string;
role_information?: string;
strictness?: number;
top_n_documents?: number;
}
// @public
export interface ElasticsearchIndexFieldMappingOptions {
content_fields?: string[];
content_fields_separator?: string;
filepath_field?: string;
title_field?: string;
url_field?: string;
vector_fields?: string[];
}
// @public
export interface ImageGenerationContentFilterResults {
hate?: ContentFilterResultOutput;
@ -251,7 +172,6 @@ export interface ImageGenerationContentFilterResults {
// @public
export interface ImageGenerationPromptFilterResults {
custom_blocklists?: ContentFilterDetailedResults;
hate?: ContentFilterResultOutput;
jailbreak?: ContentFilterDetectionResultOutput;
profanity?: ContentFilterDetectionResultOutput;
@ -260,47 +180,6 @@ export interface ImageGenerationPromptFilterResults {
violence?: ContentFilterResultOutput;
}
// @public
export interface MongoDBChatExtensionConfiguration extends AzureChatExtensionConfigurationParent {
parameters: MongoDBChatExtensionParameters;
type: "mongo_db";
}
// @public
export interface MongoDBChatExtensionParameters {
allow_partial_result?: boolean;
app_name: string;
authentication?: OnYourDataUsernameAndPasswordAuthenticationOptions;
collection_name: string;
database_name: string;
embedding_dependency: OnYourDataEndpointVectorizationSource | OnYourDataDeploymentNameVectorizationSource;
endpoint: string;
fields_mapping: MongoDBFieldMappingOptions;
in_scope?: boolean;
include_contexts?: OnYourDataContextProperty[];
index_name: string;
max_search_queries?: number;
role_information?: string;
strictness?: number;
top_n_documents?: number;
}
// @public
export interface MongoDBFieldMappingOptions {
content_fields: string[];
content_fields_separator?: string;
filepath_field?: string;
title_field?: string;
url_field?: string;
vector_fields: string[];
}
// @public
export interface OnYourDataAccessTokenAuthenticationOptions extends OnYourDataAuthenticationOptionsParent {
access_token: string;
type: "access_token";
}
// @public
export interface OnYourDataApiKeyAuthenticationOptions extends OnYourDataAuthenticationOptionsParent {
key: string;
@ -308,7 +187,7 @@ export interface OnYourDataApiKeyAuthenticationOptions extends OnYourDataAuthent
}
// @public
export type OnYourDataAuthenticationOptions = OnYourDataAuthenticationOptionsParent | OnYourDataApiKeyAuthenticationOptions | OnYourDataConnectionStringAuthenticationOptions | OnYourDataKeyAndKeyIdAuthenticationOptions | OnYourDataEncodedApiKeyAuthenticationOptions | OnYourDataAccessTokenAuthenticationOptions | OnYourDataSystemAssignedManagedIdentityAuthenticationOptions | OnYourDataUserAssignedManagedIdentityAuthenticationOptions;
export type OnYourDataAuthenticationOptions = OnYourDataAuthenticationOptionsParent | OnYourDataApiKeyAuthenticationOptions | OnYourDataConnectionStringAuthenticationOptions | OnYourDataSystemAssignedManagedIdentityAuthenticationOptions | OnYourDataUserAssignedManagedIdentityAuthenticationOptions;
// @public
export interface OnYourDataAuthenticationOptionsParent {
@ -324,41 +203,16 @@ export interface OnYourDataConnectionStringAuthenticationOptions extends OnYourD
// @public
export interface OnYourDataDeploymentNameVectorizationSource extends OnYourDataVectorizationSourceParent {
deployment_name: string;
dimensions?: number;
type: "deployment_name";
}
// @public
export interface OnYourDataEncodedApiKeyAuthenticationOptions extends OnYourDataAuthenticationOptionsParent {
encoded_api_key: string;
type: "encoded_api_key";
}
// @public
export interface OnYourDataEndpointVectorizationSource extends OnYourDataVectorizationSourceParent {
authentication: OnYourDataVectorSearchAuthenticationOptions;
authentication: OnYourDataApiKeyAuthenticationOptions;
endpoint: string;
type: "endpoint";
}
// @public
export interface OnYourDataIntegratedVectorizationSource extends OnYourDataVectorizationSourceParent {
type: "integrated";
}
// @public
export interface OnYourDataKeyAndKeyIdAuthenticationOptions extends OnYourDataAuthenticationOptionsParent {
key: string;
key_id: string;
type: "key_and_key_id";
}
// @public
export interface OnYourDataModelIdVectorizationSource extends OnYourDataVectorizationSourceParent {
model_id: string;
type: "model_id";
}
// @public
export interface OnYourDataSystemAssignedManagedIdentityAuthenticationOptions extends OnYourDataAuthenticationOptionsParent {
type: "system_assigned_managed_identity";
@ -371,68 +225,13 @@ export interface OnYourDataUserAssignedManagedIdentityAuthenticationOptions exte
}
// @public
export interface OnYourDataUsernameAndPasswordAuthenticationOptions extends OnYourDataAuthenticationOptionsParent {
password: string;
type: "username_and_password";
username: string;
}
// @public
export type OnYourDataVectorizationSource = OnYourDataVectorizationSourceParent | OnYourDataEndpointVectorizationSource | OnYourDataDeploymentNameVectorizationSource | OnYourDataModelIdVectorizationSource | OnYourDataIntegratedVectorizationSource;
export type OnYourDataVectorizationSource = OnYourDataVectorizationSourceParent | OnYourDataEndpointVectorizationSource | OnYourDataDeploymentNameVectorizationSource;
// @public
export interface OnYourDataVectorizationSourceParent {
type: string;
}
// @public
export interface OnYourDataVectorSearchAccessTokenAuthenticationOptions extends OnYourDataVectorSearchAuthenticationOptions {
access_token: string;
type: "access_token";
}
// @public
export interface OnYourDataVectorSearchApiKeyAuthenticationOptions extends OnYourDataVectorSearchAuthenticationOptions {
key: string;
type: "api_key";
}
// @public
export interface OnYourDataVectorSearchAuthenticationOptions {
type: string;
}
// @public
export interface PineconeChatExtensionConfiguration extends AzureChatExtensionConfigurationParent {
parameters: PineconeChatExtensionParameters;
type: "pinecone";
}
// @public
export interface PineconeChatExtensionParameters {
allow_partial_result?: boolean;
authentication?: OnYourDataAuthenticationOptions;
embedding_dependency: OnYourDataVectorizationSource;
environment: string;
fields_mapping: PineconeFieldMappingOptions;
in_scope?: boolean;
include_contexts?: OnYourDataContextProperty[];
index_name: string;
max_search_queries?: number;
role_information?: string;
strictness?: number;
top_n_documents?: number;
}
// @public
export interface PineconeFieldMappingOptions {
content_fields: string[];
content_fields_separator?: string;
filepath_field?: string;
title_field?: string;
url_field?: string;
}
// (No @packageDocumentation comment for this package)
```

Просмотреть файл

@ -26,7 +26,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "whisper-deployment";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.audio.transcriptions.create({
model: "",

Просмотреть файл

@ -26,7 +26,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "whisper-deployment";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.audio.translations.create({
model: "",

Просмотреть файл

@ -22,7 +22,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -13,6 +13,7 @@ import { AzureOpenAI } from "openai";
import { getBearerTokenProvider, DefaultAzureCredential } from "@azure/identity";
export async function main() {
const apiVersion = "2024-09-01-preview";
// Create AzureOpenAI client with Microsoft Entra ID
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
@ -20,6 +21,7 @@ export async function main() {
const client = new AzureOpenAI({
azureADTokenProvider,
apiVersion,
});
// Create an assistant using code interpreter tool

Просмотреть файл

@ -24,7 +24,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "text-davinci-003";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.completions.create({ prompt, model: "", max_tokens: 128 });

Просмотреть файл

@ -24,7 +24,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const deployment = "text-embedding-3-large";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const embeddings = await client.embeddings.create({ input, model: "" });

Просмотреть файл

@ -29,7 +29,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "dall-e-3";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const results = await client.images.generate({ prompt, model: "", n, size });

Просмотреть файл

@ -27,7 +27,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-4-1106-preview";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
stream: true,

Просмотреть файл

@ -22,7 +22,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -23,7 +23,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -24,7 +24,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "text-davinci-003";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.completions.create({
prompt,

Просмотреть файл

@ -24,7 +24,7 @@ const speechFilePath = process.env["SPEECH_FILE_PATH"] || "<path to save the spe
// Corresponds to your Model deployment within your OpenAI resource
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = "tts";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(credential, scope);

Просмотреть файл

@ -41,7 +41,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-4-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.chat.completions.create({
messages: [{ role: "user", content: "What's the weather like in Boston?" }],

Просмотреть файл

@ -35,7 +35,7 @@ const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
// Set the AZURE_OPENAI_ENDPOINT environment variable to the Azure OpenAI endpoint
const client = new AzureOpenAI({ azureADTokenProvider, apiVersion: "2024-08-01-preview" });
const client = new AzureOpenAI({ azureADTokenProvider, apiVersion: "2024-10-21" });
async function getEmbeddings(text, model = deployment) {
const res = await client.embeddings.create({ input: text, model });

Просмотреть файл

@ -76,7 +76,7 @@ async function deleteIndexIfExists(indexClient, indexName) {
async function askOpenAI(azureSearchIndexName, messages) {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-10-21";
const client = new AzureOpenAI({ azureADTokenProvider, deployment: chatDeploymentId, apiVersion });
const events = client.chat.completions.create({
messages,

Просмотреть файл

@ -25,7 +25,7 @@ app.post("/api/completions", async (req, res) => {
endpoint: process.env.AZURE_OPENAI_ENDPOINT,
deployment: process.env.AZURE_OPENAI_DEPLOYMENT,
azureADTokenProvider,
apiVersion: "2024-08-01-preview",
apiVersion: "2024-10-21",
});
const prompt = req.body.prompt;

Просмотреть файл

@ -24,7 +24,7 @@ const openAI = new AzureOpenAI({
endpoint: process.env.AZURE_OPENAI_ENDPOINT,
deployment: process.env.AZURE_OPENAI_DEPLOYMENT,
azureADTokenProvider,
apiVersion: "2024-08-01-preview",
apiVersion: "2024-10-21",
});
app.get("/api/auth", async (req, res) => {

Просмотреть файл

@ -25,7 +25,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "whisper-deployment";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.audio.transcriptions.create({
model: "",

Просмотреть файл

@ -25,7 +25,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "whisper-deployment";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.audio.translations.create({
model: "",

Просмотреть файл

@ -21,7 +21,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-4-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const batchContent = `{ "custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": { "model": "${deployment}", "messages": [{ "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is 2+2?" }] } }`;

Просмотреть файл

@ -21,7 +21,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -12,6 +12,7 @@ const { AzureOpenAI } = require("openai");
const { getBearerTokenProvider, DefaultAzureCredential } = require("@azure/identity");
async function main() {
const apiVersion = "2024-09-01-preview";
// Create AzureOpenAI client with Microsoft Entra ID
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
@ -19,6 +20,7 @@ async function main() {
const client = new AzureOpenAI({
azureADTokenProvider,
apiVersion,
});
// Create an assistant using code interpreter tool

Просмотреть файл

@ -23,7 +23,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "text-davinci-003";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.completions.create({ prompt, model: "", max_tokens: 128 });

Просмотреть файл

@ -23,7 +23,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const deployment = "text-embedding-3-large";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const embeddings = await client.embeddings.create({ input, model: "" });

Просмотреть файл

@ -28,7 +28,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "dall-e-3";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const results = await client.images.generate({ prompt, model: "", n, size });

Просмотреть файл

@ -25,7 +25,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-4-1106-preview";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
stream: true,

Просмотреть файл

@ -21,7 +21,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -21,7 +21,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -23,7 +23,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "text-davinci-003";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.completions.create({
prompt,

Просмотреть файл

@ -23,7 +23,7 @@ const speechFilePath = process.env["SPEECH_FILE_PATH"] || "<path to save the spe
// Corresponds to your Model deployment within your OpenAI resource
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = "tts";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(credential, scope);

Просмотреть файл

@ -40,7 +40,7 @@ async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-4-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.chat.completions.create({
messages: [{ role: "user", content: "What's the weather like in Boston?" }],

Просмотреть файл

@ -25,7 +25,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "whisper-deployment";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.audio.transcriptions.create({
model: "",

Просмотреть файл

@ -25,7 +25,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "whisper-deployment";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.audio.translations.create({
model: "",

Просмотреть файл

@ -21,7 +21,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -12,6 +12,7 @@ import { AzureOpenAI } from "openai";
import { getBearerTokenProvider, DefaultAzureCredential } from "@azure/identity";
export async function main() {
const apiVersion = "2024-09-01-preview";
// Create AzureOpenAI client with Microsoft Entra ID
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
@ -19,6 +20,7 @@ export async function main() {
const client = new AzureOpenAI({
azureADTokenProvider,
apiVersion,
});
// Create an assistant using code interpreter tool

Просмотреть файл

@ -23,7 +23,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "text-davinci-003";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.completions.create({ prompt, model: "", max_tokens: 128 });

Просмотреть файл

@ -23,7 +23,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const deployment = "text-embedding-3-large";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const embeddings = await client.embeddings.create({ input, model: "" });

Просмотреть файл

@ -28,7 +28,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "dall-e-3";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const results = await client.images.generate({ prompt, model: "", n, size });

Просмотреть файл

@ -26,7 +26,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-4-1106-preview";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
stream: true,

Просмотреть файл

@ -21,7 +21,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -22,7 +22,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-35-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.chat.completions.create({
messages: [

Просмотреть файл

@ -23,7 +23,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "text-davinci-003";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const events = await client.completions.create({
prompt,

Просмотреть файл

@ -23,7 +23,7 @@ const speechFilePath = process.env["SPEECH_FILE_PATH"] || "<path to save the spe
// Corresponds to your Model deployment within your OpenAI resource
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = "tts";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(credential, scope);

Просмотреть файл

@ -40,7 +40,7 @@ export async function main() {
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
const deployment = "gpt-4-turbo";
const apiVersion = "2024-08-01-preview";
const apiVersion = "2024-09-01-preview";
const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
const result = await client.chat.completions.create({
messages: [{ role: "user", content: "What's the weather like in Boston?" }],

Просмотреть файл

@ -0,0 +1,90 @@
---
page_type: sample
languages:
- javascript
products:
- azure
- azure-cognitive-services
- azure-openai
urlFragment: openai-javascript
---
# Azure OpenAI client library samples for JavaScript
These sample programs show how to use the JavaScript client libraries for Azure OpenAI in some common scenarios.
| **File Name** | **Description** |
| ----------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
| [audioTranscription.js][audiotranscription] | audio transcription. |
| [audioTranslation.js][audiotranslation] | audio translation. |
| [batch.js][batch] | create and retrieve batch content. |
| [chatCompletions.js][chatcompletions] | get chat completions. |
| [codeInterpreter.js][codeinterpreter] | interpreting code. |
| [completions.js][completions] | get completions. |
| [embeddings.js][embeddings] | generates embedding vectors from a prompt using Azure OpenAI Get Embeddings. |
| [images.js][images] | generates images from prompts using Azure OpenAI Batch Image Generation. |
| [onYourData.js][onyourdata] | chat completions with your own data. |
| [streamChatCompletions.js][streamchatcompletions] | stream chat completions. |
| [streamChatCompletionsWithContentFilter.js][streamchatcompletionswithcontentfilter] | stream chat completions and inspect content filter results. |
| [streamCompletions.js][streamcompletions] | stream completions. |
| [textToSpeech.js][texttospeech] | text to speech. |
| [toolCall.js][toolcall] | get chat completions with functions. |
## Prerequisites
The sample programs are compatible with [LTS versions of Node.js](https://github.com/nodejs/release#release-schedule).
You need [an Azure subscription][freesub] and the following Azure resources to run these sample programs:
- [Azure Cognitive Services instance][createinstance_azurecognitiveservicesinstance]
Samples retrieve credentials to access the service endpoint from environment variables. Alternatively, edit the source code to include the appropriate credentials. See each individual sample for details on which environment variables/credentials it requires to function.
Adapting the samples to run in the browser may require some additional consideration. For details, please see the [package README][package].
## Setup
To run the samples using the published version of the package:
1. Install the dependencies using `npm`:
```bash
npm install
```
2. Edit the file `sample.env`, adding the correct credentials to access the Azure service and run the samples. Then rename the file from `sample.env` to just `.env`. The sample programs will read this file automatically.
3. Run whichever samples you like (note that some samples may require additional setup, see the table above):
```bash
node audioTranscription.js
```
Alternatively, run a single sample with the correct environment variables set (setting up the `.env` file is not required if you do this), for example (cross-platform):
```bash
npx dev-tool run vendored cross-env AUDIO_FILE_PATH="<audio file path>" node audioTranscription.js
```
## Next Steps
Take a look at our [API Documentation][apiref] for more information about the APIs that are available in the clients.
[audiotranscription]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/audioTranscription.js
[audiotranslation]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/audioTranslation.js
[batch]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/batch.js
[chatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/chatCompletions.js
[codeinterpreter]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/codeInterpreter.js
[completions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/completions.js
[embeddings]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/embeddings.js
[images]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/images.js
[onyourdata]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/onYourData.js
[streamchatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/streamChatCompletions.js
[streamchatcompletionswithcontentfilter]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/streamChatCompletionsWithContentFilter.js
[streamcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/streamCompletions.js
[texttospeech]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/textToSpeech.js
[toolcall]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/javascript/toolCall.js
[apiref]: https://docs.microsoft.com/javascript/api/@azure/openai
[freesub]: https://azure.microsoft.com/free/
[createinstance_azurecognitiveservicesinstance]: https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource
[package]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai/README.md

Просмотреть файл

@ -0,0 +1,42 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to transcribe the content of an audio file.
 *
 * @summary audio transcription.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");
const { createReadStream } = require("fs");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

// You will need to set these environment variables or edit the following values
const audioFilePath = process.env["AUDIO_FILE_PATH"] || "<audio file path>";

async function main() {
  console.log("== Transcribe Audio Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "whisper-deployment",
    apiVersion: "2024-10-21",
  });

  // The deployment is configured on the client, so `model` is left empty.
  const result = await client.audio.transcriptions.create({
    model: "",
    file: createReadStream(audioFilePath),
  });

  console.log(`Transcription: ${result.text}`);
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,42 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to translate the content of an audio file.
 *
 * @summary audio translation.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");
const { createReadStream } = require("fs");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

// You will need to set these environment variables or edit the following values
const audioFilePath = process.env["AUDIO_FILE_PATH"] || "<audio file path>";

async function main() {
  console.log("== Translate Audio Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "whisper-deployment",
    apiVersion: "2024-10-21",
  });

  // The deployment is configured on the client, so `model` is left empty.
  const result = await client.audio.translations.create({
    model: "",
    file: createReadStream(audioFilePath),
  });

  console.log(`Translation: ${result.text}`);
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,64 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to use Azure OpenAI global batch deployment.
 *
 * @summary create and retrieve batch content.
 */
const { AzureOpenAI, toFile } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

async function main() {
  console.log("== Batch Chat Completions Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);
  const deployment = "gpt-4-turbo";
  // NOTE(review): the batch API is preview-only here, hence the preview api-version.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment,
    apiVersion: "2024-09-01-preview",
  });

  // One JSONL line per request; the deployment name doubles as the model.
  const batchContent = `{ "custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": { "model": "${deployment}", "messages": [{ "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is 2+2?" }] } }`;

  // Upload a file with "batch" purpose
  const file = await client.files.create({
    file: await toFile(Buffer.from(batchContent), "batch.jsonl"),
    purpose: "batch",
  });

  // Create the batch
  const batch = await client.batches.create({
    endpoint: "/v1/chat/completions",
    input_file_id: file.id,
    completion_window: "24h",
  });
  console.log(batch);

  // Checking batch status — a single retrieve is shown here; the batch may
  // still be in progress, so real applications should poll until terminal.
  const retrievedBatch = await client.batches.retrieve(batch.id);
  console.log(retrievedBatch);

  // Retrieve the batch output (falls back to the error file when present)
  const outputFileId = retrievedBatch.output_file_id ?? retrievedBatch.error_file_id;
  if (outputFileId) {
    const fileResponse = await client.files.content(outputFileId);
    const fileContent = await fileResponse.text();
    console.log(fileContent);
  }

  // Clean up file
  await client.files.del(file.id);
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,45 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to get chat completions for a chat context.
 *
 * @summary get chat completions.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

async function main() {
  console.log("== Chat Completions Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "gpt-35-turbo",
    apiVersion: "2024-10-21",
  });

  // A short multi-turn conversation; the deployment supplies the model.
  const conversation = [
    { role: "system", content: "You are a helpful assistant. You will talk like a pirate." },
    { role: "user", content: "Can you help me?" },
    { role: "assistant", content: "Arrrr! Of course, me hearty! What can I do for ye?" },
    { role: "user", content: "What's the best way to train a parrot?" },
  ];
  const result = await client.chat.completions.create({
    messages: conversation,
    model: "",
  });

  result.choices.forEach((choice) => {
    console.log(choice.message);
  });
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,88 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to use the AOAI assistants API with the code interpreter tool
 *
 *
 * @summary interpreting code.
 */
const { AzureOpenAI } = require("openai");
const { getBearerTokenProvider, DefaultAzureCredential } = require("@azure/identity");

async function main() {
  // NOTE(review): the assistants API is preview-only here, hence the preview api-version.
  const apiVersion = "2024-09-01-preview";

  // Create AzureOpenAI client with Microsoft Entra ID
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);
  const client = new AzureOpenAI({
    azureADTokenProvider,
    apiVersion,
  });

  // Create an assistant using code interpreter tool
  const assistant = await client.beta.assistants.create({
    tools: [{ type: "code_interpreter" }],
    model: "gpt-4-1106-preview",
    name: "JS CI Math Tutor",
    description: "Math Tutor for Math Problems",
    instructions: "You are a personal math tutor. Write and run code to answer math questions.",
    metadata: { foo: "bar" },
  });

  // Create a new thread and post the user's question on it
  const thread = await client.beta.threads.create();
  await client.beta.threads.messages.create(thread.id, {
    role: "user",
    content: "I need to solve the equation '3x + 11 = 14'. Can you help me?",
  });

  // Start a new run with instructions and wait for it to finish
  const run = await client.beta.threads.runs.createAndPoll(thread.id, {
    assistant_id: assistant.id,
    instructions: "Please address the user as Jane Doe. The user has a premium account.",
  });

  // Check for potential error
  if (run.status === "failed") {
    throw new Error(run.last_error?.message);
  }

  // Retrieve the messages from the run and print each content part
  const runMessages = await client.beta.threads.messages.list(thread.id);
  for await (const message of runMessages) {
    for (const part of message.content) {
      switch (part.type) {
        case "text": {
          console.log(`${message.role}: ${part.text.value}`);
          break;
        }
        case "image_file": {
          console.log(`Received image: ${part.image_file.file_id}`);
          break;
        }
        case "image_url": {
          console.log(`Received image: ${part.image_url.url}`);
          break;
        }
        default: {
          console.log(`Unhandled item type: ${part.type}`);
        }
      }
    }
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,39 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to get completions for the provided prompt.
 *
 * @summary get completions.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

const prompt = ["What is Azure OpenAI?"];

async function main() {
  console.log("== Get completions Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "text-davinci-003",
    apiVersion: "2024-10-21",
  });

  // The deployment supplies the model, so `model` stays empty.
  const result = await client.completions.create({ prompt, model: "", max_tokens: 128 });

  result.choices.forEach((choice) => {
    console.log(choice.text);
  });
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,40 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to get embedding vectors for a piece of text using Azure OpenAI.
 *
 * @summary generates embedding vectors from a prompt using Azure OpenAI Get Embeddings.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

// The prompt to generate the embeddings vector
const input = ["This is the sample text to be embedded"];

async function main() {
  console.log("== Get embeddings sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "text-embedding-3-large",
    apiVersion: "2024-10-21",
  });

  // The deployment supplies the model, so `model` stays empty.
  const embeddings = await client.embeddings.create({ input, model: "" });

  for (const embeddingData of embeddings.data) {
    console.log(`The embedding values are ${embeddingData.embedding}`);
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,44 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to generate images from prompts using Azure OpenAI Batch Image Generation.
 *
 * @summary generates images from prompts using Azure OpenAI Batch Image Generation.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

// The prompt to generate images from
const prompt = "a monkey eating a banana";
const size = "1024x1024";

// The number of images to generate
const n = 1;

async function main() {
  console.log("== Batch Image Generation ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "dall-e-3",
    apiVersion: "2024-10-21",
  });

  // The deployment supplies the model, so `model` stays empty.
  const results = await client.images.generate({ prompt, model: "", n, size });

  results.data.forEach((image) => {
    console.log(`Image generation result URL: ${image.url}`);
  });
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,66 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to use Azure's Bring Your Own Data with Azure OpenAI Chat Completions.
 *
 * @summary chat completions with your own data.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

// Your Azure Cognitive Search endpoint, and index name
const azureSearchEndpoint = process.env["AZURE_SEARCH_ENDPOINT"] || "<search endpoint>";
const azureSearchIndexName = process.env["AZURE_SEARCH_INDEX"] || "<search index>";

async function main() {
  console.log("== On Your Data Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "gpt-4-1106-preview",
    apiVersion: "2024-10-21",
  });

  // Ground the chat completion on an Azure Search index via `data_sources`,
  // authenticating to the search service with the resource's managed identity.
  const events = await client.chat.completions.create({
    stream: true,
    messages: [
      {
        role: "user",
        content:
          "What's the most common feedback we received from our customers about the product?",
      },
    ],
    max_tokens: 128,
    model: "",
    data_sources: [
      {
        type: "azure_search",
        parameters: {
          endpoint: azureSearchEndpoint,
          index_name: azureSearchIndexName,
          authentication: {
            type: "system_assigned_managed_identity",
          },
        },
      },
    ],
  });

  // Print each streamed content delta as it arrives.
  for await (const event of events) {
    for (const choice of event.choices) {
      console.log(choice.delta?.content);
    }
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,36 @@
{
"name": "@azure-samples/openai-js",
"private": true,
"version": "1.0.0",
"description": "Azure OpenAI client library samples for JavaScript",
"engines": {
"node": ">=18.0.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/Azure/azure-sdk-for-js.git",
"directory": "sdk/openai/openai"
},
"keywords": [
"node",
"azure",
"cloud",
"typescript",
"browser",
"isomorphic",
"openai",
"ai"
],
"author": "Microsoft Corporation",
"license": "MIT",
"bugs": {
"url": "https://github.com/Azure/azure-sdk-for-js/issues"
},
"homepage": "https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai",
"dependencies": {
"@azure/openai": "latest",
"dotenv": "latest",
"openai": "^4.47.2",
"@azure/identity": "^4.2.0"
}
}

Просмотреть файл

@ -0,0 +1,3 @@
# Used to authenticate with the Azure OpenAI. Retrieve these
# values from an Azure OpenAI instance in the Azure Portal.
AZURE_OPENAI_ENDPOINT="https://<resource name>.openai.azure.com"

Просмотреть файл

@ -0,0 +1,49 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to list chat completions for a chat context.
 *
 * @summary list chat completions.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

async function main() {
  console.log("== Streaming Chat Completions Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "gpt-35-turbo",
    apiVersion: "2024-10-21",
  });

  // Request a streamed response; the deployment supplies the model.
  const conversation = [
    { role: "system", content: "You are a helpful assistant. You will talk like a pirate." },
    { role: "user", content: "Can you help me?" },
    { role: "assistant", content: "Arrrr! Of course, me hearty! What can I do for ye?" },
    { role: "user", content: "What's the best way to train a parrot?" },
  ];
  const events = await client.chat.completions.create({
    messages: conversation,
    model: "",
    max_tokens: 128,
    stream: true,
  });

  // Print each streamed content delta as it arrives.
  for await (const event of events) {
    for (const choice of event.choices) {
      console.log(choice.delta?.content);
    }
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,72 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to get completions for the provided prompt and parse output for content filter
 *
 * @summary get completions.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

async function main() {
  console.log("== Streaming Chat Completions Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "gpt-35-turbo",
    apiVersion: "2024-10-21",
  });

  const events = await client.chat.completions.create({
    messages: [
      { role: "system", content: "You are a helpful assistant. You will talk like a pirate." },
      { role: "user", content: "Can you help me?" },
      { role: "assistant", content: "Arrrr! Of course, me hearty! What can I do for ye?" },
      { role: "user", content: "What's the best way to train a parrot?" },
    ],
    model: "",
    max_tokens: 128,
    stream: true,
  });

  // Logs one filter category's verdict; output matches the inline originals.
  const logCategory = (label, category) => {
    console.log(
      `\t${label} category is filtered: ${category?.filtered}, with ${category?.severity} severity`,
    );
  };

  for await (const event of events) {
    for (const choice of event.choices) {
      console.log(`Chunk: ${choice.delta?.content}`);
      // Azure-exclusive: per-chunk content filter annotations.
      const filterResults = choice.content_filter_results;
      if (!filterResults) {
        continue;
      }
      if (filterResults.error) {
        console.log(
          `\tContent filter ran into an error ${filterResults.error.code}: ${filterResults.error.message}`,
        );
      } else {
        logCategory("Hate", filterResults.hate);
        logCategory("Sexual", filterResults.sexual);
        logCategory("Self-harm", filterResults.self_harm);
        logCategory("Violence", filterResults.violence);
      }
    }
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,46 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to list completions for the provided prompt.
 *
 * @summary list completions.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

const prompt = ["What is Azure OpenAI?"];

async function main() {
  console.log("== Stream Completions Sample ==");

  // Authenticate with Microsoft Entra ID rather than an API key.
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);

  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "text-davinci-003",
    apiVersion: "2024-10-21",
  });

  // Request a streamed response; the deployment supplies the model.
  const events = await client.completions.create({
    prompt,
    model: "",
    max_tokens: 128,
    stream: true,
  });

  // Print each streamed text fragment as it arrives.
  for await (const event of events) {
    for (const choice of event.choices) {
      console.log(choice.text);
    }
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,51 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to convert text into speech.
 *
 * @summary text to speech.
 */
require("openai/shims/node");
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");
const { writeFile } = require("fs/promises");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

// You will need to set these environment variables or edit the following values
const speechFilePath = process.env["SPEECH_FILE_PATH"] || "<path to save the speech file>";

// Corresponds to your Model deployment within your OpenAI resource
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = "tts";
// NOTE(review): text-to-speech is preview-only here, hence the preview api-version.
const apiVersion = "2024-09-01-preview";

// Authenticate with Microsoft Entra ID rather than an API key.
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(credential, scope);

async function main() {
  console.log("== Text to Speech Sample ==");

  const openai = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  const response = await openai.audio.speech.create({
    model: deployment,
    voice: "alloy",
    input: "the quick brown chicken jumped over the lazy dogs",
  });

  // Write the audio response body straight to disk.
  console.log(`Streaming response to ${speechFilePath}`);
  await writeFile(speechFilePath, response.body);
  console.log("Finished streaming");
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };

Просмотреть файл

@ -0,0 +1,65 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to define and call functions with chat completions.
 *
 * @summary get chat completions with functions.
 */
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");
// JSON-schema description of a function the model may choose to call.
// Only `location` is required; `unit` is an optional enum.
const getCurrentWeather = {
  name: "get_current_weather",
  description: "Get the current weather in a given location",
  parameters: {
    type: "object",
    properties: {
      location: {
        type: "string",
        description: "The city and state, e.g. San Francisco, CA",
      },
      unit: {
        type: "string",
        enum: ["celsius", "fahrenheit"],
      },
    },
    required: ["location"],
  },
};
async function main() {
  console.log("== Chat Completions Sample with Tool Calling ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
  const deployment = "gpt-4-turbo";
  const apiVersion = "2024-10-21";
  const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  // Advertise the function as a tool; the model decides whether to request it.
  const result = await client.chat.completions.create({
    messages: [{ role: "user", content: "What's the weather like in Boston?" }],
    model: "",
    tools: [
      {
        type: "function",
        function: getCurrentWeather,
      },
    ],
  });
  // This sample only prints the requested tool calls; a real app would run
  // them and send the results back to the model in a follow-up request.
  for (const choice of result.choices) {
    console.log(choice.message?.tool_calls);
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});
module.exports = { main };

Просмотреть файл

@ -0,0 +1,103 @@
---
page_type: sample
languages:
- typescript
products:
- azure
- azure-cognitive-services
- azure-openai
urlFragment: openai-typescript
---
# Azure OpenAI client library samples for TypeScript
These sample programs show how to use the TypeScript client libraries for Azure OpenAI in some common scenarios.
| **File Name** | **Description** |
| ----------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
| [audioTranscription.ts][audiotranscription] | audio transcription. |
| [audioTranslation.ts][audiotranslation] | audio translation. |
| [batch.ts][batch] | create and retrieve batch content. |
| [chatCompletions.ts][chatcompletions] | get chat completions. |
| [codeInterpreter.ts][codeinterpreter] | interpreting code. |
| [completions.ts][completions] | get completions. |
| [embeddings.ts][embeddings] | generates embedding vectors from a prompt using Azure OpenAI Get Embeddings. |
| [images.ts][images] | generates images from prompts using Azure OpenAI Batch Image Generation. |
| [onYourData.ts][onyourdata] | chat completions with your own data. |
| [streamChatCompletions.ts][streamchatcompletions] | stream chat completions. |
| [streamChatCompletionsWithContentFilter.ts][streamchatcompletionswithcontentfilter] | stream chat completions and inspect content filter results. |
| [streamCompletions.ts][streamcompletions] | stream completions. |
| [textToSpeech.ts][texttospeech] | text to speech. |
| [toolCall.ts][toolcall] | get chat completions with functions. |
## Prerequisites
The sample programs are compatible with [LTS versions of Node.js](https://github.com/nodejs/release#release-schedule).
Before running the samples in Node, they must be compiled to JavaScript using the TypeScript compiler. For more information on TypeScript, see the [TypeScript documentation][typescript]. Install the TypeScript compiler using:
```bash
npm install -g typescript
```
You need [an Azure subscription][freesub] and the following Azure resources to run these sample programs:
- [Azure Cognitive Services instance][createinstance_azurecognitiveservicesinstance]
Samples retrieve credentials to access the service endpoint from environment variables. Alternatively, edit the source code to include the appropriate credentials. See each individual sample for details on which environment variables/credentials it requires to function.
Adapting the samples to run in the browser may require some additional consideration. For details, please see the [package README][package].
## Setup
To run the samples using the published version of the package:
1. Install the dependencies using `npm`:
```bash
npm install
```
2. Compile the samples:
```bash
npm run build
```
3. Edit the file `sample.env`, adding the correct credentials to access the Azure service and run the samples. Then rename the file from `sample.env` to just `.env`. The sample programs will read this file automatically.
4. Run whichever samples you like (note that some samples may require additional setup, see the table above):
```bash
node dist/audioTranscription.js
```
Alternatively, run a single sample with the correct environment variables set (setting up the `.env` file is not required if you do this), for example (cross-platform):
```bash
npx dev-tool run vendored cross-env AUDIO_FILE_PATH="<audio file path>" node dist/audioTranscription.js
```
## Next Steps
Take a look at our [API Documentation][apiref] for more information about the APIs that are available in the clients.
[audiotranscription]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/audioTranscription.ts
[audiotranslation]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/audioTranslation.ts
[batch]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/batch.ts
[chatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/chatCompletions.ts
[codeinterpreter]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/codeInterpreter.ts
[completions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/completions.ts
[embeddings]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/embeddings.ts
[images]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/images.ts
[onyourdata]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/onYourData.ts
[streamchatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/streamChatCompletions.ts
[streamchatcompletionswithcontentfilter]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/streamChatCompletionsWithContentFilter.ts
[streamcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/streamCompletions.ts
[texttospeech]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/textToSpeech.ts
[toolcall]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2/typescript/src/toolCall.ts
[apiref]: https://docs.microsoft.com/javascript/api/@azure/openai
[freesub]: https://azure.microsoft.com/free/
[createinstance_azurecognitiveservicesinstance]: https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource
[package]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai/README.md
[typescript]: https://www.typescriptlang.org/docs/home.html

Просмотреть файл

@ -0,0 +1,45 @@
{
"name": "@azure-samples/openai-ts",
"private": true,
"version": "1.0.0",
"description": "Azure OpenAI client library samples for TypeScript",
"engines": {
"node": ">=18.0.0"
},
"scripts": {
"build": "tsc",
"prebuild": "rimraf dist/"
},
"repository": {
"type": "git",
"url": "git+https://github.com/Azure/azure-sdk-for-js.git",
"directory": "sdk/openai/openai"
},
"keywords": [
"node",
"azure",
"cloud",
"typescript",
"browser",
"isomorphic",
"openai",
"ai"
],
"author": "Microsoft Corporation",
"license": "MIT",
"bugs": {
"url": "https://github.com/Azure/azure-sdk-for-js/issues"
},
"homepage": "https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai",
"dependencies": {
"@azure/openai": "latest",
"dotenv": "latest",
"openai": "^4.47.2",
"@azure/identity": "^4.2.0"
},
"devDependencies": {
"@types/node": "^18.0.0",
"typescript": "~5.6.2",
"rimraf": "latest"
}
}

Просмотреть файл

@ -0,0 +1,3 @@
# Used to authenticate with the Azure OpenAI. Retrieve these
# values from an Azure OpenAI instance in the Azure Portal.
AZURE_OPENAI_ENDPOINT="https://<resource name>.openai.azure.com"

Просмотреть файл

@ -0,0 +1,40 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to transcribe the content of an audio file.
 *
 * @summary audio transcription.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
import { createReadStream } from "fs";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
// You will need to set these environment variables or edit the following values
const audioFilePath = process.env["AUDIO_FILE_PATH"] || "<audio file path>";
export async function main(): Promise<void> {
  console.log("== Transcribe Audio Sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const credential = new DefaultAzureCredential();
  const tokenScope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, tokenScope);
  // The client is bound to the Whisper deployment, so `model` stays empty below.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "whisper-deployment",
    apiVersion: "2024-10-21",
  });
  // Upload the audio file as a stream and print the transcribed text.
  const transcription = await client.audio.transcriptions.create({
    model: "",
    file: createReadStream(audioFilePath),
  });
  console.log(`Transcription: ${transcription.text}`);
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,40 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to translate the content of an audio file.
 *
 * @summary audio translation.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
import { createReadStream } from "fs";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
// You will need to set these environment variables or edit the following values
const audioFilePath = process.env["AUDIO_FILE_PATH"] || "<audio file path>";
export async function main(): Promise<void> {
  console.log("== Translate Audio Sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const credential = new DefaultAzureCredential();
  const tokenScope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, tokenScope);
  // The client is bound to the Whisper deployment, so `model` stays empty below.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "whisper-deployment",
    apiVersion: "2024-10-21",
  });
  // Upload the audio file as a stream and print the translated text.
  const translation = await client.audio.translations.create({
    model: "",
    file: createReadStream(audioFilePath),
  });
  console.log(`Translation: ${translation.text}`);
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,62 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to use Azure OpenAI global batch deployment.
 *
 * @summary create and retrieve batch content.
 */
import { AzureOpenAI, toFile } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
export async function main() {
  console.log("== Batch Chat Completions Sample ==");
  // Uses a preview api-version here, unlike the stable 2024-10-21 samples.
  const apiVersion = "2024-09-01-preview";
  // Authenticate with Microsoft Entra ID instead of an API key.
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
  const deployment = "gpt-4-turbo";
  const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  // One JSONL line per request; `custom_id` correlates each result with its request.
  const batchContent = `{ "custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": { "model": "${deployment}", "messages": [{ "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is 2+2?" }] } }`;
  // Upload a file with "batch" purpose
  const file = await client.files.create({
    file: await toFile(Buffer.from(batchContent), "batch.jsonl"),
    purpose: "batch",
  });
  // Create the batch
  const batch = await client.batches.create({
    endpoint: "/v1/chat/completions",
    input_file_id: file.id,
    completion_window: "24h",
  });
  console.log(batch);
  // Checking batch status
  // NOTE(review): batches complete asynchronously — a single retrieve right
  // after creation may still report an in-progress status with no output file.
  const retrievedBatch = await client.batches.retrieve(batch.id);
  console.log(retrievedBatch);
  // Retrieve the batch output
  // On failure the results land in `error_file_id` instead of `output_file_id`;
  // the guard below handles the case where neither is available yet.
  const outputFileId = retrievedBatch.output_file_id ?? retrievedBatch.error_file_id;
  if (outputFileId) {
    const fileResponse = await client.files.content(outputFileId);
    const fileContent = await fileResponse.text();
    console.log(fileContent);
  }
  // Clean up file
  await client.files.del(file.id);
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,43 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to get chat completions for a chat context.
 *
 * @summary get chat completions.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
export async function main(): Promise<void> {
  console.log("== Chat Completions Sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const credential = new DefaultAzureCredential();
  const tokenScope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, tokenScope);
  // The client is bound to a deployment, so `model` stays empty in the request.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "gpt-35-turbo",
    apiVersion: "2024-10-21",
  });
  // A short conversation: system persona followed by alternating turns.
  const completion = await client.chat.completions.create({
    messages: [
      { role: "system", content: "You are a helpful assistant. You will talk like a pirate." },
      { role: "user", content: "Can you help me?" },
      { role: "assistant", content: "Arrrr! Of course, me hearty! What can I do for ye?" },
      { role: "user", content: "What's the best way to train a parrot?" },
    ],
    model: "",
  });
  for (const choice of completion.choices) {
    console.log(choice.message);
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,86 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to use the AOAI assistants API with the code interpreter tool:
 * create an assistant, post a question on a thread, run it to completion, and
 * print the resulting text/image content items.
 *
 * @summary interpreting code.
 */
import { AzureOpenAI } from "openai";
import { getBearerTokenProvider, DefaultAzureCredential } from "@azure/identity";
export async function main() {
  // Preview api-version; the assistants API is not part of the 2024-10-21 stable surface.
  const apiVersion = "2024-09-01-preview";
  // Create AzureOpenAI client with Microsoft Entra ID
  const credential = new DefaultAzureCredential();
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);
  const client = new AzureOpenAI({
    azureADTokenProvider,
    apiVersion,
  });
  // Create an assistant using code interpreter tool
  const assistant = await client.beta.assistants.create({
    tools: [{ type: "code_interpreter" }],
    model: "gpt-4-1106-preview",
    name: "JS CI Math Tutor",
    description: "Math Tutor for Math Problems",
    instructions: "You are a personal math tutor. Write and run code to answer math questions.",
    metadata: { foo: "bar" },
  });
  // Create a new thread
  const thread = await client.beta.threads.create();
  const question = "I need to solve the equation '3x + 11 = 14'. Can you help me?";
  const role = "user";
  // Create a message on the thread
  await client.beta.threads.messages.create(thread.id, {
    role,
    content: question,
  });
  // Start a new run with instructions.
  // `createAndPoll` creates the run and polls until it reaches a terminal status,
  // so `run` is never reassigned afterwards — declare it `const`.
  const instructions = "Please address the user as Jane Doe. The user has a premium account.";
  const run = await client.beta.threads.runs.createAndPoll(thread.id, {
    assistant_id: assistant.id,
    instructions,
  });
  // Check for potential error
  if (run.status === "failed") {
    throw new Error(run.last_error?.message);
  }
  // Retrieve the messages from the run.
  // Message content mixes text and image items; handle each content type.
  const runMessages = await client.beta.threads.messages.list(thread.id);
  for await (const runMessageDatum of runMessages) {
    for (const item of runMessageDatum.content) {
      switch (item.type) {
        case "text": {
          console.log(`${runMessageDatum.role}: ${item.text.value}`);
          break;
        }
        case "image_file": {
          console.log(`Received image: ${item.image_file.file_id}`);
          break;
        }
        case "image_url": {
          console.log(`Received image: ${item.image_url.url}`);
          break;
        }
        default: {
          console.log(`Unhandled item type: ${item.type}`);
        }
      }
    }
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,37 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to get completions for the provided prompt.
 *
 * @summary get completions.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
// The prompt(s) to send to the legacy completions endpoint.
const prompt = ["What is Azure OpenAI?"];
export async function main(): Promise<void> {
  console.log("== Get completions Sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const credential = new DefaultAzureCredential();
  const tokenScope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, tokenScope);
  // The client is bound to a deployment, so `model` stays empty in the request.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "text-davinci-003",
    apiVersion: "2024-10-21",
  });
  const completions = await client.completions.create({
    prompt,
    model: "",
    max_tokens: 128,
  });
  for (const { text } of completions.choices) {
    console.log(text);
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,38 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to get embedding vectors for a piece of text using Azure OpenAI.
 *
 * @summary generates embedding vectors from a prompt using Azure OpenAI Get Embeddings.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
// The prompt to generate the embeddings vector
const input = ["This is the sample text to be embedded"];
export async function main(): Promise<void> {
  console.log("== Get embeddings sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const credential = new DefaultAzureCredential();
  const tokenScope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, tokenScope);
  // The client is bound to an embeddings deployment, so `model` stays empty.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "text-embedding-3-large",
    apiVersion: "2024-10-21",
  });
  const response = await client.embeddings.create({ input, model: "" });
  for (const { embedding } of response.data) {
    console.log(`The embedding values are ${embedding}`);
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,42 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to generate images from prompts using Azure OpenAI Batch Image Generation.
 *
 * @summary generates images from prompts using Azure OpenAI Batch Image Generation.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
// The prompt to generate images from
const imagePrompt = "a monkey eating a banana";
const imageSize = "1024x1024";
// The number of images to generate
const imageCount = 1;
export async function main(): Promise<void> {
  console.log("== Batch Image Generation ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const credential = new DefaultAzureCredential();
  const tokenScope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, tokenScope);
  // The client is bound to the DALL-E deployment, so `model` stays empty.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "dall-e-3",
    apiVersion: "2024-10-21",
  });
  const results = await client.images.generate({
    prompt: imagePrompt,
    model: "",
    n: imageCount,
    size: imageSize,
  });
  for (const image of results.data) {
    console.log(`Image generation result URL: ${image.url}`);
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,65 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to use Azure's Bring Your Own Data with Azure OpenAI Chat Completions.
 *
 * @summary chat completions with your own data.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Type augmentation that adds Azure-specific request options, such as
// `data_sources` below, to the openai client's chat completion types.
import "@azure/openai/types";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
// Your Azure Cognitive Search endpoint, and index name
const azureSearchEndpoint = process.env["AZURE_SEARCH_ENDPOINT"] || "<search endpoint>";
const azureSearchIndexName = process.env["AZURE_SEARCH_INDEX"] || "<search index>";
export async function main() {
  console.log("== On Your Data Sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
  const deployment = "gpt-4-1106-preview";
  const apiVersion = "2024-10-21";
  const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  const events = await client.chat.completions.create({
    stream: true,
    messages: [
      {
        role: "user",
        content:
          "What's the most common feedback we received from our customers about the product?",
      },
    ],
    max_tokens: 128,
    // The client is bound to a deployment, so `model` is left empty.
    model: "",
    // Azure-exclusive option: ground the completion on an Azure Search index.
    data_sources: [
      {
        type: "azure_search",
        parameters: {
          endpoint: azureSearchEndpoint,
          index_name: azureSearchIndexName,
          // Access the search service via the resource's system-assigned
          // managed identity; no search API key is configured here.
          authentication: {
            type: "system_assigned_managed_identity",
          },
        },
      },
    ],
  });
  // Print the streamed answer as delta chunks arrive.
  for await (const event of events) {
    for (const choice of event.choices) {
      console.log(choice.delta?.content);
    }
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,47 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to stream chat completions for a chat context.
 *
 * @summary stream chat completions.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
export async function main(): Promise<void> {
  console.log("== Streaming Chat Completions Sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const credential = new DefaultAzureCredential();
  const tokenScope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, tokenScope);
  // The client is bound to a deployment, so `model` stays empty in the request.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "gpt-35-turbo",
    apiVersion: "2024-10-21",
  });
  // `stream: true` returns an async iterable of chat completion chunks.
  const stream = await client.chat.completions.create({
    messages: [
      { role: "system", content: "You are a helpful assistant. You will talk like a pirate." },
      { role: "user", content: "Can you help me?" },
      { role: "assistant", content: "Arrrr! Of course, me hearty! What can I do for ye?" },
      { role: "user", content: "What's the best way to train a parrot?" },
    ],
    model: "",
    max_tokens: 128,
    stream: true,
  });
  for await (const chunk of stream) {
    for (const choice of chunk.choices) {
      console.log(choice.delta?.content);
    }
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,71 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to get completions for the provided prompt and parse output for content filter
 *
 * @summary stream chat completions and inspect Azure content filter results.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Type augmentation that adds Azure-specific fields, such as
// `content_filter_results` on each streamed choice, to the openai types.
import "@azure/openai/types";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
export async function main() {
  console.log("== Streaming Chat Completions Sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const scope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
  const deployment = "gpt-35-turbo";
  const apiVersion = "2024-10-21";
  const client = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  // `stream: true` yields an async iterable of chunks; each chunk's choices
  // may carry content-filter annotations alongside the delta text.
  const events = await client.chat.completions.create({
    messages: [
      { role: "system", content: "You are a helpful assistant. You will talk like a pirate." },
      { role: "user", content: "Can you help me?" },
      { role: "assistant", content: "Arrrr! Of course, me hearty! What can I do for ye?" },
      { role: "user", content: "What's the best way to train a parrot?" },
    ],
    model: "",
    max_tokens: 128,
    stream: true,
  });
  for await (const event of events) {
    for (const choice of event.choices) {
      console.log(`Chunk: ${choice.delta?.content}`);
      // Filter results may be absent on some chunks; skip those.
      const filterResults = choice.content_filter_results;
      if (!filterResults) {
        continue;
      }
      if (filterResults.error) {
        // Filtering errors are reported inline rather than failing the stream.
        console.log(
          `\tContent filter ran into an error ${filterResults.error.code}: ${filterResults.error.message}`,
        );
      } else {
        // Each category reports whether it triggered filtering and at what severity.
        const { hate, sexual, self_harm, violence } = filterResults;
        console.log(
          `\tHate category is filtered: ${hate?.filtered}, with ${hate?.severity} severity`,
        );
        console.log(
          `\tSexual category is filtered: ${sexual?.filtered}, with ${sexual?.severity} severity`,
        );
        console.log(
          `\tSelf-harm category is filtered: ${self_harm?.filtered}, with ${self_harm?.severity} severity`,
        );
        console.log(
          `\tViolence category is filtered: ${violence?.filtered}, with ${violence?.severity} severity`,
        );
      }
    }
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,44 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to stream completions for the provided prompt.
 *
 * @summary stream completions.
 */
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
// The prompt(s) to complete; the service streams the result back chunk by chunk.
const prompt = ["What is Azure OpenAI?"];
export async function main(): Promise<void> {
  console.log("== Stream Completions Sample ==");
  // Authenticate with Microsoft Entra ID instead of an API key.
  const credential = new DefaultAzureCredential();
  const tokenScope = "https://cognitiveservices.azure.com/.default";
  const azureADTokenProvider = getBearerTokenProvider(credential, tokenScope);
  // The client is bound to a deployment, so `model` stays empty in the request.
  const client = new AzureOpenAI({
    azureADTokenProvider,
    deployment: "text-davinci-003",
    apiVersion: "2024-10-21",
  });
  // `stream: true` returns an async iterable of completion chunks.
  const stream = await client.completions.create({
    prompt,
    model: "",
    max_tokens: 128,
    stream: true,
  });
  for await (const chunk of stream) {
    for (const choice of chunk.choices) {
      console.log(choice.text);
    }
  }
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,49 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
 * Demonstrates how to convert text into speech.
 *
 * @summary text to speech.
 */
// openai Node shims so the speech response body below can be passed to
// fs `writeFile` as a Node.js stream.
import "openai/shims/node";
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
import { writeFile } from "fs/promises";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
// You will need to set these environment variables or edit the following values
const speechFilePath = process.env["SPEECH_FILE_PATH"] || "<path to save the speech file>";
// Corresponds to your Model deployment within your OpenAI resource
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = "tts";
// Uses a preview api-version here, unlike the other samples that use 2024-10-21.
const apiVersion = "2024-09-01-preview";
// Credential and token provider are created once at module load and reused
// across calls; authentication is via Microsoft Entra ID, not an API key.
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(credential, scope);
export async function main() {
  console.log("== Text to Speech Sample ==");
  const openai = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  // Synthesize the input text with the "alloy" voice.
  const response = await openai.audio.speech.create({
    model: deployment,
    voice: "alloy",
    input: "the quick brown chicken jumped over the lazy dogs",
  });
  // Write the response body to the target file.
  const stream = response.body;
  console.log(`Streaming response to ${speechFilePath}`);
  await writeFile(speechFilePath, stream);
  console.log("Finished streaming");
}
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,63 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
/**
* Demonstrates how to define and call functions with chat completions.
*
* @summary get chat completions with functions.
*/
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";
const getCurrentWeather = {
name: "get_current_weather",
description: "Get the current weather in a given location",
parameters: {
type: "object",
properties: {
location: {
type: "string",
description: "The city and state, e.g. San Francisco, CA",
},
unit: {
type: "string",
enum: ["celsius", "fahrenheit"],
},
},
required: ["location"],
},
};
/**
 * Runs the tool-calling sample: asks the model about the weather and prints
 * the tool calls it proposes for each returned choice.
 */
export async function main() {
  console.log("== Chat Completions Sample with Tool Calling ==");
  const scope = "https://cognitiveservices.azure.com/.default";
  const credential = new DefaultAzureCredential();
  const azureADTokenProvider = getBearerTokenProvider(credential, scope);
  const deployment = "gpt-4-turbo";
  const apiVersion = "2024-10-21";
  const openai = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  const completion = await openai.chat.completions.create({
    messages: [{ role: "user", content: "What's the weather like in Boston?" }],
    model: "",
    tools: [
      {
        type: "function",
        function: getCurrentWeather,
      },
    ],
  });
  completion.choices.forEach((choice) => console.log(choice.message?.tool_calls));
}
// Invoke the sample and surface any failure on stderr (keeps the process exit visible).
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

Просмотреть файл

@ -0,0 +1,17 @@
{
"compilerOptions": {
"target": "ES2020",
"module": "commonjs",
"moduleResolution": "node",
"resolveJsonModule": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"strict": true,
"alwaysStrict": true,
"outDir": "dist",
"rootDir": "src"
},
"include": [
"src/**/*.ts"
]
}

Просмотреть файл

@ -1,8 +1,6 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
type OnYourDataContextProperty = "citations" | "intent" | "all_retrieved_documents";
/**
* A specific representation of configurable options for Azure Search when using it as an Azure OpenAI chat
* extension.
@ -27,27 +25,13 @@ export interface AzureSearchChatExtensionParameters {
* If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential)
* authentication.
*/
authentication?: OnYourDataAuthenticationOptions;
authentication: OnYourDataAuthenticationOptions;
/** The configured top number of documents to feature for the configured query. */
top_n_documents?: number;
/** Whether queries should be restricted to use of indexed data. */
in_scope?: boolean;
/** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */
strictness?: number;
/** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */
role_information?: string;
/**
* The max number of rewritten queries should be send to search provider for one user message. If not specified,
* the system will decide the number of queries to send.
*/
max_search_queries?: number;
/**
* If specified as true, the system will allow partial search results to be used and the request fails if all the queries fail.
* If not specified, or specified as false, the request will fail if any search query fails.
*/
allow_partial_result?: boolean;
/** The included properties of the output context. If not specified, the default value is `citations` and `intent`. */
include_contexts?: OnYourDataContextProperty[];
/** The absolute endpoint path for the Azure Cognitive Search resource to use. */
endpoint: string;
/** The name of the index to use as available in the referenced Azure Cognitive Search resource. */
@ -82,94 +66,6 @@ export interface AzureSearchIndexFieldMappingOptions {
content_fields_separator?: string;
/** The names of fields that represent vector data. */
vector_fields?: string[];
/** The names of fields that represent image vector data. */
image_vector_fields?: string[];
}
/**
 * A specific representation of configurable options for Elasticsearch when using it as an Azure OpenAI chat
 * extension.
 */
export interface ElasticsearchChatExtensionConfiguration
  extends AzureChatExtensionConfigurationParent {
  /**
   * The type label (discriminator) to use when configuring Azure OpenAI chat extensions. This should typically
   * not be changed from its default value for Elasticsearch®.
   */
  type: "elasticsearch";
  /** The parameters to use when configuring Elasticsearch®. */
  parameters: ElasticsearchChatExtensionParameters;
}
/** Parameters to use when configuring Elasticsearch® as an Azure OpenAI chat extension. The supported authentication types are KeyAndKeyId and EncodedAPIKey. */
export interface ElasticsearchChatExtensionParameters {
  /**
   * The authentication method to use when accessing the defined data source.
   * Each data source type supports a specific set of available authentication methods; please see the documentation of
   * the data source for supported mechanisms.
   * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential)
   * authentication.
   */
  authentication?: OnYourDataAuthenticationOptions;
  /** The configured top number of documents to feature for the configured query. */
  top_n_documents?: number;
  /** Whether queries should be restricted to use of indexed data. */
  in_scope?: boolean;
  /** The configured strictness of the search relevance filtering. The higher the strictness, the higher the precision but the lower the recall of the answer. */
  strictness?: number;
  /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */
  role_information?: string;
  /**
   * The max number of rewritten queries that should be sent to the search provider for one user message. If not specified,
   * the system will decide the number of queries to send.
   */
  max_search_queries?: number;
  /**
   * If specified as true, the system will allow partial search results to be used, and the request fails only if all the queries fail.
   * If not specified, or specified as false, the request will fail if any search query fails.
   */
  allow_partial_result?: boolean;
  /** The included properties of the output context. If not specified, the default value is `citations` and `intent`. */
  include_contexts?: OnYourDataContextProperty[];
  /** The endpoint of Elasticsearch®. */
  endpoint: string;
  /** The index name of Elasticsearch®. */
  index_name: string;
  /** The index field mapping options of Elasticsearch®. */
  fields_mapping?: ElasticsearchIndexFieldMappingOptions;
  /**
   * The query type of Elasticsearch®.
   *
   * Possible values: "simple", "vector"
   */
  query_type?: string;
  /** The embedding dependency for vector search. */
  embedding_dependency?: OnYourDataVectorizationSource;
}
/** Optional settings to control how index fields are mapped to citation properties when using a configured Elasticsearch® resource. */
export interface ElasticsearchIndexFieldMappingOptions {
  /** The name of the index field to use as a title. */
  title_field?: string;
  /** The name of the index field to use as a URL. */
  url_field?: string;
  /** The name of the index field to use as a filepath. */
  filepath_field?: string;
  /** The names of index fields that should be treated as content. */
  content_fields?: string[];
  /** The separator pattern that content fields should use. */
  content_fields_separator?: string;
  /** The names of fields that represent vector data. */
  vector_fields?: string[];
}
/** The authentication options for Azure OpenAI On Your Data when using an access token. */
export interface OnYourDataAccessTokenAuthenticationOptions
  extends OnYourDataAuthenticationOptionsParent {
  /** The authentication type discriminator. Always 'access_token' for this variant. */
  type: "access_token";
  /** The access token to use for authentication. */
  access_token: string;
}
/**
@ -199,27 +95,13 @@ export interface AzureCosmosDBChatExtensionParameters {
* If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential)
* authentication.
*/
authentication?: OnYourDataAuthenticationOptions;
authentication: OnYourDataAuthenticationOptions;
/** The configured top number of documents to feature for the configured query. */
top_n_documents?: number;
/** Whether queries should be restricted to use of indexed data. */
in_scope?: boolean;
/** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */
strictness?: number;
/** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */
role_information?: string;
/**
* The max number of rewritten queries should be send to search provider for one user message. If not specified,
* the system will decide the number of queries to send.
*/
max_search_queries?: number;
/**
* If specified as true, the system will allow partial search results to be used and the request fails if all the queries fail.
* If not specified, or specified as false, the request will fail if any search query fails.
*/
allow_partial_result?: boolean;
/** The included properties of the output context. If not specified, the default value is `citations` and `intent`. */
include_contexts?: OnYourDataContextProperty[];
/** The MongoDB vCore database name to use with Azure Cosmos DB. */
database_name: string;
/** The name of the Azure Cosmos DB resource container. */
@ -278,34 +160,10 @@ export interface OnYourDataConnectionStringAuthenticationOptions
*/
export interface OnYourDataDeploymentNameVectorizationSource
  extends OnYourDataVectorizationSourceParent {
  /** The type of vectorization source to use. Always 'deployment_name' for this type. */
  type: "deployment_name";
  /** The embedding model deployment name within the same Azure OpenAI resource. This enables you to use vector search without Azure OpenAI api-key and without Azure OpenAI public network access. */
  deployment_name: string;
  /** The number of dimensions the embeddings should have. Only supported in `text-embedding-3` and later models. */
  dimensions?: number;
}
/** The authentication options for Azure OpenAI On Your Data when using an Elasticsearch encoded API key. */
export interface OnYourDataEncodedApiKeyAuthenticationOptions
  extends OnYourDataAuthenticationOptionsParent {
  /** The authentication type discriminator. Always 'encoded_api_key' for this variant. */
  type: "encoded_api_key";
  /** The encoded API key to use for authentication. */
  encoded_api_key: string;
}
/**
 * The authentication options for Azure OpenAI On Your Data when using a username and password.
 */
export interface OnYourDataUsernameAndPasswordAuthenticationOptions
  extends OnYourDataAuthenticationOptionsParent {
  /** The discriminator type for username and password. Always 'username_and_password'. */
  type: "username_and_password";
  /** The username to use for authentication. */
  username: string;
  /** The password to use for authentication. */
  password: string;
}
/**
@ -313,34 +171,12 @@ export interface OnYourDataUsernameAndPasswordAuthenticationOptions
* on a public Azure OpenAI endpoint call for embeddings.
*/
export interface OnYourDataEndpointVectorizationSource extends OnYourDataVectorizationSourceParent {
  /** The type of vectorization source to use. Always 'endpoint' for this type. */
  type: "endpoint";
  /** Specifies the resource endpoint URL from which embeddings should be retrieved. It should be in the format of https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/embeddings. The api-version query parameter is not allowed. */
  endpoint: string;
  /** Specifies the authentication options to use when retrieving embeddings from the specified endpoint. */
  authentication: OnYourDataVectorSearchAuthenticationOptions;
}
/** The authentication options for Azure OpenAI On Your Data when using an Elasticsearch key and key ID pair. */
export interface OnYourDataKeyAndKeyIdAuthenticationOptions
  extends OnYourDataAuthenticationOptionsParent {
  /** The authentication type discriminator. Always 'key_and_key_id' for this variant. */
  type: "key_and_key_id";
  /** The key to use for authentication. */
  key: string;
  /** The key ID to use for authentication. */
  key_id: string;
}
/**
* The details of a a vectorization source, used by Azure OpenAI On Your Data when applying vector search, that is based
* on a search service model ID. Currently only supported by Elasticsearch®.
*/
export interface OnYourDataModelIdVectorizationSource extends OnYourDataVectorizationSourceParent {
/** The type of vectorization source to use. Always 'ModelId' for this type. */
type: "model_id";
/** The embedding model ID build inside the search service. Currently only supported by Elasticsearch®. */
model_id: string;
authentication: OnYourDataApiKeyAuthenticationOptions;
}
/** The authentication options for Azure OpenAI On Your Data when using a system-assigned managed identity. */
@ -375,207 +211,11 @@ export interface AzureChatExtensionConfigurationParent {
type: string;
}
/**
 * A specific representation of configurable options for Pinecone when using it as an Azure OpenAI chat
 * extension.
 */
export interface PineconeChatExtensionConfiguration extends AzureChatExtensionConfigurationParent {
  /**
   * The type label (discriminator) to use when configuring Azure OpenAI chat extensions. This should typically
   * not be changed from its default value for Pinecone.
   */
  type: "pinecone";
  /** The parameters to use when configuring Azure OpenAI chat extensions. */
  parameters: PineconeChatExtensionParameters;
}
/** Parameters for configuring Azure OpenAI Pinecone chat extensions. The supported authentication type is APIKey. */
export interface PineconeChatExtensionParameters {
  /**
   * The authentication method to use when accessing the defined data source.
   * Each data source type supports a specific set of available authentication methods; please see the documentation of
   * the data source for supported mechanisms.
   * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential)
   * authentication.
   */
  authentication?: OnYourDataAuthenticationOptions;
  /** The configured top number of documents to feature for the configured query. */
  top_n_documents?: number;
  /** Whether queries should be restricted to use of indexed data. */
  in_scope?: boolean;
  /** The configured strictness of the search relevance filtering. The higher the strictness, the higher the precision but the lower the recall of the answer. */
  strictness?: number;
  /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */
  role_information?: string;
  /**
   * The max number of rewritten queries that should be sent to the search provider for one user message. If not specified,
   * the system will decide the number of queries to send.
   */
  max_search_queries?: number;
  /**
   * If specified as true, the system will allow partial search results to be used, and the request fails only if all the queries fail.
   * If not specified, or specified as false, the request will fail if any search query fails.
   */
  allow_partial_result?: boolean;
  /** The included properties of the output context. If not specified, the default value is `citations` and `intent`. */
  include_contexts?: OnYourDataContextProperty[];
  /** The environment name of Pinecone. */
  environment: string;
  /** The name of the Pinecone database index. */
  index_name: string;
  /** Customized field mapping behavior to use when interacting with the search index. */
  fields_mapping: PineconeFieldMappingOptions;
  /** The embedding dependency for vector search. */
  embedding_dependency: OnYourDataVectorizationSource;
}
/** Optional settings to control how index fields are mapped to citation properties when using a configured Pinecone resource. */
export interface PineconeFieldMappingOptions {
  /** The name of the index field to use as a title. */
  title_field?: string;
  /** The name of the index field to use as a URL. */
  url_field?: string;
  /** The name of the index field to use as a filepath. */
  filepath_field?: string;
  /** The names of index fields that should be treated as content. */
  content_fields: string[];
  /** The separator pattern that content fields should use. */
  content_fields_separator?: string;
}
/**
 * A specific representation of configurable options for Mongo DB when using it as an Azure OpenAI chat
 * extension.
 */
export interface MongoDBChatExtensionConfiguration extends AzureChatExtensionConfigurationParent {
  /**
   * The type label (discriminator) to use when configuring Azure OpenAI chat extensions. This should typically
   * not be changed from its default value for Azure Cosmos DB.
   */
  type: "mongo_db";
  /** The parameters to use when configuring Azure OpenAI CosmosDB chat extensions. */
  parameters: MongoDBChatExtensionParameters;
}
/**
 * Parameters to use when configuring Azure OpenAI On Your Data chat extensions when using Mongo DB.
 * The supported authentication type is ConnectionString.
 */
export interface MongoDBChatExtensionParameters {
  /**
   * The authentication method to use when accessing the defined data source.
   * Each data source type supports a specific set of available authentication methods; please see the documentation of
   * the data source for supported mechanisms.
   * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential)
   * authentication.
   */
  authentication?: OnYourDataUsernameAndPasswordAuthenticationOptions;
  /** The configured top number of documents to feature for the configured query. */
  top_n_documents?: number;
  /** Whether queries should be restricted to use of indexed data. */
  in_scope?: boolean;
  /** The configured strictness of the search relevance filtering. The higher the strictness, the higher the precision but the lower the recall of the answer. */
  strictness?: number;
  /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */
  role_information?: string;
  /**
   * The max number of rewritten queries that should be sent to the search provider for one user message. If not specified,
   * the system will decide the number of queries to send.
   */
  max_search_queries?: number;
  /**
   * If specified as true, the system will allow partial search results to be used, and the request fails only if all the queries fail.
   * If not specified, or specified as false, the request will fail if any search query fails.
   */
  allow_partial_result?: boolean;
  /** The included properties of the output context. If not specified, the default value is `citations` and `intent`. */
  include_contexts?: OnYourDataContextProperty[];
  /** The endpoint name for MongoDB. */
  endpoint: string;
  /** The collection name for MongoDB. */
  collection_name: string;
  /** The database name for MongoDB. */
  database_name: string;
  /** The app name for MongoDB. */
  app_name: string;
  /** The name of the MongoDB index. */
  index_name: string;
  /** Customized field mapping behavior to use when interacting with the search index. */
  fields_mapping: MongoDBFieldMappingOptions;
  /** The embedding dependency for vector search. */
  embedding_dependency:
    | OnYourDataEndpointVectorizationSource
    | OnYourDataDeploymentNameVectorizationSource;
}
/** Optional settings to control how index fields are mapped to citation properties when using a configured MongoDB resource. */
export interface MongoDBFieldMappingOptions {
  /** The name of the index field to use as a title. */
  title_field?: string;
  /** The name of the index field to use as a URL. */
  url_field?: string;
  /** The name of the index field to use as a filepath. */
  filepath_field?: string;
  /** The names of index fields that should be treated as content. */
  content_fields: string[];
  /** The separator pattern that content fields should use. */
  content_fields_separator?: string;
  /** The names of fields that represent vector data. */
  vector_fields: string[];
}
/**
 * The authentication options for Azure OpenAI On Your Data vector search.
 * Base shape; concrete variants narrow `type` to a specific discriminator value.
 */
export interface OnYourDataVectorSearchAuthenticationOptions {
  /** The type of authentication to use. */
  type: string;
}
/**
 * The authentication options for Azure OpenAI On Your Data vector search when using an API key.
 */
export interface OnYourDataVectorSearchApiKeyAuthenticationOptions
  extends OnYourDataVectorSearchAuthenticationOptions {
  /** The authentication type discriminator. Always 'api_key' for this variant. */
  type: "api_key";
  /** The API key to use for authentication. */
  key: string;
}
/**
 * The authentication options for Azure OpenAI On Your Data vector search when using an access token.
 */
export interface OnYourDataVectorSearchAccessTokenAuthenticationOptions
  extends OnYourDataVectorSearchAuthenticationOptions {
  /** The authentication type discriminator. Always 'access_token' for this variant. */
  type: "access_token";
  /** The access token to use for authentication. */
  access_token: string;
}
/**
 * Represents the integrated vectorizer defined within the search resource.
 */
export interface OnYourDataIntegratedVectorizationSource
  extends OnYourDataVectorizationSourceParent {
  /** The type discriminator. Always 'integrated'. */
  type: "integrated";
}
/** The authentication options for Azure OpenAI On Your Data. Discriminated union on the `type` property. */
export type OnYourDataAuthenticationOptions =
  | OnYourDataAuthenticationOptionsParent
  | OnYourDataApiKeyAuthenticationOptions
  | OnYourDataConnectionStringAuthenticationOptions
  | OnYourDataKeyAndKeyIdAuthenticationOptions
  | OnYourDataEncodedApiKeyAuthenticationOptions
  | OnYourDataAccessTokenAuthenticationOptions
  | OnYourDataSystemAssignedManagedIdentityAuthenticationOptions
  | OnYourDataUserAssignedManagedIdentityAuthenticationOptions;
@ -583,9 +223,7 @@ export type OnYourDataAuthenticationOptions =
export type OnYourDataVectorizationSource =
| OnYourDataVectorizationSourceParent
| OnYourDataEndpointVectorizationSource
| OnYourDataDeploymentNameVectorizationSource
| OnYourDataModelIdVectorizationSource
| OnYourDataIntegratedVectorizationSource;
| OnYourDataDeploymentNameVectorizationSource;
/**
* A representation of configuration data for a single Azure OpenAI chat extension. This will be used by a chat
@ -595,7 +233,4 @@ export type OnYourDataVectorizationSource =
export type AzureChatExtensionConfiguration =
| AzureChatExtensionConfigurationParent
| AzureSearchChatExtensionConfiguration
| AzureCosmosDBChatExtensionConfiguration
| ElasticsearchChatExtensionConfiguration
| PineconeChatExtensionConfiguration
| MongoDBChatExtensionConfiguration;
| AzureCosmosDBChatExtensionConfiguration;

Просмотреть файл

@ -32,8 +32,6 @@ export interface ContentFilterResultsForChoiceOutput {
self_harm?: ContentFilterResultOutput;
/** Describes whether profanity was detected. */
profanity?: ContentFilterDetectionResultOutput;
/** Describes detection results against configured custom blocklists. */
custom_blocklists?: ContentFilterDetailedResults;
/**
* Describes an error returned if the content filtering system is
* down or otherwise unable to complete the operation in time.
@ -45,15 +43,6 @@ export interface ContentFilterResultsForChoiceOutput {
protected_material_code?: ContentFilterCitedDetectionResultOutput;
}
/** Represents a structured collection of result details for content filtering against custom blocklists. */
export interface ContentFilterDetailedResults {
  /** A value indicating whether or not the content has been filtered. */
  filtered: boolean;
  /** The collection of detailed blocklist result information, one entry per evaluated blocklist. */
  details: ContentFilterBlocklistIdResultOutput[];
}
/** Represents the outcome of a detection operation against protected resources as performed by content filtering. */
export interface ContentFilterCitedDetectionResultOutput {
/** A value indicating whether or not the content has been filtered. */
@ -66,70 +55,6 @@ export interface ContentFilterCitedDetectionResultOutput {
license?: string;
}
/**
 * A representation of the additional context information available when Azure OpenAI chat extensions are involved
 * in the generation of a corresponding chat completions response. This context information is only populated when
 * using an Azure OpenAI request configured to use a matching extension.
 */
export interface AzureChatExtensionsMessageContextOutput {
  /**
   * The contextual information associated with the Azure chat extensions used for a chat completions request.
   * These messages describe the data source retrievals, plugin invocations, and other intermediate steps taken in the
   * course of generating a chat completions response that was augmented by capabilities from Azure OpenAI chat
   * extensions.
   */
  citations?: Array<AzureChatExtensionDataSourceResponseCitationOutput>;
  /** The detected intent from the chat history, used to pass to the next turn to carry over the context. */
  intent?: string;
  /** All the retrieved documents. Only populated when `all_retrieved_documents` is requested in the context. */
  all_retrieved_documents?: Array<AzureChatExtensionRetrievedDocument>;
}
/** A single instance of additional context information available when Azure OpenAI chat extensions are involved
 * in the generation of a corresponding chat completions response. This context information is only populated when
 * using an Azure OpenAI request configured to use a matching extension.
 */
export interface AzureChatExtensionDataSourceResponseCitation {
  /** The content of the citation. */
  content: string;
  /** The title of the citation. */
  title?: string;
  /** The URL of the citation. */
  url?: string;
  /** The file path of the citation. */
  filepath?: string;
  /** The chunk ID of the citation. */
  chunk_id?: string;
  /** The rerank score of the retrieved document. */
  rerank_score?: number;
}
/** The retrieved document. Extends a citation with retrieval metadata. */
export interface AzureChatExtensionRetrievedDocument
  extends AzureChatExtensionDataSourceResponseCitation {
  /** The search queries used to retrieve the document. */
  search_queries: string[];
  /** The index of the data source the document came from. */
  data_source_index: number;
  /** The original search score of the retrieved document. */
  original_search_score?: number;
  /** Represents the rationale for filtering the document. If the document does not undergo filtering,
   * this field will remain unset.
   */
  filter_reason?: AzureChatExtensionRetrieveDocumentFilterReason;
}
/** The reason for filtering the retrieved document: either its search score or its rerank score. */
export type AzureChatExtensionRetrieveDocumentFilterReason = "score" | "rerank";
/** Content filtering results for a single prompt in the request. */
export interface ContentFilterResultsForPromptOutput {
/** The index of this prompt in the set of prompt results */
@ -167,8 +92,6 @@ export interface ContentFilterResultDetailsForPromptOutput {
self_harm?: ContentFilterResultOutput;
/** Describes whether profanity was detected. */
profanity?: ContentFilterDetectionResultOutput;
/** Describes detection results against configured custom blocklists. */
custom_blocklists?: ContentFilterDetailedResults;
/**
* Describes an error returned if the content filtering system is
* down or otherwise unable to complete the operation in time.
@ -176,8 +99,6 @@ export interface ContentFilterResultDetailsForPromptOutput {
error?: ErrorModel;
/** Whether a jailbreak attempt was detected in the prompt. */
jailbreak?: ContentFilterDetectionResultOutput;
/** Whether an indirect attack was detected in the prompt. */
indirect_attack?: ContentFilterDetectionResultOutput;
}
/** Information about filtered content severity level and if it has been filtered or not. */
@ -200,12 +121,21 @@ export interface ContentFilterDetectionResultOutput {
detected: boolean;
}
/** Represents the outcome of an evaluation against a custom blocklist as performed by content filtering. */
export interface ContentFilterBlocklistIdResultOutput {
/** The ID of the custom blocklist evaluated. */
id: string;
/** A value indicating whether or not the content has been filtered. */
filtered: boolean;
/**
* A representation of the additional context information available when Azure OpenAI chat extensions are involved
* in the generation of a corresponding chat completions response. This context information is only populated when
* using an Azure OpenAI request configured to use a matching extension.
*/
export interface AzureChatExtensionsMessageContextOutput {
/**
* The contextual information associated with the Azure chat extensions used for a chat completions request.
* These messages describe the data source retrievals, plugin invocations, and other intermediate steps taken in the
* course of generating a chat completions response that was augmented by capabilities from Azure OpenAI chat
* extensions.
*/
citations?: Array<AzureChatExtensionDataSourceResponseCitationOutput>;
/** The detected intent from the chat history, used to pass to the next turn to carry over the context. */
intent?: string;
}
/**
@ -288,6 +218,4 @@ export interface ImageGenerationPromptFilterResults {
profanity?: ContentFilterDetectionResultOutput;
/** Whether a jailbreak attempt was detected in the prompt. */
jailbreak?: ContentFilterDetectionResultOutput;
/** Information about customer block lists and if something was detected the associated list ID. */
custom_blocklists?: ContentFilterDetailedResults;
}

Просмотреть файл

@ -4,14 +4,12 @@ import { getImageDimensionsFromResponse, getImageDimensionsFromString } from "./
import {
AzureChatExtensionDataSourceResponseCitationOutput,
AzureChatExtensionsMessageContextOutput,
ContentFilterBlocklistIdResultOutput,
ContentFilterCitedDetectionResultOutput,
ContentFilterDetectionResultOutput,
ContentFilterResultOutput,
ContentFilterResultDetailsForPromptOutput,
ContentFilterResultsForChoiceOutput,
ContentFilterResultsForPromptOutput,
ContentFilterDetailedResults,
} from "../../../src/types/index.js";
import { Assistant, AssistantCreateParams } from "openai/resources/beta/assistants.mjs";
import {
@ -157,7 +155,6 @@ function assertContentFilterResultsForChoice(cfr: ContentFilterResultsForChoiceO
ifDefined(cfr.sexual, assertContentFilterResult);
ifDefined(cfr.violence, assertContentFilterResult);
ifDefined(cfr.profanity, assertContentFilterDetectionResult);
ifDefined(cfr.custom_blocklists, assertContentFilterDetailedResult);
ifDefined(cfr.protected_material_code, assertContentFilterCitedDetectionResult);
ifDefined(cfr.protected_material_text, assertContentFilterDetectionResult);
}
@ -196,7 +193,6 @@ function assertContentFilterResultDetailsForPrompt(
ifDefined(cfr.violence, assertContentFilterResult);
ifDefined(cfr.profanity, assertContentFilterDetectionResult);
ifDefined(cfr.jailbreak, assertContentFilterDetectionResult);
ifDefined(cfr.custom_blocklists, assertContentFilterDetailedResult);
}
}
@ -216,19 +212,6 @@ function assertContentFilterDetectionResult(val: ContentFilterDetectionResultOut
assert.isBoolean(val.filtered);
}
/** Validates the shape of a detailed content-filter (blocklist) result. */
function assertContentFilterDetailedResult(val: ContentFilterDetailedResults): void {
  assert.isBoolean(val.filtered);
  // TODO: Update the corresponding types once the Swagger is updated
  ifDefined(val.details, (list) =>
    assertNonEmptyArray(list, assertContentFilterBlocklistIdResult),
  );
}
/** Validates a single blocklist evaluation entry: its blocklist ID and filtered flag. */
function assertContentFilterBlocklistIdResult(val: ContentFilterBlocklistIdResultOutput): void {
  assert.isString(val.id);
  assert.isBoolean(val.filtered);
}
function assertChoice(
choice: ChatCompletion.Choice | ChatCompletionChunk.Choice,
options: ChatCompletionTestOptions,

Просмотреть файл

@ -42,7 +42,7 @@ interface ModelInfo {
}
/** Azure OpenAI service API versions exercised by the test matrix. */
export enum APIVersion {
  /** The latest preview API version. */
  Preview = "2024-08-01-preview",
  /** The latest stable (GA) API version targeted by this release. */
  Stable = "2024-10-21",
  /** Sentinel value for targeting the public OpenAI service rather than Azure. */
  OpenAI = "OpenAI",
}
export const APIMatrix = [APIVersion.Preview, APIVersion.Stable];