[OpenAI] Add sample for tts (#30507)
This commit is contained in:
Parent: ee2c251b2c
Commit: 796c37798e
@@ -0,0 +1,50 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to convert text into speech.
 *
 * @summary text to speech.
 * @azsdk-weight 100
 */

import "openai/shims/node";
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
import { writeFile } from "fs/promises";

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";

// You will need to set these environment variables or edit the following values
const speechFilePath = process.env["SPEECH_FILE_PATH"] || "<path to save the speech file>";

// Corresponds to your Model deployment within your OpenAI resource
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = "tts";
const apiVersion = "2024-05-01-preview";
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(credential, scope);

export async function main() {
  console.log("== Text to Speech Sample ==");

  const openai = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  const response = await openai.audio.speech.create({
    model: deployment,
    voice: "alloy",
    input: "the quick brown chicken jumped over the lazy dogs",
  });

  const stream = response.body;
  console.log(`Streaming response to ${speechFilePath}`);
  await writeFile(speechFilePath, stream);
  console.log("Finished streaming");
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});
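The sample streams response.body straight to disk. If you would rather buffer the audio in memory before writing it, a minimal sketch is shown below. It is not part of the committed sample: the helper name is hypothetical, and it assumes the installed openai package resolves audio.speech.create to a fetch-style Response that exposes arrayBuffer().

import { AzureOpenAI } from "openai";
import { writeFile } from "fs/promises";

// Hypothetical helper (not in the committed sample): buffer the audio, then write it in one call.
// Assumes `audio.speech.create` resolves to a fetch-style Response exposing `arrayBuffer()`.
async function saveSpeechAsBuffer(client: AzureOpenAI, deployment: string, outputPath: string): Promise<void> {
  const response = await client.audio.speech.create({
    model: deployment,
    voice: "alloy",
    input: "the quick brown chicken jumped over the lazy dogs",
  });
  // Collect the whole payload in memory and persist it with a single write.
  const buffer = Buffer.from(await response.arrayBuffer());
  await writeFile(outputPath, buffer);
}

Called as await saveSpeechAsBuffer(openai, deployment, speechFilePath) inside main(), this produces the same file without keeping the response stream open while writing.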
@@ -26,6 +26,7 @@ These sample programs show how to use the JavaScript client libraries for Azure
| [streamChatCompletions.js][streamchatcompletions] | list chat completions. |
| [streamChatCompletionsWithContentFilter.js][streamchatcompletionswithcontentfilter] | get completions. |
| [streamCompletions.js][streamcompletions] | list completions. |
| [textToSpeech.js][texttospeech] | text to speech. |
| [toolCall.js][toolcall] | get chat completions with functions. |

## Prerequisites

@@ -79,6 +80,7 @@ Take a look at our [API Documentation][apiref] for more information about the AP
[streamchatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/javascript/streamChatCompletions.js
[streamchatcompletionswithcontentfilter]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/javascript/streamChatCompletionsWithContentFilter.js
[streamcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/javascript/streamCompletions.js
[texttospeech]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/javascript/textToSpeech.js
[toolcall]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/javascript/toolCall.js
[apiref]: https://docs.microsoft.com/javascript/api/@azure/openai
[freesub]: https://azure.microsoft.com/free/
@@ -0,0 +1,51 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to convert text into speech.
 *
 * @summary text to speech.
 */

require("openai/shims/node");
const { AzureOpenAI } = require("openai");
const { DefaultAzureCredential, getBearerTokenProvider } = require("@azure/identity");
const { writeFile } = require("fs/promises");

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
require("dotenv/config");

// You will need to set these environment variables or edit the following values
const speechFilePath = process.env["SPEECH_FILE_PATH"] || "<path to save the speech file>";

// Corresponds to your Model deployment within your OpenAI resource
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = "tts";
const apiVersion = "2024-05-01-preview";
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(credential, scope);

async function main() {
  console.log("== Text to Speech Sample ==");

  const openai = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  const response = await openai.audio.speech.create({
    model: deployment,
    voice: "alloy",
    input: "the quick brown chicken jumped over the lazy dogs",
  });

  const stream = response.body;
  console.log(`Streaming response to ${speechFilePath}`);
  await writeFile(speechFilePath, stream);
  console.log("Finished streaming");
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

module.exports = { main };
@@ -26,6 +26,7 @@ These sample programs show how to use the TypeScript client libraries for Azure
| [streamChatCompletions.ts][streamchatcompletions] | list chat completions. |
| [streamChatCompletionsWithContentFilter.ts][streamchatcompletionswithcontentfilter] | get completions. |
| [streamCompletions.ts][streamcompletions] | list completions. |
| [textToSpeech.ts][texttospeech] | text to speech. |
| [toolCall.ts][toolcall] | get chat completions with functions. |

## Prerequisites

@@ -91,6 +92,7 @@ Take a look at our [API Documentation][apiref] for more information about the AP
[streamchatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/typescript/src/streamChatCompletions.ts
[streamchatcompletionswithcontentfilter]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/typescript/src/streamChatCompletionsWithContentFilter.ts
[streamcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/typescript/src/streamCompletions.ts
[texttospeech]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/typescript/src/textToSpeech.ts
[toolcall]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v2-beta/typescript/src/toolCall.ts
[apiref]: https://docs.microsoft.com/javascript/api/@azure/openai
[freesub]: https://azure.microsoft.com/free/
@@ -0,0 +1,49 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to convert text into speech.
 *
 * @summary text to speech.
 */

import "openai/shims/node";
import { AzureOpenAI } from "openai";
import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
import { writeFile } from "fs/promises";

// Set AZURE_OPENAI_ENDPOINT to the endpoint of your
// OpenAI resource. You can find this in the Azure portal.
// Load the .env file if it exists
import "dotenv/config";

// You will need to set these environment variables or edit the following values
const speechFilePath = process.env["SPEECH_FILE_PATH"] || "<path to save the speech file>";

// Corresponds to your Model deployment within your OpenAI resource
// Navigate to the Azure OpenAI Studio to deploy a model.
const deployment = "tts";
const apiVersion = "2024-05-01-preview";
const credential = new DefaultAzureCredential();
const scope = "https://cognitiveservices.azure.com/.default";
const azureADTokenProvider = getBearerTokenProvider(credential, scope);

export async function main() {
  console.log("== Text to Speech Sample ==");

  const openai = new AzureOpenAI({ azureADTokenProvider, deployment, apiVersion });
  const response = await openai.audio.speech.create({
    model: deployment,
    voice: "alloy",
    input: "the quick brown chicken jumped over the lazy dogs",
  });

  const stream = response.body;
  console.log(`Streaming response to ${speechFilePath}`);
  await writeFile(speechFilePath, stream);
  console.log("Finished streaming");
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});
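The samples receive the service's default MP3 encoding. The speech endpoint also accepts a response_format option; the sketch below is an illustrative variation that reuses openai, deployment, speechFilePath, and writeFile from the sample above, and whether a given format such as "wav" is supported by your deployment and api-version is an assumption to verify.

// Hypothetical variation (not in the committed sample): request WAV output instead of the default MP3.
// Whether "wav" is accepted depends on the deployment and api-version in use.
const wavResponse = await openai.audio.speech.create({
  model: deployment,
  voice: "alloy",
  input: "the quick brown chicken jumped over the lazy dogs",
  response_format: "wav",
});
// Persist the stream the same way the sample does for MP3 output.
await writeFile(speechFilePath, wavResponse.body);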