PowerPlatformConnectors/independent-publisher-conne.../Perplexity AI/apiDefinition.swagger.json

{
"swagger": "2.0",
"info": {
"title": "Perplexity AI",
"description": "Unlock the most powerful AI research assistant. Raise Perplexity to the next level with more Copilot, upgraded AI, unlimited file upload, and web service access. Upgrade to Claude-2 or GPT-4 for more accurate answers, will pplx, Mistral, and Llama language models also available.",
"version": "1.1",
"contact": {
"name": "Troy Taylor",
"url": "https://www.hitachisolutions.com",
"email": "ttaylor@hitachisolutions.com"
},
"x-ms-keywords": [
"Perplexity",
"AI",
"completion"
]
},
"host": "api.perplexity.ai",
"basePath": "/",
"schemes": [
"https"
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {
"/chat/completions": {
"post": {
"responses": {
"200": {
"description": "default",
"schema": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "The identifier.",
"title": "ID"
},
"model": {
"type": "string",
"description": "The model.",
"title": "Model"
},
"object": {
"type": "string",
"description": "The object.",
"title": "Object"
},
"created": {
"type": "integer",
"format": "int32",
"description": "When created.",
"title": "Created"
},
"choices": {
"type": "array",
"items": {
"type": "object",
"properties": {
"index": {
"type": "integer",
"format": "int32",
"description": "The index.",
"title": "Index"
},
"finish_reason": {
"type": "string",
"description": "The finish reason",
"title": "Finish Reason"
},
"message": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "The content.",
"title": "Content"
},
"role": {
"type": "string",
"description": "The role.",
"title": "Role"
}
},
"title": "Message"
},
"delta": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "The content.",
"title": "Content"
},
"role": {
"type": "string",
"description": "The role.",
"title": "Role"
}
},
"title": "Delta"
}
}
},
"title": "Choices"
},
"usage": {
"type": "object",
"properties": {
"prompt_tokens": {
"type": "integer",
"format": "int32",
"description": "The prompt tokens used.",
"title": "Prompt Tokens"
},
"completion_tokens": {
"type": "integer",
"format": "int32",
"description": "The completion tokens used.",
"title": "Completion Tokens"
},
"total_tokens": {
"type": "integer",
"format": "int32",
"description": "The total tokens used.",
"title": "Total Tokens"
}
},
"title": "Usage"
}
}
}
}
},
"summary": "Get chat completion",
"parameters": [
{
"name": "body",
"in": "body",
"required": false,
"schema": {
"type": "object",
"properties": {
"model": {
"type": "string",
"description": "The name of the model that will complete your prompt.",
"title": "Model",
"default": "mistral-7b-instruct",
"enum": [
"mistral-7b-instruct",
"sonar-small-chat",
"sonar-small-online",
"sonar-medium-chat",
"sonar-medium-online",
"mistral-7b-instruct",
"mixtral-8x7b-instruct"
]
},
"messages": {
"type": "array",
"items": {
"type": "object",
"properties": {
"role": {
"type": "string",
"description": "The role of the speaker in this turn of conversation. After the (optional) system message, user and assistant roles should alternate with user then assistant, ending in user.",
"title": "Role",
"default": "system",
"enum": [
"system",
"user",
"assistant"
]
},
"content": {
"type": "string",
"description": "The contents of the message in this turn of conversation.",
"title": "Content"
}
},
"required": [
"content",
"role"
]
},
"description": "messages"
},
"max_tokens": {
"type": "integer",
"format": "int32",
"description": "The maximum number of completion tokens returned by the API. The total number of tokens requested in max_tokens plus the number of prompt tokens sent in messages must not exceed the context window token limit of model requested. If left unspecified, then the model will generate tokens until either it reaches its stop token or the end of its context window.",
"title": "Max Tokens"
},
"temperature": {
"type": "number",
"format": "double",
"description": "The amount of randomness in the response, valued between 0 inclusive and 2 exclusive. Higher values are more random, and lower values are more deterministic. You should either set temperature or top_p, but not both.",
"title": "Temperature"
},
"top_p": {
"type": "number",
"format": "double",
"description": "The nucleus sampling threshold, valued between 0 and 1 inclusive. For each subsequent token, the model considers the results of the tokens with top_p probability mass. You should either alter temperature or top_p, but not both.",
"title": "Top P"
},
"top_k": {
"type": "number",
"format": "double",
"description": "The number of tokens to keep for highest top-k filtering, specified as an integer between 0 and 2048 inclusive. If set to 0, top-k filtering is disabled.",
"title": "Top K"
},
"presence_penalty": {
"type": "number",
"format": "double",
"description": "A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. Incompatible with frequency penalty.",
"title": "Presence Penalty"
},
"frequency_penalty": {
"type": "number",
"format": "double",
"description": "A multiplicative penalty greater than 0. Values greater than 1.0 penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. A value of 1.0 means no penalty. Incompatible with presence penalty.",
"title": "Frequency Penalty"
}
},
"required": [
"model",
"messages"
]
}
}
],
"description": "Generates a model's response for the given chat conversation.",
"operationId": "CompletionPost",
"x-ms-openai-data": {
"openai-enabled": true,
"operations": [
{
"operationId": "CompletionPost",
"x-ms-require-user-confirmation": true
}
]
}
}
}
},
"definitions": {},
"parameters": {},
"responses": {},
"securityDefinitions": {
"API Key (in the form 'Bearer YOUR_API_KEY')": {
"type": "apiKey",
"in": "header",
"name": "Authorization"
}
},
"security": [
{
"API Key (in the form 'Bearer YOUR_API_KEY')": []
}
],
"tags": [],
"x-ms-openai-manifest": {
"humanName": "Perplexity AI",
"modelDescription": "This is a Copilot plugin for Perplexity AI to retrieve chat completions.",
"contactEmail": "ttaylor@hitachisolutions.com"
},
"x-ms-connector-metadata": [
{
"propertyName": "Website",
"propertyValue": "https://www.perplexity.ai/"
},
{
"propertyName": "Privacy policy",
"propertyValue": "https://blog.perplexity.ai/legal/privacy-policy"
},
{
"propertyName": "Categories",
"propertyValue": "AI"
}
]
}
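
For reference, a minimal sketch of how a client could call the /chat/completions operation defined above directly, outside of Power Platform. It assumes the Python requests library and an API key stored in a PERPLEXITY_API_KEY environment variable; neither is part of the connector definition itself.

# Minimal sketch of a direct call to the endpoint this connector wraps.
# Assumes the `requests` library and an API key in PERPLEXITY_API_KEY
# (both assumptions, not part of the swagger file above).
import os
import requests

API_KEY = os.environ["PERPLEXITY_API_KEY"]

body = {
    # "model" and "messages" are the two required body properties per the schema.
    "model": "mistral-7b-instruct",
    "messages": [
        {"role": "system", "content": "Be precise and concise."},
        {"role": "user", "content": "How many moons does Mars have?"},
    ],
    "max_tokens": 256,
}

response = requests.post(
    "https://api.perplexity.ai/chat/completions",
    # Matches the securityDefinitions entry: an Authorization header of the
    # form "Bearer YOUR_API_KEY".
    headers={"Authorization": f"Bearer {API_KEY}"},
    json=body,
    timeout=60,
)
response.raise_for_status()

# The 200 response schema defines choices[].message.content and a usage block.
data = response.json()
print(data["choices"][0]["message"]["content"])
print(data["usage"]["total_tokens"], "tokens used")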