Legalbot AI Tools - connector submission (#487)
* Added files required for certification process - apiDefinition.swagger.json - apiProperties.json - settings.json - icon.png - readme.md * Add icon.png file * Added settings.json file The file includes dummy data for connector ID and environment * Update apiDefinition.swagger.json * Update readme.md * Update readme.md Added link to language combinations * Update apiProperties.json * Update readme.md * Move from custom to certified connectors Moved LegalBot artifacts from custom-connectors to certified-connectors. Deleted settings.json and icon.png
This commit is contained in:
Родитель
ca443344f3
Коммит
d9c4d7e185
|
@ -0,0 +1,977 @@
|
|||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "LegalBot AI Tools",
|
||||
"version": "1.0",
|
||||
"description": "Provides API access to Artificial Intelligence, NLP, Machine Translation and more. Specially designed AI and NLP functions for legal industry",
|
||||
"contact": {
|
||||
"name": "LegalBot.io Support",
|
||||
"url": "https://legalbot.io/crm",
|
||||
"email": "support@legalbot.io"
|
||||
}
|
||||
},
|
||||
"host": "api.legalbot.io",
|
||||
"basePath": "/legal_ai/v1",
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"securityDefinitions": {
|
||||
"apiKeyHeader": {
|
||||
"type": "apiKey",
|
||||
"name": "Ocp-Apim-Subscription-Key",
|
||||
"in": "header"
|
||||
},
|
||||
"apiKeyQuery": {
|
||||
"type": "apiKey",
|
||||
"name": "subscription-key",
|
||||
"in": "query"
|
||||
}
|
||||
},
|
||||
"security": [
|
||||
{
|
||||
"apiKeyHeader": []
|
||||
},
|
||||
{
|
||||
"apiKeyQuery": []
|
||||
}
|
||||
],
|
||||
"paths": {
|
||||
"/translate/text": {
|
||||
"post": {
|
||||
"description": "Translates text from source language to target language",
|
||||
"operationId": "Translate_Text",
|
||||
"summary": "Translate Text",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "Content-Type",
|
||||
"in": "header",
|
||||
"description": "content type of the request",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"default": "application/json",
|
||||
"enum": [
|
||||
"application/json"
|
||||
],
|
||||
"x-ms-visibility": "internal"
|
||||
},
|
||||
{
|
||||
"name": "send translation request",
|
||||
"in": "body",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Send translation request"
|
||||
},
|
||||
"description": "Request body for translation of text. 50,000,000 character limit (50 MB). Language codes follow ISO 639-1 Code two letter format other than Chinese simplified: zh-Hans and Chinese traditional: zh-Hant. Source language may be in Auto. It is also possible to write the English version of the name e.g. English, German, French, Russian",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Returns translated text",
|
||||
"headers": {
|
||||
"Content-Type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Receive translated text"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"transactionID": "string",
|
||||
"TranslatedText": "string",
|
||||
"credits_used": 0,
|
||||
"credits_remaining": 0,
|
||||
"insufficient_credits": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Error response in case of user input error including incorrect language combination or incorrect language format",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Error message related to failure of translation engine",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"504": {
|
||||
"description": "Error message related to timeout of service",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/translate/document": {
|
||||
"post": {
|
||||
"description": "Translates file content from source language to target language. ",
|
||||
"operationId": "Translate_Documents",
|
||||
"summary": "Translate Documents",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "Content-Type",
|
||||
"in": "header",
|
||||
"description": "content type of the request",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"default": "application/json",
|
||||
"enum": [
|
||||
"application/json"
|
||||
],
|
||||
"x-ms-visibility": "internal"
|
||||
},
|
||||
{
|
||||
"name": "Send document translation request",
|
||||
"in": "body",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Send document translation request"
|
||||
},
|
||||
"description": "Request body for translation of a document. The file can be in HTML, PDF, .docx, .doc, .odt, .rtf, .txt, .pptx, .ppt, .xlsx format. Files must be converted to Base64 string format. Language codes follow ISO 639-1 Code two letter format other than Chinese simplified: zh-Hans and Chinese traditional: zh-Hant. Source language may be in Auto. It is also possible to write the English version of the name e.g. English, German, French, Russian",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Returns URL link to translated document",
|
||||
"headers": {
|
||||
"Content-Type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Receive document translation request"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"document_link": "string",
|
||||
"transactionID": "string",
|
||||
"fileName": "string",
|
||||
"credits_used": 0,
|
||||
"credits_remaining": 0,
|
||||
"insufficient_credits": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Error with the request including invalid language pair or language code",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Error with the translation engine",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"504": {
|
||||
"description": "Error message due to timeout of request",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/NLP/document_similarity": {
|
||||
"post": {
|
||||
"description": "Calculates the similarity of two documents using Natural Language Processing",
|
||||
"operationId": "Document_Similarity_NLP",
|
||||
"summary": "Document Similarity NLP",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "Content-Type",
|
||||
"in": "header",
|
||||
"description": "content type of the request",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"default": "application/json",
|
||||
"enum": [
|
||||
"application/json"
|
||||
],
|
||||
"x-ms-visibility": "internal"
|
||||
},
|
||||
{
|
||||
"name": "Document similarity with NLP",
|
||||
"in": "body",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Send NLP document similarity"
|
||||
},
|
||||
"description": "Request body containing two files for comparison and custom parameters. The files can be in HTML, PDF, .docx, .doc, .odt, .rtf or .txt format. Files must be converted to Base64 string format.",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Returns similarity score of the words in the documents",
|
||||
"headers": {
|
||||
"Content-Type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Receive NLP document similarity"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"similarity": 0.938416639715411,
|
||||
"transactionID": "string",
|
||||
"text_analysed": "string",
|
||||
"credits_used": 0,
|
||||
"credits_remaining": 0,
|
||||
"insufficient_credit": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Error response for incorrect user input",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Error response for problem with service",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"504": {
|
||||
"description": "Error response for timeout of service",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/NLP/document_keywords": {
|
||||
"post": {
|
||||
"description": "Extracts and returns the top N keywords from a document.",
|
||||
"operationId": "Top_Keywords",
|
||||
"summary": "Top Keywords",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "Content-Type",
|
||||
"in": "header",
|
||||
"description": "content type of the request",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"default": "application/json",
|
||||
"enum": [
|
||||
"application/json"
|
||||
],
|
||||
"x-ms-visibility": "internal"
|
||||
},
|
||||
{
|
||||
"name": "send top N keywords request",
|
||||
"in": "body",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Send top N keywords request"
|
||||
},
|
||||
"description": "Request body to get top N keywords from document and custom parameters. The file can be in HTML, PDF, .docx, .doc, .odt, .rtf or .txt format. The files must be converted to Base64 string format.",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Returns top keywords from document",
|
||||
"headers": {
|
||||
"Content-Type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Receive NLP keywords"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"top_keywords": [
|
||||
"string"
|
||||
],
|
||||
"transactionID": "string",
|
||||
"credits_used": 0,
|
||||
"credits_remaining": 0,
|
||||
"insufficient_credits": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Error response for user input error",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string",
|
||||
"description": "content type of the response"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Error response for service error",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"504": {
|
||||
"description": "Error response for timeout",
|
||||
"headers": {
|
||||
"content-type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Error message"
|
||||
},
|
||||
"examples": {
|
||||
"application/json": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"Error message": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"error_detail": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"message": "string",
|
||||
"error_detail": "string"
|
||||
}
|
||||
},
|
||||
"Receive document translation request": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"document_link": {
|
||||
"type": "string"
|
||||
},
|
||||
"transactionID": {
|
||||
"type": "string"
|
||||
},
|
||||
"fileName": {
|
||||
"type": "string"
|
||||
},
|
||||
"credits_used": {
|
||||
"type": "integer"
|
||||
},
|
||||
"credits_remaining": {
|
||||
"type": "integer"
|
||||
},
|
||||
"insufficient_credits": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"message": "string",
|
||||
"document_link": "string",
|
||||
"transactionID": "string",
|
||||
"fileName": "string",
|
||||
"credits_used": 0,
|
||||
"credits_remaining": 0,
|
||||
"insufficient_credits": false
|
||||
}
|
||||
},
|
||||
"Receive NLP keywords": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"top_keywords": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"transactionID": {
|
||||
"type": "string"
|
||||
},
|
||||
"credits_used": {
|
||||
"type": "integer"
|
||||
},
|
||||
"credits_remaining": {
|
||||
"type": "integer"
|
||||
},
|
||||
"insufficient_credits": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"message": "string",
|
||||
"top_keywords": [
|
||||
"string"
|
||||
],
|
||||
"transactionID": "string",
|
||||
"credits_used": 0,
|
||||
"credits_remaining": 0,
|
||||
"insufficient_credits": false
|
||||
}
|
||||
},
|
||||
"Receive translated text": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"transactionID": {
|
||||
"type": "string"
|
||||
},
|
||||
"TranslatedText": {
|
||||
"type": "string"
|
||||
},
|
||||
"credits_used": {
|
||||
"type": "integer"
|
||||
},
|
||||
"credits_remaining": {
|
||||
"type": "integer"
|
||||
},
|
||||
"insufficient_credits": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"message": "string",
|
||||
"transactionID": "string",
|
||||
"TranslatedText": "string",
|
||||
"credits_used": 0,
|
||||
"credits_remaining": 0,
|
||||
"insufficient_credits": false
|
||||
}
|
||||
},
|
||||
"Send top N keywords request": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"clean": {
|
||||
"type": "boolean",
|
||||
"description": "If set to true the text to be analyzed will be cleaned from punctuation, excessive white space and turned into lower case according to LegalBot.io model",
|
||||
"title": "Clean text",
|
||||
"enum": [
|
||||
true,
|
||||
false
|
||||
],
|
||||
"default": false
|
||||
},
|
||||
"use_stopwords": {
|
||||
"type": "boolean",
|
||||
"description": "If set to true the text will be processed to remove words which do not contain important significance",
|
||||
"title": "Use stop words",
|
||||
"enum": [
|
||||
true,
|
||||
false
|
||||
],
|
||||
"default": false
|
||||
},
|
||||
"stop_word_model": {
|
||||
"type": "string",
|
||||
"description": "The LegalBot.io custom stop word model to use when processing the document.",
|
||||
"title": "Stop word model",
|
||||
"default": "None",
|
||||
"enum": [
|
||||
"None",
|
||||
"Basic",
|
||||
"Legal_v1",
|
||||
"user_only"
|
||||
]
|
||||
},
|
||||
"user_defined_stopwords": {
|
||||
"type": "string",
|
||||
"description": "User specific stop words to remove from the string. Each word separated by comma.",
|
||||
"title": "Custom stop words"
|
||||
},
|
||||
"num_keywords": {
|
||||
"type": "integer",
|
||||
"description": "The number of keywords to return",
|
||||
"title": "Number of keywords",
|
||||
"default": 5
|
||||
},
|
||||
"key_word_model": {
|
||||
"type": "string",
|
||||
"description": "The NLP keyword extraction model to use. \"word\" or \"noun\" keywords",
|
||||
"title": "Keyword model",
|
||||
"default": "word",
|
||||
"enum": [
|
||||
"word",
|
||||
"noun"
|
||||
]
|
||||
},
|
||||
"exclude_words_shorter_than": {
|
||||
"type": "integer",
|
||||
"description": "Excludes words from analysis if their string length is shorter than number specified",
|
||||
"title": "Exclude short words",
|
||||
"default": 0
|
||||
},
|
||||
"file": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"fileContent": {
|
||||
"type": "string",
|
||||
"description": "File content of the first document. File can be in HTML, PDF, .docx, .doc, .odt, .rtf or .txt format. File must be converted to Base64 string format.",
|
||||
"title": "File content"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "The name of the file",
|
||||
"title": "File name"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"fileContent",
|
||||
"name"
|
||||
]
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"clean": false,
|
||||
"use_stopwords": false,
|
||||
"stop_word_model": "string",
|
||||
"user_defined_stopwords": "string",
|
||||
"num_keywords": 0,
|
||||
"key_word_model": "string",
|
||||
"exclude_words_shorter_than": 0,
|
||||
"file": {
|
||||
"fileContent": "string",
|
||||
"name": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"clean",
|
||||
"file",
|
||||
"key_word_model",
|
||||
"num_keywords",
|
||||
"stop_word_model",
|
||||
"use_stopwords"
|
||||
]
|
||||
},
|
||||
"Send document translation request": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"source_language": {
|
||||
"type": "string",
|
||||
"description": "The source language of the document. Language codes follow ISO 639-1 Code two letter format other than Chinese simplified: zh-Hans and Chinese traditional: zh-Hant. Source language may be in Auto. It is also possible to write the English version of the name e.g. English, German, French, Russian. It is possible to specify Auto as a value.",
|
||||
"title": "Source language"
|
||||
},
|
||||
"target_language": {
|
||||
"type": "string",
|
||||
"description": "The target language you want the document translated into. Language codes follow ISO 639-1 Code two letter format other than Chinese simplified: zh-Hans and Chinese traditional: zh-Hant. Source language may be in Auto. It is also possible to write the English version of the name e.g. English, German, French, Russian.",
|
||||
"title": "Target language"
|
||||
},
|
||||
"file": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"fileContent": {
|
||||
"type": "string",
|
||||
"description": "The file content of the document in Base64 string format. The file can be in HTML, PDF, .docx, .doc, .odt, .rtf, .txt, .pptx, .ppt, .xlsx format.",
|
||||
"title": "File content"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "The name of the file with extension",
|
||||
"title": "File name"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"fileContent",
|
||||
"name"
|
||||
]
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"source_language": "string",
|
||||
"target_language": "string",
|
||||
"file": {
|
||||
"fileContent": "string",
|
||||
"name": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"file",
|
||||
"source_language",
|
||||
"target_language"
|
||||
]
|
||||
},
|
||||
"Send NLP document similarity": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"clean": {
|
||||
"type": "boolean",
|
||||
"description": "If set to true the text to be analyzed will be cleaned from punctuation, excessive white space and turned into lower case according to LegalBot.io model",
|
||||
"title": "Clean text",
|
||||
"enum": [
|
||||
true,
|
||||
false
|
||||
],
|
||||
"default": false
|
||||
},
|
||||
"use_stopwords": {
|
||||
"type": "boolean",
|
||||
"description": "If set to true the text will be processed to remove words which do not contain important significance",
|
||||
"title": "Use stop words",
|
||||
"enum": [
|
||||
true,
|
||||
false
|
||||
],
|
||||
"default": false
|
||||
},
|
||||
"stop_word_model": {
|
||||
"type": "string",
|
||||
"description": "The LegalBot.io custom stop word model to use when processing the document.",
|
||||
"title": "Stop word model",
|
||||
"default": "None",
|
||||
"enum": [
|
||||
"None",
|
||||
"Basic",
|
||||
"Legal_v1",
|
||||
"user_only"
|
||||
]
|
||||
},
|
||||
"user_defined_stopwords": {
|
||||
"type": "string",
|
||||
"description": "User specific stop words to remove from the string. Each word separated by comma.",
|
||||
"title": "Custom stop words"
|
||||
},
|
||||
"lemmatize": {
|
||||
"type": "boolean",
|
||||
"description": "If set to true each word will be processed to change the word to its root word or lemma e.g. runs, running, ran = run",
|
||||
"title": "Lemmatize string",
|
||||
"enum": [
|
||||
true,
|
||||
false
|
||||
],
|
||||
"default": false
|
||||
},
|
||||
"stemming": {
|
||||
"type": "boolean",
|
||||
"description": "If set to true each word will be processed to the word to its stem word e.g. intelligent = intellig",
|
||||
"title": "Stem string",
|
||||
"enum": [
|
||||
true,
|
||||
false
|
||||
],
|
||||
"default": false
|
||||
},
|
||||
"exclude_words_shorter_than": {
|
||||
"type": "integer",
|
||||
"description": "Excludes words from analysis if their string length is shorter than number specified",
|
||||
"title": "Exclude short words",
|
||||
"default": 0
|
||||
},
|
||||
"file1": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"fileContent": {
|
||||
"type": "string",
|
||||
"description": "File content of the first document. File can be in HTML, PDF, .docx, .doc, .odt, .rtf or .txt format. File must be converted to Base64 string format.",
|
||||
"title": "File Content 1"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "The name of the first file",
|
||||
"title": "File name 1"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"fileContent",
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"file2": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"fileContent": {
|
||||
"type": "string",
|
||||
"description": "File content of the second document. File can be in HTML, PDF, .docx, .doc, .odt, .rtf or .txt format. File must be converted to Base64 string format.",
|
||||
"title": "File Content 2"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "The name of the second file",
|
||||
"title": "File Name 2"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"fileContent",
|
||||
"name"
|
||||
]
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"clean": false,
|
||||
"use_stopwords": false,
|
||||
"stop_word_model": "string",
|
||||
"user_defined_stopwords": "string",
|
||||
"lemmatize": false,
|
||||
"stemming": false,
|
||||
"exclude_words_shorter_than": 0,
|
||||
"file1": {
|
||||
"fileContent": "string",
|
||||
"name": "string"
|
||||
},
|
||||
"file2": {
|
||||
"fileContent": "string",
|
||||
"name": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"clean",
|
||||
"file1",
|
||||
"file2",
|
||||
"lemmatize",
|
||||
"stemming",
|
||||
"stop_word_model",
|
||||
"use_stopwords"
|
||||
]
|
||||
},
|
||||
"Send translation request": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"target_language": {
|
||||
"type": "string",
|
||||
"description": "The target language to translate the text into. Language codes follow ISO 639-1 Code two letter format other than Chinese simplified: zh-Hans and Chinese traditional: zh-Hant. Source language may be in Auto. It is also possible to write the English version of the name e.g. English, German, French, Russian",
|
||||
"title": "Target language"
|
||||
},
|
||||
"source_language": {
|
||||
"type": "string",
|
||||
"description": "The source language of the text. Language codes follow ISO 639-1 Code two letter format other than Chinese simplified: zh-Hans and Chinese traditional: zh-Hant. Source language may be in Auto. It is also possible to write the English version of the name e.g. English, German, French, Russian. It is possible to specify Auto as a value.",
|
||||
"title": "Source language"
|
||||
},
|
||||
"text_to_translate": {
|
||||
"type": "string",
|
||||
"description": "This is the text you want translated",
|
||||
"title": "Text to translate"
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"target_language": "string",
|
||||
"source_language": "string",
|
||||
"text_to_translate": "string"
|
||||
},
|
||||
"required": [
|
||||
"source_language",
|
||||
"target_language",
|
||||
"text_to_translate"
|
||||
]
|
||||
},
|
||||
"Receive NLP document similarity": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"similarity": {
|
||||
"type": "number"
|
||||
},
|
||||
"transactionID": {
|
||||
"type": "string"
|
||||
},
|
||||
"text_analysed": {
|
||||
"type": "string"
|
||||
},
|
||||
"credits_used": {
|
||||
"type": "integer"
|
||||
},
|
||||
"credits_remaining": {
|
||||
"type": "integer"
|
||||
},
|
||||
"insufficient_credit": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"message": "string",
|
||||
"similarity": 0.4343482942552459,
|
||||
"transactionID": "string",
|
||||
"text_analysed": "string",
|
||||
"credits_used": 0,
|
||||
"credits_remaining": 0,
|
||||
"insufficient_credit": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"x-ms-connector-metadata": [
|
||||
{
|
||||
"propertyName": "Website",
|
||||
"propertyValue": "https://legalbot.io"
|
||||
},
|
||||
{
|
||||
"propertyName": "Privacy policy",
|
||||
"propertyValue": "https://legalbot.io/privacy.html"
|
||||
},
|
||||
{
|
||||
"propertyName": "Categories",
|
||||
"propertyValue": "AI;website"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
"properties": {
|
||||
"connectionParameters": {
|
||||
"api_key": {
|
||||
"type": "securestring",
|
||||
"uiDefinition": {
|
||||
"displayName": "API Key",
|
||||
"description": "The API Key for this API",
|
||||
"tooltip": "Provide your API Key",
|
||||
"constraints": {
|
||||
"tabIndex": 2,
|
||||
"clearText": false,
|
||||
"required": "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"iconBrandColor": "#131347",
|
||||
"capabilities": [
|
||||
"actions"
|
||||
],
|
||||
"publisher": "LegalBot.io",
|
||||
"stackOwner": "LegalBot.io"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
|
||||
# LegalBot AI Tools
|
||||
LegalBot AI Tools lets you easily access Natural Language Processing (NLP) models, machine translation and legal content. You have the ability to customise the AI models to suit your particular needs. Functionality is specifically designed for the automation of tasks in the legal industry but is also useful for the finance industry and any international business department. The functions specialise in handling and analysing documents and text.
|
||||
|
||||
## Setup
|
||||
|
||||
1. Access to the API is through an API access key. To obtain an API key go to [LegalBot.io](https://legalbot.io) or email support@legalbot.io
|
||||
2. Ensure you have sufficient credits to perform your operations. More details on pricing are available at [LegalBot.io](https://legalbot.io) or email support@legalbot.io
|
||||
3. You will be requested to enter the API key when creating the new connector.
|
||||
|
||||
## Supported Actions
|
||||
|
||||
The following actions are supported:
|
||||
|
||||
* `Translate document`: Translates a document from the source language to the target language whilst keeping the formatting of the document (55 languages supported). The machine translation uses legal translation dictionaries to give more accurate results for legal documents. Your files can be in HTML, PDF, .docx, .doc, .odt, .rtf, .txt, .pptx, .ppt, .xlsx format. Files must be converted to Base64 string format before sending e.g. base64(outputs('Get_file_content')?['body']). Language codes follow ISO 639-1 Code two letter format other than Chinese simplified: zh-Hans and Chinese traditional: zh-Hant. Source language may be in Auto. It is also possible to write the English version of the name e.g. English, German, French, Russian. Possible language combinations can be found [here.](https://legalbot.io/translate/)
|
||||
|
||||
* `Translate text`: Translates text from the source language to the target language (55 languages supported). The service supports a maximum 50,000,000 character limit (50 MB) per call however make sure your input fields will allow this length. The machine translation uses legal translation dictionaries to give more accurate results for legal texts. Language codes follow ISO 639-1 Code two letter format other than Chinese simplified: zh-Hans and Chinese traditional: zh-Hant. Source language may be in Auto. It is also possible to write the English version of the name e.g. English, German, French, Russian. Possible language combinations can be found [here.](https://legalbot.io/translate/)
|
||||
|
||||
* `Document similarity with NLP`: Compares how similar two documents are using NLP techniques. You can easily customise your AI model by changing the input parameters. The files can be in HTML, PDF, .docx, .doc, .odt, .rtf or .txt format. Files must be converted to Base64 string format before sending e.g. base64(outputs('Get_file_content')?['body']). Input parameters include:
|
||||
* `Clean text`: if set to true this will convert words in the document text to lowercase and remove punctuation and whitespace
|
||||
* `Use stop words`: if set to true then the text will be processed through a stop word engine to remove your own custom stop words or those defined in a particular stop word model
|
||||
* `Stop word model`: if you have chosen to use stop words then you can customise the stop word model you use. Available are Basic: common words such as "and" and "I"; Legal_V1: words specifically used in agreements; user_only: this allows you to develop and use your own custom stop words for processing
|
||||
* `Custom stop words`: enter any specific stop words you want excluded from analysis. This is useful to fine tune your NLP analysis. Words are separated by a comma like this: these, are, my, custom, stop, words
|
||||
* `Lemmatize string`: if set to true the words in the document text will be lemmatized to their lemma or part of speech. For example the words "liabilities and liability" are converted to the same lemma of "liability and liability" and "warranty and warranties" into "warranty and warranty".
|
||||
* `Stem string`: if set to true the words in the document will be converted to their stem origin. This can help you customise your analysis. For example the words "liabilities and liability" are converted to the same stem of "liabil and liabil" and "warranty and warranties" into "warranti and warranti". Stemming is usually considered easier to implement than lemmatizing.
|
||||
* `Exclude short words`: Set the number to higher than 0 to exclude words equal to or shorter than the number selected. For example if set to 3 all words with a character length 3 or less will be excluded: e.g. "sue" would be excluded but not "sues". You must set use stop words to true to enable this feature.
|
||||
* `File content`: requires the content of two files in base64 string format along with their file names.
|
||||
|
||||
* `Top Keywords`: Extracts and returns the top keywords from a document. You can easily customise your AI model by changing the input parameters. The files can be in HTML, PDF, .docx, .doc, .odt, .rtf or .txt format. Files must be converted to Base64 string format before sending e.g. base64(outputs('Get_file_content')?['body']). Input parameters include:
|
||||
* `Number of keywords`: the number of keywords you would like to receive back
|
||||
* `Keyword model`: this can be set to either word model or noun model for keywords. An example of the difference is: the top 3 keywords in the following sentence "LegalBot.io makes it fun, fun, fun and easy to use and create NLP and AI models!" would be 'fun', 'and', 'LegalBot.io' for the word model whilst the noun model would return 'legalbot.io', 'nlp', 'ai'. By customising the parameters below you can fine tune your results.
|
||||
* `Clean text`: if set to true this will convert words in the document text to lowercase and remove punctuation and whitespace
|
||||
* `Use stop words`: if set to true then the text will be processed through a stop word engine to remove your own custom stop words or those defined in a particular stop word model
|
||||
* `Stop word model`: if you have chosen to use stop words then you can customise the stop word model you use. Available are Basic: common words such as "and" and "I"; Legal_V1: words specifically used in agreements; user_only: this allows you to develop and use your own custom stop words for processing
|
||||
* `Custom stop words`: enter any specific stop words you want excluded from analysis. This is useful to fine tune your NLP analysis. Words are separated by a comma like this: these, are, my, custom, stop, words
|
||||
* `Lemmatize string`: if set to true the words in the document text will be lemmatized to their lemma or part of speech. For example the words "liabilities and liability" are converted to the same lemma of "liability and liability" and "warranty and warranties" into "warranty and warranty".
|
||||
* `Stem string`: if set to true the words in the document will be converted to their stem origin. This can help you customise your analysis. For example the words "liabilities and liability" are converted to the same stem of "liabil and liabil" and "warranty and warranties" into "warranti and warranti". Stemming is usually considered easier to implement than lemmatizing.
|
||||
* `Exclude short words`: Set the number to higher than 0 to exclude words equal to or shorter than the number selected. For example if set to 3 all words with a character length 3 or less will be excluded: e.g. "sue" would be excluded but not "sues". You must set use stop words to true to enable this feature.
|
||||
* `File content`: requires the content of a file in base64 string format along with its file name.
|
||||
|
Загрузка…
Ссылка в новой задаче