From 3a4b6ceb68e5efeff63269994a14821fbbac8239 Mon Sep 17 00:00:00 2001 From: Sam Estrin Date: Mon, 17 Jun 2024 13:42:12 -0700 Subject: [PATCH] v0.0.10 --- README.md | 13 +-- config.js | 3 + docs/API.md | 66 ++++++++++++++ docs/APIKEYS.md | 18 ++++ docs/USAGE.md | 127 ++++++++++++++++++++++----- env | 4 + package.json | 2 +- src/ai21.js | 97 ++++++++++++++++++++ src/anthropic.js | 64 +++++++++----- src/azureai.js | 111 +++++++++++++++++++++++ src/cohere.js | 61 ++++++++----- src/gemini.js | 76 ++++++++++------ src/goose.js | 75 ++++++++++------ src/groq.js | 57 ++++++++---- src/huggingface.js | 100 +++++++++++++++++++++ src/index.js | 26 ++++++ src/llamacpp.js | 70 +++++++++------ src/mistral.js | 74 ++++++++++------ src/openai.js | 69 ++++++++++----- src/perplexity.js | 91 +++++++++++++++++++ src/reka.js | 58 +++++++----- test/basic/ai21.test.js | 29 ++++++ test/basic/huggingface.test.js | 41 +++++++++ test/basic/perplexity.test.js | 26 ++++++ test/basic/reka.test.js | 2 +- test/cache/huggingface.cache.test.js | 89 +++++++++++++++++++ test/json/gemini.json.test.js | 2 +- test/json/openai.json.test.js | 2 +- 28 files changed, 1213 insertions(+), 240 deletions(-) create mode 100644 src/ai21.js create mode 100644 src/azureai.js create mode 100644 src/huggingface.js create mode 100644 src/perplexity.js create mode 100644 test/basic/ai21.test.js create mode 100644 test/basic/huggingface.test.js create mode 100644 test/basic/perplexity.test.js create mode 100644 test/cache/huggingface.cache.test.js diff --git a/README.md b/README.md index 19f1821..c331868 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,13 @@ The LLM Interface project is a versatile and comprehensive wrapper designed to i ## Updates +**v0.0.10** + +- **Hugging Face**: Added support for Hugging Face Inference (_over 150,000 publicly accessible machine learning models_) +- **Perplexity Labs**: Added support for Perplexity Labs +- **JSON Improvements**: The `json_object` mode for OpenAI and Gemini now guarantees the return a valid JSON object or null. +- **Graceful Retries**: Retry LLM queries upon failure. + **v0.0.9** - **Response Caching**: Efficiently caches LLM responses to reduce costs, enhance performance and minimize redundant requests, with customizable cache timeout settings. @@ -19,10 +26,6 @@ The LLM Interface project is a versatile and comprehensive wrapper designed to i - **Mistral AI**: Added support for Mistral AI - **Cohere**: Added support for Cohere -**v0.0.7** - -- **Goose AI**: Added support for Goose AI - ## Features - **Unified Interface**: A single, consistent interface to interact with multiple LLM APIs. @@ -36,7 +39,7 @@ The LLM Interface project is a versatile and comprehensive wrapper designed to i The project relies on several npm packages and APIs. Here are the primary dependencies: -- `axios`: For making HTTP requests (used for Cohere, Goose AI, LLaMA.cpp, Mistral, and Reka AI). +- `axios`: For making HTTP requests (used for various HTTP AI APIs). - `@anthropic-ai/sdk`: SDK for interacting with the Anthropic API. - `@google/generative-ai`: SDK for interacting with the Google Gemini API. - `groq-sdk`: SDK for interacting with the Groq API. 
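For reference, a minimal sketch of how the unified interface is consumed together with the two interface options introduced in this release (response caching and graceful retries). This is only an illustration: it assumes the `llm-interface` entry point exported from `src/index.js` and the `sendMessage(message, options, interfaceOptions)` signature used throughout this patch.

```javascript
const { LLMInterface } = require("llm-interface");

const openai = new LLMInterface.openai(process.env.OPENAI_API_KEY);

const message = {
  model: "gpt-3.5-turbo",
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "Explain the importance of low latency LLMs." },
  ],
};

// Cache the response for one day and retry up to 3 times on failure.
openai
  .sendMessage(
    message,
    { max_tokens: 150 },
    { cacheTimeoutSeconds: 86400, retryAttempts: 3 }
  )
  .then(console.log)
  .catch(console.error);
```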
diff --git a/config.js b/config.js index bf73c68..0f55ef8 100644 --- a/config.js +++ b/config.js @@ -15,4 +15,7 @@ module.exports = { gooseApiKey: process.env.GOOSE_API_KEY, cohereApiKey: process.env.COHERE_API_KEY, mistralApiKey: process.env.MISTRAL_API_KEY, + huggingfaceApiKey: process.env.HUGGINGFACE_API_KEY, + perplexityApiKey: process.env.PERPLEXITY_API_KEY, + ai21ApiKey: process.env.AI21_API_KEY, }; diff --git a/docs/API.md b/docs/API.md index 8ccbea1..6fad7f3 100644 --- a/docs/API.md +++ b/docs/API.md @@ -37,6 +37,28 @@ openai }); ``` +### AI21 + +#### `sendMessage(message, options, cacheTimeoutSeconds)` + +- **Parameters:** + - `message`: An object containing the model and messages to send. + - `options`: An optional object containing `max_tokens` and `model`. + - `cacheTimeoutSeconds`: An optional number specifying the cache timeout in seconds. If set, caching is enabled. +- **Returns:** A promise that resolves to the response text. +- **Example:** + +```javascript +ai21 + .sendMessage(message, { max_tokens: 150 }) + .then((response) => { + console.log(response); + }) + .catch((error) => { + console.error(error); + }); +``` + ### Anthropic #### `sendMessage(message, options, cacheTimeoutSeconds)` @@ -147,6 +169,28 @@ groq }); ``` +### Hugging Face + +#### `sendMessage(message, options, cacheTimeoutSeconds)` + +- **Parameters:** + - `message`: An object containing the model and messages to send. + - `options`: An optional object containing `max_tokens` and `model`. + - `cacheTimeoutSeconds`: An optional number specifying the cache timeout in seconds. If set, caching is enabled. +- **Returns:** A promise that resolves to the response text. +- **Example:** + +```javascript +huggingface + .sendMessage(message, { max_tokens: 100 }) + .then((response) => { + console.log(response); + }) + .catch((error) => { + console.error(error); + }); +``` + ### Mistral AI #### `sendMessage(message, options, cacheTimeoutSeconds)` @@ -169,6 +213,28 @@ mistral }); ``` +### Perplexity Labs + +#### `sendMessage(message, options, cacheTimeoutSeconds)` + +- **Parameters:** + - `message`: An object containing the model and messages to send. + - `options`: An optional object containing `max_tokens` and `model`. + - `cacheTimeoutSeconds`: An optional number specifying the cache timeout in seconds. If set, caching is enabled. +- **Returns:** A promise that resolves to the response text. +- **Example:** + +```javascript +perplexity + .sendMessage(message, { max_tokens: 100 }) + .then((response) => { + console.log(response); + }) + .catch((error) => { + console.error(error); + }); +``` + ### Reka AI #### `sendMessage(message, options, cacheTimeoutSeconds)` diff --git a/docs/APIKEYS.md b/docs/APIKEYS.md index 1834cfd..c64dd9d 100644 --- a/docs/APIKEYS.md +++ b/docs/APIKEYS.md @@ -8,6 +8,12 @@ The OpenAI API requires a credit card. - https://platform.openai.com/api-keys +## AI21 Studio + +The AI21 API is a commercial product, but it currently does not require a credit card, and comes with a $90 credit. + +- https://studio.ai21.com/account/api-key?source=docs + ## Anthropic The Anthropic API requires a credit card. @@ -38,12 +44,24 @@ The Groq API is currently free. - https://console.groq.com/keys +## Hugging Face + +The Hugging Face Inference API is currently free for rate-limited, non-commercial use. + +- https://huggingface.co/settings/tokens + ## Mistral The Mistral API is a commercial product, but it currently does not require a credit card, and comes with a $5.00 credit. 
- https://console.mistral.ai/api-keys/

+## Perplexity
+
+The Perplexity API requires a credit card.
+
+- https://www.perplexity.ai/settings/api
+
## Reka AI

The Reka AI API requires a credit card, but currently comes with a $5 credit.
diff --git a/docs/USAGE.md b/docs/USAGE.md
index 59dd3b0..e4f53eb 100644
--- a/docs/USAGE.md
+++ b/docs/USAGE.md
@@ -5,18 +5,21 @@
- [Initializing llm-interface](#initializing-llm-interface)
- [Basic Usage Examples](#basic-usage-examples)
- [OpenAI Interface](#openai-interface)
+ - [AI21 Interface](#ai21-interface)
- [Anthropic Interface](#anthropic-interface)
- [Cohere Interface](#cohere-interface)
- [Gemini Interface](#gemini-interface)
- [Goose AI Interface](#goose-ai-interface)
- [Groq Interface](#groq-interface)
+ - [HuggingFace Interface](#huggingface-interface)
- [Mistral AI Interface](#mistral-ai-interface)
+ - [Perplexity Interface](#perplexity-interface)
- [Reka AI Interface](#reka-ai-interface)
- [LLaMA.cpp Interface](#llamacpp-interface)
- [Advanced Usage Examples](#advanced-usage-examples)
- [OpenAI Interface (JSON Output)](#openai-interface-json-output)
- - [Gemini Interface (JSON Output)](#gemini-interface-json-output)
- [OpenAI Interface (Cached)](#openai-interface-cached)
+ - [OpenAI Interface (Graceful Retry)](#openai-interface-graceful-retry)

# Usage
@@ -99,6 +102,33 @@ anthropic
  });
```
+### AI21 Interface
+
+The AI21 interface allows you to send messages to the AI21 API.
+
+#### Example
+
+```javascript
+const ai21 = new LLMInterface.ai21(process.env.AI21_API_KEY);
+
+const message = {
+  model: "jamba-instruct",
+  messages: [
+    { role: "system", content: "You are a helpful assistant." },
+    { role: "user", content: "Explain the importance of low latency LLMs." },
+  ],
+};
+
+ai21
+  .sendMessage(message, { max_tokens: 150 })
+  .then((response) => {
+    console.log(response);
+  })
+  .catch((error) => {
+    console.error(error);
+  });
+```
+
### Cohere Interface

The Cohere interface allows you to send messages to the Cohere API.
@@ -207,6 +237,38 @@ groq
  });
```
+### HuggingFace Interface
+
+The HuggingFace interface allows you to send messages to the Hugging Face Inference API.
+
+#### Example
+
+```javascript
+const huggingface = new LLMInterface.huggingface(process.env.HUGGINGFACE_API_KEY);
+
+const message = {
+  model: "meta-llama/Meta-Llama-3-8B-Instruct",
+  messages: [
+    {
+      role: "user",
+      content:
+        "You are a helpful assistant. Say OK if you understand and stop.",
+    },
+    { role: "system", content: "OK" },
+    { role: "user", content: "Explain the importance of low latency LLMs." },
+  ],
+};
+
+huggingface
+  .sendMessage(message, { max_tokens: 150 })
+  .then((response) => {
+    console.log(response);
+  })
+  .catch((error) => {
+    console.error(error);
+  });
+```
+
### Mistral AI Interface

The Mistral AI interface allows you to send messages to the Mistral AI API.
@@ -234,6 +296,38 @@ mistral
  });
```
+### Perplexity Interface
+
+The Perplexity interface allows you to send messages to the Perplexity API.
+
+#### Example
+
+```javascript
+const perplexity = new LLMInterface.perplexity(process.env.PERPLEXITY_API_KEY);
+
+const message = {
+  model: "llama-3-sonar-small-32k-online",
+  messages: [
+    { role: "system", content: "You are a helpful assistant." },
+    { role: "user", content: "Explain the importance of low latency LLMs." },
+  ],
+};
+
+perplexity
+  .sendMessage(message, { max_tokens: 150 })
+  .then((response) => {
+    console.log(response);
+  })
+  .catch((error) => {
+    console.error(error);
+  });
+```
+
### Reka AI Interface

The Reka AI interface allows you to send messages to the Reka AI REST API.
@@ -294,7 +388,7 @@ Then select the interface you'd like to use and initialize it with an API key or

### OpenAI Interface (JSON Output)

-The OpenAI interface allows you to send messages to the OpenAI API and request the response back in JSON. To take advantage of this feature be sure to include text like "Return the results as a JSON object." and provide a desired output format like "Follow this format: [{reason, reasonDescription}]."
+Some interfaces allow you to request the response back in JSON; currently **OpenAI** and **Gemini** are supported. To take advantage of this feature, be sure to include text like "Return the results as a JSON object." and provide a desired output format like "Follow this format: [{reason, reasonDescription}]." In this example we use OpenAI and request a valid JSON object.

#### Example
@@ -326,32 +420,25 @@ openai
  });
```

-### Gemini Interface (JSON Output)
+### OpenAI Interface (Cached)

-The Gemini interface allows you to send messages to the Google Gemini API. To take advantage of this feature be sure to include text like "Return the results as a JSON object." and provide a desired output format like "Follow this format: [{reason, reasonDescription}]."
+To reduce operational costs and improve performance you can optionally specify a cache timeout in seconds. In this example we use OpenAI and store the results for 86400 seconds or one day.

#### Example

```javascript
-const gemini = new LLMInterface.gemini(process.env.GEMINI_API_KEY);
+const openai = new LLMInterface.openai(process.env.OPENAI_API_KEY);

const message = {
-  model: "gemini-1.5-flash",
+  model: "gpt-3.5-turbo",
  messages: [
-    {
-      role: "system",
-      content: "You are a helpful assistant.",
-    },
-    {
-      role: "user",
-      content:
-        "Explain the importance of low latency LLMs. Return the results as a JSON object. Follow this format: [{reason, reasonDescription}].",
-    },
+    { role: "system", content: "You are a helpful assistant." },
+    { role: "user", content: "Explain the importance of low latency LLMs." },
  ],
};

-gemini
-  .sendMessage(message, { max_tokens: 100, response_format: "json_object" })
+openai
+  .sendMessage(message, { max_tokens: 150 }, { cacheTimeoutSeconds: 86400 })
  .then((response) => {
    console.log(response);
  })
@@ -360,9 +447,9 @@ gemini
  });
```

-### OpenAI Interface (Cached)
+### OpenAI Interface (Graceful Retry)

-The OpenAI interface allows you to send messages to the OpenAI API. To reduce operational costs and improve performance you can optionally specify a cache timeout in seconds. In this example we store the results for 86400 seconds or one day.
+You can gracefully retry failed requests. In this example we use OpenAI and retry up to 3 times if needed.
#### Example @@ -378,7 +465,7 @@ const message = { }; openai - .sendMessage(message, { max_tokens: 150 }, 86400) + .sendMessage(message, { max_tokens: 150 }, { retryAttempts: 3 }) .then((response) => { console.log(response); }) diff --git a/env b/env index 2ca719b..2bccd53 100644 --- a/env +++ b/env @@ -5,4 +5,8 @@ REKA_API_KEY= GOOSE_API_KEY= MISTRAL_API_KEY= + HUGGINGFACE_API_KEY= + PERPLEXITY_API_KEY= + AI21_API_KEY= + AZUREAI_API_KEY= LLAMACPP_URL=http://localhost:8080/completions \ No newline at end of file diff --git a/package.json b/package.json index bc7d156..4a2da2e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "llm-interface", - "version": "0.0.9", + "version": "0.0.10", "main": "src/index.js", "description": "A simple, unified interface for integrating and interacting with multiple Large Language Model (LLM) APIs, including OpenAI, Anthropic, Google Gemini, Groq, and LlamaCPP.", "scripts": { diff --git a/src/ai21.js b/src/ai21.js new file mode 100644 index 0000000..4179b63 --- /dev/null +++ b/src/ai21.js @@ -0,0 +1,97 @@ +// AI21.js + +const axios = require("axios"); +const { getFromCache, saveToCache } = require("./cache"); + +class AI21 { + /** + * @constructor + * @param {string} apiKey - The API key for AI21. + */ + constructor(apiKey) { + this.apiKey = apiKey; + this.client = axios.create({ + baseURL: "https://api.ai21.com/studio/v1", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + }, + }); + } + + /** + * Sends a message to the AI21 API. + * + * @param {Object} message - The message object containing the messages to send. + * @param {Object} [options={}] - Optional parameters for the request. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. + * @returns {Promise} The response object from the API. + * @throws {Error} Throws an error if the API request fails. 
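+   *
+   * @example
+   * // Minimal usage sketch; `apiKey` and `message` are placeholders following the
+   * // { model, messages } shape described above, mirroring the other wrappers.
+   * const ai21 = new AI21(apiKey);
+   * const interfaceOpts = {
+   *   cacheTimeoutSeconds: 300,
+   *   retryAttempts: 3,
+   * };
+   * ai21.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error);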
+ */ + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + + const { messages } = message; + const { + model = "jamba-instruct", + max_tokens = 200, + temperature = 1, + top_p = 1, + stop = "<|endoftext|>", + } = options; + + const requestBody = { + model, + messages, + max_tokens, + }; + + // Create cache key and check for cached response + const cacheKey = JSON.stringify(requestBody); + if (cacheTimeoutSeconds) { + const cachedResponse = getFromCache(cacheKey); + if (cachedResponse) { + return cachedResponse; + } + } + + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const response = await this.client.post( + "/chat/completions", + requestBody + ); + let responseContent = null; + if ( + response && + response.data && + response.data.choices && + response.data.choices[0] && + response.data.choices[0].message && + response.data.choices[0].message.content + ) { + responseContent = response.data.choices[0].message.content; + } + + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + console.error("API Error:", error.message); + throw new Error(error.message); + } + } + } + } +} + +module.exports = AI21; diff --git a/src/anthropic.js b/src/anthropic.js index d282bde..fd1759f 100644 --- a/src/anthropic.js +++ b/src/anthropic.js @@ -24,15 +24,26 @@ class Anthropic { * * @param {Object} message - The message object containing the model and messages to send. * @param {Object} [options={}] - Optional parameters for the request. - * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. * @returns {Promise} The response text from the API. * @throws {Error} Throws an error if the API request fails. * * @example * const anthropic = new Anthropic(apiKey); - * anthropic.sendMessage(message, { max_tokens: 150 }, 60).then(console.log).catch(console.error); + * const interfaceOpts = { + * cacheTimeoutSeconds: 300, + * retryAttempts: 3, + * }; + * anthropic.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error); */ - async sendMessage(message, options = {}, cacheTimeoutSeconds) { + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + const { max_tokens = 150, model = message.model || "claude-3-opus-20240229", @@ -64,24 +75,37 @@ class Anthropic { } } - try { - // Make API request and cache the response - const response = await this.anthropic.messages.create(params); - const responseContent = - response && - response.content && - response.content[0] && - response.content[0].text - ? 
response.content[0].text - : null; - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const response = await this.anthropic.messages.create(params); + let responseContent = null; + if ( + response && + response.content && + response.content[0] && + response.content[0].text + ) { + responseContent = response.content[0].text; + } + + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + if (error.response) { + console.error("Response data:", error.response.data); + } else if (error.request) { + console.error("No response received:", error.request); + } else { + console.error("Error setting up the request:", error.message); + } + throw error; + } } - return responseContent; - } catch (error) { - throw new Error( - error.response ? error.response.data.error.message : error.message - ); } } } diff --git a/src/azureai.js b/src/azureai.js new file mode 100644 index 0000000..406ba6e --- /dev/null +++ b/src/azureai.js @@ -0,0 +1,111 @@ +// AzureAI.js + +const axios = require("axios"); +const { getFromCache, saveToCache } = require("./cache"); + +class AzureAI { + /** + * @constructor + * @param {string} apiKey - The API key for Azure AI. + */ + constructor(apiKey) { + this.apiKey = apiKey; + this.client = axios.create({ + baseURL: "https://api.ai.azure.com", // You might need to specify a region here + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + }, + }); + } + + /** + * Sends a message to the Azure AI API. + * + * @param {Object} message - The message object containing the messages to send. + * @param {Object} [options={}] - Optional parameters for the request. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. + * @returns {Promise} The response object from the API. + * @throws {Error} Throws an error if the API request fails. 
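+   *
+   * @example
+   * // Minimal usage sketch; `apiKey` and `message` are placeholders following the
+   * // { messages } shape described above, mirroring the other wrappers.
+   * const azureai = new AzureAI(apiKey);
+   * const interfaceOpts = {
+   *   cacheTimeoutSeconds: 300,
+   *   retryAttempts: 3,
+   * };
+   * azureai.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error);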
+  */
+  async sendMessage(message, options = {}, interfaceOptions = {}) {
+    let cacheTimeoutSeconds;
+    if (typeof interfaceOptions === "number") {
+      cacheTimeoutSeconds = interfaceOptions;
+    } else {
+      cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds;
+    }
+
+    const { messages } = message;
+    const {
+      model = "llama2-70b-chat",
+      frequency_penalty = 0,
+      presence_penalty = 0,
+      max_tokens = 256,
+      response_format,
+      seed = 42,
+      stop = "<|endoftext|>",
+      stream = false,
+      temperature = 0,
+      top_p = 1,
+    } = options;
+
+    const requestBody = {
+      model,
+      messages,
+      frequency_penalty,
+      presence_penalty,
+      max_tokens,
+      // Honor a requested json_object format; default to plain text.
+      response_format: {
+        type: response_format === "json_object" ? "json_object" : "text",
+      },
+    };
+
+    // Create cache key and check for cached response
+    const cacheKey = JSON.stringify(requestBody);
+    if (cacheTimeoutSeconds) {
+      const cachedResponse = getFromCache(cacheKey);
+      if (cachedResponse) {
+        return cachedResponse;
+      }
+    }
+
+    let retryAttempts = interfaceOptions.retryAttempts || 0;
+    while (retryAttempts >= 0) {
+      try {
+        const response = await this.client.post(
+          "/chat/completions?api-version=2024-04-01-preview",
+          requestBody
+        );
+
+        let responseContent = null;
+        if (
+          response &&
+          response.data &&
+          response.data.choices &&
+          response.data.choices[0] &&
+          response.data.choices[0].message
+        ) {
+          responseContent = response.data.choices[0].message.content;
+        }
+
+        if (response_format === "json_object") {
+          try {
+            responseContent = JSON.parse(responseContent);
+          } catch (e) {
+            responseContent = null;
+          }
+        }
+
+        if (cacheTimeoutSeconds && responseContent) {
+          saveToCache(cacheKey, responseContent, cacheTimeoutSeconds);
+        }
+        return responseContent;
+      } catch (error) {
+        retryAttempts--;
+        if (retryAttempts < 0) {
+          console.error("API Error:", error.message);
+          throw new Error(error.message);
+        }
+      }
+    }
+  }
+}
+
+module.exports = AzureAI;
diff --git a/src/cohere.js b/src/cohere.js
index d45745d..20af971 100644
--- a/src/cohere.js
+++ b/src/cohere.js
@@ -28,15 +28,26 @@ class Cohere {
  *
  * @param {Object} message - The message object containing the model and messages to send.
  * @param {Object} [options={}] - Optional parameters for the request.
-  * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response.
+  * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts.
  * @returns {Promise} The response text from the API.
  * @throws {Error} Throws an error if the API request fails.
  *
  * @example
  * const cohere = new Cohere(apiKey);
-  * cohere.sendMessage(message, { max_tokens: 150 }, 60).then(console.log).catch(console.error);
+  * const interfaceOpts = {
+  *   cacheTimeoutSeconds: 300,
+  *   retryAttempts: 3,
+  * };
+  * cohere.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error);
  */
-  async sendMessage(message, options = {}, cacheTimeoutSeconds) {
+  async sendMessage(message, options = {}, interfaceOptions = {}) {
+    let cacheTimeoutSeconds;
+    if (typeof interfaceOptions === "number") {
+      cacheTimeoutSeconds = interfaceOptions;
+    } else {
+      cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds;
+    }
+
    const { messages } = message;
    const { max_tokens = 150 } = options;
    let { model } = message;
@@ -70,26 +81,32 @@ class Cohere {
      }
    }

-    try {
-      // Make API request and cache the response
-      const response = await this.client.post(`/chat`, payload);
-      const responseContent =
-        response && response.data && response.data.text
-          ?
response.data.text - : null; - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); - } - return responseContent; - } catch (error) { - if (error.response) { - console.error("Response data:", error.response.data); - } else if (error.request) { - console.error("No response received:", error.request); - } else { - console.error("Error setting up the request:", error.message); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const response = await this.client.post(`/chat`, payload); + let responseContent = null; + if (response && response.data && response.data.text) { + responseContent = response.data.text; + } + + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + if (error.response) { + console.error("Response data:", error.response.data); + } else if (error.request) { + console.error("No response received:", error.request); + } else { + console.error("Error setting up the request:", error.message); + } + throw error; + } } - throw error; } } } diff --git a/src/gemini.js b/src/gemini.js index 8951d39..9808a4e 100644 --- a/src/gemini.js +++ b/src/gemini.js @@ -26,9 +26,6 @@ class Gemini { * @returns {Object} The converted data structure. */ convertDataStructure(input, max_tokens, response_format) { - if (response_format === "json_object") { - response_format = "application/json"; - } let history = input.messages.slice(0, -1).map((message) => ({ role: message.role, parts: [{ text: message.content }], @@ -38,14 +35,16 @@ class Gemini { history[0].role = "user"; } const prompt = input.messages[input.messages.length - 1].content; - return { - history, - prompt, - generationConfig: { - maxOutputTokens: max_tokens, - ...(response_format && { response_mime_type: response_format }), - }, + + const response_mime_type = + response_format == "json_object" ? "application/json" : "text/plain"; + + const generationConfig = { + maxOutputTokens: max_tokens, + ...(response_format && { response_mime_type }), }; + + return { history, prompt, generationConfig }; } /** @@ -53,15 +52,26 @@ class Gemini { * * @param {Object} message - The message object containing the model and messages to send. * @param {Object} [options={}] - Optional parameters for the request. - * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response. - * @returns {Promise} The response text from the API. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. + * @returns {Promise} The response text from the API. * @throws {Error} Throws an error if the API request fails. 
* * @example * const gemini = new Gemini(apiKey); - * gemini.sendMessage(message, { max_tokens: 150 }, 60).then(console.log).catch(console.error); + * const interfaceOpts = { + * cacheTimeoutSeconds: 300, + * retryAttempts: 3, + * }; + * gemini.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error); */ - async sendMessage(message, options = {}, cacheTimeoutSeconds) { + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + const { max_tokens = 150, model = message.model || "gemini-1.5-flash", @@ -86,18 +96,34 @@ class Gemini { } } - try { - const modelInstance = this.genAI.getGenerativeModel({ model }); - const chat = modelInstance.startChat({ history, generationConfig }); - const result = await chat.sendMessage(prompt); - const response = await result.response; - const text = await response.text(); - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, text, cacheTimeoutSeconds); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const modelInstance = this.genAI.getGenerativeModel({ model }); + const chat = modelInstance.startChat({ history, generationConfig }); + + const result = await chat.sendMessage(prompt); + const response = await result.response; + let text = await response.text(); + + if (response_format === "json_object") { + try { + text = JSON.parse(text); + } catch (e) { + text = null; + } + } + + if (cacheTimeoutSeconds && text) { + saveToCache(cacheKey, text, cacheTimeoutSeconds); + } + return text; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + throw new Error(`Gemini API error: ${error.message}`); + } } - return text; - } catch (error) { - throw new Error(`Gemini API error: ${error.message}`); } } } diff --git a/src/goose.js b/src/goose.js index 59b14bc..e0e5fa0 100644 --- a/src/goose.js +++ b/src/goose.js @@ -17,7 +17,7 @@ class Goose { this.client = axios.create({ baseURL: "https://api.goose.ai", headers: { - "Content-Type": "application/json", + "Content-type": "application/json", Authorization: `Bearer ${apiKey}`, }, }); @@ -28,15 +28,26 @@ class Goose { * * @param {Object} message - The message object containing the model and messages to send. * @param {Object} [options={}] - Optional parameters for the request. - * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. * @returns {Promise} The response text from the API. * @throws {Error} Throws an error if the API request fails. 
* * @example * const goose = new Goose(apiKey); - * goose.sendMessage(message, { max_tokens: 150 }, 60).then(console.log).catch(console.error); + * const interfaceOpts = { + * cacheTimeoutSeconds: 300, + * retryAttempts: 3, + * }; + * goose.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error); */ - async sendMessage(message, options = {}, cacheTimeoutSeconds) { + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + const { messages } = message; const { max_tokens = 150 } = options; let { model } = message; @@ -64,31 +75,39 @@ class Goose { } } - try { - // Make API request and cache the response - const url = `https://api.goose.ai/v1/engines/${model}/completions`; - const response = await this.client.post(url, payload); - const responseText = - response && - response.data && - response.data.choices && - response.data.choices[0] && - response.data.choices[0].text - ? response.data.choices[0].text.trim() - : null; - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, responseText, cacheTimeoutSeconds); - } - return responseText; - } catch (error) { - if (error.response) { - console.error("Response data:", error.response.data); - } else if (error.request) { - console.error("No response received:", error.request); - } else { - console.error("Error setting up the request:", error.message); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const url = `https://api.goose.ai/v1/engines/${model}/completions`; + const response = await this.client.post(url, payload); + let responseText = null; + if ( + response && + response.data && + response.data.choices && + response.data.choices[0] && + response.data.choices[0].text + ) { + responseText = response.data.choices[0].text.trim(); + } + + if (cacheTimeoutSeconds && responseText) { + saveToCache(cacheKey, responseText, cacheTimeoutSeconds); + } + return responseText; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + if (error.response) { + console.error("Response data:", error.response.data); + } else if (error.request) { + console.error("No response received:", error.request); + } else { + console.error("Error setting up the request:", error.message); + } + throw error; + } } - throw error; } } } diff --git a/src/groq.js b/src/groq.js index 22aa8cf..2136bb8 100644 --- a/src/groq.js +++ b/src/groq.js @@ -24,15 +24,26 @@ class Groq { * * @param {Object} message - The message object containing the model and messages to send. * @param {Object} [options={}] - Optional parameters for the request. - * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. * @returns {Promise} The response text from the API. * @throws {Error} Throws an error if the API request fails. 
* * @example * const groq = new Groq(apiKey); - * groq.sendMessage(message, { max_tokens: 150 }, 60).then(console.log).catch(console.error); + * const interfaceOpts = { + * cacheTimeoutSeconds: 300, + * retryAttempts: 3, + * }; + * groq.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error); */ - async sendMessage(message, options = {}, cacheTimeoutSeconds) { + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + const { max_tokens = 150, model = message.model || "llama3-8b-8192" } = options; const params = { @@ -50,23 +61,31 @@ class Groq { } } - try { - // Make API request and cache the response - const chatCompletion = await this.groq.chat.completions.create(params); - const responseContent = - chatCompletion && - chatCompletion.choices && - chatCompletion.choices[0] && - chatCompletion.choices[0].message && - chatCompletion.choices[0].message.content - ? chatCompletion.choices[0].message.content - : null; - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const chatCompletion = await this.groq.chat.completions.create(params); + let responseContent = null; + if ( + chatCompletion && + chatCompletion.choices && + chatCompletion.choices[0] && + chatCompletion.choices[0].message && + chatCompletion.choices[0].message.content + ) { + responseContent = chatCompletion.choices[0].message.content; + } + + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + throw new Error(error.response.data.error.message); + } } - return responseContent; - } catch (error) { - throw new Error(error.response.data.error.message); } } } diff --git a/src/huggingface.js b/src/huggingface.js new file mode 100644 index 0000000..41dd440 --- /dev/null +++ b/src/huggingface.js @@ -0,0 +1,100 @@ +/** + * @file huggingface.js + * @class Hugging Face Inference API + * @description Wrapper class for the Hugging Face Inference API. + * @param {string} apiKey - The API key for Hugging Face Inference API. + */ + +const axios = require("axios"); +const { getFromCache, saveToCache } = require("./cache"); // Import caching functions + +class HuggingFace { + /** + * @constructor + * @param {string} apiKey - The API key for accessing the HuggingFace API. + */ + constructor(apiKey) { + this.client = axios.create({ + baseURL: "https://api-inference.huggingface.co/models/", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + }, + }); + } + + /** + * Sends a message to the HuggingFace API. + * @param {object} message - The message object containing model and messages to send. + * @param {object} options - Optional parameters such as max_tokens and model. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. + * @returns {Promise} - A promise that resolves to the response text or null if an error occurs. 
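+   *
+   * @example
+   * // Minimal usage sketch; `apiKey` and `message` are placeholders following the
+   * // { model, messages } shape described above, mirroring the other wrappers.
+   * const huggingface = new HuggingFace(apiKey);
+   * const interfaceOpts = {
+   *   cacheTimeoutSeconds: 300,
+   *   retryAttempts: 3,
+   * };
+   * huggingface.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error);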
+ */ + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + + const { model: messageModel, messages } = message; + const { + max_tokens = 150, + model = messageModel || "meta-llama/Meta-Llama-3-8B-Instruct", + } = options; + + const prompt = messages.map((msg) => msg.content).join(" "); + + const payload = { + inputs: prompt, + parameters: { max_new_tokens: max_tokens, ...options }, + }; + + // Create cache key and check for cached response + const cacheKey = JSON.stringify(payload); + if (cacheTimeoutSeconds) { + const cachedResponse = getFromCache(cacheKey); + if (cachedResponse) { + return cachedResponse; + } + } + + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const response = await this.client.post(`${model}`, payload); + let responseContent = null; + + if ( + response && + response.data && + response.data[0] && + response.data[0].generated_text + ) { + responseContent = response.data[0].generated_text; + } + + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + // Handle errors + if (error.response) { + console.error("Response data:", error.response.data); + } else if (error.request) { + console.error("No response received:", error.request); + } else { + console.error("Error setting up the request:", error.message); + } + throw error; + } + } + } + } +} + +module.exports = HuggingFace; diff --git a/src/index.js b/src/index.js index bba45aa..c8e5766 100644 --- a/src/index.js +++ b/src/index.js @@ -13,6 +13,7 @@ const modules = { goose: "./goose", cohere: "./cohere", mistral: "./mistral", + huggingface: "./huggingface", }; const LLMInterface = {}; @@ -29,5 +30,30 @@ Object.keys(modules).forEach((key) => { }); }); +/** + * Returns a message object with the provided message and an optional system message. + * + * @param {string} message - The user's message. + * @param {string} [systemMessage="You are a helpful assistant."] - The system's message. + * @returns {Object} The message object. + */ +LLMInterface.returnMessageObject = function ( + message, + systemMessage = "You are a helpful assistant." +) { + return { + messages: [ + { + role: "system", + content: systemMessage, + }, + { + role: "user", + content: message, + }, + ], + }; +}; + const handlers = LLMInterface; module.exports = { LLMInterface, handlers }; diff --git a/src/llamacpp.js b/src/llamacpp.js index 62cb345..b9c4903 100644 --- a/src/llamacpp.js +++ b/src/llamacpp.js @@ -27,15 +27,26 @@ class LlamaCPP { * * @param {Object} prompt - The prompt object containing the model and messages to send. * @param {Object} [options={}] - Optional parameters for the request. - * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. * @returns {Promise} The response text from the API. * @throws {Error} Throws an error if the API request fails. 
* * @example * const llamacpp = new LlamaCPP(llamacppURL); - * llamacpp.sendMessage(prompt, { max_tokens: 150 }, 60).then(console.log).catch(console.error); + * const interfaceOpts = { + * cacheTimeoutSeconds: 300, + * retryAttempts: 3, + * }; + * llamacpp.sendMessage(prompt, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error); */ - async sendMessage(prompt, options = {}, cacheTimeoutSeconds) { + async sendMessage(prompt, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + const { max_tokens = 150 } = options; // Prepare the payload for the API request @@ -56,30 +67,37 @@ class LlamaCPP { } } - try { - // Make API request and cache the response - const response = await this.client.post("", payload); - let contents; - if (response.data.content) { - contents = response.data.content; - } else if (response.data.results) { - contents = response.data.results.map((result) => result.content).join(); - } else { - contents = ""; - } - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, contents, cacheTimeoutSeconds); - } - return contents; - } catch (error) { - if (error.response) { - console.error("Response data:", error.response.data); - } else if (error.request) { - console.error("No response received:", error.request); - } else { - console.error("Error setting up the request:", error.message); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const response = await this.client.post("", payload); + let contents = ""; + + if (response.data.content) { + contents = response.data.content; + } else if (response.data.results) { + contents = response.data.results + .map((result) => result.content) + .join(); + } + + if (cacheTimeoutSeconds && contents) { + saveToCache(cacheKey, contents, cacheTimeoutSeconds); + } + return contents; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + if (error.response) { + console.error("Response data:", error.response.data); + } else if (error.request) { + console.error("No response received:", error.request); + } else { + console.error("Error setting up the request:", error.message); + } + throw error; + } } - throw error; } } } diff --git a/src/mistral.js b/src/mistral.js index 535003e..26b7a0b 100644 --- a/src/mistral.js +++ b/src/mistral.js @@ -28,15 +28,26 @@ class Mistral { * * @param {Object} message - The message object containing the model and messages to send. * @param {Object} [options={}] - Optional parameters for the request. - * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. * @returns {Promise} The response text from the API. * @throws {Error} Throws an error if the API request fails. 
* * @example * const mistral = new Mistral(apiKey); - * mistral.sendMessage(message, { max_tokens: 150 }, 60).then(console.log).catch(console.error); + * const interfaceOpts = { + * cacheTimeoutSeconds: 300, + * retryAttempts: 3, + * }; + * mistral.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error); */ - async sendMessage(message, options = {}, cacheTimeoutSeconds) { + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + let { model, messages } = message; // Set default model if not provided @@ -57,31 +68,40 @@ class Mistral { } } - try { - // Make API request and cache the response - const response = await this.client.post(`/chat/completions`, payload); - const responseContent = - response && - response.data && - response.data.choices && - response.data.choices[0] && - response.data.choices[0].message && - response.data.choices[0].message.content - ? response.data.choices[0].message.content - : null; - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); - } - return responseContent; - } catch (error) { - if (error.response) { - console.error("Response data:", error.response.data); - } else if (error.request) { - console.error("No response received:", error.request); - } else { - console.error("Error setting up the request:", error.message); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const response = await this.client.post(`/chat/completions`, payload); + let responseContent = null; + + if ( + response && + response.data && + response.data.choices && + response.data.choices[0] && + response.data.choices[0].message && + response.data.choices[0].message.content + ) { + responseContent = response.data.choices[0].message.content; + } + + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + if (error.response) { + console.error("Response data:", error.response.data); + } else if (error.request) { + console.error("No response received:", error.request); + } else { + console.error("Error setting up the request:", error.message); + } + throw error; + } } - throw error; } } } diff --git a/src/openai.js b/src/openai.js index 3f7a84a..ad41eb4 100644 --- a/src/openai.js +++ b/src/openai.js @@ -24,19 +24,30 @@ class OpenAI { * * @param {Object} message - The message object containing the model and messages to send. * @param {Object} [options={}] - Optional parameters for the request. - * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. * @returns {Promise} The response text from the API. * @throws {Error} Throws an error if the API request fails. 
* * @example * const openai = new OpenAI(apiKey); - * openai.sendMessage(message, { max_tokens: 150 }, 60).then(console.log).catch(console.error); + * const interfaceOpts = { + * cacheTimeoutSeconds: 300, + * retryAttempts: 3, + * }; + * openai.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error); */ - async sendMessage(message, options = {}, cacheTimeoutSeconds) { + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + const { model: messageModel, messages } = message; const { max_tokens = 150, - model = messageModel || "gpt-3.5-turbo-0613", + model = messageModel || "gpt-3.5-turbo", response_format, } = options; const requestPayload = { @@ -54,24 +65,40 @@ class OpenAI { } } - try { - const completion = await this.openai.chat.completions.create( - requestPayload - ); - const responseContent = - completion && - completion.choices && - completion.choices[0] && - completion.choices[0].message && - completion.choices[0].message.content - ? completion.choices[0].message.content - : null; - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const completion = await this.openai.chat.completions.create( + requestPayload + ); + let responseContent = null; + if ( + completion && + completion.choices && + completion.choices[0] && + completion.choices[0].message + ) { + responseContent = completion.choices[0].message.content; + } + + if (response_format === "json_object") { + try { + responseContent = JSON.parse(responseContent); + } catch (e) { + responseContent = null; + } + } + + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + throw new Error(error.response.data.error.message); + } } - return responseContent; - } catch (error) { - throw new Error(error.response.data.error.message); } } } diff --git a/src/perplexity.js b/src/perplexity.js new file mode 100644 index 0000000..266a773 --- /dev/null +++ b/src/perplexity.js @@ -0,0 +1,91 @@ +// Perplexity.js + +const axios = require("axios"); +const { getFromCache, saveToCache } = require("./cache"); + +class Perplexity { + /** + * @constructor + * @param {string} apiKey - The API key for Perplexity Labs AI. + */ + constructor(apiKey) { + this.apiKey = apiKey; + this.client = axios.create({ + baseURL: "https://api.perplexity.ai", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + }, + }); + } + + /** + * Sends a message to the Perplexity Labs AI API. + * + * @param {Object} message - The message object containing the messages to send. + * @param {Object} [options={}] - Optional parameters for the request. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. + * @returns {Promise} The response object from the API. + * @throws {Error} Throws an error if the API request fails. 
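+   *
+   * @example
+   * // Minimal usage sketch; `apiKey` and `message` are placeholders following the
+   * // { messages } shape described above, mirroring the other wrappers.
+   * const perplexity = new Perplexity(apiKey);
+   * const interfaceOpts = {
+   *   cacheTimeoutSeconds: 300,
+   *   retryAttempts: 3,
+   * };
+   * perplexity.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error);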
+ */ + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + + const { messages } = message; + const { model = "llama-3-sonar-small-32k-online" } = options; + + const requestBody = { + model, + messages, + }; + + // Create cache key and check for cached response + const cacheKey = JSON.stringify(requestBody); + if (cacheTimeoutSeconds) { + const cachedResponse = getFromCache(cacheKey); + if (cachedResponse) { + return cachedResponse; + } + } + + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const response = await this.client.post( + "/chat/completions", + requestBody + ); + let responseContent = null; + + if ( + response && + response.data && + response.data.choices && + response.data.choices[0] && + response.data.choices[0].message && + response.data.choices[0].message.content + ) { + responseContent = response.data.choices[0].message.content; + } + + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + console.error("API Error:", error.message); + throw new Error(error.message); + } + } + } + } +} + +module.exports = Perplexity; diff --git a/src/reka.js b/src/reka.js index 3009d81..c617851 100644 --- a/src/reka.js +++ b/src/reka.js @@ -29,15 +29,26 @@ class Reka { * * @param {Object} message - The message object containing the model and messages to send. * @param {Object} [options={}] - Optional parameters for the request. - * @param {number} [cacheTimeoutSeconds] - Optional timeout in seconds for caching the response. + * @param {Object | number} [interfaceOptions={}] - Optional interface options, including cache timeout and retry attempts. * @returns {Promise} The response text from the API. * @throws {Error} Throws an error if the API request fails. 
* * @example * const reka = new Reka(apiKey); - * reka.sendMessage(message, { max_tokens: 150 }, 60).then(console.log).catch(console.error); + * const interfaceOpts = { + * cacheTimeoutSeconds: 300, + * retryAttempts: 3, + * }; + * reka.sendMessage(message, { max_tokens: 150 }, interfaceOpts).then(console.log).catch(console.error); */ - async sendMessage(message, options = {}, cacheTimeoutSeconds) { + async sendMessage(message, options = {}, interfaceOptions = {}) { + let cacheTimeoutSeconds; + if (typeof interfaceOptions === "number") { + cacheTimeoutSeconds = interfaceOptions; + } else { + cacheTimeoutSeconds = interfaceOptions.cacheTimeoutSeconds; + } + let { model } = message; // Set default model if not provided @@ -66,29 +77,30 @@ class Reka { } } - try { - // Make API request and cache the response - const response = await this.client.post("/v1/chat", modifiedMessage); + let retryAttempts = interfaceOptions.retryAttempts || 0; + while (retryAttempts >= 0) { + try { + const response = await this.client.post("/v1/chat", modifiedMessage); + let responseContent = null; - // Check for the response content - const responseContent = - response.data?.responses?.[0]?.message?.content || null; + if (response.data?.responses?.[0]?.message?.content) { + responseContent = response.data.responses[0].message.content; + } - if (!responseContent) { - throw new Error("Unexpected response format"); + if (cacheTimeoutSeconds && responseContent) { + saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); + } + return responseContent; + } catch (error) { + retryAttempts--; + if (retryAttempts < 0) { + console.error( + "API Error:", + error.response ? error.response.data : error.message + ); + throw new Error(error.response ? error.response.data : error.message); + } } - - if (cacheTimeoutSeconds) { - saveToCache(cacheKey, responseContent, cacheTimeoutSeconds); - } - - return responseContent; - } catch (error) { - console.error( - "API Error:", - error.response ? error.response.data : error.message - ); - throw new Error(error.response ? error.response.data : error.message); } } } diff --git a/test/basic/ai21.test.js b/test/basic/ai21.test.js new file mode 100644 index 0000000..be1731e --- /dev/null +++ b/test/basic/ai21.test.js @@ -0,0 +1,29 @@ +const AI21 = require("../../src/ai21"); +const { ai21ApiKey } = require("../../config"); + +test("AI21 API Key should be set", () => { + expect(typeof ai21ApiKey).toBe("string"); +}); + +test("AI21 API Client should send a message and receive a response", async () => { + const ai21 = new AI21(ai21ApiKey); + const message = { + model: "jamba-instruct", + messages: [ + { + role: "system", + content: "You are a helpful assistant.", + }, + { + role: "user", + content: "Explain the importance of low latency LLMs.", + }, + ], + }; + const options = { + max_tokens: 200, + }; + const response = await ai21.sendMessage(message, options); + + expect(typeof response).toBe("string"); +}); diff --git a/test/basic/huggingface.test.js b/test/basic/huggingface.test.js new file mode 100644 index 0000000..8b15e9e --- /dev/null +++ b/test/basic/huggingface.test.js @@ -0,0 +1,41 @@ +/** + * @file huggingface.test.js + * @description Tests for the Hugging Face Inference API client. 
+ */ + +const HuggingFace = require("../../src/huggingface"); +const { huggingfaceApiKey } = require("../../config"); + +test("HuggingFace Inference API Key should be set", async () => { + expect(typeof huggingfaceApiKey).toBe("string"); +}); + +test("HuggingFace Inference API Client should send a message and receive a response", async () => { + const huggingface = new HuggingFace(huggingfaceApiKey); + const message = { + model: "meta-llama/Meta-Llama-3-8B-Instruct", + messages: [ + { + role: "user", + content: + "You are a helpful assistant. Say OK if you understand and stop.", + }, + { + role: "system", + content: "OK", + }, + { + role: "user", + content: "Explain the importance of low latency LLMs.", + }, + ], + }; + try { + const response = await huggingface.sendMessage(message, {}); + + expect(typeof response).toBe("string"); + } catch (error) { + console.error("Test failed:", error); + throw error; + } +}, 30000); diff --git a/test/basic/perplexity.test.js b/test/basic/perplexity.test.js new file mode 100644 index 0000000..8004730 --- /dev/null +++ b/test/basic/perplexity.test.js @@ -0,0 +1,26 @@ +const Perplexity = require("../../src/perplexity"); +const { perplexityApiKey } = require("../../config"); + +test("Perplexity Labs API Key should be set", () => { + expect(typeof perplexityApiKey).toBe("string"); +}); + +test("Perplexity Labs API Client should send a message and receive a response", async () => { + const perplixity = new Perplexity(perplexityApiKey); + const message = { + model: "llama-3-sonar-small-32k-online", + messages: [ + { + role: "system", + content: "You are a helpful assistant.", + }, + { + role: "user", + content: "Explain the importance of low latency LLMs.", + }, + ], + }; + const response = await perplixity.sendMessage(message, { max_tokens: 100 }); + + expect(typeof response).toBe("string"); +}); diff --git a/test/basic/reka.test.js b/test/basic/reka.test.js index d41470e..633f611 100644 --- a/test/basic/reka.test.js +++ b/test/basic/reka.test.js @@ -1,5 +1,5 @@ /** - * @file rekate.test.js + * @file reka.test.js * @description Tests for the Reka AI API client. */ diff --git a/test/cache/huggingface.cache.test.js b/test/cache/huggingface.cache.test.js new file mode 100644 index 0000000..a8390c1 --- /dev/null +++ b/test/cache/huggingface.cache.test.js @@ -0,0 +1,89 @@ +// test/huggingface.cache.test.js + +const HuggingFace = require("../../src/huggingface"); +const { huggingfaceApiKey } = require("../../config"); +const { getFromCache, saveToCache } = require("../../src/cache"); +jest.mock("../../src/cache"); // Mock the cache module + +describe("HuggingFace Interface with Cache", () => { + const huggingface = new HuggingFace(huggingfaceApiKey); + + const message = { + model: "gpt2", + messages: [ + { role: "system", content: "You are a helpful assistant." }, + { role: "user", content: "Explain the importance of low latency LLMs." }, + ], + }; + + const options = { max_new_tokens: 50 }; + const inputs = message.messages.map((msg) => msg.content).join(" "); // Ensure consistent spacing + const cacheKey = JSON.stringify({ + inputs: inputs, + parameters: { max_new_tokens: 50 }, + }); + const cacheTimeoutSeconds = 86400; + const mockResponse = [ + { generated_text: "The importance of low latency LLMs is..." 
}, + ]; + + beforeEach(() => { + jest.clearAllMocks(); + }); + + it("should return cached response if available", async () => { + getFromCache.mockReturnValue(mockResponse[0].generated_text); + + const response = await huggingface.sendMessage( + message, + options, + cacheTimeoutSeconds + ); + + expect(getFromCache).toHaveBeenCalledWith(cacheKey); + expect(response).toBe(mockResponse[0].generated_text); + expect(saveToCache).not.toHaveBeenCalled(); + }); + + it("should save response to cache if not cached", async () => { + getFromCache.mockReturnValue(null); + saveToCache.mockImplementation(() => {}); + + // Mocking axios post request + huggingface.client.post = jest + .fn() + .mockResolvedValue({ data: mockResponse }); + + const response = await huggingface.sendMessage( + message, + options, + cacheTimeoutSeconds + ); + + expect(getFromCache).toHaveBeenCalledWith(cacheKey); + expect(huggingface.client.post).toHaveBeenCalledWith("gpt2", { + inputs: inputs, // Ensure consistent spacing + parameters: { max_new_tokens: 50 }, + }); + expect(response).toBe(mockResponse[0].generated_text); + expect(saveToCache).toHaveBeenCalledWith( + cacheKey, + mockResponse[0].generated_text, + cacheTimeoutSeconds + ); + }); + + it("should handle API errors gracefully", async () => { + getFromCache.mockReturnValue(null); + huggingface.client.post = jest + .fn() + .mockRejectedValue(new Error("API error")); + + await expect( + huggingface.sendMessage(message, options, cacheTimeoutSeconds) + ).rejects.toThrow("API error"); + + expect(getFromCache).toHaveBeenCalledWith(cacheKey); + expect(saveToCache).not.toHaveBeenCalled(); + }); +}); diff --git a/test/json/gemini.json.test.js b/test/json/gemini.json.test.js index 417a470..132f899 100644 --- a/test/json/gemini.json.test.js +++ b/test/json/gemini.json.test.js @@ -30,5 +30,5 @@ test("Gemini API Client should send a message and receive a response", async () max_tokens: 100, response_format: "json_object", }); - expect(typeof response).toBe("string"); + expect(typeof response).toBe("object"); }); diff --git a/test/json/openai.json.test.js b/test/json/openai.json.test.js index fa10905..74487f4 100644 --- a/test/json/openai.json.test.js +++ b/test/json/openai.json.test.js @@ -31,5 +31,5 @@ test("OpenAI API Client should send a message and receive a response", async () response_format: "json_object", }); - expect(typeof response).toBe("string"); + expect(typeof response).toBe("object"); });
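The new `LLMInterface.returnMessageObject` helper added in `src/index.js` ships without a usage example; a minimal sketch, assuming the exported `LLMInterface` object and passing the model through `options` since the helper only builds the `messages` array:

```javascript
const { LLMInterface } = require("llm-interface");

const openai = new LLMInterface.openai(process.env.OPENAI_API_KEY);

// Builds { messages: [{ role: "system", ... }, { role: "user", ... }] }.
const message = LLMInterface.returnMessageObject(
  "Explain the importance of low latency LLMs."
);

openai
  .sendMessage(message, { model: "gpt-3.5-turbo", max_tokens: 150 })
  .then(console.log)
  .catch(console.error);
```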