diff --git a/CHANGELOG.md b/CHANGELOG.md
index b40a646..ff0449c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,10 @@
 # CHANGELOG
 
+## v4.4.1
+
+* Add support for Serper and Bing search
+* Add search support for Claude models
+
 ## v4.4.0
 
 * Add support for Anthropic Claude 3
diff --git a/package.json b/package.json
index bc244bb..3c040de 100644
--- a/package.json
+++ b/package.json
@@ -4,7 +4,7 @@
   "displayName": "ChatGPT Copilot",
   "icon": "images/ai-logo.png",
   "description": "An VS Code ChatGPT Copilot Extension",
-  "version": "4.4.0",
+  "version": "4.4.1",
   "aiKey": "",
   "repository": {
     "url": "https://github.com/feiskyer/chatgpt-copilot"
@@ -523,7 +523,17 @@
         "chatgpt.gpt3.googleCSEId": {
           "type": "string",
           "markdownDescription": "Google custom search ID.",
-          "order": 40
+          "order": 41
+        },
+        "chatgpt.gpt3.serperKey": {
+          "type": "string",
+          "markdownDescription": "API key for the Serper search API.",
+          "order": 42
+        },
+        "chatgpt.gpt3.bingKey": {
+          "type": "string",
+          "markdownDescription": "API key for the Bing search API.",
+          "order": 43
         }
       }
     }
@@ -587,4 +597,4 @@
   "resolutions": {
     "clone-deep": "^4.0.1"
   }
-}
+}
\ No newline at end of file
diff --git a/src/anthropic.ts b/src/anthropic.ts
index 9bae1b9..8c17cc7 100644
--- a/src/anthropic.ts
+++ b/src/anthropic.ts
@@ -20,33 +20,39 @@ import { XMLAgentOutputParser } from "langchain/agents/xml/output_parser";
 import { ChatPromptTemplate as ChatPromptTemplatePackage } from "langchain/prompts";
 import { AgentStep } from "langchain/schema";
 import { RunnableSequence } from "langchain/schema/runnable";
-import { ChatMessageHistory } from "langchain/stores/message/in_memory";
-import { GoogleCustomSearch, Tool } from "langchain/tools";
+import { BingSerpAPI, GoogleCustomSearch, Serper, Tool } from "langchain/tools";
 import { Calculator } from "langchain/tools/calculator";
 import { renderTextDescription } from "langchain/tools/render";
 import ChatGptViewProvider from "./chatgpt-view-provider";
+import { ModelConfig } from "./model-config";
 
 // initClaudeModel initializes the Claude model with the given parameters.
-export async function initClaudeModel(viewProvider: ChatGptViewProvider, apiKey: string, apiBaseUrl: string, maxTokens: number, temperature: number, topP: number, googleCSEApiKey: string, googleCSEId: string, messageHistory: ChatMessageHistory) {
+export async function initClaudeModel(viewProvider: ChatGptViewProvider, config: ModelConfig) {
     const apiClaude = new ChatAnthropic({
-        topP: topP,
-        temperature: temperature,
+        topP: config.topP,
+        temperature: config.temperature,
         modelName: viewProvider.model,
-        anthropicApiKey: apiKey,
-        anthropicApiUrl: apiBaseUrl,
+        anthropicApiKey: config.apiKey,
+        anthropicApiUrl: config.apiBaseUrl,
         streaming: true,
-        maxTokens: maxTokens,
+        maxTokens: config.maxTokens,
     }).bind({
         stop: ["", ""],
     });
 
     let tools: Tool[] = [new Calculator()];
-    if (googleCSEApiKey != "" && googleCSEId != "") {
+    if (config.googleCSEApiKey != "" && config.googleCSEId != "") {
         tools.push(new GoogleCustomSearch({
-            apiKey: googleCSEApiKey,
-            googleCSEId: googleCSEId,
+            apiKey: config.googleCSEApiKey,
+            googleCSEId: config.googleCSEId,
         }));
     }
+    if (config.serperKey != "") {
+        tools.push(new Serper(config.serperKey));
+    }
+    if (config.bingKey != "") {
+        tools.push(new BingSerpAPI(config.bingKey));
+    }
 
     const systemContext = `You are ChatGPT helping the User with coding.
 You are intelligent, helpful and an expert developer, who always gives the correct answer and only does what instructed.
@@ -85,7 +91,7 @@ Ensure the final answer is in the same language as the question, unless otherwis
         try {
             const steps = super.parse(text);
             return steps;
-        } catch (error) {
+        } catch (error: any) {
             if (error.message.includes("Could not parse LLM output")) {
                 const msg = error.message.replace("Could not parse LLM output:", "");
                 const agentFinish: AgentFinish = {
@@ -125,7 +131,7 @@ Ensure the final answer is in the same language as the question, unless otherwis
     viewProvider.tools = tools;
     viewProvider.chain = new RunnableWithMessageHistory({
         runnable: agentExecutor,
-        getMessageHistory: (_sessionId) => messageHistory,
+        getMessageHistory: (_sessionId) => config.messageHistory,
         inputMessagesKey: "input",
         historyMessagesKey: "chat_history",
     });
diff --git a/src/chatgpt-view-provider.ts b/src/chatgpt-view-provider.ts
index f98d15f..2157f59 100644
--- a/src/chatgpt-view-provider.ts
+++ b/src/chatgpt-view-provider.ts
@@ -25,6 +25,7 @@ import { ChatMessageHistory } from "langchain/stores/message/in_memory";
 import { Tool } from "langchain/tools";
 import * as vscode from "vscode";
 import { initClaudeModel } from "./anthropic";
+import { ModelConfig } from "./model-config";
 import { chatGpt, initGptModel } from "./openai";
 import { chatCompletion, initGptLegacyModel } from "./openai-legacy";
 
@@ -244,6 +245,8 @@ export default class ChatGptViewProvider implements vscode.WebviewViewProvider {
         const topP = configuration.get("gpt3.top_p") as number;
         const googleCSEApiKey = configuration.get("gpt3.googleCSEApiKey") as string;
         const googleCSEId = configuration.get("gpt3.googleCSEId") as string;
+        const serperKey = configuration.get("gpt3.serperKey") as string;
+        const bingKey = configuration.get("gpt3.bingKey") as string;
         let apiBaseUrl = configuration.get("gpt3.apiBaseUrl") as string;
         if (!apiBaseUrl) {
             if (this.isGpt35Model) {
@@ -301,12 +304,15 @@ export default class ChatGptViewProvider implements vscode.WebviewViewProvider {
         }
 
         this.memory = new ChatMessageHistory();
+        const modelConfig = new ModelConfig(
+            { apiKey, apiBaseUrl, maxTokens, temperature, topP, organization, googleCSEApiKey, googleCSEId, serperKey, bingKey, messageHistory: this.memory },
+        );
         if (this.isGpt35Model) {
-            await initGptModel(this, apiKey, apiBaseUrl, maxTokens, temperature, topP, organization, googleCSEApiKey, googleCSEId, this.memory);
+            await initGptModel(this, modelConfig);
         } else if (this.isClaude) {
-            await initClaudeModel(this, apiKey, apiBaseUrl, maxTokens, temperature, topP, googleCSEApiKey, googleCSEId, this.memory);
+            await initClaudeModel(this, modelConfig);
         } else {
-            initGptLegacyModel(this, apiBaseUrl, apiKey, maxTokens, temperature, topP, organization);
+            initGptLegacyModel(this, modelConfig);
         }
     }
 
diff --git a/src/extension.ts b/src/extension.ts
index 5ea9dd0..f26ab12 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -110,6 +110,8 @@ export async function activate(context: vscode.ExtensionContext) {
             e.affectsConfiguration("chatgpt.gpt3.temperature") ||
             e.affectsConfiguration("chatgpt.gpt3.googleCSEId") ||
             e.affectsConfiguration("chatgpt.gpt3.googleCSEApiKey") ||
+            e.affectsConfiguration("chatgpt.gpt3.serperKey") ||
+            e.affectsConfiguration("chatgpt.gpt3.bingKey") ||
             e.affectsConfiguration("chatgpt.gpt3.top_p")
         ) {
             provider.prepareConversation(true);
diff --git a/src/model-config.ts b/src/model-config.ts
new file mode 100644
index 0000000..d2527cb
--- /dev/null
+++ b/src/model-config.ts
@@ -0,0 +1,42 @@
+/* eslint-disable eqeqeq */
+/* eslint-disable @typescript-eslint/naming-convention */
+/**
+ * @author Pengfei Ni
+ *
+ * @license
+ * Copyright (c) 2024 - Present, Pengfei Ni
+ *
+ * All rights reserved. Code licensed under the ISC license
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+*/
+import { ChatMessageHistory } from "langchain/stores/message/in_memory";
+
+export class ModelConfig {
+    apiKey: string;
+    apiBaseUrl: string;
+    maxTokens: number;
+    temperature: number;
+    topP: number;
+    organization: string;
+    googleCSEApiKey: string;
+    googleCSEId: string;
+    serperKey: string;
+    bingKey: string;
+    messageHistory: ChatMessageHistory;
+
+    constructor({ apiKey, apiBaseUrl, maxTokens, temperature, topP, organization, googleCSEApiKey, googleCSEId, serperKey, bingKey, messageHistory }: { apiKey: string; apiBaseUrl: string; maxTokens: number; temperature: number; topP: number; organization: string; googleCSEApiKey: string; googleCSEId: string; serperKey: string; bingKey: string; messageHistory: ChatMessageHistory; }) {
+        this.apiKey = apiKey;
+        this.apiBaseUrl = apiBaseUrl;
+        this.maxTokens = maxTokens;
+        this.temperature = temperature;
+        this.topP = topP;
+        this.organization = organization;
+        this.googleCSEApiKey = googleCSEApiKey;
+        this.googleCSEId = googleCSEId;
+        this.serperKey = serperKey;
+        this.bingKey = bingKey;
+        this.messageHistory = messageHistory;
+    }
+}
\ No newline at end of file
diff --git a/src/openai-legacy.ts b/src/openai-legacy.ts
index e0b8988..79c6a59 100644
--- a/src/openai-legacy.ts
+++ b/src/openai-legacy.ts
@@ -21,37 +21,38 @@ import {
     SystemMessagePromptTemplate
 } from "langchain/prompts";
 import ChatGptViewProvider, { logger } from "./chatgpt-view-provider";
+import { ModelConfig } from "./model-config";
 
 // initGptLegacyModel initializes the GPT legacy model.
-export function initGptLegacyModel(viewProvider: ChatGptViewProvider, apiBaseUrl: string, apiKey: string, maxTokens: number, temperature: number, topP: number, organization: string) {
-    if (apiBaseUrl?.includes("azure")) {
-        const instanceName = apiBaseUrl.split(".")[0].split("//")[1];
-        const deployName = apiBaseUrl.split("/")[apiBaseUrl.split("/").length - 1];
+export function initGptLegacyModel(viewProvider: ChatGptViewProvider, config: ModelConfig) {
+    if (config.apiBaseUrl?.includes("azure")) {
+        const instanceName = config.apiBaseUrl.split(".")[0].split("//")[1];
+        const deployName = config.apiBaseUrl.split("/")[config.apiBaseUrl.split("/").length - 1];
         viewProvider.apiCompletion = new OpenAI({
             modelName: viewProvider.model,
-            azureOpenAIApiKey: apiKey,
+            azureOpenAIApiKey: config.apiKey,
             azureOpenAIApiInstanceName: instanceName,
             azureOpenAIApiDeploymentName: deployName,
             azureOpenAIApiCompletionsDeploymentName: deployName,
             azureOpenAIApiVersion: "2024-02-01",
-            maxTokens: maxTokens,
+            maxTokens: config.maxTokens,
             streaming: true,
-            temperature: temperature,
-            topP: topP,
+            temperature: config.temperature,
+            topP: config.topP,
         });
     } else {
         // OpenAI
         viewProvider.apiCompletion = new OpenAI({
-            openAIApiKey: apiKey,
+            openAIApiKey: config.apiKey,
             modelName: viewProvider.model,
-            maxTokens: maxTokens,
+            maxTokens: config.maxTokens,
             streaming: true,
-            temperature: temperature,
-            topP: topP,
+            temperature: config.temperature,
+            topP: config.topP,
             configuration: {
-                apiKey: apiKey,
-                baseURL: apiBaseUrl,
-                organization: organization,
+                apiKey: config.apiKey,
+                baseURL: config.apiBaseUrl,
+                organization: config.organization,
             },
         });
     }
diff --git a/src/openai.ts b/src/openai.ts
index 4232ae6..c3e432c 100644
--- a/src/openai.ts
+++ b/src/openai.ts
@@ -21,33 +21,39 @@ import {
     MessagesPlaceholder,
     SystemMessagePromptTemplate
 } from "langchain/prompts";
-import { ChatMessageHistory } from "langchain/stores/message/in_memory";
-import { GoogleCustomSearch, Tool } from "langchain/tools";
+import { BingSerpAPI, GoogleCustomSearch, Serper, Tool } from "langchain/tools";
 import { Calculator } from "langchain/tools/calculator";
 import { WebBrowser } from "langchain/tools/webbrowser";
 import ChatGptViewProvider, { logger } from "./chatgpt-view-provider";
+import { ModelConfig } from "./model-config";
 
 // initGptModel initializes the GPT model.
-export async function initGptModel(viewProvider: ChatGptViewProvider, apiKey: string, apiBaseUrl: string, maxTokens: number, temperature: number, topP: number, organization: string, googleCSEApiKey: string, googleCSEId: string, messageHistory: ChatMessageHistory) {
+export async function initGptModel(viewProvider: ChatGptViewProvider, config: ModelConfig) {
     let tools: Tool[] = [new Calculator()];
-    if (googleCSEApiKey != "" && googleCSEId != "") {
+    if (config.googleCSEApiKey != "" && config.googleCSEId != "") {
         tools.push(new GoogleCustomSearch({
-            apiKey: googleCSEApiKey,
-            googleCSEId: googleCSEId,
+            apiKey: config.googleCSEApiKey,
+            googleCSEId: config.googleCSEId,
         }));
     }
+    if (config.serperKey != "") {
+        tools.push(new Serper(config.serperKey));
+    }
+    if (config.bingKey != "") {
+        tools.push(new BingSerpAPI(config.bingKey));
+    }
 
     let embeddings = new OpenAIEmbeddings({
         modelName: "text-embedding-ada-002",
-        openAIApiKey: apiKey,
+        openAIApiKey: config.apiKey,
     });
     // AzureOpenAI
-    if (apiBaseUrl?.includes("azure")) {
-        const instanceName = apiBaseUrl.split(".")[0].split("//")[1];
-        const deployName = apiBaseUrl.split("/")[apiBaseUrl.split("/").length - 1];
+    if (config.apiBaseUrl?.includes("azure")) {
+        const instanceName = config.apiBaseUrl.split(".")[0].split("//")[1];
+        const deployName = config.apiBaseUrl.split("/")[config.apiBaseUrl.split("/").length - 1];
         embeddings = new OpenAIEmbeddings({
             azureOpenAIApiEmbeddingsDeploymentName: "text-embedding-ada-002",
-            azureOpenAIApiKey: apiKey,
+            azureOpenAIApiKey: config.apiKey,
             azureOpenAIApiInstanceName: instanceName,
             azureOpenAIApiDeploymentName: deployName,
             azureOpenAIApiCompletionsDeploymentName: deployName,
@@ -55,29 +61,29 @@ export async function initGptModel(viewProvider: ChatGptViewProvider, apiKey: st
         });
         viewProvider.apiChat = new ChatOpenAI({
             modelName: viewProvider.model,
-            azureOpenAIApiKey: apiKey,
+            azureOpenAIApiKey: config.apiKey,
             azureOpenAIApiInstanceName: instanceName,
             azureOpenAIApiDeploymentName: deployName,
             azureOpenAIApiCompletionsDeploymentName: deployName,
             azureOpenAIApiVersion: "2024-02-01",
-            maxTokens: maxTokens,
+            maxTokens: config.maxTokens,
             streaming: true,
-            temperature: temperature,
-            topP: topP,
+            temperature: config.temperature,
+            topP: config.topP,
         });
     } else {
         // OpenAI
         viewProvider.apiChat = new ChatOpenAI({
-            openAIApiKey: apiKey,
+            openAIApiKey: config.apiKey,
             modelName: viewProvider.model,
-            maxTokens: maxTokens,
+            maxTokens: config.maxTokens,
             streaming: true,
-            temperature: temperature,
-            topP: topP,
+            temperature: config.temperature,
+            topP: config.topP,
             configuration: {
-                apiKey: apiKey,
-                baseURL: apiBaseUrl,
-                organization: organization,
+                apiKey: config.apiKey,
+                baseURL: config.apiBaseUrl,
+                organization: config.organization,
             },
         });
     }
@@ -109,7 +115,7 @@ where necessary. Respond in the same language as the query, unless otherwise spe
     viewProvider.tools = tools;
     viewProvider.chain = new RunnableWithMessageHistory({
         runnable: agentExecutor,
-        getMessageHistory: (_sessionId) => messageHistory,
+        getMessageHistory: (_sessionId) => config.messageHistory,
         inputMessagesKey: "input",
         historyMessagesKey: "chat_history",
     });