diff --git a/docs/features/langchain.mdx b/docs/features/langchain.mdx new file mode 100644 index 00000000..92c2b320 --- /dev/null +++ b/docs/features/langchain.mdx @@ -0,0 +1,48 @@ +--- +title: "LangChain Integration" +--- +Pezzo supports integration with LangChain for observability and monitoring. Integration is as easy as configuring the LLM to proxy requests to Pezzo. + +## Example: LangChain with OpenAI + +Below is an example using `ChatOpenAI`. The same can be applied to chains and agents. + + + +```ts +import { ChatOpenAI } from "langchain/chat_models/openai"; + +const llm = new ChatOpenAI({ + openAIApiKey: process.env.OPENAI_API_KEY, + temperature: 0, + configuration: { + baseURL: "https://proxy.pezzo.ai/openai/v1", + defaultHeaders: { + "X-Pezzo-Api-Key": "", + "X-Pezzo-Project-Id": "", + "X-Pezzo-Environment": "Production", + }, + }, +}); + +const llmResult = await llm.predict("Tell me 5 fun facts about yourself!"); +``` + + +```py +from langchain.chat_models import ChatOpenAI + +llm = ChatOpenAI( + openai_api_key="", + openai_api_base="https://proxy.pezzo.ai/openai/v1", + default_headers={ + "X-Pezzo-Api-Key": "", + "X-Pezzo-Project-Id": "", + "X-Pezzo-Environment": "Production", + } +) + +llm_result = llm.predict("Tell me 5 fun facts about yourself!") +``` + + \ No newline at end of file diff --git a/docs/mint.json b/docs/mint.json index 83d35b20..542ff8d3 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -46,6 +46,7 @@ "platform/proxy/overview", "platform/observability/overview", "client/request-caching", + "features/langchain", "platform/prompt-management/environments", "platform/prompt-management/prompt-editor", "platform/prompt-management/versioning-and-deployments"