From 93049d15638dabb5c9d36a246bdbffa007cc8798 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Thu, 30 May 2024 00:17:33 -0700 Subject: [PATCH 01/54] docs: make llm cache its own section (#22301) --- docs/docs/integrations/{llms => }/llm_caching.ipynb | 2 +- docs/docs/integrations/providers/astradb.mdx | 4 ++-- docs/docs/integrations/providers/cassandra.mdx | 4 ++-- docs/docs/integrations/providers/motherduck.mdx | 2 +- docs/sidebars.js | 1 + docs/vercel.json | 4 ++++ 6 files changed, 11 insertions(+), 6 deletions(-) rename docs/docs/integrations/{llms => }/llm_caching.ipynb (99%) diff --git a/docs/docs/integrations/llms/llm_caching.ipynb b/docs/docs/integrations/llm_caching.ipynb similarity index 99% rename from docs/docs/integrations/llms/llm_caching.ipynb rename to docs/docs/integrations/llm_caching.ipynb index 1bf260e69b5f0..42e2036eef7e6 100644 --- a/docs/docs/integrations/llms/llm_caching.ipynb +++ b/docs/docs/integrations/llm_caching.ipynb @@ -5,7 +5,7 @@ "id": "f36d938c", "metadata": {}, "source": [ - "# LLM Caching integrations\n", + "# Model caches\n", "\n", "This notebook covers how to cache results of individual LLM calls using different caches." ] diff --git a/docs/docs/integrations/providers/astradb.mdx b/docs/docs/integrations/providers/astradb.mdx index 49a41ee5caa91..d545d1ea02625 100644 --- a/docs/docs/integrations/providers/astradb.mdx +++ b/docs/docs/integrations/providers/astradb.mdx @@ -64,7 +64,7 @@ set_llm_cache(AstraDBCache( )) ``` -Learn more in the [example notebook](/docs/integrations/llms/llm_caching#astra-db-caches) (scroll to the Astra DB section). +Learn more in the [example notebook](/docs/integrations/llm_caching#astra-db-caches) (scroll to the Astra DB section). ## Semantic LLM Cache @@ -80,7 +80,7 @@ set_llm_cache(AstraDBSemanticCache( )) ``` -Learn more in the [example notebook](/docs/integrations/llms/llm_caching#astra-db-caches) (scroll to the appropriate section). +Learn more in the [example notebook](/docs/integrations/llm_caching#astra-db-caches) (scroll to the appropriate section). Learn more in the [example notebook](/docs/integrations/memory/astradb_chat_message_history). diff --git a/docs/docs/integrations/providers/cassandra.mdx b/docs/docs/integrations/providers/cassandra.mdx index be28a3496583f..cbef4c693bc22 100644 --- a/docs/docs/integrations/providers/cassandra.mdx +++ b/docs/docs/integrations/providers/cassandra.mdx @@ -40,7 +40,7 @@ from langchain_community.cache import CassandraCache set_llm_cache(CassandraCache()) ``` -Learn more in the [example notebook](/docs/integrations/llms/llm_caching#cassandra-caches) (scroll to the Cassandra section). +Learn more in the [example notebook](/docs/integrations/llm_caching#cassandra-caches) (scroll to the Cassandra section). ## Semantic LLM Cache @@ -54,7 +54,7 @@ set_llm_cache(CassandraSemanticCache( )) ``` -Learn more in the [example notebook](/docs/integrations/llms/llm_caching#cassandra-caches) (scroll to the appropriate section). +Learn more in the [example notebook](/docs/integrations/llm_caching#cassandra-caches) (scroll to the appropriate section). 
## Document loader diff --git a/docs/docs/integrations/providers/motherduck.mdx b/docs/docs/integrations/providers/motherduck.mdx index 827c654f92f08..790f8167aaa75 100644 --- a/docs/docs/integrations/providers/motherduck.mdx +++ b/docs/docs/integrations/providers/motherduck.mdx @@ -48,6 +48,6 @@ eng = sqlalchemy.create_engine(conn_str) set_llm_cache(SQLAlchemyCache(engine=eng)) ``` -From here, see the [LLM Caching](/docs/integrations/llms/llm_caching) documentation on how to use. +From here, see the [LLM Caching](/docs/integrations/llm_caching) documentation on how to use. diff --git a/docs/sidebars.js b/docs/sidebars.js index 1710fb705a06e..5311f18192035 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -353,6 +353,7 @@ module.exports = { id: "integrations/stores/index", }, }, + "integrations/llm_caching", ], link: { type: "generated-index", diff --git a/docs/vercel.json b/docs/vercel.json index 52ad6b46bc083..d14a466f4772f 100644 --- a/docs/vercel.json +++ b/docs/vercel.json @@ -13,6 +13,10 @@ } ], "redirects": [ + { + "source": "/docs/integrations/llms/llm_caching(/?)", + "destination": "docs/integration/llm_caching/" + }, { "source": "/docs/how_to/tool_calls_multi_modal(/?)", "destination": "/docs/how_to/multimodal_inputs/" From 569d325a5977ac107ec641720c475cb40a7f5009 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Thu, 30 May 2024 00:17:59 -0700 Subject: [PATCH 02/54] docs: link GH org (#22308) --- docs/docusaurus.config.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 590518d18ac6c..84e0a32d96650 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -292,6 +292,10 @@ const config = { { title: "GitHub", items: [ + { + label: "Organization", + href: "https://github.com/langchain-ai", + }, { label: "Python", href: "https://github.com/langchain-ai/langchain", From 10b12e1c08925b593285b55c76c48848c9a242dc Mon Sep 17 00:00:00 2001 From: Dobiichi-Origami <56953648+Dobiichi-Origami@users.noreply.github.com> Date: Thu, 30 May 2024 22:59:08 +0800 Subject: [PATCH 03/54] community: adding tool_call_id for every ToolCall (#22323) - **Description:** This PR contains a bugfix which result in malfunction of multi-turn conversation in QianfanChatEndpoint and adaption for ToolCall and ToolMessage --- .../chat_models/baidu_qianfan_endpoint.py | 37 ++++++++++++------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py index 95b5fc16390ac..0305c816f14c7 100644 --- a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py +++ b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py @@ -1,4 +1,5 @@ import logging +import uuid from operator import itemgetter from typing import ( Any, @@ -29,6 +30,7 @@ FunctionMessage, HumanMessage, SystemMessage, + ToolMessage, ) from langchain_core.output_parsers.base import OutputParserLike from langchain_core.output_parsers.openai_tools import ( @@ -59,7 +61,7 @@ def convert_message_to_dict(message: BaseMessage) -> dict: # If function call only, content is None not empty string if message_dict["content"] == "": message_dict["content"] = None - elif isinstance(message, FunctionMessage): + elif isinstance(message, (FunctionMessage, ToolMessage)): message_dict = { "role": "function", "content": message.content, @@ -81,21 +83,28 @@ def 
_convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage: additional_kwargs["function_call"].pop("thoughts") additional_kwargs = {**_dict.get("body", {}), **additional_kwargs} + msg_additional_kwargs = dict( + finish_reason=additional_kwargs.get("finish_reason", ""), + request_id=additional_kwargs["id"], + object=additional_kwargs.get("object", ""), + search_info=additional_kwargs.get("search_info", []), + ) + + if additional_kwargs.get("function_call", {}): + msg_additional_kwargs["function_call"] = additional_kwargs.get( + "function_call", {} + ) + msg_additional_kwargs["tool_calls"] = [ + { + "type": "function", + "function": additional_kwargs.get("function_call", {}), + "id": str(uuid.uuid4()), + } + ] + return AIMessage( content=content, - additional_kwargs=dict( - finish_reason=additional_kwargs.get("finish_reason", ""), - request_id=additional_kwargs["id"], - object=additional_kwargs.get("object", ""), - search_info=additional_kwargs.get("search_info", []), - function_call=additional_kwargs.get("function_call", {}), - tool_calls=[ - { - "type": "function", - "function": additional_kwargs.get("function_call", {}), - } - ], - ), + additional_kwargs=msg_additional_kwargs, ) From c64b0a30951391d6f3865e3e8f93d6dd02e0b2b1 Mon Sep 17 00:00:00 2001 From: KhoPhi Date: Thu, 30 May 2024 15:06:45 +0000 Subject: [PATCH 04/54] Docs: Ollama (LLM, Chat Model & Text Embedding) (#22321) - [x] Docs Update: Ollama - llm/ollama - Switched to using llama3 as model with reference to templating and prompting - Added concurrency notes to llm/ollama docs - chat_models/ollama - Added concurrency notes to llm/ollama docs - text_embedding/ollama - include example for specific embedding models from Ollama --- docs/docs/integrations/chat/ollama.ipynb | 34 ++++++++--- docs/docs/integrations/llms/ollama.ipynb | 57 +++++++++++++----- .../integrations/text_embedding/ollama.ipynb | 58 +++++++++---------- 3 files changed, 96 insertions(+), 53 deletions(-) diff --git a/docs/docs/integrations/chat/ollama.ipynb b/docs/docs/integrations/chat/ollama.ipynb index 22a87ebfb76a7..d8e3b6ca4aa1f 100644 --- a/docs/docs/integrations/chat/ollama.ipynb +++ b/docs/docs/integrations/chat/ollama.ipynb @@ -54,12 +54,12 @@ "\n", "Here are a few ways to interact with pulled local models\n", "\n", - "#### directly in the terminal:\n", + "#### In the terminal:\n", "\n", "* All of your local models are automatically served on `localhost:11434`\n", "* Run `ollama run ` to start interacting via the command line directly\n", "\n", - "### via an API\n", + "#### Via an API\n", "\n", "Send an `application/json` request to the API endpoint of Ollama to interact.\n", "\n", @@ -72,9 +72,11 @@ "\n", "See the Ollama [API documentation](https://github.com/jmorganca/ollama/blob/main/docs/api.md) for all endpoints.\n", "\n", - "#### via LangChain\n", + "#### Via LangChain\n", "\n", - "See a typical basic example of using Ollama via the `ChatOllama` chat model in your LangChain application." + "See a typical basic example of using Ollama via the `ChatOllama` chat model in your LangChain application. \n", + "\n", + "View the [API Reference for ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ollama.ChatOllama.html#langchain_community.chat_models.ollama.ChatOllama) for more." 
] }, { @@ -105,7 +107,7 @@ "\n", "# using LangChain Expressive Language chain syntax\n", "# learn more about the LCEL on\n", - "# /docs/expression_language/why\n", + "# /docs/concepts/#langchain-expression-language-lcel\n", "chain = prompt | llm | StrOutputParser()\n", "\n", "# for brevity, response is printed in terminal\n", @@ -189,7 +191,7 @@ "\n", "## Building from source\n", "\n", - "For up to date instructions on building from source, check the Ollama documentation on [Building from Source](https://github.com/jmorganca/ollama?tab=readme-ov-file#building)" + "For up to date instructions on building from source, check the Ollama documentation on [Building from Source](https://github.com/ollama/ollama?tab=readme-ov-file#building)" ] }, { @@ -333,7 +335,7 @@ } ], "source": [ - "pip install --upgrade --quiet pillow" + "!pip install --upgrade --quiet pillow" ] }, { @@ -444,6 +446,24 @@ "\n", "print(query_chain)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Concurrency Features\n", + "\n", + "Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n", + "\n", + "Start the Ollama server with:\n", + "\n", + "* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n", + "* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n", + "\n", + "Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n", + "\n", + "Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)." + ] } ], "metadata": { diff --git a/docs/docs/integrations/llms/ollama.ipynb b/docs/docs/integrations/llms/ollama.ipynb index 7c6be1a28c843..e80ce6e4b776b 100644 --- a/docs/docs/integrations/llms/ollama.ipynb +++ b/docs/docs/integrations/llms/ollama.ipynb @@ -12,16 +12,15 @@ "\n", "It optimizes setup and configuration details, including GPU usage.\n", "\n", - "For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n", + "For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/ollama/ollama#model-library).\n", "\n", "## Setup\n", "\n", - "First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n", + "First, follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance:\n", "\n", "* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n", "* Fetch available LLM model via `ollama pull `\n", - " * View a list of available models via the [model library](https://ollama.ai/library)\n", - " * e.g., `ollama pull llama3`\n", + " * View a list of available models via the [model library](https://ollama.ai/library) and pull to use locally with the command `ollama pull llama3`\n", "* This will download the default tagged version of the model. 
Typically, the default points to the latest, smallest sized-parameter model.\n", "\n", "> On Mac, the models will be download to `~/.ollama/models`\n", @@ -29,28 +28,29 @@ "> On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models`\n", "\n", "* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n", - "* To view all pulled models, use `ollama list`\n", + "* To view all pulled models on your local instance, use `ollama list`\n", "* To chat directly with a model from the command line, use `ollama run `\n", - "* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n", + "* View the [Ollama documentation](https://github.com/ollama/ollama) for more commands. \n", + "* Run `ollama help` in the terminal to see available commands too.\n", "\n", "## Usage\n", "\n", - "You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n", + "You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html).\n", "\n", - "If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` interface.\n", + "If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` [interface](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/).\n", "\n", - "This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input.\n", + "This includes [special tokens](https://ollama.com/library/llama3) for system message and user input.\n", "\n", "## Interacting with Models \n", "\n", "Here are a few ways to interact with pulled local models\n", "\n", - "#### directly in the terminal:\n", + "#### In the terminal:\n", "\n", "* All of your local models are automatically served on `localhost:11434`\n", "* Run `ollama run ` to start interacting via the command line directly\n", "\n", - "### via an API\n", + "#### Via the API\n", "\n", "Send an `application/json` request to the API endpoint of Ollama to interact.\n", "\n", @@ -61,11 +61,20 @@ "}'\n", "```\n", "\n", - "See the Ollama [API documentation](https://github.com/jmorganca/ollama/blob/main/docs/api.md) for all endpoints.\n", + "See the Ollama [API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) for all endpoints.\n", "\n", "#### via LangChain\n", "\n", - "See a typical basic example of using Ollama chat model in your LangChain application." + "See a typical basic example of using [Ollama chat model](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/) in your LangChain application." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install langchain-community" ] }, { @@ -87,7 +96,9 @@ "source": [ "from langchain_community.llms import Ollama\n", "\n", - "llm = Ollama(model=\"llama3\")\n", + "llm = Ollama(\n", + " model=\"llama3\"\n", + ") # assuming you have Ollama installed and have llama3 model pulled with `ollama pull llama3 `\n", "\n", "llm.invoke(\"Tell me a joke\")" ] @@ -280,6 +291,24 @@ "llm_with_image_context = bakllava.bind(images=[image_b64])\n", "llm_with_image_context.invoke(\"What is the dollar based gross retention rate:\")" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Concurrency Features\n", + "\n", + "Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n", + "\n", + "Start the Ollama server with:\n", + "\n", + "* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n", + "* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n", + "\n", + "Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n", + "\n", + "Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)." + ] } ], "metadata": { diff --git a/docs/docs/integrations/text_embedding/ollama.ipynb b/docs/docs/integrations/text_embedding/ollama.ipynb index 915f9edab02d9..c7af848287d1d 100644 --- a/docs/docs/integrations/text_embedding/ollama.ipynb +++ b/docs/docs/integrations/text_embedding/ollama.ipynb @@ -7,36 +7,42 @@ "source": [ "# Ollama\n", "\n", - "Let's load the Ollama Embeddings class." + "\"Ollama supports embedding models, making it possible to build retrieval augmented generation (RAG) applications that combine text prompts with existing documents or other data.\" Learn more about the introduction to [Ollama Embeddings](https://ollama.com/blog/embedding-models) in the blog post.\n", + "\n", + "To use Ollama Embeddings, first, install [LangChain Community](https://pypi.org/project/langchain-community/) package:" ] }, { "cell_type": "code", - "execution_count": 1, - "id": "0be1af71", + "execution_count": null, + "id": "854d6a2e", "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OllamaEmbeddings" + "!pip install langchain-community" ] }, { - "cell_type": "code", - "execution_count": 2, - "id": "2c66e5da", + "cell_type": "markdown", + "id": "54fbb4cd", "metadata": {}, - "outputs": [], "source": [ - "embeddings = OllamaEmbeddings()" + "Load the Ollama Embeddings class:" ] }, { "cell_type": "code", - "execution_count": 3, - "id": "01370375", + "execution_count": 1, + "id": "0be1af71", "metadata": {}, "outputs": [], "source": [ + "from langchain_community.embeddings import OllamaEmbeddings\n", + "\n", + "embeddings = (\n", + " OllamaEmbeddings()\n", + ") # by default, uses llama2. Run `ollama pull llama2` to pull down the model\n", + "\n", "text = \"This is a test document.\"" ] }, @@ -105,7 +111,13 @@ "id": "bb61bbeb", "metadata": {}, "source": [ - "Let's load the Ollama Embeddings class with smaller model (e.g. llama:7b). Note: See other supported models [https://ollama.ai/library](https://ollama.ai/library)" + "### Embedding Models\n", + "\n", + "Ollama has embedding models, that are lightweight enough for use in embeddings, with the smallest about the size of 25Mb. 
See some of the available [embedding models from Ollama](https://ollama.com/blog/embedding-models).\n", + "\n", + "Let's load the Ollama Embeddings class with smaller model (e.g. `mxbai-embed-large`). \n", + "\n", + "> Note: See other supported models [https://ollama.ai/library](https://ollama.ai/library)" ] }, { @@ -115,26 +127,8 @@ "metadata": {}, "outputs": [], "source": [ - "embeddings = OllamaEmbeddings(model=\"llama2:7b\")" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "14aefb64", - "metadata": {}, - "outputs": [], - "source": [ - "text = \"This is a test document.\"" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "3c39ed33", - "metadata": {}, - "outputs": [], - "source": [ + "embeddings = OllamaEmbeddings(model=\"mxbai-embed-large\")\n", + "text = \"This is a test document.\"\n", "query_result = embeddings.embed_query(text)" ] }, From 596c062cbaff9f15b0427670c50b18f4d5b40637 Mon Sep 17 00:00:00 2001 From: maang-h <55082429+maang-h@users.noreply.github.com> Date: Thu, 30 May 2024 23:08:32 +0800 Subject: [PATCH 05/54] community[patch]: Standardize qianfan model init args name (#22322) - **Description:** - Standardize qianfan chat model intialization arguments name - qianfan_ak (qianfan api key) -> api_key - qianfan_sk (qianfan secret key) -> secret_key - Delete unuse variable - **Issue:** #20085 --- .../chat_models/baidu_qianfan_endpoint.py | 13 +++++++------ .../chat_models/test_qianfan_endpoint.py | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py index 0305c816f14c7..9003140497021 100644 --- a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py +++ b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py @@ -133,11 +133,12 @@ class QianfanChatEndpoint(BaseChatModel): model_kwargs: Dict[str, Any] = Field(default_factory=dict) """extra params for model invoke using with `do`.""" - client: Any - - qianfan_ak: Optional[SecretStr] = None - qianfan_sk: Optional[SecretStr] = None + client: Any #: :meta private: + qianfan_ak: Optional[SecretStr] = Field(default=None, alias="api_key") + """Qianfan API KEY""" + qianfan_sk: Optional[SecretStr] = Field(default=None, alias="secret_key") + """Qianfan SECRET KEY""" streaming: Optional[bool] = False """Whether to stream the results or not.""" @@ -145,7 +146,9 @@ class QianfanChatEndpoint(BaseChatModel): """request timeout for chat http requests""" top_p: Optional[float] = 0.8 + """What probability mass to use.""" temperature: Optional[float] = 0.95 + """What sampling temperature to use.""" penalty_score: Optional[float] = 1 """Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. In the case of other model, passing these params will not affect the result. 
@@ -292,7 +295,6 @@ def _generate( """ if self.streaming: completion = "" - token_usage = {} chat_generation_info: Dict = {} for chunk in self._stream(messages, stop, run_manager, **kwargs): chat_generation_info = ( @@ -337,7 +339,6 @@ async def _agenerate( ) -> ChatResult: if self.streaming: completion = "" - token_usage = {} chat_generation_info: Dict = {} async for chunk in self._astream(messages, stop, run_manager, **kwargs): chat_generation_info = ( diff --git a/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py b/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py index 82e568123d27d..91a7fb9d23b40 100644 --- a/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py +++ b/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py @@ -362,3 +362,19 @@ def test_uses_actual_secret_value_from_secret_str() -> None: ) assert cast(SecretStr, chat.qianfan_ak).get_secret_value() == "test-api-key" assert cast(SecretStr, chat.qianfan_sk).get_secret_value() == "test-secret-key" + + +def test_init_api_key_param() -> None: + """Test the standardized parameters -- api_key and secret_key""" + for chat in [ + QianfanChatEndpoint( # type: ignore[call-arg] + api_key="test-api-key", # type: ignore[arg-type] + secret_key="test-secret-key", # type: ignore[arg-type] + ), + QianfanChatEndpoint( # type: ignore[call-arg] + qianfan_ak="test-api-key", # type: ignore[arg-type] + qianfan_sk="test-secret-key", # type: ignore[arg-type] + ), + ]: + assert cast(SecretStr, chat.qianfan_ak).get_secret_value() == "test-api-key" + assert cast(SecretStr, chat.qianfan_sk).get_secret_value() == "test-secret-key" From 86698b02a97c0e614ae44a0c34f2c85532f414ca Mon Sep 17 00:00:00 2001 From: WU LIFU Date: Thu, 30 May 2024 23:15:04 +0800 Subject: [PATCH 06/54] doc: fix wrong documentation on FAISS load_local function (#22310) ### Issue: #22299 ### descriptions The documentation appears to be wrong. When the user actually sets this parameter "asynchronous" to be True, it fails because the __init__ function of FAISS class doesn't allow this parameter. In fact, most of the class/instance functions of this class have both the sync/async version, so it looks like what we need is just to remove this parameter from the doc. Thank you for contributing to LangChain! - [x] **PR title**: "package: description" - Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes. - Example: "community: add foobar LLM" - [x] **PR message**: ***Delete this entire checklist*** and replace with - **Description:** a description of the change - **Issue:** the issue # it fixes, if applicable - **Dependencies:** any dependencies required for this change - **Twitter handle:** if your PR gets announced, and you'd like a mention, we'll gladly shout you out! - [ ] **Add tests and docs**: If you're adding a new integration, please include 1. a test for the integration, preferably unit tests that do not rely on network access, 2. an example notebook showing its use. It lives in `docs/docs/integrations` directory. - [ ] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/ Additional guidelines: - Make sure optional dependencies are imported within a function. 
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests. - Most PRs should not touch more than one package. - Changes should be backwards compatible. - If you are adding something to community, do not re-import it in langchain. If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17. Co-authored-by: Lifu Wu --- libs/community/langchain_community/vectorstores/faiss.py | 1 - 1 file changed, 1 deletion(-) diff --git a/libs/community/langchain_community/vectorstores/faiss.py b/libs/community/langchain_community/vectorstores/faiss.py index 749125f12fe2a..ae0f45f7e0774 100644 --- a/libs/community/langchain_community/vectorstores/faiss.py +++ b/libs/community/langchain_community/vectorstores/faiss.py @@ -1072,7 +1072,6 @@ def load_local( Pickle files can be modified by malicious actors to deliver a malicious payload that results in execution of arbitrary code on your machine. - asynchronous: whether to use async version or not """ if not allow_dangerous_deserialization: raise ValueError( From 2443e8553364452cdb1a25415e07b72bb8734edc Mon Sep 17 00:00:00 2001 From: ChengZi Date: Thu, 30 May 2024 23:28:55 +0800 Subject: [PATCH 07/54] docs: fix milvus import and update template (#22306) docs: fix milvus import problem update milvus-rag template with milvus-lite Signed-off-by: ChengZi --- .../self_query/milvus_self_query.ipynb | 17 +- .../integrations/vectorstores/milvus.ipynb | 4 +- libs/partners/milvus/README.md | 8 +- templates/rag-milvus/poetry.lock | 365 +----------------- templates/rag-milvus/pyproject.toml | 4 +- templates/rag-milvus/rag_milvus/chain.py | 18 +- 6 files changed, 41 insertions(+), 375 deletions(-) diff --git a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb index 18ae2263c7d97..477b9b5f019e9 100644 --- a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb @@ -20,7 +20,7 @@ "\n", "I have used the cloud version of Milvus, thus I need `uri` and `token` as well.\n", "\n", - "NOTE: The self-query retriever requires you to have `lark` installed (`pip install lark`). We also need the `pymilvus` package." + "NOTE: The self-query retriever requires you to have `lark` installed (`pip install lark`). We also need the `langchain_milvus` package." 
] }, { @@ -29,16 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet lark" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install --upgrade --quiet pymilvus" + "%pip install --upgrade --quiet lark langchain_milvus" ] }, { @@ -67,8 +58,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.vectorstores import Milvus\n", "from langchain_core.documents import Document\n", + "from langchain_milvus.vectorstores import Milvus\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -388,4 +379,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/docs/docs/integrations/vectorstores/milvus.ipynb b/docs/docs/integrations/vectorstores/milvus.ipynb index 7fb751453c592..24cb43b436bdb 100644 --- a/docs/docs/integrations/vectorstores/milvus.ipynb +++ b/docs/docs/integrations/vectorstores/milvus.ipynb @@ -23,7 +23,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet pymilvus" + "%pip install --upgrade --quiet langchain_milvus" ] }, { @@ -67,7 +67,7 @@ "outputs": [], "source": [ "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.vectorstores import Milvus\n", + "from langchain_milvus.vectorstores import Milvus\n", "from langchain_openai import OpenAIEmbeddings\n", "from langchain_text_splitters import CharacterTextSplitter" ] diff --git a/libs/partners/milvus/README.md b/libs/partners/milvus/README.md index 2908408102888..80820f32d1b6a 100644 --- a/libs/partners/milvus/README.md +++ b/libs/partners/milvus/README.md @@ -10,7 +10,7 @@ pip install -U langchain-milvus ## Milvus vector database -See a [usage example](https://python.langchain.com/docs/integrations/vectorstores/milvus/) +See a [usage example](https://python.langchain.com/v0.2/docs/integrations/vectorstores/milvus/) ```python from langchain_milvus import Milvus @@ -18,7 +18,7 @@ from langchain_milvus import Milvus ## Milvus hybrid search -See a [usage example](https://python.langchain.com/docs/integrations/retrievers/milvus_hybrid_search/). +See a [usage example](https://python.langchain.com/v0.2/docs/integrations/retrievers/milvus_hybrid_search/). ```python from langchain_milvus import MilvusCollectionHybridSearchRetriever @@ -27,7 +27,7 @@ from langchain_milvus import MilvusCollectionHybridSearchRetriever ## Zilliz Cloud vector database -See a [usage example](https://python.langchain.com/docs/integrations/vectorstores/zilliz/). +See a [usage example](https://python.langchain.com/v0.2/docs/integrations/vectorstores/zilliz/). ```python from langchain_milvus import Zilliz @@ -35,7 +35,7 @@ from langchain_milvus import Zilliz ## Zilliz Cloud Pipeline Retriever -See a [usage example](https://python.langchain.com/docs/integrations/retrievers/zilliz_cloud_pipeline). +See a [usage example](https://python.langchain.com/v0.2/docs/integrations/retrievers/zilliz_cloud_pipeline/). 
```python from langchain_milvus import ZillizCloudPipelineRetriever diff --git a/templates/rag-milvus/poetry.lock b/templates/rag-milvus/poetry.lock index 48776d16b44d0..be7ee484de749 100644 --- a/templates/rag-milvus/poetry.lock +++ b/templates/rag-milvus/poetry.lock @@ -145,63 +145,6 @@ doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd- test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (<0.22)"] -[[package]] -name = "argon2-cffi" -version = "23.1.0" -description = "Argon2 for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"}, - {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"}, -] - -[package.dependencies] -argon2-cffi-bindings = "*" - -[package.extras] -dev = ["argon2-cffi[tests,typing]", "tox (>4)"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"] -tests = ["hypothesis", "pytest"] -typing = ["mypy"] - -[[package]] -name = "argon2-cffi-bindings" -version = "21.2.0" -description = "Low-level CFFI bindings for Argon2" -optional = false -python-versions = ">=3.6" -files = [ - {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, - {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, -] - -[package.dependencies] -cffi = ">=1.0.1" - -[package.extras] -dev = ["cogapp", "pre-commit", "pytest", "wheel"] -tests = ["pytest"] - [[package]] name = "async-timeout" version = "4.0.3" @@ -232,45 +175,6 @@ tests = ["attrs[tests-no-zope]", "zope-interface"] tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] -[[package]] -name = "azure-core" -version = "1.30.1" -description = "Microsoft Azure Core Library for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "azure-core-1.30.1.tar.gz", hash = "sha256:26273a254131f84269e8ea4464f3560c731f29c0c1f69ac99010845f239c1a8f"}, - {file = "azure_core-1.30.1-py3-none-any.whl", hash = "sha256:7c5ee397e48f281ec4dd773d67a0a47a0962ed6fa833036057f9ea067f688e74"}, -] - -[package.dependencies] -requests = ">=2.21.0" -six = ">=1.11.0" -typing-extensions = ">=4.6.0" - -[package.extras] -aio = ["aiohttp (>=3.0)"] - -[[package]] -name = "azure-storage-blob" -version = "12.19.1" -description = "Microsoft Azure Blob Storage Client Library for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "azure-storage-blob-12.19.1.tar.gz", hash = "sha256:13e16ba42fc54ac2c7e8f976062173a5c82b9ec0594728e134aac372965a11b0"}, - {file = "azure_storage_blob-12.19.1-py3-none-any.whl", hash = "sha256:c5530dc51c21c9564e4eb706cd499befca8819b10dd89716d3fc90d747556243"}, -] - -[package.dependencies] -azure-core = ">=1.28.0,<2.0.0" -cryptography = ">=2.1.4" -isodate = ">=0.6.1" -typing-extensions = ">=4.3.0" - -[package.extras] -aio = ["azure-core[aio] (>=1.28.0,<2.0.0)"] - [[package]] name = "certifi" version = "2024.2.2" @@ -282,70 +186,6 @@ files = [ {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, ] -[[package]] -name = "cffi" -version = "1.16.0" -description = "Foreign Function Interface for Python calling C code." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, - {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, - {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, - {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, - {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, - {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, - {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, - {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, - {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, - {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, - {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, - {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, - {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, - {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, - {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, -] - -[package.dependencies] -pycparser = "*" - [[package]] name = "charset-normalizer" version = "3.3.2" @@ -470,60 +310,6 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -[[package]] -name = "cryptography" -version = "42.0.5" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." -optional = false -python-versions = ">=3.7" -files = [ - {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16"}, - {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da"}, - {file = "cryptography-42.0.5-cp37-abi3-win32.whl", hash = "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74"}, - {file = "cryptography-42.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940"}, - {file = "cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8"}, - {file = 
"cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30"}, - {file = "cryptography-42.0.5-cp39-abi3-win32.whl", hash = "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413"}, - {file = "cryptography-42.0.5-cp39-abi3-win_amd64.whl", hash = "sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd"}, - {file = "cryptography-42.0.5.tar.gz", hash = "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1"}, -] - -[package.dependencies] -cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} - -[package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] -nox = ["nox"] -pep8test = ["check-sdist", "click", "mypy", "ruff"] -sdist = ["build"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] -test-randomorder = ["pytest-randomly"] - [[package]] name = "dataclasses-json" version = "0.6.4" @@ -927,20 +713,6 @@ files = [ {file = 
"idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] -[[package]] -name = "isodate" -version = "0.6.1" -description = "An ISO 8601 date/time/duration parser and formatter" -optional = false -python-versions = "*" -files = [ - {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, - {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, -] - -[package.dependencies] -six = "*" - [[package]] name = "jsonpatch" version = "1.33" @@ -1200,23 +972,17 @@ files = [ ] [[package]] -name = "minio" -version = "7.2.6" -description = "MinIO Python SDK for Amazon S3 Compatible Cloud Storage" +name = "milvus-lite" +version = "2.4.6" +description = "A lightweight version of Milvus wrapped with Python." optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "minio-7.2.6-py3-none-any.whl", hash = "sha256:4972273a924f274e2d71f38f6d2afdf841a034801e60ba758e5c5aff4234b768"}, - {file = "minio-7.2.6.tar.gz", hash = "sha256:c545d0dda1ff26cefcfc754242be3d27a4e620e37ef3e51ecbe7212cf7ecc274"}, + {file = "milvus_lite-2.4.6-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:43ac9f36903b31455e50a8f1d9cb033e18971643029c89eb5c9610f01c1f2e26"}, + {file = "milvus_lite-2.4.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:95afe2ee019c569713926747bbe18ab5944927797374fed796f00fbe564cccd6"}, + {file = "milvus_lite-2.4.6-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2f9116bfc6a0d95636d3aa144582486b622c492689f3c93c519101bd7158b7db"}, ] -[package.dependencies] -argon2-cffi = "*" -certifi = "*" -pycryptodome = "*" -typing-extensions = "*" -urllib3 = "*" - [[package]] name = "multidict" version = "6.0.5" @@ -1545,106 +1311,6 @@ files = [ {file = "protobuf-5.26.1.tar.gz", hash = "sha256:8ca2a1d97c290ec7b16e4e5dff2e5ae150cc1582f55b5ab300d45cb0dfa90e51"}, ] -[[package]] -name = "pyarrow" -version = "16.0.0" -description = "Python library for Apache Arrow" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pyarrow-16.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:22a1fdb1254e5095d629e29cd1ea98ed04b4bbfd8e42cc670a6b639ccc208b60"}, - {file = "pyarrow-16.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:574a00260a4ed9d118a14770edbd440b848fcae5a3024128be9d0274dbcaf858"}, - {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0815d0ddb733b8c1b53a05827a91f1b8bde6240f3b20bf9ba5d650eb9b89cdf"}, - {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df0080339387b5d30de31e0a149c0c11a827a10c82f0c67d9afae3981d1aabb7"}, - {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:edf38cce0bf0dcf726e074159c60516447e4474904c0033f018c1f33d7dac6c5"}, - {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:91d28f9a40f1264eab2af7905a4d95320ac2f287891e9c8b0035f264fe3c3a4b"}, - {file = "pyarrow-16.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:99af421ee451a78884d7faea23816c429e263bd3618b22d38e7992c9ce2a7ad9"}, - {file = "pyarrow-16.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:d22d0941e6c7bafddf5f4c0662e46f2075850f1c044bf1a03150dd9e189427ce"}, - {file = "pyarrow-16.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:266ddb7e823f03733c15adc8b5078db2df6980f9aa93d6bb57ece615df4e0ba7"}, - {file = 
"pyarrow-16.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cc23090224b6594f5a92d26ad47465af47c1d9c079dd4a0061ae39551889efe"}, - {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56850a0afe9ef37249d5387355449c0f94d12ff7994af88f16803a26d38f2016"}, - {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:705db70d3e2293c2f6f8e84874b5b775f690465798f66e94bb2c07bab0a6bb55"}, - {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:5448564754c154997bc09e95a44b81b9e31ae918a86c0fcb35c4aa4922756f55"}, - {file = "pyarrow-16.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:729f7b262aa620c9df8b9967db96c1575e4cfc8c25d078a06968e527b8d6ec05"}, - {file = "pyarrow-16.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fb8065dbc0d051bf2ae2453af0484d99a43135cadabacf0af588a3be81fbbb9b"}, - {file = "pyarrow-16.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:20ce707d9aa390593ea93218b19d0eadab56390311cb87aad32c9a869b0e958c"}, - {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5823275c8addbbb50cd4e6a6839952682a33255b447277e37a6f518d6972f4e1"}, - {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ab8b9050752b16a8b53fcd9853bf07d8daf19093533e990085168f40c64d978"}, - {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:42e56557bc7c5c10d3e42c3b32f6cff649a29d637e8f4e8b311d334cc4326730"}, - {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2a7abdee4a4a7cfa239e2e8d721224c4b34ffe69a0ca7981354fe03c1328789b"}, - {file = "pyarrow-16.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:ef2f309b68396bcc5a354106741d333494d6a0d3e1951271849787109f0229a6"}, - {file = "pyarrow-16.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ed66e5217b4526fa3585b5e39b0b82f501b88a10d36bd0d2a4d8aa7b5a48e2df"}, - {file = "pyarrow-16.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc8814310486f2a73c661ba8354540f17eef51e1b6dd090b93e3419d3a097b3a"}, - {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c2f5e239db7ed43e0ad2baf46a6465f89c824cc703f38ef0fde927d8e0955f7"}, - {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f293e92d1db251447cb028ae12f7bc47526e4649c3a9924c8376cab4ad6b98bd"}, - {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:dd9334a07b6dc21afe0857aa31842365a62eca664e415a3f9536e3a8bb832c07"}, - {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d91073d1e2fef2c121154680e2ba7e35ecf8d4969cc0af1fa6f14a8675858159"}, - {file = "pyarrow-16.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:71d52561cd7aefd22cf52538f262850b0cc9e4ec50af2aaa601da3a16ef48877"}, - {file = "pyarrow-16.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:b93c9a50b965ee0bf4fef65e53b758a7e8dcc0c2d86cebcc037aaaf1b306ecc0"}, - {file = "pyarrow-16.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d831690844706e374c455fba2fb8cfcb7b797bfe53ceda4b54334316e1ac4fa4"}, - {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35692ce8ad0b8c666aa60f83950957096d92f2a9d8d7deda93fb835e6053307e"}, - {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dd3151d098e56f16a8389c1247137f9e4c22720b01c6f3aa6dec29a99b74d80"}, - {file = 
"pyarrow-16.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bd40467bdb3cbaf2044ed7a6f7f251c8f941c8b31275aaaf88e746c4f3ca4a7a"}, - {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:00a1dcb22ad4ceb8af87f7bd30cc3354788776c417f493089e0a0af981bc8d80"}, - {file = "pyarrow-16.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fda9a7cebd1b1d46c97b511f60f73a5b766a6de4c5236f144f41a5d5afec1f35"}, - {file = "pyarrow-16.0.0.tar.gz", hash = "sha256:59bb1f1edbbf4114c72415f039f1359f1a57d166a331c3229788ccbfbb31689a"}, -] - -[package.dependencies] -numpy = ">=1.16.6" - -[[package]] -name = "pycparser" -version = "2.22" -description = "C parser in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, - {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, -] - -[[package]] -name = "pycryptodome" -version = "3.20.0" -description = "Cryptographic library for Python" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "pycryptodome-3.20.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:f0e6d631bae3f231d3634f91ae4da7a960f7ff87f2865b2d2b831af1dfb04e9a"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:baee115a9ba6c5d2709a1e88ffe62b73ecc044852a925dcb67713a288c4ec70f"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:417a276aaa9cb3be91f9014e9d18d10e840a7a9b9a9be64a42f553c5b50b4d1d"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a1250b7ea809f752b68e3e6f3fd946b5939a52eaeea18c73bdab53e9ba3c2dd"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:d5954acfe9e00bc83ed9f5cb082ed22c592fbbef86dc48b907238be64ead5c33"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-win32.whl", hash = "sha256:06d6de87c19f967f03b4cf9b34e538ef46e99a337e9a61a77dbe44b2cbcf0690"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-win_amd64.whl", hash = "sha256:ec0bb1188c1d13426039af8ffcb4dbe3aad1d7680c35a62d8eaf2a529b5d3d4f"}, - {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5601c934c498cd267640b57569e73793cb9a83506f7c73a8ec57a516f5b0b091"}, - {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d29daa681517f4bc318cd8a23af87e1f2a7bad2fe361e8aa29c77d652a065de4"}, - {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3427d9e5310af6680678f4cce149f54e0bb4af60101c7f2c16fdf878b39ccccc"}, - {file = "pycryptodome-3.20.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:3cd3ef3aee1079ae44afaeee13393cf68b1058f70576b11439483e34f93cf818"}, - {file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac1c7c0624a862f2e53438a15c9259d1655325fc2ec4392e66dc46cdae24d044"}, - {file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:76658f0d942051d12a9bd08ca1b6b34fd762a8ee4240984f7c06ddfb55eaf15a"}, - {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f35d6cee81fa145333137009d9c8ba90951d7d77b67c79cbe5f03c7eb74d8fe2"}, - {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76cb39afede7055127e35a444c1c041d2e8d2f1f9c121ecef573757ba4cd2c3c"}, - {file = 
"pycryptodome-3.20.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a4c4dc60b78ec41d2afa392491d788c2e06edf48580fbfb0dd0f828af49d25"}, - {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fb3b87461fa35afa19c971b0a2b7456a7b1db7b4eba9a8424666104925b78128"}, - {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:acc2614e2e5346a4a4eab6e199203034924313626f9620b7b4b38e9ad74b7e0c"}, - {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:210ba1b647837bfc42dd5a813cdecb5b86193ae11a3f5d972b9a0ae2c7e9e4b4"}, - {file = "pycryptodome-3.20.0-cp35-abi3-win32.whl", hash = "sha256:8d6b98d0d83d21fb757a182d52940d028564efe8147baa9ce0f38d057104ae72"}, - {file = "pycryptodome-3.20.0-cp35-abi3-win_amd64.whl", hash = "sha256:9b3ae153c89a480a0ec402e23db8d8d84a3833b65fa4b15b81b83be9d637aab9"}, - {file = "pycryptodome-3.20.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:4401564ebf37dfde45d096974c7a159b52eeabd9969135f0426907db367a652a"}, - {file = "pycryptodome-3.20.0-pp27-pypy_73-win32.whl", hash = "sha256:ec1f93feb3bb93380ab0ebf8b859e8e5678c0f010d2d78367cf6bc30bfeb148e"}, - {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:acae12b9ede49f38eb0ef76fdec2df2e94aad85ae46ec85be3648a57f0a7db04"}, - {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f47888542a0633baff535a04726948e876bf1ed880fddb7c10a736fa99146ab3"}, - {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e0e4a987d38cfc2e71b4a1b591bae4891eeabe5fa0f56154f576e26287bfdea"}, - {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c18b381553638414b38705f07d1ef0a7cf301bc78a5f9bc17a957eb19446834b"}, - {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a60fedd2b37b4cb11ccb5d0399efe26db9e0dd149016c1cc6c8161974ceac2d6"}, - {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:405002eafad114a2f9a930f5db65feef7b53c4784495dd8758069b89baf68eab"}, - {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ab6ab0cb755154ad14e507d1df72de9897e99fd2d4922851a276ccc14f4f1a5"}, - {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acf6e43fa75aca2d33e93409f2dafe386fe051818ee79ee8a3e21de9caa2ac9e"}, - {file = "pycryptodome-3.20.0.tar.gz", hash = "sha256:09609209ed7de61c2b560cc5c8c4fbf892f8b15b1faf7e4cbffac97db1fffda7"}, -] - [[package]] name = "pydantic" version = "2.7.1" @@ -1772,29 +1438,28 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymilvus" -version = "2.4.0" +version = "2.4.3" description = "Python Sdk for Milvus" optional = false python-versions = ">=3.8" files = [ - {file = "pymilvus-2.4.0-1-py3-none-any.whl", hash = "sha256:9f8212af51bc235a4c1230c344a852b6f75a4be01cac38f21dab5b65695a9598"}, - {file = "pymilvus-2.4.0.tar.gz", hash = "sha256:f1d1a2e9d5172fea3e0a5d396bed9de561a2b93a4d3e1945fdf68b74d771fb05"}, + {file = "pymilvus-2.4.3-py3-none-any.whl", hash = "sha256:38239e89f8d739f665141d0b80908990b5f59681e889e135c234a4a45669a5c8"}, + {file = "pymilvus-2.4.3.tar.gz", hash = "sha256:703ac29296cdce03d6dc2aaebbe959e57745c141a94150e371dc36c61c226cc1"}, ] [package.dependencies] -azure-storage-blob = 
"*" environs = "<=9.5.0" -grpcio = ">=1.49.1,<=1.60.0" -minio = ">=7.0.0" +grpcio = ">=1.49.1,<=1.63.0" +milvus-lite = ">=2.4.0,<2.5.0" numpy = {version = "<1.25.0", markers = "python_version <= \"3.8\""} pandas = ">=1.2.4" protobuf = ">=3.20.0" -pyarrow = ">=12.0.0" -requests = "*" setuptools = ">=67" ujson = ">=2.0.0" [package.extras] +bulk-writer = ["azure-storage-blob", "minio (>=7.0.0)", "pyarrow (>=12.0.0)", "requests"] +dev = ["black", "grpcio (==1.62.2)", "grpcio-testing (==1.62.2)", "grpcio-tools (==1.62.2)", "pytest (>=5.3.4)", "pytest-cov (>=2.8.1)", "pytest-timeout (>=1.3.4)", "ruff (>0.4.0)"] model = ["milvus-model (>=0.1.0)"] [[package]] @@ -2618,4 +2283,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "e184bab7c13245b660890fcac2dfe03a38296251a832daf1ace8478ccb95c694" +content-hash = "4b2f03de1a299d1cd5aa42632d68be0d4bb3bb2f5d89ed6fde5931b3df1e0579" diff --git a/templates/rag-milvus/pyproject.toml b/templates/rag-milvus/pyproject.toml index 2adf18cd7def0..4060fba7859d8 100644 --- a/templates/rag-milvus/pyproject.toml +++ b/templates/rag-milvus/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "rag-milvus" -version = "0.1.0" +version = "0.1.1" description = "RAG using Milvus" authors = [] readme = "README.md" @@ -11,7 +11,7 @@ langchain = "^0.1" langchain-core = "^0.1" langchain-openai = "^0.1" langchain-community = "^0.0.30" -pymilvus = "^2.4" +pymilvus = "^2.4.3" scipy = "^1.9" [tool.poetry.group.dev.dependencies] diff --git a/templates/rag-milvus/rag_milvus/chain.py b/templates/rag-milvus/rag_milvus/chain.py index b48edd6b2cf31..57c5300694520 100644 --- a/templates/rag-milvus/rag_milvus/chain.py +++ b/templates/rag-milvus/rag_milvus/chain.py @@ -1,12 +1,23 @@ -from langchain_community.vectorstores import Milvus from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough +from langchain_milvus.vectorstores import Milvus from langchain_openai import ChatOpenAI, OpenAIEmbeddings # Example for document loading (from url), splitting, and creating vectorstore +# Setting the URI as a local file, e.g.`./milvus.db`, is the most convenient method, +# as it automatically utilizes Milvus Lite to store all data in this file. +# +# If you have large scale of data such as more than a million docs, +# we recommend setting up a more performant Milvus server on docker or kubernetes. +# (https://milvus.io/docs/quickstart.md) +# When using this setup, please use the server URI, +# e.g.`http://localhost:19530`, as your URI. + +URI = "./milvus.db" + """ # Load from langchain_community.document_loaders import WebBaseLoader @@ -25,6 +36,7 @@ collection_name="rag_milvus", embedding=OpenAIEmbeddings(), drop_old=True, + connection_args={"uri": URI}, ) retriever = vectorstore.as_retriever() """ @@ -35,9 +47,7 @@ collection_name="rag_milvus", embedding=OpenAIEmbeddings(), drop_old=True, - connection_args={ - "uri": "http://127.0.0.1:19530", - }, + connection_args={"uri": URI}, ) retriever = vectorstore.as_retriever() From f34337447fbb60c7f4fd8d505a62307892c96b76 Mon Sep 17 00:00:00 2001 From: ccurme Date: Thu, 30 May 2024 12:31:28 -0400 Subject: [PATCH 08/54] openai: update ChatOpenAI api ref (#22324) Update to reflect that token usage is no longer default in streaming mode. Add detail for streaming context under Token Usage section. 
---
.../langchain_openai/chat_models/base.py | 30 +++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 3cd2aac557c77..3861f264040b5 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -1219,7 +1219,6 @@ class ChatOpenAI(BaseChatOpenAI):
AIMessageChunk(content=' programmation', id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
AIMessageChunk(content='.', id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
AIMessageChunk(content='', response_metadata={'finish_reason': 'stop'}, id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
- AIMessageChunk(content='', id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0', usage_metadata={'input_tokens': 31, 'output_tokens': 5, 'total_tokens': 36})

.. code-block:: python

@@ -1231,7 +1230,7 @@

.. code-block:: python

- AIMessageChunk(content="J'adore la programmation.", response_metadata={'finish_reason': 'stop'}, id='run-bf917526-7f58-4683-84f7-36a6b671d140', usage_metadata={'input_tokens': 31, 'output_tokens': 5, 'total_tokens': 36})
+ AIMessageChunk(content="J'adore la programmation.", response_metadata={'finish_reason': 'stop'}, id='run-bf917526-7f58-4683-84f7-36a6b671d140')

Async:
.. code-block:: python

@@ -1353,6 +1352,33 @@ class Joke(BaseModel):

{'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}

+ When streaming, set the ``stream_options`` model kwarg:
+
+ .. code-block:: python
+
+ stream = llm.stream(messages, stream_options={"include_usage": True})
+ full = next(stream)
+ for chunk in stream:
+ full += chunk
+ full.usage_metadata
+
+ .. code-block:: python
+
+ {'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
+
+ Alternatively, setting ``stream_options`` when instantiating the model can be
+ useful when incorporating ``ChatOpenAI`` into LCEL chains, or when using
+ methods like ``.with_structured_output``, which generate chains under the
+ hood.
+
+ .. code-block:: python
+
+ llm = ChatOpenAI(
+ model="gpt-4o",
+ model_kwargs={"stream_options": {"include_usage": True}},
+ )
+ structured_llm = llm.with_structured_output(...)
+
Logprobs:
.. code-block:: python

From dcec133b85381a9e40ad8afa2c6f8226b8db1e3b Mon Sep 17 00:00:00 2001
From: William FH <13333726+hinthornw@users.noreply.github.com>
Date: Thu, 30 May 2024 10:34:49 -0700
Subject: [PATCH 09/54] [Core] Update Tracing Interops (#22318)

LangSmith and LangChain context var handling evolved in parallel since originally we didn't expect people to want to interweave the decorator and langchain code. Once we get a new langsmith release, this PR will let you seamlessly hand off between @traceable context and runnable config context so you can arbitrarily nest code.
It's expected that this will fail until we get another release of the SDK.

---
libs/core/langchain_core/callbacks/manager.py | 16 +-
libs/core/langchain_core/runnables/base.py | 10 +-
libs/core/langchain_core/runnables/config.py | 19 +
libs/core/langchain_core/tools.py | 6 +-
libs/core/poetry.lock | 339 +++++++++---------
libs/core/pyproject.toml | 2 +-
.../runnables/test_tracing_interops.py | 170 +++++++++
7 files changed, 377 insertions(+), 185 deletions(-)
create mode 100644 libs/core/tests/unit_tests/runnables/test_tracing_interops.py

diff --git a/libs/core/langchain_core/callbacks/manager.py b/libs/core/langchain_core/callbacks/manager.py
index db1e6faa4931b..8d3ecb6c05925 100644
--- a/libs/core/langchain_core/callbacks/manager.py
+++ b/libs/core/langchain_core/callbacks/manager.py
@@ -1918,7 +1918,7 @@ def _configure(
)
run_tree = get_run_tree_context()
- parent_run_id = None if run_tree is None else getattr(run_tree, "id")
+ parent_run_id = None if run_tree is None else run_tree.id
callback_manager = callback_manager_cls(handlers=[], parent_run_id=parent_run_id)
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
@@ -1929,10 +1929,22 @@
parent_run_id=parent_run_id,
)
else:
+ parent_run_id_ = inheritable_callbacks.parent_run_id
+ # Break ties between the external tracing context and inherited context
+ if parent_run_id is not None:
+ if parent_run_id_ is None:
+ parent_run_id_ = parent_run_id
+ # If the LC parent has already been reflected
+ # in the run tree, we know the run_tree is either the
+ # same parent or a child of the parent.
+ elif run_tree and str(parent_run_id_) in run_tree.dotted_order:
+ parent_run_id_ = parent_run_id
+ # Otherwise, we assume the LC context has progressed
+ # beyond the run tree and we should not inherit the parent.
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers.copy(),
inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(),
- parent_run_id=inheritable_callbacks.parent_run_id,
+ parent_run_id=parent_run_id_,
tags=inheritable_callbacks.tags.copy(),
inheritable_tags=inheritable_callbacks.inheritable_tags.copy(),
metadata=inheritable_callbacks.metadata.copy(),
diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py
index eee42e876e6eb..b51cb5d89fc87 100644
--- a/libs/core/langchain_core/runnables/base.py
+++ b/libs/core/langchain_core/runnables/base.py
@@ -45,6 +45,7 @@
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables.config import (
RunnableConfig,
+ _set_config_context,
acall_func_with_variable_args,
call_func_with_variable_args,
ensure_config,
@@ -55,7 +56,6 @@
merge_configs,
patch_config,
run_in_executor,
- var_child_runnable_config,
)
from langchain_core.runnables.graph import Graph
from langchain_core.runnables.schema import StreamEvent
@@ -1503,7 +1503,7 @@ def _call_with_config(
try:
child_config = patch_config(config, callbacks=run_manager.get_child())
context = copy_context()
- context.run(var_child_runnable_config.set, child_config)
+ context.run(_set_config_context, child_config)
output = cast(
Output,
context.run(
@@ -1551,7 +1551,7 @@ async def _acall_with_config(
try:
child_config = patch_config(config, callbacks=run_manager.get_child())
context = copy_context()
- context.run(var_child_runnable_config.set, child_config)
+ context.run(_set_config_context, child_config)
coro = acall_func_with_variable_args(
func, input, config, run_manager, **kwargs
)
@@ -1760,7 +1760,7 @@ def _transform_stream_with_config(
if accepts_run_manager(transformer):
kwargs["run_manager"] = run_manager
context = copy_context()
- context.run(var_child_runnable_config.set, child_config)
+ context.run(_set_config_context, child_config)
iterator = context.run(transformer, input_for_transform, **kwargs)  # type: ignore[arg-type]
if stream_handler := next(
(
@@ -1860,7 +1860,7 @@ async def _atransform_stream_with_config(
if accepts_run_manager(transformer):
kwargs["run_manager"] = run_manager
context = copy_context()
- context.run(var_child_runnable_config.set, child_config)
+ context.run(_set_config_context, child_config)
iterator = context.run(transformer, input_for_transform, **kwargs)  # type: ignore[arg-type]
if stream_handler := next(
diff --git a/libs/core/langchain_core/runnables/config.py b/libs/core/langchain_core/runnables/config.py
index 4bf32c605f057..f69a0b000006a 100644
--- a/libs/core/langchain_core/runnables/config.py
+++ b/libs/core/langchain_core/runnables/config.py
@@ -109,6 +109,25 @@ class RunnableConfig(TypedDict, total=False):
)
+def _set_config_context(config: RunnableConfig) -> None:
+ """Set the child runnable config + tracing context.
+
+ Args:
+ config (RunnableConfig): The config to set.
+ """
+ from langsmith import (
+ RunTree,  # type: ignore
+ run_helpers,  # type: ignore
+ )
+
+ var_child_runnable_config.set(config)
+ if hasattr(RunTree, "from_runnable_config"):
+ # Mirror the LC config into the langsmith tracing context so @traceable code nests correctly
+ rt = RunTree.from_runnable_config(dict(config))
+ tc = run_helpers.get_tracing_context()
+ run_helpers._set_tracing_context({**tc, "parent": rt})
+
+
def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:
"""Ensure that a config is a dict with all keys present.
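To make the interop concrete, a rough sketch of what `_set_config_context` enables once the new langsmith SDK lands (illustrative only; assumes tracing is enabled via `LANGCHAIN_TRACING_V2=true` and a langsmith version that ships `RunTree.from_runnable_config`):

    from langchain_core.runnables import RunnableLambda
    from langsmith import traceable

    @traceable
    def reverse(text: str) -> str:
        # With the handoff above, this @traceable run is parented under
        # the calling runnable's run instead of starting a separate trace.
        return text[::-1]

    chain = RunnableLambda(lambda x: reverse(x))
    chain.invoke("hello")  # yields one nested trace, not two disconnected ones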
diff --git a/libs/core/langchain_core/tools.py b/libs/core/langchain_core/tools.py index b1321042ec832..26348fc7b524e 100644 --- a/libs/core/langchain_core/tools.py +++ b/libs/core/langchain_core/tools.py @@ -65,9 +65,9 @@ ensure_config, ) from langchain_core.runnables.config import ( + _set_config_context, patch_config, run_in_executor, - var_child_runnable_config, ) from langchain_core.runnables.utils import accepts_context @@ -402,7 +402,7 @@ def run( callbacks=run_manager.get_child(), ) context = copy_context() - context.run(var_child_runnable_config.set, child_config) + context.run(_set_config_context, child_config) parsed_input = self._parse_input(tool_input) tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) observation = ( @@ -502,7 +502,7 @@ async def arun( callbacks=run_manager.get_child(), ) context = copy_context() - context.run(var_child_runnable_config.set, child_config) + context.run(_set_config_context, child_config) coro = ( context.run( self._arun, *tool_args, run_manager=run_manager, **tool_kwargs diff --git a/libs/core/poetry.lock b/libs/core/poetry.lock index eda00e260fae1..15a5b91490f67 100644 --- a/libs/core/poetry.lock +++ b/libs/core/poetry.lock @@ -1,14 +1,14 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [package.dependencies] @@ -16,13 +16,13 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anyio" -version = "4.3.0" +version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, ] [package.dependencies] @@ -780,21 +780,21 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pa [[package]] name = "ipywidgets" -version = "8.1.2" +version = "8.1.3" description = "Jupyter interactive widgets" optional = false python-versions = ">=3.7" files = [ - {file = "ipywidgets-8.1.2-py3-none-any.whl", hash = "sha256:bbe43850d79fb5e906b14801d6c01402857996864d1e5b6fa62dd2ee35559f60"}, - {file = "ipywidgets-8.1.2.tar.gz", hash = "sha256:d0b9b41e49bae926a866e613a39b0f0097745d2b9f1f3dd406641b4a57ec42c9"}, + {file = "ipywidgets-8.1.3-py3-none-any.whl", hash = 
"sha256:efafd18f7a142248f7cb0ba890a68b96abd4d6e88ddbda483c9130d12667eaf2"}, + {file = "ipywidgets-8.1.3.tar.gz", hash = "sha256:f5f9eeaae082b1823ce9eac2575272952f40d748893972956dc09700a6392d9c"}, ] [package.dependencies] comm = ">=0.1.3" ipython = ">=6.1.0" -jupyterlab-widgets = ">=3.0.10,<3.1.0" +jupyterlab-widgets = ">=3.0.11,<3.1.0" traitlets = ">=4.3.1" -widgetsnbextension = ">=4.0.10,<4.1.0" +widgetsnbextension = ">=4.0.11,<4.1.0" [package.extras] test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] @@ -882,7 +882,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, - {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] @@ -953,13 +952,13 @@ qtconsole = "*" [[package]] name = "jupyter-client" -version = "8.6.1" +version = "8.6.2" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_client-8.6.1-py3-none-any.whl", hash = "sha256:3b7bd22f058434e3b9a7ea4b1500ed47de2713872288c0d511d19926f99b459f"}, - {file = "jupyter_client-8.6.1.tar.gz", hash = "sha256:e842515e2bab8e19186d89fdfea7abd15e39dd581f94e399f00e2af5a1652d3f"}, + {file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"}, + {file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"}, ] [package.dependencies] @@ -972,7 +971,7 @@ traitlets = ">=5.3" [package.extras] docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] +test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] [[package]] name = "jupyter-console" @@ -1115,13 +1114,13 @@ test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (> [[package]] name = "jupyterlab" -version = "4.2.0" +version = "4.2.1" description = "JupyterLab computational environment" optional = false python-versions = ">=3.8" files = [ - {file = "jupyterlab-4.2.0-py3-none-any.whl", hash = "sha256:0dfe9278e25a145362289c555d9beb505697d269c10e99909766af7c440ad3cc"}, - {file = "jupyterlab-4.2.0.tar.gz", hash = "sha256:356e9205a6a2ab689c47c8fe4919dba6c076e376d03f26baadc05748c2435dd5"}, + {file = "jupyterlab-4.2.1-py3-none-any.whl", hash = "sha256:6ac6e3827b3c890e6e549800e8a4f4aaea6a69321e2240007902aa7a0c56a8e4"}, + {file = "jupyterlab-4.2.1.tar.gz", hash = "sha256:a10fb71085a6900820c62d43324005046402ffc8f0fde696103e37238a839507"}, ] [package.dependencies] @@ -1161,13 +1160,13 @@ files = [ [[package]] name = "jupyterlab-server" -version = "2.27.1" +version = "2.27.2" description = "A set of server components for JupyterLab and JupyterLab like applications." 
optional = false python-versions = ">=3.8" files = [ - {file = "jupyterlab_server-2.27.1-py3-none-any.whl", hash = "sha256:f5e26156e5258b24d532c84e7c74cc212e203bff93eb856f81c24c16daeecc75"}, - {file = "jupyterlab_server-2.27.1.tar.gz", hash = "sha256:097b5ac709b676c7284ac9c5e373f11930a561f52cd5a86e4fc7e5a9c8a8631d"}, + {file = "jupyterlab_server-2.27.2-py3-none-any.whl", hash = "sha256:54aa2d64fd86383b5438d9f0c032f043c4d8c0264b8af9f60bd061157466ea43"}, + {file = "jupyterlab_server-2.27.2.tar.gz", hash = "sha256:15cbb349dc45e954e09bacf81b9f9bcb10815ff660fb2034ecd7417db3a7ea27"}, ] [package.dependencies] @@ -1187,13 +1186,13 @@ test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-v [[package]] name = "jupyterlab-widgets" -version = "3.0.10" +version = "3.0.11" description = "Jupyter interactive widgets for JupyterLab" optional = false python-versions = ">=3.7" files = [ - {file = "jupyterlab_widgets-3.0.10-py3-none-any.whl", hash = "sha256:dd61f3ae7a5a7f80299e14585ce6cf3d6925a96c9103c978eda293197730cb64"}, - {file = "jupyterlab_widgets-3.0.10.tar.gz", hash = "sha256:04f2ac04976727e4f9d0fa91cdc2f1ab860f965e504c29dbd6a65c882c9d04c0"}, + {file = "jupyterlab_widgets-3.0.11-py3-none-any.whl", hash = "sha256:78287fd86d20744ace330a61625024cf5521e1c012a352ddc0a3cdc2348becd0"}, + {file = "jupyterlab_widgets-3.0.11.tar.gz", hash = "sha256:dd5ac679593c969af29c9bed054c24f26842baa51352114736756bc035deee27"}, ] [[package]] @@ -1217,13 +1216,13 @@ url = "../text-splitters" [[package]] name = "langsmith" -version = "0.1.59" +version = "0.1.65" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.59-py3-none-any.whl", hash = "sha256:445e3bc1d3baa1e5340cd979907a19483b9763a2ed37b863a01113d406f69345"}, - {file = "langsmith-0.1.59.tar.gz", hash = "sha256:e748a89f4dd6aa441349143e49e546c03b5dfb43376a25bfef6a5ca792fe1437"}, + {file = "langsmith-0.1.65-py3-none-any.whl", hash = "sha256:ab4487029240e69cca30da1065f1e9138e5a7ca2bbe8c697f0bd7d5839f71cf7"}, + {file = "langsmith-0.1.65.tar.gz", hash = "sha256:d3c2eb2391478bd79989f02652cf66e29a7959d677614b6993a47cef43f7f43b"}, ] [package.dependencies] @@ -1738,13 +1737,13 @@ twisted = ["twisted"] [[package]] name = "prompt-toolkit" -version = "3.0.43" +version = "3.0.45" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, - {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, + {file = "prompt_toolkit-3.0.45-py3-none-any.whl", hash = "sha256:a29b89160e494e3ea8622b09fa5897610b437884dcdcd054fdc1308883326c2a"}, + {file = "prompt_toolkit-3.0.45.tar.gz", hash = "sha256:07c60ee4ab7b7e90824b61afa840c8f5aad2d46b3e2e10acc33d8ecc94a49089"}, ] [package.dependencies] @@ -1816,18 +1815,18 @@ files = [ [[package]] name = "pydantic" -version = "2.7.1" +version = "2.7.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, - {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, + {file = 
"pydantic-2.7.2-py3-none-any.whl", hash = "sha256:834ab954175f94e6e68258537dc49402c4a5e9d0409b9f1b86b7e934a8372de7"}, + {file = "pydantic-2.7.2.tar.gz", hash = "sha256:71b2945998f9c9b7919a45bde9a50397b289937d215ae141c1d0903ba7149fd7"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.2" +pydantic-core = "2.18.3" typing-extensions = ">=4.6.1" [package.extras] @@ -1835,90 +1834,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.2" +version = "2.18.3" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, - {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, - {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, - {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, - {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, - {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, - {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, - {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, - {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, - {file = 
"pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, - {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, - {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, - {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, - {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, - {file = 
"pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, - {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, + {file = "pydantic_core-2.18.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:744697428fcdec6be5670460b578161d1ffe34743a5c15656be7ea82b008197c"}, + {file = "pydantic_core-2.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37b40c05ced1ba4218b14986fe6f283d22e1ae2ff4c8e28881a70fb81fbfcda7"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544a9a75622357076efb6b311983ff190fbfb3c12fc3a853122b34d3d358126c"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e2e253af04ceaebde8eb201eb3f3e3e7e390f2d275a88300d6a1959d710539e2"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:855ec66589c68aa367d989da5c4755bb74ee92ccad4fdb6af942c3612c067e34"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d3e42bb54e7e9d72c13ce112e02eb1b3b55681ee948d748842171201a03a98a"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6ac9ffccc9d2e69d9fba841441d4259cb668ac180e51b30d3632cd7abca2b9b"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c56eca1686539fa0c9bda992e7bd6a37583f20083c37590413381acfc5f192d6"}, + {file = "pydantic_core-2.18.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:17954d784bf8abfc0ec2a633108207ebc4fa2df1a0e4c0c3ccbaa9bb01d2c426"}, + {file = "pydantic_core-2.18.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:98ed737567d8f2ecd54f7c8d4f8572ca7c7921ede93a2e52939416170d357812"}, + {file = "pydantic_core-2.18.3-cp310-none-win32.whl", hash = "sha256:9f9e04afebd3ed8c15d67a564ed0a34b54e52136c6d40d14c5547b238390e779"}, + {file = "pydantic_core-2.18.3-cp310-none-win_amd64.whl", hash = "sha256:45e4ffbae34f7ae30d0047697e724e534a7ec0a82ef9994b7913a412c21462a0"}, + {file = "pydantic_core-2.18.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b9ebe8231726c49518b16b237b9fe0d7d361dd221302af511a83d4ada01183ab"}, + {file = "pydantic_core-2.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b8e20e15d18bf7dbb453be78a2d858f946f5cdf06c5072453dace00ab652e2b2"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0d9ff283cd3459fa0bf9b0256a2b6f01ac1ff9ffb034e24457b9035f75587cb"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f7ef5f0ebb77ba24c9970da18b771711edc5feaf00c10b18461e0f5f5949231"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73038d66614d2e5cde30435b5afdced2b473b4c77d4ca3a8624dd3e41a9c19be"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6afd5c867a74c4d314c557b5ea9520183fadfbd1df4c2d6e09fd0d990ce412cd"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd7df92f28d351bb9f12470f4c533cf03d1b52ec5a6e5c58c65b183055a60106"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:80aea0ffeb1049336043d07799eace1c9602519fb3192916ff525b0287b2b1e4"}, + {file = "pydantic_core-2.18.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:aaee40f25bba38132e655ffa3d1998a6d576ba7cf81deff8bfa189fb43fd2bbe"}, + {file = "pydantic_core-2.18.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9128089da8f4fe73f7a91973895ebf2502539d627891a14034e45fb9e707e26d"}, + {file = "pydantic_core-2.18.3-cp311-none-win32.whl", hash = "sha256:fec02527e1e03257aa25b1a4dcbe697b40a22f1229f5d026503e8b7ff6d2eda7"}, + {file = "pydantic_core-2.18.3-cp311-none-win_amd64.whl", hash = "sha256:58ff8631dbab6c7c982e6425da8347108449321f61fe427c52ddfadd66642af7"}, + {file = "pydantic_core-2.18.3-cp311-none-win_arm64.whl", hash = "sha256:3fc1c7f67f34c6c2ef9c213e0f2a351797cda98249d9ca56a70ce4ebcaba45f4"}, + {file = "pydantic_core-2.18.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f0928cde2ae416a2d1ebe6dee324709c6f73e93494d8c7aea92df99aab1fc40f"}, + {file = "pydantic_core-2.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:0bee9bb305a562f8b9271855afb6ce00223f545de3d68560b3c1649c7c5295e9"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e862823be114387257dacbfa7d78547165a85d7add33b446ca4f4fae92c7ff5c"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a36f78674cbddc165abab0df961b5f96b14461d05feec5e1f78da58808b97e7"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba905d184f62e7ddbb7a5a751d8a5c805463511c7b08d1aca4a3e8c11f2e5048"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fdd362f6a586e681ff86550b2379e532fee63c52def1c666887956748eaa326"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24b214b7ee3bd3b865e963dbed0f8bc5375f49449d70e8d407b567af3222aae4"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:691018785779766127f531674fa82bb368df5b36b461622b12e176c18e119022"}, + {file = "pydantic_core-2.18.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:60e4c625e6f7155d7d0dcac151edf5858102bc61bf959d04469ca6ee4e8381bd"}, + {file = "pydantic_core-2.18.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4e651e47d981c1b701dcc74ab8fec5a60a5b004650416b4abbef13db23bc7be"}, + {file = "pydantic_core-2.18.3-cp312-none-win32.whl", hash = "sha256:ffecbb5edb7f5ffae13599aec33b735e9e4c7676ca1633c60f2c606beb17efc5"}, + {file = "pydantic_core-2.18.3-cp312-none-win_amd64.whl", hash = "sha256:2c8333f6e934733483c7eddffdb094c143b9463d2af7e6bd85ebcb2d4a1b82c6"}, + {file = "pydantic_core-2.18.3-cp312-none-win_arm64.whl", hash = "sha256:7a20dded653e516a4655f4c98e97ccafb13753987434fe7cf044aa25f5b7d417"}, + {file = "pydantic_core-2.18.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:eecf63195be644b0396f972c82598cd15693550f0ff236dcf7ab92e2eb6d3522"}, + {file = "pydantic_core-2.18.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c44efdd3b6125419c28821590d7ec891c9cb0dff33a7a78d9d5c8b6f66b9702"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e59fca51ffbdd1638b3856779342ed69bcecb8484c1d4b8bdb237d0eb5a45e2"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70cf099197d6b98953468461d753563b28e73cf1eade2ffe069675d2657ed1d5"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63081a49dddc6124754b32a3774331467bfc3d2bd5ff8f10df36a95602560361"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:370059b7883485c9edb9655355ff46d912f4b03b009d929220d9294c7fd9fd60"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a64faeedfd8254f05f5cf6fc755023a7e1606af3959cfc1a9285744cc711044"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19d2e725de0f90d8671f89e420d36c3dd97639b98145e42fcc0e1f6d492a46dc"}, + {file = "pydantic_core-2.18.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:67bc078025d70ec5aefe6200ef094576c9d86bd36982df1301c758a9fff7d7f4"}, + {file = "pydantic_core-2.18.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:adf952c3f4100e203cbaf8e0c907c835d3e28f9041474e52b651761dc248a3c0"}, + {file = "pydantic_core-2.18.3-cp38-none-win32.whl", 
hash = "sha256:9a46795b1f3beb167eaee91736d5d17ac3a994bf2215a996aed825a45f897558"}, + {file = "pydantic_core-2.18.3-cp38-none-win_amd64.whl", hash = "sha256:200ad4e3133cb99ed82342a101a5abf3d924722e71cd581cc113fe828f727fbc"}, + {file = "pydantic_core-2.18.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:304378b7bf92206036c8ddd83a2ba7b7d1a5b425acafff637172a3aa72ad7083"}, + {file = "pydantic_core-2.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c826870b277143e701c9ccf34ebc33ddb4d072612683a044e7cce2d52f6c3fef"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e201935d282707394f3668380e41ccf25b5794d1b131cdd96b07f615a33ca4b1"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5560dda746c44b48bf82b3d191d74fe8efc5686a9ef18e69bdabccbbb9ad9442"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b32c2a1f8032570842257e4c19288eba9a2bba4712af542327de9a1204faff8"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:929c24e9dea3990bc8bcd27c5f2d3916c0c86f5511d2caa69e0d5290115344a9"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1a8376fef60790152564b0eab376b3e23dd6e54f29d84aad46f7b264ecca943"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dccf3ef1400390ddd1fb55bf0632209d39140552d068ee5ac45553b556780e06"}, + {file = "pydantic_core-2.18.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:41dbdcb0c7252b58fa931fec47937edb422c9cb22528f41cb8963665c372caf6"}, + {file = "pydantic_core-2.18.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:666e45cf071669fde468886654742fa10b0e74cd0fa0430a46ba6056b24fb0af"}, + {file = "pydantic_core-2.18.3-cp39-none-win32.whl", hash = "sha256:f9c08cabff68704a1b4667d33f534d544b8a07b8e5d039c37067fceb18789e78"}, + {file = "pydantic_core-2.18.3-cp39-none-win_amd64.whl", hash = "sha256:4afa5f5973e8572b5c0dcb4e2d4fda7890e7cd63329bd5cc3263a25c92ef0026"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:77319771a026f7c7d29c6ebc623de889e9563b7087911b46fd06c044a12aa5e9"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:df11fa992e9f576473038510d66dd305bcd51d7dd508c163a8c8fe148454e059"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d531076bdfb65af593326ffd567e6ab3da145020dafb9187a1d131064a55f97c"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d33ce258e4e6e6038f2b9e8b8a631d17d017567db43483314993b3ca345dcbbb"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1f9cd7f5635b719939019be9bda47ecb56e165e51dd26c9a217a433e3d0d59a9"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cd4a032bb65cc132cae1fe3e52877daecc2097965cd3914e44fbd12b00dae7c5"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f2718430098bcdf60402136c845e4126a189959d103900ebabb6774a5d9fdb"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c0037a92cf0c580ed14e10953cdd26528e8796307bb8bb312dc65f71547df04d"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:b95a0972fac2b1ff3c94629fc9081b16371dad870959f1408cc33b2f78ad347a"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a62e437d687cc148381bdd5f51e3e81f5b20a735c55f690c5be94e05da2b0d5c"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b367a73a414bbb08507da102dc2cde0fa7afe57d09b3240ce82a16d608a7679c"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ecce4b2360aa3f008da3327d652e74a0e743908eac306198b47e1c58b03dd2b"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd4435b8d83f0c9561a2a9585b1de78f1abb17cb0cef5f39bf6a4b47d19bafe3"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:616221a6d473c5b9aa83fa8982745441f6a4a62a66436be9445c65f241b86c94"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7e6382ce89a92bc1d0c0c5edd51e931432202b9080dc921d8d003e616402efd1"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff58f379345603d940e461eae474b6bbb6dab66ed9a851ecd3cb3709bf4dcf6a"}, + {file = "pydantic_core-2.18.3.tar.gz", hash = "sha256:432e999088d85c8f36b9a3f769a8e2b57aabd817bbb729a90d1fe7f18f6f1f39"}, ] [package.dependencies] @@ -2129,7 +2128,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2137,16 +2135,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, 
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2163,7 +2153,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2171,7 +2160,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -2336,13 +2324,13 @@ rpds-py = ">=0.7.0" [[package]] name = "requests" -version = 
"2.31.0" +version = "2.32.3" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -2780,13 +2768,13 @@ files = [ [[package]] name = "types-requests" -version = "2.31.0.20240406" +version = "2.32.0.20240523" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.31.0.20240406.tar.gz", hash = "sha256:4428df33c5503945c74b3f42e82b181e86ec7b724620419a2966e2de604ce1a1"}, - {file = "types_requests-2.31.0.20240406-py3-none-any.whl", hash = "sha256:6216cdac377c6b9a040ac1c0404f7284bd13199c0e1bb235f4324627e8898cf5"}, + {file = "types-requests-2.32.0.20240523.tar.gz", hash = "sha256:26b8a6de32d9f561192b9942b41c0ab2d8010df5677ca8aa146289d11d505f57"}, + {file = "types_requests-2.32.0.20240523-py3-none-any.whl", hash = "sha256:f19ed0e2daa74302069bbbbf9e82902854ffa780bc790742a810a9aaa52f65ec"}, ] [package.dependencies] @@ -2794,13 +2782,13 @@ urllib3 = ">=2" [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.0-py3-none-any.whl", hash = "sha256:b349c66bea9016ac22978d800cfff206d5f9816951f12a7d0ec5578b0a819594"}, + {file = "typing_extensions-4.12.0.tar.gz", hash = "sha256:8cbcdc8606ebcb0d95453ad7dc5065e6237b6aa230a31e81d0f440c30fed5fd8"}, ] [[package]] @@ -2836,40 +2824,43 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "watchdog" -version = "4.0.0" +version = "4.0.1" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, - {file = 
"watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, - {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, - {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, - {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, - {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, - {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, - {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, - {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = 
"sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, + {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, + {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, + {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, + {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, ] [package.extras] @@ -2930,24 +2921,24 @@ test = ["websockets"] [[package]] name = "widgetsnbextension" -version = "4.0.10" +version = "4.0.11" description = "Jupyter interactive widgets for Jupyter Notebook" optional = false python-versions = ">=3.7" files = [ - {file = "widgetsnbextension-4.0.10-py3-none-any.whl", hash = "sha256:d37c3724ec32d8c48400a435ecfa7d3e259995201fbefa37163124a9fcb393cc"}, - {file = "widgetsnbextension-4.0.10.tar.gz", hash = "sha256:64196c5ff3b9a9183a8e699a4227fb0b7002f252c814098e66c4d1cd0644688f"}, + {file = "widgetsnbextension-4.0.11-py3-none-any.whl", hash = "sha256:55d4d6949d100e0d08b94948a42efc3ed6dfdc0e9468b2c4b128c9a2ce3a7a36"}, + {file = "widgetsnbextension-4.0.11.tar.gz", hash = "sha256:8b22a8f1910bfd188e596fe7fc05dcbd87e810c8a4ba010bdb3da86637398474"}, ] [[package]] name = "zipp" -version = "3.18.2" +version = "3.19.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.18.2-py3-none-any.whl", hash = "sha256:dce197b859eb796242b0622af1b8beb0a722d52aa2f57133ead08edd5bf5374e"}, - {file = "zipp-3.18.2.tar.gz", hash = "sha256:6278d9ddbcfb1f1089a88fde84481528b07b0e10474e09dcfe53dad4069fa059"}, + {file = "zipp-3.19.0-py3-none-any.whl", hash = "sha256:96dc6ad62f1441bcaccef23b274ec471518daf4fbbc580341204936a5a3dddec"}, + {file = "zipp-3.19.0.tar.gz", hash = "sha256:952df858fb3164426c976d9338d3961e8e8b3758e2e059e0f754b8c4262625ee"}, ] [package.extras] @@ -2960,4 +2951,4 @@ extended-testing = ["jinja2"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "c26c35cf1c6529b38924a1b9d3186fdefb3b3a1fecc5197559586451bb913f4a" +content-hash = "4feba718f092ba77ab3263f6002898eddf6661fcfdae8b29b607936abb7e33c9" diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index d7ec0b6bb0230..31292512d1809 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -11,7 +11,7 @@ repository = "https://github.com/langchain-ai/langchain" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" pydantic = ">=1,<3" -langsmith = "^0.1.0" +langsmith = "^0.1.65" tenacity = "^8.1.0" jsonpatch = "^1.33" PyYAML = ">=5.3" diff --git a/libs/core/tests/unit_tests/runnables/test_tracing_interops.py b/libs/core/tests/unit_tests/runnables/test_tracing_interops.py new file mode 100644 index 0000000000000..ef9543b0467e1 --- /dev/null +++ b/libs/core/tests/unit_tests/runnables/test_tracing_interops.py @@ -0,0 +1,170 @@ +import json +import sys +import time +from unittest.mock import MagicMock + +import pytest +from langsmith import Client, traceable + +from langchain_core.runnables.base import RunnableLambda +from langchain_core.tracers.langchain import LangChainTracer + + +def _get_posts(client: Client) -> list: + mock_calls = 
client.session.request.mock_calls # type: ignore + posts = [] + for call in mock_calls: + if call.args: + if call.args[0] != "POST": + continue + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + posts.extend(body["post"]) + return posts + + +def test_config_traceable_handoff() -> None: + mock_session = MagicMock() + mock_client_ = Client(session=mock_session, api_key="test") + tracer = LangChainTracer(client=mock_client_) + + @traceable + def my_great_great_grandchild_function(a: int) -> int: + return a + 1 + + @RunnableLambda + def my_great_grandchild_function(a: int) -> int: + return my_great_great_grandchild_function(a) + + @RunnableLambda + def my_grandchild_function(a: int) -> int: + return my_great_grandchild_function.invoke(a) + + @traceable + def my_child_function(a: int) -> int: + return my_grandchild_function.invoke(a) * 3 + + @traceable() + def my_function(a: int) -> int: + return my_child_function(a) + + def my_parent_function(a: int) -> int: + return my_function(a) + + my_parent_runnable = RunnableLambda(my_parent_function) + + assert my_parent_runnable.invoke(1, {"callbacks": [tracer]}) == 6 + for _ in range(15): + time.sleep(0.1) + posts = _get_posts(mock_client_) + if len(posts) == 6: + break + # There should have been 6 runs created, + # one for each function invocation + assert len(posts) == 6 + name_to_body = {post["name"]: post for post in posts} + ordered_names = [ + "my_parent_function", + "my_function", + "my_child_function", + "my_grandchild_function", + "my_great_grandchild_function", + "my_great_great_grandchild_function", + ] + trace_id = posts[0]["trace_id"] + last_dotted_order = None + parent_run_id = None + for name in ordered_names: + id_ = name_to_body[name]["id"] + parent_run_id_ = name_to_body[name]["parent_run_id"] + if parent_run_id_ is not None: + assert parent_run_id == parent_run_id_ + assert name in name_to_body + # All within the same trace + assert name_to_body[name]["trace_id"] == trace_id + dotted_order: str = name_to_body[name]["dotted_order"] + assert dotted_order is not None + if last_dotted_order is not None: + assert dotted_order > last_dotted_order + assert dotted_order.startswith(last_dotted_order), ( + "Unexpected dotted order for run" + f" {name}\n{dotted_order}\n{last_dotted_order}" + ) + last_dotted_order = dotted_order + parent_run_id = id_ + + +@pytest.mark.skipif( + sys.version_info < (3, 11), reason="Asyncio context vars require Python 3.11+" +) +async def test_config_traceable_async_handoff() -> None: + mock_session = MagicMock() + mock_client_ = Client(session=mock_session, api_key="test") + tracer = LangChainTracer(client=mock_client_) + + @traceable + def my_great_great_grandchild_function(a: int) -> int: + return a + 1 + + @RunnableLambda + def my_great_grandchild_function(a: int) -> int: + return my_great_great_grandchild_function(a) + + @RunnableLambda # type: ignore + async def my_grandchild_function(a: int) -> int: + return my_great_grandchild_function.invoke(a) + + @traceable + async def my_child_function(a: int) -> int: + return await my_grandchild_function.ainvoke(a) * 3 # type: ignore + + @traceable() + async def my_function(a: int) -> int: + return await my_child_function(a) + + async def my_parent_function(a: int) -> int: + return await my_function(a) + + my_parent_runnable = RunnableLambda(my_parent_function) # type: ignore + result = await my_parent_runnable.ainvoke(1, {"callbacks": [tracer]}) + assert 
result == 6 + for _ in range(15): + time.sleep(0.1) + posts = _get_posts(mock_client_) + if len(posts) == 6: + break + # There should have been 6 runs created, + # one for each function invocation + assert len(posts) == 6 + name_to_body = {post["name"]: post for post in posts} + ordered_names = [ + "my_parent_function", + "my_function", + "my_child_function", + "my_grandchild_function", + "my_great_grandchild_function", + "my_great_great_grandchild_function", + ] + trace_id = posts[0]["trace_id"] + last_dotted_order = None + parent_run_id = None + for name in ordered_names: + id_ = name_to_body[name]["id"] + parent_run_id_ = name_to_body[name]["parent_run_id"] + if parent_run_id_ is not None: + assert parent_run_id == parent_run_id_ + assert name in name_to_body + # All within the same trace + assert name_to_body[name]["trace_id"] == trace_id + dotted_order: str = name_to_body[name]["dotted_order"] + assert dotted_order is not None + if last_dotted_order is not None: + assert dotted_order > last_dotted_order + assert dotted_order.startswith(last_dotted_order), ( + "Unexpected dotted order for run" + f" {name}\n{dotted_order}\n{last_dotted_order}" + ) + last_dotted_order = dotted_order + parent_run_id = id_ From ee32369265103ac64b412a73a758ae0d1649cafe Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Thu, 30 May 2024 11:26:41 -0700 Subject: [PATCH 10/54] core[patch]: fix runnable history and add docs (#22283) --- docs/docs/how_to/message_history.ipynb | 795 ++++++++++-------- docs/static/img/message_history.png | Bin 0 -> 40253 bytes libs/core/langchain_core/runnables/history.py | 21 +- libs/core/langchain_core/tracers/base.py | 14 +- .../core/langchain_core/tracers/log_stream.py | 2 +- .../langchain_core/tracers/root_listeners.py | 2 +- libs/core/tests/unit_tests/fake/memory.py | 2 + 7 files changed, 487 insertions(+), 349 deletions(-) create mode 100644 docs/static/img/message_history.png diff --git a/docs/docs/how_to/message_history.ipynb b/docs/docs/how_to/message_history.ipynb index dd343fc2661a6..fbc88b00b22f2 100644 --- a/docs/docs/how_to/message_history.ipynb +++ b/docs/docs/how_to/message_history.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "6a4becbd-238e-4c1d-a02d-08e61fbc3763", + "id": "f47033eb", "metadata": {}, "source": [ "# How to add message history\n", @@ -18,420 +18,465 @@ "\n", ":::\n", "\n", - "Passing conversation state into and out a chain is vital when building a chatbot. The [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) class lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it.\n", + "Passing conversation state into and out a chain is vital when building a chatbot. The [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) class lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it. Specifically, it loads previous messages in the conversation BEFORE passing it to the Runnable, and it saves the generated response as a message AFTER calling the runnable. 
This class also enables multiple conversations by saving each conversation with a `session_id` - it then expects a `session_id` to be passed in the config when calling the runnable, and uses that to look up the relevant conversation history.\n", "\n", - "Specifically, it can be used for any Runnable that takes as input one of:\n", + "![index_diagram](../../static/img/message_history.png)\n", "\n", - "* a sequence of [`BaseMessages`](/docs/concepts/#message-types)\n", - "* a dict with a key that takes a sequence of `BaseMessages`\n", - "* a dict with a key that takes the latest message(s) as a string or sequence of `BaseMessages`, and a separate key that takes historical messages\n", + "In practice this looks something like:\n", "\n", - "And returns as output one of\n", + "```python\n", + "from langchain_core.runnables.history import RunnableWithMessageHistory\n", "\n", - "* a string that can be treated as the contents of an `AIMessage`\n", - "* a sequence of `BaseMessage`\n", - "* a dict with a key that contains a sequence of `BaseMessage`\n", "\n", - "Let's take a look at some examples to see how it works. First we construct a runnable (which here accepts a dict as input and returns a message as output):\n", + "with_message_history = RunnableWithMessageHistory(\n", + " # The underlying runnable\n", + " runnable, \n", + " # A function that takes in a session id and returns a memory object\n", + " get_session_history, \n", + " # Other parameters that may be needed to align the inputs/outputs\n", + " # of the Runnable with the memory object\n", + " ... \n", + ")\n", "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "with_message_history.invoke(\n", + " # The same input as before\n", + " {\"ability\": \"math\", \"input\": \"What does cosine mean?\"},\n", + " # Configuration specifying the `session_id`,\n", + " # which controls which conversation to load\n", + " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", + ")\n", + "```\n", "\n", - "\n", - "```" + "\n", + "In order to properly set this up there are two main things to consider:\n", + "\n", + "1. How to store and load messages? (this is `get_session_history` in the example above)\n", + "2. What is the underlying Runnable you are wrapping and what are its inputs/outputs? (this is `runnable` in the example above, as well any additional parameters you pass to `RunnableWithMessageHistory` to align the inputs/outputs)\n", + "\n", + "Let's walk through these pieces (and more) below." ] }, { - "cell_type": "code", - "execution_count": 1, - "id": "6489f585", + "cell_type": "markdown", + "id": "734123cb", "metadata": {}, - "outputs": [], "source": [ - "# | output: false\n", - "# | echo: false\n", + "## How to store and load messages\n", "\n", - "%pip install -qU langchain langchain_anthropic\n", + "A key part of this is storing and loading messages.\n", + "When constructing `RunnableWithMessageHistory` you need to pass in a `get_session_history` function.\n", + "This function should take in a `session_id` and return a `BaseChatMessageHistory` object.\n", "\n", - "import os\n", - "from getpass import getpass\n", + "**What is `session_id`?** \n", "\n", - "from langchain_anthropic import ChatAnthropic\n", + "`session_id` is an identifier for the session (conversation) thread that these input messages correspond to. 
This allows you to maintain several conversations/threads with the same chain at the same time.\n",
    "\n",
    "**What is `BaseChatMessageHistory`?** \n",
    "\n",
    "`BaseChatMessageHistory` is a class that can load and save message objects. It will be called by `RunnableWithMessageHistory` to do exactly that. These classes are usually initialized with a session id.\n",
    "\n",
    "Let's create a `get_session_history` function to use for this example. To keep things simple, we will use `SQLChatMessageHistory`, which stores each conversation in a local SQLite database."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e8210560",
   "metadata": {},
   "outputs": [],
   "source": [
    "! rm memory.db"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "27f36241",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.chat_message_histories import SQLChatMessageHistory\n",
    "\n",
    "\n",
    "def get_session_history(session_id):\n",
    "    return SQLChatMessageHistory(session_id, \"sqlite:///memory.db\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c200cb3a",
   "metadata": {},
   "source": [
    "Check out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using other providers (Redis, Postgres, etc)."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a531da5e",
   "metadata": {},
   "source": [
    "## What is the runnable you are trying to wrap?\n",
    "\n",
    "`RunnableWithMessageHistory` can only wrap certain types of Runnables. Specifically, it can be used for any Runnable that takes as input one of:\n",
    "\n",
    "* a sequence of [`BaseMessages`](/docs/concepts/#message-types)\n",
    "* a dict with a key that takes a sequence of `BaseMessages`\n",
    "* a dict with a key that takes the latest message(s) as a string or sequence of `BaseMessages`, and a separate key that takes historical messages\n",
    "\n",
    "And returns as output one of:\n",
    "\n",
    "* a string that can be treated as the contents of an `AIMessage`\n",
    "* a sequence of `BaseMessage`\n",
    "* a dict with a key that contains a sequence of `BaseMessage`\n",
    "\n",
    "Let's take a look at some examples to see how it works. 
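But first, a quick aside on alternative storage backends.\n",
    "\n",
    "Nothing here is specific to SQLite: any `BaseChatMessageHistory` implementation can back `get_session_history`. For instance, if you do not need persistence between runs, a purely in-memory variant works too. A minimal sketch (illustrative only, not used in the examples below):\n",
    "\n",
    "```python\n",
    "from langchain_community.chat_message_histories import ChatMessageHistory\n",
    "\n",
    "store = {}  # illustrative in-memory store: one history object per session id\n",
    "\n",
    "\n",
    "def get_session_history(session_id):\n",
    "    # Drop-in alternative to the SQLite-backed version above.\n",
    "    if session_id not in store:\n",
    "        store[session_id] = ChatMessageHistory()\n",
    "    return store[session_id]\n",
    "```\n",
    "\n",
    "With that noted, on to the examples.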
" ] }, { "cell_type": "markdown", - "id": "3d83adad-9672-496d-9f25-5747e7b8c8bb", + "id": "6a4becbd-238e-4c1d-a02d-08e61fbc3763", "metadata": {}, "source": [ - "## In-memory\n", + "### Setup\n", "\n", - "Below we show a simple example in which the chat history lives in memory, in this case via a global Python dict.\n", + "First we construct a runnable (which here accepts a dict as input and returns a message as output):\n", "\n", - "We construct a callable `get_session_history` that references this dict to return an instance of `ChatMessageHistory`. The arguments to the callable can be specified by passing a configuration to the `RunnableWithMessageHistory` at runtime. By default, the configuration parameter is expected to be a single string `session_id`. This can be adjusted via the `history_factory_config` kwarg.\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "Using the single-parameter default:" + "\n", + "```" ] }, { "cell_type": "code", "execution_count": 3, - "id": "54348d02-d8ee-440c-bbf9-41bc0fbbc46c", + "id": "6489f585", "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_message_histories import ChatMessageHistory\n", - "from langchain_core.chat_history import BaseChatMessageHistory\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", - "\n", - "store = {}\n", + "# | output: false\n", + "# | echo: false\n", "\n", + "# %pip install -qU langchain langchain_anthropic\n", "\n", - "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n", - " if session_id not in store:\n", - " store[session_id] = ChatMessageHistory()\n", - " return store[session_id]\n", + "# import os\n", + "# from getpass import getpass\n", "\n", + "# os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n", + "from langchain_anthropic import ChatAnthropic\n", "\n", - "with_message_history = RunnableWithMessageHistory(\n", - " runnable,\n", - " get_session_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"history\",\n", - ")" + "model = ChatAnthropic(model=\"claude-3-haiku-20240307\", temperature=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2ed413b4-33a1-48ee-89b0-2d4917ec101a", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.messages import HumanMessage\n", + "from langchain_core.runnables.history import RunnableWithMessageHistory" ] }, { "cell_type": "markdown", - "id": "01acb505-3fd3-4ab4-9f04-5ea07e81542e", + "id": "e8816b01", "metadata": {}, "source": [ - ":::info\n", - "\n", - "Note that we've specified `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to).\n", + "### Messages input, message(s) output\n", "\n", - ":::" + "The simplest form is just adding memory to a ChatModel.\n", + "ChatModels accept a list of messages as input and output a message.\n", + "This makes it very easy to use `RunnableWithMessageHistory` - no additional configuration is needed!" 
] }, { - "cell_type": "markdown", - "id": "35222c30", + "cell_type": "code", + "execution_count": 5, + "id": "0521d551", "metadata": {}, + "outputs": [], "source": [ - "When invoking this new runnable, we specify the corresponding chat history via a configuration parameter:" + "runnable_with_history = RunnableWithMessageHistory(\n", + " model,\n", + " get_session_history,\n", + ")" ] }, { "cell_type": "code", - "execution_count": 4, - "id": "01384412-f08e-4634-9edb-3f46f475b582", + "execution_count": 6, + "id": "d5142e1a", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse of a right triangle.', response_metadata={'id': 'msg_01DH8iRBELVbF3sqM8U5sk8A', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 32, 'output_tokens': 31}}, id='run-e07fc012-a4f6-4e47-8ef8-250f296eba5b-0')" + "AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. How can I help you today?\", response_metadata={'id': 'msg_01UHCCMiZz9yNYjt41xUJrtk', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-55f6a451-606b-4e04-9e39-e03b81035c1f-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})" ] }, - "execution_count": 4, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "with_message_history.invoke(\n", - " {\"ability\": \"math\", \"input\": \"What does cosine mean?\"},\n", - " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", + "runnable_with_history.invoke(\n", + " [HumanMessage(content=\"hi - im bob!\")],\n", + " config={\"configurable\": {\"session_id\": \"1\"}},\n", ")" ] }, { "cell_type": "code", - "execution_count": 5, - "id": "954688a2-9a3f-47ee-a9e8-fa0c83e69477", + "execution_count": 7, + "id": "768e0c12", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='The inverse of the cosine function is called the arccosine or inverse cosine.', response_metadata={'id': 'msg_015TeeRQBvTvc7XG1JxYqZyq', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 72, 'output_tokens': 22}}, id='run-32ae22ea-3b2f-4d38-8c8a-cb8702e2f3e7-0')" + "AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. 
I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_018L96tAxiexMKsHBQz22CcE', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-7399ddb5-bb06-444b-bfb2-2f65674105dd-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})" ] }, - "execution_count": 5, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Remembers\n", - "with_message_history.invoke(\n", - " {\"ability\": \"math\", \"input\": \"What is its inverse called?\"},\n", - " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", + "runnable_with_history.invoke(\n", + " [HumanMessage(content=\"whats my name?\")],\n", + " config={\"configurable\": {\"session_id\": \"1\"}},\n", ")" ] }, { "cell_type": "markdown", - "id": "e0c651e5", + "id": "9d942227", "metadata": {}, "source": [ ":::info\n", "\n", - "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows that \"it\" refers to \"cosine\" in this case.\n", + "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n", "\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "id": "a44f8d5f", - "metadata": {}, - "source": [ - "Now let's try a different `session_id`" + ":::\n", + "\n", + "We can now try this with a new session id and see that it does not remember." ] }, { "cell_type": "code", - "execution_count": 6, - "id": "39350d7c-2641-4744-bc2a-fd6a57c4ea90", + "execution_count": 14, + "id": "addddd03", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='The inverse of a function is the function that undoes the original function.', response_metadata={'id': 'msg_01M8WbHWg2sjWTz3m3NKqZuF', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 32, 'output_tokens': 18}}, id='run-b64c73d6-03ee-4b0a-85e0-34beb45408d4-0')" + "AIMessage(content=\"I'm afraid I don't actually know your name. As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_01LhbWu7mSKTvKAx7iQpMPzd', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-cf86cad2-21f2-4525-afc8-09bfd1e8af70-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})" ] }, - "execution_count": 6, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# New session_id --> does not remember.\n", - "with_message_history.invoke(\n", - " {\"ability\": \"math\", \"input\": \"What is its inverse called?\"},\n", - " config={\"configurable\": {\"session_id\": \"def234\"}},\n", + "runnable_with_history.invoke(\n", + " [HumanMessage(content=\"whats my name?\")],\n", + " config={\"configurable\": {\"session_id\": \"1a\"}},\n", ")" ] }, { "cell_type": "markdown", - "id": "5416e195", + "id": "8b26a0c0", "metadata": {}, "source": [ - "When we pass a different `session_id`, we start a new chat history, so the model does not know what \"it\" refers to." + ":::info \n", + "\n", + "When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. 
\n", + "\n", + ":::" ] }, { "cell_type": "markdown", - "id": "a6710e65", + "id": "e5bb5c7c", "metadata": {}, "source": [ - "### Customization" + "### Dictionary input, message(s) output\n", + "\n", + "Besides just wrapping a raw model, the next step up is wrapping a prompt + LLM. This now changes the input to be a **dictionary** (because the input to a prompt is a dictionary). This adds two bits of complication.\n", + "\n", + "First: a dictionary can have multiple keys, but we only want to save ONE as input. In order to do this, we now now need to specify a key to save as the input.\n", + "\n", + "Second: once we load the messages, we need to know how to save them to the dictionary. That equates to know which key in the dictionary to save them in. Therefore, we need to specify a key to save the loaded messages in.\n", + "\n", + "Putting it all together, that ends up looking something like:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "34edd990", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "\n", + "prompt = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\n", + " \"system\",\n", + " \"You're an assistant who speaks in {language}. Respond in 20 words or fewer\",\n", + " ),\n", + " MessagesPlaceholder(variable_name=\"history\"),\n", + " (\"human\", \"{input}\"),\n", + " ]\n", + ")\n", + "\n", + "runnable = prompt | model\n", + "\n", + "runnable_with_history = RunnableWithMessageHistory(\n", + " runnable,\n", + " get_session_history,\n", + " input_messages_key=\"input\",\n", + " history_messages_key=\"history\",\n", + ")" ] }, { "cell_type": "markdown", - "id": "d29497be-3366-408d-bbb9-d4a8bf4ef37c", + "id": "c0baa075", "metadata": {}, "source": [ - "The configuration parameters by which we track message histories can be customized by passing in a list of ``ConfigurableFieldSpec`` objects to the ``history_factory_config`` parameter. Below, we use two parameters: a `user_id` and `conversation_id`." + ":::info\n", + "\n", + "Note that we've specified `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to).\n", + "\n", + ":::" ] }, { "cell_type": "code", - "execution_count": 7, - "id": "1c89daee-deff-4fdf-86a3-178f7d8ef536", + "execution_count": 16, + "id": "5877544f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content=\"Why can't a bicycle stand up on its own? It's two-tired!\", response_metadata={'id': 'msg_011qHi8pvbNkKhRb9XYRm2kc', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 20}}, id='run-5d1d5b5a-ccec-4c2c-b11a-f1953dbe85a3-0')" + "AIMessage(content='Ciao Bob! È un piacere conoscerti. 
Come stai oggi?', response_metadata={'id': 'msg_0121ADUEe4G1hMC6zbqFWofr', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 23}}, id='run-246a70df-aad6-43d6-a7e8-166d96e0d67e-0', usage_metadata={'input_tokens': 29, 'output_tokens': 23, 'total_tokens': 52})" ] }, - "execution_count": 7, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain_core.runnables import ConfigurableFieldSpec\n", - "\n", - "store = {}\n", - "\n", - "\n", - "def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:\n", - " if (user_id, conversation_id) not in store:\n", - " store[(user_id, conversation_id)] = ChatMessageHistory()\n", - " return store[(user_id, conversation_id)]\n", - "\n", - "\n", - "with_message_history = RunnableWithMessageHistory(\n", - " runnable,\n", - " get_session_history,\n", - " input_messages_key=\"input\",\n", - " history_messages_key=\"history\",\n", - " history_factory_config=[\n", - " ConfigurableFieldSpec(\n", - " id=\"user_id\",\n", - " annotation=str,\n", - " name=\"User ID\",\n", - " description=\"Unique identifier for the user.\",\n", - " default=\"\",\n", - " is_shared=True,\n", - " ),\n", - " ConfigurableFieldSpec(\n", - " id=\"conversation_id\",\n", - " annotation=str,\n", - " name=\"Conversation ID\",\n", - " description=\"Unique identifier for the conversation.\",\n", - " default=\"\",\n", - " is_shared=True,\n", - " ),\n", - " ],\n", - ")\n", - "\n", - "with_message_history.invoke(\n", - " {\"ability\": \"jokes\", \"input\": \"Tell me a joke\"},\n", - " config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n", + "runnable_with_history.invoke(\n", + " {\"language\": \"italian\", \"input\": \"hi im bob!\"},\n", + " config={\"configurable\": {\"session_id\": \"2\"}},\n", ")" ] }, { "cell_type": "code", - "execution_count": 8, - "id": "4f282883", + "execution_count": 17, + "id": "8605c2b1", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='The joke was about a bicycle not being able to stand up on its own because it\\'s \"two-tired\" (too tired).', response_metadata={'id': 'msg_01LbrkfidZgseBMxxRjQXJQH', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 59, 'output_tokens': 30}}, id='run-8b2ca810-77d7-44b8-b27b-677e0062b19a-0')" + "AIMessage(content='Bob, il tuo nome è Bob.', response_metadata={'id': 'msg_01EDUZG6nRLGeti9KhFN5cek', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 60, 'output_tokens': 12}}, id='run-294b4a72-81bc-4c43-b199-3aafdff87cb3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 12, 'total_tokens': 72})" ] }, - "execution_count": 8, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# remembers\n", - "with_message_history.invoke(\n", - " {\"ability\": \"jokes\", \"input\": \"What was the joke about?\"},\n", - " config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n", + "runnable_with_history.invoke(\n", + " {\"language\": \"italian\", \"input\": \"whats my name?\"},\n", + " config={\"configurable\": {\"session_id\": \"2\"}},\n", ")" ] }, + { + "cell_type": "markdown", + "id": "3ab7c09f", + "metadata": {}, + "source": [ + ":::info\n", + "\n", + "Note that in this case the context is preserved via the chat history for the provided `session_id`, so 
the model knows the user's name.\n",
    "\n",
    ":::\n",
    "\n",
    "We can now try this with a new session id and see that it does not remember."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "c7ddad6b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='Mi dispiace, non so il tuo nome. Come posso aiutarti?', response_metadata={'id': 'msg_01Lyd9FAGQJTxxAZoFi3sQpQ', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 23}}, id='run-19a82197-3b1c-4b5f-a68d-f91f4a2ba523-0', usage_metadata={'input_tokens': 30, 'output_tokens': 23, 'total_tokens': 53})"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "runnable_with_history.invoke(\n",
    "    {\"language\": \"italian\", \"input\": \"whats my name?\"},\n",
    "    config={\"configurable\": {\"session_id\": \"2a\"}},\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a05e6c12",
   "metadata": {},
   "source": [
    ":::info \n",
    "\n",
    "When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. \n",
    "\n",
    ":::"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "717440a9",
   "metadata": {},
   "source": [
    "### Messages input, dict output\n",
    "\n",
    "This format is useful when you are using a model to generate one key in a dictionary."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "80b8efb0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'output_message': AIMessage(content='Simone de Beauvoir was a prominent French existentialist philosopher who had some key beliefs about free will:\\n\\n1. Radical Freedom: De Beauvoir believed that humans have radical freedom - the ability to choose and define themselves through their actions. She rejected determinism and believed that we are not simply products of our biology, upbringing, or social circumstances.\\n\\n2. Ambiguity of the Human Condition: However, de Beauvoir also recognized the ambiguity of the human condition. 
While we have radical freedom, we are also situated beings constrained by our facticity (our given circumstances and limitations). This creates a tension and anguish in the human experience.\\n\\n3. Responsibility and Bad Faith: With radical freedom comes great responsibility. De Beauvoir criticized \"bad faith\" - the denial or avoidance of this responsibility by making excuses or pretending we lack free will. She believed we must courageously embrace our freedom and the burdens it entails.\\n\\n4. Ethical Engagement: For de Beauvoir, freedom is not just an abstract philosophical concept, but something that must be exercised through ethical engagement with the world and others. Our choices and actions have moral implications that we must grapple with.\\n\\nOverall, de Beauvoir\\'s perspective on free will was grounded in existentialist principles - the belief that we are fundamentally free, yet this freedom is fraught with difficulty and responsibility. Her views emphasized the centrality of human agency and the ethical dimensions of our choices.', response_metadata={'id': 'msg_01QFXHx74GSzcMWnWc8YxYSJ', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 20, 'output_tokens': 324}}, id='run-752513bc-2b4f-4cad-87f0-b96fee6ebe43-0')}" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from langchain_core.messages import HumanMessage\n", "from langchain_core.runnables import RunnableParallel\n", @@ -439,317 +484,387 @@ "chain = RunnableParallel({\"output_message\": model})\n", "\n", "\n", - "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n", - " if session_id not in store:\n", - " store[session_id] = ChatMessageHistory()\n", - " return store[session_id]\n", - "\n", - "\n", - "with_message_history = RunnableWithMessageHistory(\n", + "runnable_with_history = RunnableWithMessageHistory(\n", " chain,\n", " get_session_history,\n", " output_messages_key=\"output_message\",\n", - ")\n", - "\n", - "with_message_history.invoke(\n", - " [HumanMessage(content=\"What did Simone de Beauvoir believe about free will\")],\n", - " config={\"configurable\": {\"session_id\": \"baz\"}},\n", ")" ] }, + { + "cell_type": "markdown", + "id": "9040c535", + "metadata": {}, + "source": [ + ":::info\n", + "\n", + "Note that we've specified `output_messages_key` (the key to be treated as the output to save).\n", + "\n", + ":::" + ] + }, { "cell_type": "code", - "execution_count": 11, - "id": "efb57ef5-91f9-426b-84b9-b77f071a9dd7", + "execution_count": 21, + "id": "8b26a209", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'output_message': AIMessage(content='Simone de Beauvoir\\'s views on free will were quite similar to those of her long-time partner and fellow existentialist philosopher, Jean-Paul Sartre. There are some key parallels and differences:\\n\\nSimilarities:\\n\\n1. Radical Freedom: Both de Beauvoir and Sartre believed that humans have radical, unconditioned freedom to choose and define themselves.\\n\\n2. Rejection of Determinism: They both rejected deterministic views that see humans as products of their circumstances or nature.\\n\\n3. Emphasis on Responsibility: They agreed that with radical freedom comes great responsibility for one\\'s choices and actions.\\n\\n4. 
Critique of \"Bad Faith\": Both philosophers criticized the tendency of people to deny or avoid their freedom through self-deception and making excuses.\\n\\nDifferences:\\n\\n1. Gendered Perspectives: While Sartre developed a more gender-neutral existentialist philosophy, de Beauvoir brought a distinctly feminist lens, exploring the unique challenges and experiences of women\\'s freedom.\\n\\n2. Ethical Engagement: De Beauvoir placed more emphasis on the importance of ethical engagement with the world and others, whereas Sartre\\'s focus was more individualistic.\\n\\n3. Ambiguity of the Human Condition: De Beauvoir was more attuned to the ambiguity and tensions inherent in the human condition, whereas Sartre\\'s views were sometimes seen as more absolutist.\\n\\n4. Influence of Phenomenology: De Beauvoir was more influenced by the phenomenological tradition, which shaped her understanding of embodied, situated freedom.\\n\\nOverall, while Sartre and de Beauvoir shared a core existentialist framework, de Beauvoir\\'s unique feminist perspective and emphasis on ethical engagement with others distinguished her views on free will and the human condition.', response_metadata={'id': 'msg_01BEANW4VX6cUWYjkv3CanLz', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 355, 'output_tokens': 388}}, id='run-e786ab3a-1a42-45f3-94a3-f0c591430df3-0')}" + "{'output_message': AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. How can I help you today?\", response_metadata={'id': 'msg_01WWJSyUyGGKuBqTs3h18ZMM', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-0f50cb43-a734-447c-b535-07c615a0984c-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})}" ] }, - "execution_count": 11, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "with_message_history.invoke(\n", - " [HumanMessage(content=\"How did this compare to Sartre\")],\n", - " config={\"configurable\": {\"session_id\": \"baz\"}},\n", + "runnable_with_history.invoke(\n", + " [HumanMessage(content=\"hi - im bob!\")],\n", + " config={\"configurable\": {\"session_id\": \"3\"}},\n", ")" ] }, - { - "cell_type": "markdown", - "id": "a39eac5f-a9d8-4729-be06-5e7faf0c424d", - "metadata": {}, - "source": [ - "#### Messages input, messages output" - ] - }, { "cell_type": "code", - "execution_count": 12, - "id": "e45bcd95-e31f-4a9a-967a-78f96e8da881", + "execution_count": 22, + "id": "743edcf8", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "RunnableWithMessageHistory(bound=RunnableBinding(bound=RunnableBinding(bound=RunnableLambda(_enter_history), config={'run_name': 'load_history'})\n", - "| RunnableBinding(bound=ChatAnthropic(model='claude-3-haiku-20240307', temperature=0.0, anthropic_api_url='https://api.anthropic.com', anthropic_api_key=SecretStr('**********'), _client=, _async_client=), config_factories=[. at 0x106aeef20>]), config={'run_name': 'RunnableWithMessageHistory'}), get_session_history=, history_factory_config=[ConfigurableFieldSpec(id='session_id', annotation=, name='Session ID', description='Unique identifier for a session.', default='', is_shared=True, dependencies=None)])" + "{'output_message': AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. 
As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_01TEGrhfLXTwo36rC7svdTy4', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-178e8f3f-da21-430d-9edc-ef07797a5e2d-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})}" ] }, - "execution_count": 12, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "RunnableWithMessageHistory(\n", - " model,\n", - " get_session_history,\n", + "runnable_with_history.invoke(\n", + " [HumanMessage(content=\"whats my name?\")],\n", + " config={\"configurable\": {\"session_id\": \"3\"}},\n", ")" ] }, { "cell_type": "markdown", - "id": "04daa921-a2d1-40f9-8cd1-ae4e9a4163a7", + "id": "81efb7f1", "metadata": {}, "source": [ - "#### Dict with single key for all messages input, messages output" + ":::info\n", + "\n", + "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n", + "\n", + ":::\n", + "\n", + "We can now try this with a new session id and see that it does not remember." ] }, { "cell_type": "code", - "execution_count": 13, - "id": "27157f15-9fb0-4167-9870-f4d7f234b3cb", + "execution_count": 23, + "id": "b8b04907", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "RunnableWithMessageHistory(bound=RunnableBinding(bound=RunnableBinding(bound=RunnableAssign(mapper={\n", - " input_messages: RunnableBinding(bound=RunnableLambda(_enter_history), config={'run_name': 'load_history'})\n", - "}), config={'run_name': 'insert_history'})\n", - "| RunnableBinding(bound=RunnableLambda(itemgetter('input_messages'))\n", - " | ChatAnthropic(model='claude-3-haiku-20240307', temperature=0.0, anthropic_api_url='https://api.anthropic.com', anthropic_api_key=SecretStr('**********'), _client=, _async_client=), config_factories=[. at 0x106aef560>]), config={'run_name': 'RunnableWithMessageHistory'}), get_session_history=, input_messages_key='input_messages', history_factory_config=[ConfigurableFieldSpec(id='session_id', annotation=, name='Session ID', description='Unique identifier for a session.', default='', is_shared=True, dependencies=None)])" + "{'output_message': AIMessage(content=\"I'm afraid I don't actually know your name. 
As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_0118ZBudDXAC9P6smf91NhCX', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-deb14a3a-0336-42b4-8ace-ad1e52ca5910-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})}" ] }, - "execution_count": 13, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from operator import itemgetter\n", - "\n", - "RunnableWithMessageHistory(\n", - " itemgetter(\"input_messages\") | model,\n", - " get_session_history,\n", - " input_messages_key=\"input_messages\",\n", + "runnable_with_history.invoke(\n", + " [HumanMessage(content=\"whats my name?\")],\n", + " config={\"configurable\": {\"session_id\": \"3a\"}},\n", ")" ] }, { "cell_type": "markdown", - "id": "418ca7af-9ed9-478c-8bca-cba0de2ca61e", + "id": "6716a068", "metadata": {}, "source": [ - "## Persistent storage" + ":::info \n", + "\n", + "When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. \n", + "\n", + ":::" ] }, { "cell_type": "markdown", - "id": "76799a13-d99a-4c4f-91f2-db699e40b8df", + "id": "ec4187d0", "metadata": {}, "source": [ - "In many cases it is preferable to persist conversation histories. `RunnableWithMessageHistory` is agnostic as to how the `get_session_history` callable retrieves its chat message histories. See [here](https://github.com/langchain-ai/langserve/blob/main/examples/chat_with_persistence_and_user/server.py) for an example using a local filesystem. Below we demonstrate how one could use Redis. Check out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using other providers." + "### Dict with single key for all messages input, messages output\n", + "\n", + "This is a specific case of \"Dictionary input, message(s) output\". In this situation, because there is only a single key we don't need to specify as much - we only need to specify the `input_messages_key`." + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "7530c4ed", + "metadata": {}, + "outputs": [], + "source": [ + "from operator import itemgetter\n", + "\n", + "runnable_with_history = RunnableWithMessageHistory(\n", + " itemgetter(\"input_messages\") | model,\n", + " get_session_history,\n", + " input_messages_key=\"input_messages\",\n", + ")" ] }, { "cell_type": "markdown", - "id": "6bca45e5-35d9-4603-9ca9-6ac0ce0e35cd", + "id": "def75152", "metadata": {}, "source": [ - "### Setup\n", + ":::info\n", "\n", - "We'll need to install Redis if it's not installed already:" + "Note that we've specified `input_messages_key` (the key to be treated as the latest input message).\n", + "\n", + ":::" ] }, { "cell_type": "code", - "execution_count": 14, - "id": "477d04b3-c2b6-4ba5-962f-492c0d625cd5", + "execution_count": 25, + "id": "659bc1bf", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. 
How can I help you today?\", response_metadata={'id': 'msg_01UdD5wz1J5xwoz5D94onaQC', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-91bee6eb-0814-4557-ad71-fef9b0270358-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "%pip install --upgrade --quiet redis" + "runnable_with_history.invoke(\n", + " {\"input_messages\": [HumanMessage(content=\"hi - im bob!\")]},\n", + " config={\"configurable\": {\"session_id\": \"4\"}},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "6da2835e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_012WUygxBKXcVJPeTW14LNrc', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-fcbaaa1a-8c33-4eec-b0b0-5b800a47bddd-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "runnable_with_history.invoke(\n", + " {\"input_messages\": [HumanMessage(content=\"whats my name?\")]},\n", + " config={\"configurable\": {\"session_id\": \"4\"}},\n", + ")" ] }, { "cell_type": "markdown", - "id": "6a0ec9e0-7b1c-4c6f-b570-e61d520b47c6", + "id": "d4c7a6f2", "metadata": {}, "source": [ - "Start a local Redis Stack server if we don't have an existing Redis deployment to connect to:\n", - "```bash\n", - "docker run -d -p 6379:6379 -p 8001:8001 redis/redis-stack:latest\n", - "```" + ":::info\n", + "\n", + "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n", + "\n", + ":::\n", + "\n", + "We can now try this with a new session id and see that it does not remember." ] }, { "cell_type": "code", - "execution_count": 15, - "id": "cd6a250e-17fe-4368-a39d-1fe6b2cbde68", + "execution_count": 27, + "id": "6cf6abd6", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content=\"I'm afraid I don't actually know your name. 
As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_017xW3Ki5y4UBYzCU9Mf1pgM', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-d2f372f7-3679-4a5c-9331-a55b820ec03e-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "REDIS_URL = \"redis://localhost:6379/0\"" + "runnable_with_history.invoke(\n", + " {\"input_messages\": [HumanMessage(content=\"whats my name?\")]},\n", + " config={\"configurable\": {\"session_id\": \"4a\"}},\n", + ")" ] }, { "cell_type": "markdown", - "id": "36f43b87-655c-4f64-aa7b-bd8c1955d8e5", + "id": "9839a6d1", "metadata": {}, "source": [ - "### [LangSmith](https://docs.smith.langchain.com)\n", + ":::info \n", "\n", - "LangSmith is especially useful for something like message history injection, where it can be hard to otherwise understand what the inputs are to various parts of the chain.\n", + "When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. \n", "\n", - "Note that LangSmith is not needed, but it is helpful.\n", - "If you do want to use LangSmith, after you sign up at the link above, make sure to uncoment the below and set your environment variables to start logging traces:" + ":::" ] }, { - "cell_type": "code", - "execution_count": 16, - "id": "2afc1556-8da1-4499-ba11-983b66c58b18", + "cell_type": "markdown", + "id": "a6710e65", "metadata": {}, - "outputs": [], "source": [ - "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", - "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" + "## Customization" ] }, { "cell_type": "markdown", - "id": "f9d81796-ce61-484c-89e2-6c567d5e54ef", + "id": "d29497be-3366-408d-bbb9-d4a8bf4ef37c", "metadata": {}, "source": [ - "Updating the message history implementation just requires us to define a new callable, this time returning an instance of `RedisChatMessageHistory`:" + "The configuration parameters by which we track message histories can be customized by passing in a list of ``ConfigurableFieldSpec`` objects to the ``history_factory_config`` parameter. Below, we use two parameters: a `user_id` and `conversation_id`." ] }, { "cell_type": "code", - "execution_count": 17, - "id": "ca7c64d8-e138-4ef8-9734-f82076c47d80", + "execution_count": 30, + "id": "1c89daee-deff-4fdf-86a3-178f7d8ef536", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content='Ciao Bob! È un piacere conoscerti. 
Come stai oggi?', response_metadata={'id': 'msg_016RJebCoiAgWaNcbv9wrMNW', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 23}}, id='run-40425414-8f72-47d4-bf1d-a84175d8b3f8-0', usage_metadata={'input_tokens': 29, 'output_tokens': 23, 'total_tokens': 52})" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "from langchain_community.chat_message_histories import RedisChatMessageHistory\n", + "from langchain_core.runnables import ConfigurableFieldSpec\n", "\n", "\n", - "def get_message_history(session_id: str) -> RedisChatMessageHistory:\n", - " return RedisChatMessageHistory(session_id, url=REDIS_URL)\n", + "def get_session_history(user_id: str, conversation_id: str):\n", + " return SQLChatMessageHistory(f\"{user_id}--{conversation_id}\", \"sqlite:///memory.db\")\n", "\n", "\n", "with_message_history = RunnableWithMessageHistory(\n", " runnable,\n", - " get_message_history,\n", + " get_session_history,\n", " input_messages_key=\"input\",\n", " history_messages_key=\"history\",\n", + " history_factory_config=[\n", + " ConfigurableFieldSpec(\n", + " id=\"user_id\",\n", + " annotation=str,\n", + " name=\"User ID\",\n", + " description=\"Unique identifier for the user.\",\n", + " default=\"\",\n", + " is_shared=True,\n", + " ),\n", + " ConfigurableFieldSpec(\n", + " id=\"conversation_id\",\n", + " annotation=str,\n", + " name=\"Conversation ID\",\n", + " description=\"Unique identifier for the conversation.\",\n", + " default=\"\",\n", + " is_shared=True,\n", + " ),\n", + " ],\n", + ")\n", + "\n", + "with_message_history.invoke(\n", + " {\"language\": \"italian\", \"input\": \"hi im bob!\"},\n", + " config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n", ")" ] }, - { - "cell_type": "markdown", - "id": "37eefdec-9901-4650-b64c-d3c097ed5f4d", - "metadata": {}, - "source": [ - "We can invoke as before:" - ] - }, { "cell_type": "code", - "execution_count": 18, - "id": "a85bcc22-ca4c-4ad5-9440-f94be7318f3e", + "execution_count": 32, + "id": "4f282883", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse of a right triangle.', response_metadata={'id': 'msg_01DwU2BD8KPLoXeZ6bZPqxxJ', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 164, 'output_tokens': 31}}, id='run-c2a443c4-79b1-4b07-bb42-5e9112e5bbfc-0')" + "AIMessage(content='Bob, il tuo nome è Bob.', response_metadata={'id': 'msg_01Kktiy3auFDKESY54KtTWPX', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 60, 'output_tokens': 12}}, id='run-c7768420-3f30-43f5-8834-74b1979630dd-0', usage_metadata={'input_tokens': 60, 'output_tokens': 12, 'total_tokens': 72})" ] }, - "execution_count": 18, + "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "# remembers\n", "with_message_history.invoke(\n", - " {\"ability\": \"math\", \"input\": \"What does cosine mean?\"},\n", - " config={\"configurable\": {\"session_id\": \"foobar\"}},\n", + " {\"language\": \"italian\", \"input\": \"whats my name?\"},\n", + " config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n", ")" ] }, { "cell_type": "code", - "execution_count": 19, - "id": "ab29abd3-751f-41ce-a1b0-53f6b565e79d", + 
"execution_count": 33, + "id": "fc122c18", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='The inverse of cosine is called arccosine or inverse cosine.', response_metadata={'id': 'msg_01XYH5iCUokxV1UDhUa8xzna', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 202, 'output_tokens': 19}}, id='run-97dda3a2-01e3-42e5-8241-f948e7535ffc-0')" + "AIMessage(content='Mi dispiace, non so il tuo nome. Come posso aiutarti?', response_metadata={'id': 'msg_0178FpbpPNioB7kqvyHk7rjD', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 23}}, id='run-df1f1768-aab6-4aec-8bba-e33fc9e90b8d-0', usage_metadata={'input_tokens': 30, 'output_tokens': 23, 'total_tokens': 53})" ] }, - "execution_count": 19, + "execution_count": 33, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "# New user_id --> does not remember\n", "with_message_history.invoke(\n", - " {\"ability\": \"math\", \"input\": \"What's its inverse\"},\n", - " config={\"configurable\": {\"session_id\": \"foobar\"}},\n", + " {\"language\": \"italian\", \"input\": \"whats my name?\"},\n", + " config={\"configurable\": {\"user_id\": \"456\", \"conversation_id\": \"1\"}},\n", ")" ] }, { "cell_type": "markdown", - "id": "da3d1feb-b4bb-4624-961c-7db2e1180df7", - "metadata": {}, - "source": [ - ":::tip\n", - "\n", - "[Langsmith trace](https://smith.langchain.com/public/bd73e122-6ec1-48b2-82df-e6483dc9cb63/r)\n", - "\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "id": "61d5115e-64a1-4ad5-b676-8afd4ef6093e", - "metadata": {}, - "source": [ - "Looking at the Langsmith trace for the second call, we can see that when constructing the prompt, a \"history\" variable has been injected which is a list of two messages (our first input and first output)." - ] - }, - { - "cell_type": "markdown", - "id": "fd510b68", + "id": "3ce37565", "metadata": {}, "source": [ - "## Next steps\n", - "\n", - "You have now learned one way to manage message history for a runnable.\n", - "\n", - "To learn more, see the other how-to guides on runnables in this section." + "Note that in this case the context was preserved for the same `user_id`, but once we changed it, the new chat history was started, even though the `conversation_id` was the same." 
   ]
  }
 ],
@@ -769,7 +884,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.12.3"
+   "version": "3.10.1"
  }
 },
 "nbformat": 4,
diff --git a/docs/static/img/message_history.png b/docs/static/img/message_history.png
new file mode 100644
index 0000000000000000000000000000000000000000..31f7664d286bf04d75ac72799e00ffcec7767254
Binary files /dev/null and b/docs/static/img/message_history.png differ
zw|Sg*A_v8InqXLMz_jo&$(A_ub11gf>Rqdk!`D0ZL6Pqt$RA#OkLLVfq}_#!0gD+))fkZih=5DNp}LgKi3VYfr4a!y>g2r z<1)tYP8CIirz|%WF8Dtw%!wReA^F~@j?IPm-0$PzT6<*4(iti0mmqY1%@$RqIa%K; zQfT1^C&&WO9Iue_hG9i1cdcBPdb2ehStgab&EbfAr`lbAQKej#a<|R;PI$#1dzHqT zZnY8bAf(-(GD!fnnDa4{|ML3JSE>y->}|uOkoeCy^V8Gcql8Q%@Zs%>nEs!o{~->N z>Hzp6qAKDE-!GQ_3`o)F+fXD+rb6%0o?r0)k*EGqGU9tr#%N`eKT-a#%KvNdzajk( nG`~XnPlo(Qi~o}rAHRWY>T6preCR6M0X#Sp3*%}dk1PKJEctW{ literal 0 HcmV?d00001 diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index 3ff6df09d7baa..fdc40e55835ed 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -372,6 +372,7 @@ def _get_input_messages( ) -> List[BaseMessage]: from langchain_core.messages import BaseMessage + # If dictionary, try to pluck the single key representing messages if isinstance(input_val, dict): if self.input_messages_key: key = self.input_messages_key @@ -381,13 +382,25 @@ def _get_input_messages( key = "input" input_val = input_val[key] + # If value is a string, convert to a human message if isinstance(input_val, str): from langchain_core.messages import HumanMessage return [HumanMessage(content=input_val)] + # If value is a single message, convert to a list elif isinstance(input_val, BaseMessage): return [input_val] + # If value is a list or tuple... elif isinstance(input_val, (list, tuple)): + # Handle empty case + if len(input_val) == 0: + return list(input_val) + # If is a list of list, then return the first value + # This occurs for chat models - since we batch inputs + if isinstance(input_val[0], list): + if len(input_val) != 1: + raise ValueError() + return input_val[0] return list(input_val) else: raise ValueError( @@ -400,6 +413,7 @@ def _get_output_messages( ) -> List[BaseMessage]: from langchain_core.messages import BaseMessage + # If dictionary, try to pluck the single key representing messages if isinstance(output_val, dict): if self.output_messages_key: key = self.output_messages_key @@ -418,6 +432,7 @@ def _get_output_messages( from langchain_core.messages import AIMessage return [AIMessage(content=output_val)] + # If value is a single message, convert to a list elif isinstance(output_val, BaseMessage): return [output_val] elif isinstance(output_val, (list, tuple)): @@ -431,7 +446,10 @@ def _enter_history(self, input: Any, config: RunnableConfig) -> List[BaseMessage if not self.history_messages_key: # return all messages - messages += self._get_input_messages(input) + input_val = ( + input if not self.input_messages_key else input[self.input_messages_key] + ) + messages += self._get_input_messages(input_val) return messages async def _aenter_history( @@ -454,7 +472,6 @@ def _exit_history(self, run: Run, config: RunnableConfig) -> None: # Get the input messages inputs = load(run.inputs) input_messages = self._get_input_messages(inputs) - # If historic messages were prepended to the input messages, remove them to # avoid adding duplicate messages to history. 
        if not self.history_messages_key:
diff --git a/libs/core/langchain_core/tracers/base.py b/libs/core/langchain_core/tracers/base.py
index c7373556c524e..8f553e52f2076 100644
--- a/libs/core/langchain_core/tracers/base.py
+++ b/libs/core/langchain_core/tracers/base.py
@@ -48,7 +48,9 @@ class BaseTracer(BaseCallbackHandler, ABC):
     def __init__(
         self,
         *,
-        _schema_format: Literal["original", "streaming_events"] = "original",
+        _schema_format: Literal[
+            "original", "streaming_events", "original+chat"
+        ] = "original",
         **kwargs: Any,
     ) -> None:
         """Initialize the tracer.
@@ -63,6 +65,8 @@ def __init__(
               for internal usage. It will likely change in the future, or
               be deprecated entirely in favor of a dedicated async tracer
               for streaming events.
+            - 'original+chat' is a format that is the same as 'original'
+              except it does NOT raise an attribute error on on_chat_model_start
 
         kwargs: Additional keyword arguments that will be passed to the
                 super class.
@@ -163,7 +167,7 @@ def on_chat_model_start(
         **kwargs: Any,
     ) -> Run:
         """Start a trace for an LLM run."""
-        if self._schema_format != "streaming_events":
+        if self._schema_format not in ("streaming_events", "original+chat"):
             # Please keep this un-implemented for backwards compatibility.
             # When it's unimplemented old tracers that use the "original" format
             # fallback on the on_llm_start method implementation if they
@@ -360,7 +364,7 @@ def on_chain_start(
 
     def _get_chain_inputs(self, inputs: Any) -> Any:
         """Get the inputs for a chain run."""
-        if self._schema_format == "original":
+        if self._schema_format in ("original", "original+chat"):
             return inputs if isinstance(inputs, dict) else {"input": inputs}
         elif self._schema_format == "streaming_events":
             return {
@@ -371,7 +375,7 @@ def _get_chain_inputs(self, inputs: Any) -> Any:
 
     def _get_chain_outputs(self, outputs: Any) -> Any:
         """Get the outputs for a chain run."""
-        if self._schema_format == "original":
+        if self._schema_format in ("original", "original+chat"):
             return outputs if isinstance(outputs, dict) else {"output": outputs}
         elif self._schema_format == "streaming_events":
             return {
@@ -436,7 +440,7 @@ def on_tool_start(
         if metadata:
             kwargs.update({"metadata": metadata})
 
-        if self._schema_format == "original":
+        if self._schema_format in ("original", "original+chat"):
             inputs = {"input": input_str}
         elif self._schema_format == "streaming_events":
             inputs = {"input": inputs}
diff --git a/libs/core/langchain_core/tracers/log_stream.py b/libs/core/langchain_core/tracers/log_stream.py
index 8b1f2da9f59fc..4f156755235e4 100644
--- a/libs/core/langchain_core/tracers/log_stream.py
+++ b/libs/core/langchain_core/tracers/log_stream.py
@@ -482,7 +482,7 @@ def _get_standardized_inputs(
 
 
 def _get_standardized_outputs(
-    run: Run, schema_format: Literal["original", "streaming_events"]
+    run: Run, schema_format: Literal["original", "streaming_events", "original+chat"]
 ) -> Optional[Any]:
     """Extract standardized output from a run.
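To illustrate what the new `'original+chat'` schema format permits, here is a minimal sketch of a tracer opting into it. This is illustrative only - `PrintRunTracer` and its print-based persistence are hypothetical, not part of this patch; the `RootListenersTracer` change below is the real in-tree user of the format.

```python
# Hypothetical subclass: with "original+chat", on_chat_model_start creates a
# run for chat models instead of falling back to the unimplemented path.
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run


class PrintRunTracer(BaseTracer):
    def __init__(self) -> None:
        super().__init__(_schema_format="original+chat")

    def _persist_run(self, run: Run) -> None:
        # BaseTracer is abstract; this minimal implementation just logs.
        print(f"run finished: {run.name}")
```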
diff --git a/libs/core/langchain_core/tracers/root_listeners.py b/libs/core/langchain_core/tracers/root_listeners.py index 7db7407d268f2..4b33a2fe53a3b 100644 --- a/libs/core/langchain_core/tracers/root_listeners.py +++ b/libs/core/langchain_core/tracers/root_listeners.py @@ -22,7 +22,7 @@ def __init__( on_end: Optional[Listener], on_error: Optional[Listener], ) -> None: - super().__init__() + super().__init__(_schema_format="original+chat") self.config = config self._arg_on_start = on_start diff --git a/libs/core/tests/unit_tests/fake/memory.py b/libs/core/tests/unit_tests/fake/memory.py index 43dd53dc5eba4..d8431fa85994a 100644 --- a/libs/core/tests/unit_tests/fake/memory.py +++ b/libs/core/tests/unit_tests/fake/memory.py @@ -17,6 +17,8 @@ class ChatMessageHistory(BaseChatMessageHistory, BaseModel): def add_message(self, message: BaseMessage) -> None: """Add a self-created message to the store""" + if not isinstance(message, BaseMessage): + raise ValueError self.messages.append(message) def clear(self) -> None: From 2b9f1469d8888e336ce524f5d84327d0fc0a5c35 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Thu, 30 May 2024 11:35:09 -0700 Subject: [PATCH 11/54] core[patch]: Release 0.2.3 (#22329) --- libs/core/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index 31292512d1809..5cd1cbc8f4c29 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-core" -version = "0.2.2" +version = "0.2.3" description = "Building applications with LLMs through composability" authors = [] license = "MIT" From 0c9a034ed77b4a6b1690ea51d221ef785380804d Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Thu, 30 May 2024 12:33:32 -0700 Subject: [PATCH 12/54] add simpler agent tutorial (#22249) 1/ added section at start with full code 2/ removed retriever tool (was just distracting) 3/ added section on starting a new conversation --------- Co-authored-by: Erick Friis --- docs/docs/tutorials/agents.ipynb | 367 ++++++++++++++----------------- 1 file changed, 164 insertions(+), 203 deletions(-) diff --git a/docs/docs/tutorials/agents.ipynb b/docs/docs/tutorials/agents.ipynb index 331dd4fa08763..9c882cbab9d53 100644 --- a/docs/docs/tutorials/agents.ipynb +++ b/docs/docs/tutorials/agents.ipynb @@ -12,57 +12,106 @@ }, { "cell_type": "markdown", - "id": "f4c03f40-1328-412d-8a48-1db0cd481b77", + "id": "1df78a71", "metadata": {}, "source": [ "# Build an Agent\n", "\n", "By themselves, language models can't take actions - they just output text.\n", "A big use case for LangChain is creating **agents**.\n", - "Agents are systems that use an LLM as a reasoning enginer to determine which actions to take and what the inputs to those actions should be.\n", - "The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish.\n", + "Agents are systems that use LLMs as reasoning engines to determine which actions to take and the inputs to pass them.\n", + "After executing actions, the results can be fed back into the LLM to determine whether more actions are needed, or whether it is okay to finish.\n", "\n", - "In this tutorial we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. 
You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n",
    "In this tutorial we will build an agent that can interact with a search engine. You will be able to ask this agent questions, watch it call the search tool, and have conversations with it.\n",
    "\n",
    "\n",
    "## Concepts\n",
    "\n",
    "In following this tutorial, you will learn how to:\n",
    "\n",
    "- Use [language models](/docs/concepts/#chat-models), in particular their tool calling ability\n",
    "- Use a Search [Tool](/docs/concepts/#tools) to look up information from the Internet\n",
    "- Compose a [LangGraph Agent](/docs/concepts/#agents), which uses an LLM to determine actions and then execute them\n",
    "- Debug and trace your application using [LangSmith](/docs/concepts/#langsmith)\n",
    "\n",
    "## End-to-end agent\n",
    "\n",
    "The code snippet below represents a fully functional agent that uses an LLM to decide which tools to use. It is equipped with a generic search tool. It has conversational memory - meaning that it can be used as a multi-turn chatbot.\n",
    "\n",
    "In the rest of the guide, we will walk through the individual components and what each part does - but if you want to just grab some code and get started, feel free to use this!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a79bb782",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import relevant functionality\n",
    "from langchain_anthropic import ChatAnthropic\n",
    "from langchain_community.tools.tavily_search import TavilySearchResults\n",
    "from langchain_core.messages import HumanMessage\n",
    "from langgraph.checkpoint.sqlite import SqliteSaver\n",
    "from langgraph.prebuilt import chat_agent_executor\n",
    "\n",
    "# Create the agent\n",
    "memory = SqliteSaver.from_conn_string(\":memory:\")\n",
    "model = ChatAnthropic(model_name=\"claude-3-sonnet-20240229\")\n",
    "search = TavilySearchResults(max_results=2)\n",
    "tools = [search]\n",
    "agent_executor = chat_agent_executor.create_tool_calling_executor(\n",
    "    model, tools, checkpointer=memory\n",
    ")\n",
    "\n",
    "# Use the agent\n",
    "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
    "for chunk in agent_executor.stream(\n",
    "    {\"messages\": [HumanMessage(content=\"hi im bob! 
and i live in sf\")]}, config\n",
+    "):\n",
+    "    print(chunk)\n",
+    "    print(\"----\")\n",
+    "\n",
+    "for chunk in agent_executor.stream(\n",
+    "    {\"messages\": [HumanMessage(content=\"whats the weather where I live?\")]}, config\n",
+    "):\n",
+    "    print(chunk)\n",
+    "    print(\"----\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f4c03f40-1328-412d-8a48-1db0cd481b77",
+   "metadata": {},
+   "source": [
+    "## Setup\n",
+    "\n",
+    "### Jupyter Notebook\n",
+    "\n",
+    "This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is using one as well. Jupyter notebooks are perfect interactive environments for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc), and observing these cases is a great way to better understand building with LLMs.\n",
+    "\n",
+    "This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n",
+    "\n",
+    "### Installation\n",
+    "\n",
+    "To install LangChain run:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "60bb3eb1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -U langchain-community langgraph langchain-anthropic"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2ee337ae",
+   "metadata": {},
+   "source": [
    "For more details, see our [Installation guide](/docs/how_to/installation).\n",
    "\n",
    "### LangSmith\n",
@@ -86,7 +135,25 @@
    "\n",
    "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
    "os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n",
-    "```\n"
+    "```\n",
+    "\n",
+    "### Tavily\n",
+    "\n",
+    "We will be using [Tavily](/docs/integrations/tools/tavily_search) (a search engine) as a tool.\n",
+    "In order to use it, you will need to get and set an API key:\n",
+    "\n",
+    "```bash\n",
+    "export TAVILY_API_KEY=\"...\"\n",
+    "```\n",
+    "\n",
+    "Or, if in a notebook, you can set it with:\n",
+    "\n",
+    "```python\n",
+    "import getpass\n",
+    "import os\n",
+    "\n",
+    "os.environ[\"TAVILY_API_KEY\"] = getpass.getpass()\n",
+    "```"
   ]
  },
 {
@@ -96,23 +163,12 @@
   "source": [
    "## Define tools\n",
    "\n",
-    "We first need to create the tools we want to use. We will use two tools: [Tavily](/docs/integrations/tools/tavily_search) (to search online) and then a retriever over a local index we will create\n",
-    "\n",
-    "### [Tavily](/docs/integrations/tools/tavily_search)\n",
-    "\n",
-    "We have a built-in tool in LangChain to easily use Tavily search engine as tool.\n",
-    "Note that this requires an API key - they have a free tier, but if you don't have one or don't want to create one, you can always ignore this step.\n",
-    "\n",
-    "Once you create your API key, you will need to export that as:\n",
-    "\n",
-    "```bash\n",
-    "export TAVILY_API_KEY=\"...\"\n",
-    "```"
+    "We first need to create the tools we want to use. Our main tool of choice will be [Tavily](/docs/integrations/tools/tavily_search) - a search engine. 
We have a built-in tool in LangChain to easily use the Tavily search engine as a tool.\n"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
   "id": "482ce13d",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -24,22 +28,15 @@
 },
 {
  "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 2,
   "id": "9cc86c0b",
   "metadata": {},
   "outputs": [],
@@ -71,19 +68,19 @@
 },
 {
  "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
-       "[Document(page_content='You like apples', metadata={'source': 2}),\n",
-       " Document(page_content='I like apples', metadata={'source': 1}),\n",
-       " Document(page_content='You like oranges', metadata={'source': 2}),\n",
-       " Document(page_content='Apples and oranges are fruits', metadata={'source': 1})]"
+       "[Document(page_content='I like apples', metadata={'source': 1}),\n",
+       " Document(page_content='You like apples', metadata={'source': 2}),\n",
+       " Document(page_content='Apples and oranges are fruits', metadata={'source': 1}),\n",
+       " Document(page_content='You like oranges', metadata={'source': 2})]"
      ]
     },
-     "execution_count": 15,
+     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "9c9ce713", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import OpenAIEmbeddings\n", - "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", - "\n", - "loader = WebBaseLoader(\"https://docs.smith.langchain.com/overview\")\n", - "docs = loader.load()\n", - "documents = RecursiveCharacterTextSplitter(\n", - " chunk_size=1000, chunk_overlap=200\n", - ").split_documents(docs)\n", - "vector = FAISS.from_documents(documents, OpenAIEmbeddings())\n", - "retriever = vector.as_retriever()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "dae53ec6", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Document(page_content='import Clientfrom langsmith.evaluation import evaluateclient = Client()# Define dataset: these are your test casesdataset_name = \"Sample Dataset\"dataset = client.create_dataset(dataset_name, description=\"A sample dataset in LangSmith.\")client.create_examples( inputs=[ {\"postfix\": \"to LangSmith\"}, {\"postfix\": \"to Evaluations in LangSmith\"}, ], outputs=[ {\"output\": \"Welcome to LangSmith\"}, {\"output\": \"Welcome to Evaluations in LangSmith\"}, ], dataset_id=dataset.id,)# Define your evaluatordef exact_match(run, example): return {\"score\": run.outputs[\"output\"] == example.outputs[\"output\"]}experiment_results = evaluate( lambda input: \"Welcome \" + input[\\'postfix\\'], # Your AI system goes here data=dataset_name, # The data to predict and grade over evaluators=[exact_match], # The evaluators to score the results experiment_prefix=\"sample-experiment\", # The name of the experiment metadata={ \"version\": \"1.0.0\", \"revision_id\":', metadata={'source': 'https://docs.smith.langchain.com/overview', 'title': 'Getting started with LangSmith | 🦜️🛠️ LangSmith', 'description': 'Introduction', 'language': 'en'})" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "retriever.invoke(\"how to upload a dataset\")[0]" - ] - }, - { - "cell_type": "markdown", - "id": "04aeca39", - "metadata": {}, - "source": [ - "Now that we have populated our index that we will do doing retrieval over, we can easily turn it into a tool (the format needed for an agent to properly use it)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "117594b5", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.tools.retriever import create_retriever_tool" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "7280b031", - "metadata": {}, - "outputs": [], - "source": [ - "retriever_tool = create_retriever_tool(\n", - " retriever,\n", - " \"langsmith_search\",\n", - " \"Search for information about LangSmith. For any questions about LangSmith, you must use this tool!\",\n", - ")" - ] - }, { "cell_type": "markdown", "id": "c3b47c1d", "metadata": {}, "source": [ - "### Tools\n", - "\n", - "Now that we have created both, we can create a list of tools that we will use downstream." + "If we want, we can create other tools. Once we have all the tools we want, we can put them in a list that we will reference later." 
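For instance, a custom tool can be defined in a few lines. Below is a minimal sketch using the `@tool` decorator from `langchain_core.tools`; the function name and logic are illustrative, not part of this tutorial's tool list.

```python
from langchain_core.tools import tool


@tool
def get_word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)


# The decorated function is now a tool that could be appended to `tools`.
print(get_word_length.invoke({"word": "langchain"}))  # 9
```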
] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 4, "id": "b8e8e710", "metadata": {}, "outputs": [], "source": [ - "tools = [search, retriever_tool]" + "tools = [search]" ] }, { @@ -276,7 +246,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 5, "id": "69185491", "metadata": {}, "outputs": [], @@ -284,9 +254,9 @@ "# | output: false\n", "# | echo: false\n", "\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_anthropic import ChatAnthropic\n", "\n", - "model = ChatOpenAI(model=\"gpt-4\")" + "model = ChatAnthropic(model=\"claude-3-sonnet-20240229\")" ] }, { @@ -299,7 +269,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 6, "id": "c96c960b", "metadata": {}, "outputs": [ @@ -309,7 +279,7 @@ "'Hello! How can I assist you today?'" ] }, - "execution_count": 11, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -331,7 +301,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 7, "id": "ba692a74", "metadata": {}, "outputs": [], @@ -349,7 +319,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 8, "id": "b6a7e925", "metadata": {}, "outputs": [ @@ -379,7 +349,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 9, "id": "688b465d", "metadata": {}, "outputs": [ @@ -388,7 +358,7 @@ "output_type": "stream", "text": [ "ContentString: \n", - "ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in SF'}, 'id': 'call_nfE1XbCqZ8eJsB8rNdn4MQZQ'}]\n" + "ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in San Francisco'}, 'id': 'call_BjPOvStlyv61w24VkHQ4pqFG'}]\n" ] } ], @@ -432,7 +402,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 10, "id": "89cf72b4-6046-4b47-8f27-5522d8cb8036", "metadata": {}, "outputs": [], @@ -456,18 +426,18 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 11, "id": "114ba50d", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[HumanMessage(content='hi!', id='1535b889-10a5-45d0-a1e1-dd2e60d4bc04'),\n", - " AIMessage(content='Hello! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 129, 'total_tokens': 139}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-2c94c074-bdc9-4f01-8fd7-71cfc4777d55-0')]" + "[HumanMessage(content='hi!', id='acd18479-7e70-4114-a293-c5233736c1d5'),\n", + " AIMessage(content='Hello! 
How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 83, 'total_tokens': 93}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-ebfca269-5cb2-47c1-8987-a24acf0b5015-0', usage_metadata={'input_tokens': 83, 'output_tokens': 10, 'total_tokens': 93})]" ] }, - "execution_count": 16, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -485,66 +455,25 @@ "source": [ "In order to see exactly what is happening under the hood (and to make sure it's not calling a tool) we can take a look at the [LangSmith trace](https://smith.langchain.com/public/28311faa-e135-4d6a-ab6b-caecf6482aaa/r)\n", "\n", - "Let's now try it out on an example where it should be invoking the retriever" + "Let's now try it out on an example where it should be invoking the tool" ] }, { "cell_type": "code", - "execution_count": 17, - "id": "3fa4780a", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "text/plain": [ - "[HumanMessage(content='how can langsmith help with testing?', id='04f4fe8f-391a-427c-88af-1fa064db304c'),\n", - " AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_FNIgdO97wo51sKx3XZOGLHqT', 'function': {'arguments': '{\\n \"query\": \"how can LangSmith help with testing\"\\n}', 'name': 'langsmith_search'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 22, 'prompt_tokens': 135, 'total_tokens': 157}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-51f6ea92-84e1-43a5-b1f2-bc0c12d8613f-0', tool_calls=[{'name': 'langsmith_search', 'args': {'query': 'how can LangSmith help with testing'}, 'id': 'call_FNIgdO97wo51sKx3XZOGLHqT'}]),\n", - " ToolMessage(content=\"Getting started with LangSmith | 🦜️🛠️ LangSmith\\n\\nSkip to main contentLangSmith API DocsSearchGo to AppQuick StartUser GuideTracingEvaluationProduction Monitoring & AutomationsPrompt HubProxyPricingSelf-HostingCookbookQuick StartOn this pageGetting started with LangSmithIntroduction\\u200bLangSmith is a platform for building production-grade LLM applications. It allows you to closely monitor and evaluate your application, so you can ship quickly and with confidence. Use of LangChain is not necessary - LangSmith works on its own!Install LangSmith\\u200bWe offer Python and Typescript SDKs for all your LangSmith needs.PythonTypeScriptpip install -U langsmithyarn add langchain langsmithCreate an API key\\u200bTo create an API key head to the setting pages. 
Then click Create API Key.Setup your environment\\u200bShellexport LANGCHAIN_TRACING_V2=trueexport LANGCHAIN_API_KEY=# The below examples use the OpenAI API, though it's not necessary in generalexport OPENAI_API_KEY=Log your first trace\\u200bWe provide multiple ways to log traces\\n\\nLearn about the workflows LangSmith supports at each stage of the LLM application lifecycle.Pricing: Learn about the pricing model for LangSmith.Self-Hosting: Learn about self-hosting options for LangSmith.Proxy: Learn about the proxy capabilities of LangSmith.Tracing: Learn about the tracing capabilities of LangSmith.Evaluation: Learn about the evaluation capabilities of LangSmith.Prompt Hub Learn about the Prompt Hub, a prompt management tool built into LangSmith.Additional Resources\\u200bLangSmith Cookbook: A collection of tutorials and end-to-end walkthroughs using LangSmith.LangChain Python: Docs for the Python LangChain library.LangChain Python API Reference: documentation to review the core APIs of LangChain.LangChain JS: Docs for the TypeScript LangChain libraryDiscord: Join us on our Discord to discuss all things LangChain!FAQ\\u200bHow do I migrate projects between organizations?\\u200bCurrently we do not support project migration betwen organizations. While you can manually imitate this by\\n\\nteam deals with sensitive data that cannot be logged. How can I ensure that only my team can access it?\\u200bIf you are interested in a private deployment of LangSmith or if you need to self-host, please reach out to us at sales@langchain.dev. Self-hosting LangSmith requires an annual enterprise license that also comes with support and formalized access to the LangChain team.Was this page helpful?NextUser GuideIntroductionInstall LangSmithCreate an API keySetup your environmentLog your first traceCreate your first evaluationNext StepsAdditional ResourcesFAQHow do I migrate projects between organizations?Why aren't my runs aren't showing up in my project?My team deals with sensitive data that cannot be logged. How can I ensure that only my team can access it?CommunityDiscordTwitterGitHubDocs CodeLangSmith SDKPythonJS/TSMoreHomepageBlogLangChain Python DocsLangChain JS/TS DocsCopyright © 2024 LangChain, Inc.\", name='langsmith_search', id='f286c7e7-6514-4621-ac60-e4079b37ebe2', tool_call_id='call_FNIgdO97wo51sKx3XZOGLHqT'),\n", - " AIMessage(content=\"LangSmith is a platform that can significantly aid in testing by offering several features:\\n\\n1. **Tracing**: LangSmith provides robust tracing capabilities that enable you to monitor your application closely. This feature is particularly useful for tracking the behavior of your application and identifying any potential issues.\\n\\n2. **Evaluation**: LangSmith allows you to perform comprehensive evaluations of your application. This can help you assess the performance of your application under various conditions and make necessary adjustments to enhance its functionality.\\n\\n3. **Production Monitoring & Automations**: With LangSmith, you can keep a close eye on your application when it's in active use. The platform provides tools for automatic monitoring and managing routine tasks, helping to ensure your application runs smoothly.\\n\\n4. **Prompt Hub**: It's a prompt management tool built into LangSmith. 
This feature can be instrumental when testing various prompts in your application.\\n\\nOverall, LangSmith helps you build production-grade LLM applications with confidence, providing necessary tools for monitoring, evaluation, and automation.\", response_metadata={'token_usage': {'completion_tokens': 200, 'prompt_tokens': 782, 'total_tokens': 982}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-4b80db7e-9a26-4043-8b6b-922f847f9c80-0')]" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "response = agent_executor.invoke(\n", - " {\"messages\": [HumanMessage(content=\"how can langsmith help with testing?\")]}\n", - ")\n", - "response[\"messages\"]" - ] - }, - { - "cell_type": "markdown", - "id": "f2d94242", - "metadata": {}, - "source": [ - "Let's take a look at the [LangSmith trace](https://smith.langchain.com/public/853f62d0-3421-4dba-b30a-7277ce2bdcdf/r) to see what is going on under the hood.\n", - "\n", - "Note that the state we get back at the end also contains the tool call and the tool response message.\n", - "\n", - "Now let's try one where it needs to call the search tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 18, + "execution_count": 12, "id": "77c2f769", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[HumanMessage(content='whats the weather in sf?', id='e6b716e6-da57-41de-a227-fee281fda588'),\n", - " AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_TGDKm0saxuGKJD5OYOXWRvLe', 'function': {'arguments': '{\\n \"query\": \"current weather in San Francisco\"\\n}', 'name': 'tavily_search_results_json'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 134, 'total_tokens': 157}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-fd7d5854-2eab-4fca-ad9e-b3de8d587614-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in San Francisco'}, 'id': 'call_TGDKm0saxuGKJD5OYOXWRvLe'}]),\n", - " ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1714426800, \\'localtime\\': \\'2024-04-29 14:40\\'}, \\'current\\': {\\'last_updated_epoch\\': 1714426200, \\'last_updated\\': \\'2024-04-29 14:30\\', \\'temp_c\\': 17.8, \\'temp_f\\': 64.0, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Sunny\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/113.png\\', \\'code\\': 1000}, \\'wind_mph\\': 23.0, \\'wind_kph\\': 37.1, \\'wind_degree\\': 290, \\'wind_dir\\': \\'WNW\\', \\'pressure_mb\\': 1019.0, \\'pressure_in\\': 30.09, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 50, \\'cloud\\': 0, \\'feelslike_c\\': 17.8, \\'feelslike_f\\': 64.0, \\'vis_km\\': 16.0, \\'vis_miles\\': 9.0, \\'uv\\': 5.0, \\'gust_mph\\': 27.5, \\'gust_kph\\': 44.3}}\"}, {\"url\": \"https://www.wunderground.com/hourly/us/ca/san-francisco/94125/date/2024-4-29\", \"content\": \"Current Weather for Popular Cities . 
San Francisco, CA warning 59 \\\\u00b0 F Mostly Cloudy; Manhattan, NY 56 \\\\u00b0 F Fair; Schiller Park, IL (60176) warning 58 \\\\u00b0 F Mostly Cloudy; Boston, MA 52 \\\\u00b0 F Sunny ...\"}]', name='tavily_search_results_json', id='aa0d8c3d-23b5-425a-ad05-3c174fc04892', tool_call_id='call_TGDKm0saxuGKJD5OYOXWRvLe'),\n", - " AIMessage(content='The current weather in San Francisco, California is sunny with a temperature of 64.0°F (17.8°C). The wind is coming from the WNW at a speed of 23.0 mph. The humidity level is at 50%. There is no precipitation and the cloud cover is 0%. The visibility is 16.0 km. The UV index is 5.0. Please note that this information is as of 14:30 on April 29, 2024, according to [Weather API](https://www.weatherapi.com/).', response_metadata={'token_usage': {'completion_tokens': 117, 'prompt_tokens': 620, 'total_tokens': 737}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-2359b41b-cab6-40c3-b6d9-7bdf7195a601-0')]" + "[HumanMessage(content='whats the weather in sf?', id='880db162-5d1c-476c-82dd-b125caee1656'),\n", + " AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_i3ZKnTDPB1RxqwE6PWmgz5TQ', 'function': {'arguments': '{\\n \"query\": \"current weather in San Francisco\"\\n}', 'name': 'tavily_search_results_json'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 88, 'total_tokens': 111}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-93b6be79-c981-4b7b-8f0a-252255f23961-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in San Francisco'}, 'id': 'call_i3ZKnTDPB1RxqwE6PWmgz5TQ'}], usage_metadata={'input_tokens': 88, 'output_tokens': 23, 'total_tokens': 111}),\n", + " ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1716929532, \\'localtime\\': \\'2024-05-28 13:52\\'}, \\'current\\': {\\'last_updated_epoch\\': 1716929100, \\'last_updated\\': \\'2024-05-28 13:45\\', \\'temp_c\\': 16.7, \\'temp_f\\': 62.1, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Partly cloudy\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/116.png\\', \\'code\\': 1003}, \\'wind_mph\\': 12.5, \\'wind_kph\\': 20.2, \\'wind_degree\\': 270, \\'wind_dir\\': \\'W\\', \\'pressure_mb\\': 1019.0, \\'pressure_in\\': 30.09, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 62, \\'cloud\\': 25, \\'feelslike_c\\': 16.7, \\'feelslike_f\\': 62.1, \\'windchill_c\\': 13.1, \\'windchill_f\\': 55.6, \\'heatindex_c\\': 14.5, \\'heatindex_f\\': 58.2, \\'dewpoint_c\\': 9.1, \\'dewpoint_f\\': 48.4, \\'vis_km\\': 16.0, \\'vis_miles\\': 9.0, \\'uv\\': 4.0, \\'gust_mph\\': 14.4, \\'gust_kph\\': 23.2}}\"}, {\"url\": \"https://forecast.weather.gov/MapClick.php?lat=37.7772&lon=-122.4168\", \"content\": \"Current conditions at SAN FRANCISCO DOWNTOWN (SFOC1) Lat: 37.77056\\\\u00b0NLon: 122.42694\\\\u00b0WElev: 150.0ft. NA. 52\\\\u00b0F. 11\\\\u00b0C. Humidity: 90%: ... 2am PDT May 28, 2024-6pm PDT Jun 3, 2024 . ... Radar & Satellite Image. Hourly Weather Forecast. National Digital Forecast Database. High Temperature. Chance of Precipitation. 
ACTIVE ALERTS Toggle menu ...\"}]', name='tavily_search_results_json', id='302dfc48-60bc-4db5-815a-2e97b8a95607', tool_call_id='call_i3ZKnTDPB1RxqwE6PWmgz5TQ'),\n", + " AIMessage(content='The current weather in San Francisco, California is partly cloudy with a temperature of 16.7°C (62.1°F). The wind is coming from the west at a speed of 20.2 kph (12.5 mph). The humidity is at 62%. [Source](https://www.weatherapi.com/)', response_metadata={'token_usage': {'completion_tokens': 67, 'prompt_tokens': 691, 'total_tokens': 758}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-953864dd-9af6-48aa-bc61-8b63388fca03-0', usage_metadata={'input_tokens': 691, 'output_tokens': 67, 'total_tokens': 758})]" ] }, - "execution_count": 18, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -764,6 +693,38 @@ "Example [LangSmith trace](https://smith.langchain.com/public/fa73960b-0f7d-4910-b73d-757a12f33b2b/r)" ] }, + { + "cell_type": "markdown", + "id": "ae908088", + "metadata": {}, + "source": [ + "If I want to start a new conversation, all I have to do is change the `thread_id` used" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "24460239", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'agent': {'messages': [AIMessage(content=\"As an AI, I don't have access to personal data about individuals unless it has been shared with me in the course of our conversation. I am designed to respect user privacy and confidentiality. So, I don't know your name.\", response_metadata={'token_usage': {'completion_tokens': 48, 'prompt_tokens': 86, 'total_tokens': 134}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-b3c8d577-fdbf-4f0f-8fd8-ecb3a5ac8920-0', usage_metadata={'input_tokens': 86, 'output_tokens': 48, 'total_tokens': 134})]}}\n", + "----\n" + ] + } + ], + "source": [ + "config = {\"configurable\": {\"thread_id\": \"xyz123\"}}\n", + "for chunk in agent_executor.stream(\n", + " {\"messages\": [HumanMessage(content=\"whats my name?\")]}, config\n", + "):\n", + " print(chunk)\n", + " print(\"----\")" + ] + }, { "cell_type": "markdown", "id": "c029798f", @@ -804,7 +765,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.1" + "version": "3.10.1" } }, "nbformat": 4, From 410e9add4443607618a75827afe1a676fcd7c0a7 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Thu, 30 May 2024 13:57:12 -0700 Subject: [PATCH 13/54] infra: run scheduled tests on aws, google, cohere, nvidia (#22328) Co-authored-by: Erick Friis --- .github/workflows/scheduled_test.yml | 75 ++++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 10 deletions(-) diff --git a/.github/workflows/scheduled_test.yml b/.github/workflows/scheduled_test.yml index ce85e024cd4dc..405e2fa13275f 100644 --- a/.github/workflows/scheduled_test.yml +++ b/.github/workflows/scheduled_test.yml @@ -10,6 +10,7 @@ env: jobs: build: + name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }} runs-on: ubuntu-latest strategy: fail-fast: false @@ -25,16 +26,52 @@ jobs: - "libs/partners/groq" - "libs/partners/mistralai" - "libs/partners/together" - name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }} + - "libs/partners/cohere" + - "libs/partners/google-vertexai" + - "libs/partners/google-genai" + - "libs/partners/aws" + - 
"libs/partners/nvidia-ai-endpoints" + steps: - uses: actions/checkout@v4 + with: + path: langchain + - uses: actions/checkout@v4 + with: + repository: langchain-ai/langchain-google + path: langchain-google + - uses: actions/checkout@v4 + with: + repository: langchain-ai/langchain-nvidia + path: langchain-nvidia + - uses: actions/checkout@v4 + with: + repository: langchain-ai/langchain-cohere + path: langchain-cohere + - uses: actions/checkout@v4 + with: + repository: langchain-ai/langchain-aws + path: langchain-aws + + - name: Move libs + run: | + rm -rf \ + langchain/libs/partners/google-genai \ + langchain/libs/partners/google-vertexai \ + langchain/libs/partners/nvidia-ai-endpoints \ + langchain/libs/partners/cohere + mv langchain-google/libs/genai langchain/libs/partners/google-genai + mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai + mv langchain-nvidia/libs/ai-endpoints langchain/libs/partners/nvidia-ai-endpoints + mv langchain-cohere/libs/cohere langchain/libs/partners/cohere + mv langchain-aws/libs/aws langchain/libs/partners/aws - name: Set up Python ${{ matrix.python-version }} - uses: "./.github/actions/poetry_setup" + uses: "./langchain/.github/actions/poetry_setup" with: python-version: ${{ matrix.python-version }} poetry-version: ${{ env.POETRY_VERSION }} - working-directory: ${{ matrix.working-directory }} + working-directory: langchain/${{ matrix.working-directory }} cache-key: scheduled - name: 'Authenticate to Google Cloud' @@ -43,16 +80,20 @@ jobs: with: credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}' + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_REGION }} + - name: Install dependencies - working-directory: ${{ matrix.working-directory }} - shell: bash run: | echo "Running scheduled tests, installing dependencies with poetry..." 
+ cd langchain/${{ matrix.working-directory }} poetry install --with=test_integration,test - name: Run integration tests - working-directory: ${{ matrix.working-directory }} - shell: bash env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} @@ -67,12 +108,26 @@ jobs: GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }} MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }} TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }} + COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }} + NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }} + GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} + GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }} + GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }} run: | - make integration_test + cd langchain/${{ matrix.working-directory }} + make integration_tests + + - name: Remove external libraries + run: | + rm -rf \ + langchain/libs/partners/google-genai \ + langchain/libs/partners/google-vertexai \ + langchain/libs/partners/nvidia-ai-endpoints \ + langchain/libs/partners/cohere \ + langchain/libs/partners/aws - name: Ensure the tests did not create any additional files - working-directory: ${{ matrix.working-directory }} - shell: bash + working-directory: langchain run: | set -eu From 0214246dc69dd2d4e11fd567308f666c220cfb0d Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Thu, 30 May 2024 14:32:33 -0700 Subject: [PATCH 14/54] docs: list tool calling models (#22334) --- docs/docs/how_to/tool_calling.ipynb | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/docs/how_to/tool_calling.ipynb b/docs/docs/how_to/tool_calling.ipynb index 3f8bc623a50e7..c05a7f035ff7e 100644 --- a/docs/docs/how_to/tool_calling.ipynb +++ b/docs/docs/how_to/tool_calling.ipynb @@ -14,14 +14,20 @@ "\n", ":::\n", "\n", - "```{=mdx}\n", - ":::info\n", + ":::info Tool calling vs function calling\n", + "\n", "We use the term tool calling interchangeably with function calling. 
Although\n", "function calling is sometimes meant to refer to invocations of a single function,\n", "we treat all models as though they can return multiple tool or function calls in \n", "each message.\n", + "\n", + ":::\n", + "\n", + ":::info Supported models\n", + "\n", + "You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n", + "\n", ":::\n", - "```\n", "\n", "Tool calling allows a chat model to respond to a given prompt by \"calling a tool\".\n", "While the name implies that the model is performing \n", @@ -705,7 +711,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.9.1" } }, "nbformat": 4, From 75ed9ee929c38a1447a600bc1ae13faad8972a95 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Fri, 31 May 2024 07:36:12 -0700 Subject: [PATCH 15/54] docs: Fix Solar and OCI integration page typos (#22343) @efriis @baskaryan --- docs/docs/integrations/text_embedding/oci_generative_ai.ipynb | 2 +- docs/docs/integrations/text_embedding/solar.ipynb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/docs/integrations/text_embedding/oci_generative_ai.ipynb b/docs/docs/integrations/text_embedding/oci_generative_ai.ipynb index ae935e4d354dc..c9fd205930d7a 100755 --- a/docs/docs/integrations/text_embedding/oci_generative_ai.ipynb +++ b/docs/docs/integrations/text_embedding/oci_generative_ai.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Oracle Cloud Infrastructure Generative AI" + "# Oracle Cloud Infrastructure Generative AI" ] }, { diff --git a/docs/docs/integrations/text_embedding/solar.ipynb b/docs/docs/integrations/text_embedding/solar.ipynb index a2e2443bcb539..c718f86b32e91 100644 --- a/docs/docs/integrations/text_embedding/solar.ipynb +++ b/docs/docs/integrations/text_embedding/solar.ipynb @@ -5,7 +5,7 @@ "id": "0f1199c1-f885-4290-b5e7-d1defd49abe1", "metadata": {}, "source": [ - "# Soalr\n", + "# Solar\n", "\n", "[Solar](https://console.upstage.ai/services/embedding) offers an embeddings service.\n", "\n", From 8cbce684d4ec861cfd45edc4585365db81b93afd Mon Sep 17 00:00:00 2001 From: ccurme Date: Fri, 31 May 2024 10:57:35 -0400 Subject: [PATCH 16/54] docs: update retriever how-to content (#22362) - [x] How to: use a vector store to retrieve data - [ ] How to: generate multiple queries to retrieve data for - [x] How to: use contextual compression to compress the data retrieved - [x] How to: write a custom retriever class - [x] How to: add similarity scores to retriever results ^ done last month - [x] How to: combine the results from multiple retrievers - [x] How to: reorder retrieved results to mitigate the "lost in the middle" effect - [x] How to: generate multiple embeddings per document ^ this PR - [ ] How to: retrieve the whole document for a chunk - [ ] How to: generate metadata filters - [ ] How to: create a time-weighted retriever - [ ] How to: use hybrid vector and keyword retrieval ^ todo --- docs/docs/how_to/ensemble_retriever.ipynb | 80 ++-- docs/docs/how_to/index.mdx | 2 +- docs/docs/how_to/long_context_reorder.ipynb | 116 +++--- docs/docs/how_to/multi_vector.ipynb | 392 ++++++++++---------- 4 files changed, 304 insertions(+), 286 deletions(-) diff --git a/docs/docs/how_to/ensemble_retriever.ipynb b/docs/docs/how_to/ensemble_retriever.ipynb index 015c3146e579f..80b0d50548fea 100644 --- a/docs/docs/how_to/ensemble_retriever.ipynb +++ b/docs/docs/how_to/ensemble_retriever.ipynb @@ -4,13 +4,17 @@ "cell_type": "markdown", 
"metadata": {}, "source": [ - "# How to create an Ensemble Retriever\n", + "# How to combine results from multiple retrievers\n", "\n", - "The `EnsembleRetriever` takes a list of retrievers as input and ensemble the results of their `get_relevant_documents()` methods and rerank the results based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n", + "The [EnsembleRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n", "\n", "By leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm. \n", "\n", - "The most common pattern is to combine a sparse retriever (like BM25) with a dense retriever (like embedding similarity), because their strengths are complementary. It is also known as \"hybrid search\". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity." + "The most common pattern is to combine a sparse retriever (like BM25) with a dense retriever (like embedding similarity), because their strengths are complementary. It is also known as \"hybrid search\". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity.\n", + "\n", + "## Basic usage\n", + "\n", + "Below we demonstrate ensembling of a [BM25Retriever](https://api.python.langchain.com/en/latest/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html)." 
] }, { @@ -24,22 +28,15 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "from langchain.retrievers import EnsembleRetriever\n", "from langchain_community.retrievers import BM25Retriever\n", "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import OpenAIEmbeddings" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ + "from langchain_openai import OpenAIEmbeddings\n", + "\n", "doc_list_1 = [\n", " \"I like apples\",\n", " \"I like oranges\",\n", @@ -71,19 +68,19 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[Document(page_content='You like apples', metadata={'source': 2}),\n", - " Document(page_content='I like apples', metadata={'source': 1}),\n", - " Document(page_content='You like oranges', metadata={'source': 2}),\n", - " Document(page_content='Apples and oranges are fruits', metadata={'source': 1})]" + "[Document(page_content='I like apples', metadata={'source': 1}),\n", + " Document(page_content='You like apples', metadata={'source': 2}),\n", + " Document(page_content='Apples and oranges are fruits', metadata={'source': 1}),\n", + " Document(page_content='You like oranges', metadata={'source': 2})]" ] }, - "execution_count": 15, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -99,24 +96,17 @@ "source": [ "## Runtime Configuration\n", "\n", - "We can also configure the retrievers at runtime. In order to do this, we need to mark the fields as configurable" + "We can also configure the individual retrievers at runtime using [configurable fields](/docs/how_to/configure). Below we update the \"top-k\" parameter for the FAISS retriever specifically:" ] }, { "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_core.runnables import ConfigurableField" - ] - }, - { - "cell_type": "code", - "execution_count": 17, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ + "from langchain_core.runnables import ConfigurableField\n", + "\n", "faiss_retriever = faiss_vectorstore.as_retriever(\n", " search_kwargs={\"k\": 2}\n", ").configurable_fields(\n", @@ -125,15 +115,8 @@ " name=\"Search Kwargs\",\n", " description=\"The search kwargs to use\",\n", " )\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ + ")\n", + "\n", "ensemble_retriever = EnsembleRetriever(\n", " retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]\n", ")" @@ -141,9 +124,22 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 6, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(page_content='I like apples', metadata={'source': 1}),\n", + " Document(page_content='You like apples', metadata={'source': 2}),\n", + " Document(page_content='Apples and oranges are fruits', metadata={'source': 1})]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "config = {\"configurable\": {\"search_kwargs_faiss\": {\"k\": 1}}}\n", "docs = ensemble_retriever.invoke(\"apples\", config=config)\n", @@ -181,7 +177,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.1" + "version": "3.10.4" } }, "nbformat": 4, diff --git a/docs/docs/how_to/index.mdx 
b/docs/docs/how_to/index.mdx index 3e3637b6a4bc8..448478a03408b 100644 --- a/docs/docs/how_to/index.mdx +++ b/docs/docs/how_to/index.mdx @@ -151,7 +151,7 @@ Retrievers are responsible for taking a query and returning relevant documents. - [How to: write a custom retriever class](/docs/how_to/custom_retriever) - [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever) - [How to: combine the results from multiple retrievers](/docs/how_to/ensemble_retriever) -- [How to: reorder retrieved results to put most relevant documents not in the middle](/docs/how_to/long_context_reorder) +- [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/docs/how_to/long_context_reorder) - [How to: generate multiple embeddings per document](/docs/how_to/multi_vector) - [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever) - [How to: generate metadata filters](/docs/how_to/self_query) diff --git a/docs/docs/how_to/long_context_reorder.ipynb b/docs/docs/how_to/long_context_reorder.ipynb index f84fad93df016..1d20708318965 100644 --- a/docs/docs/how_to/long_context_reorder.ipynb +++ b/docs/docs/how_to/long_context_reorder.ipynb @@ -5,28 +5,38 @@ "id": "fc0db1bc", "metadata": {}, "source": [ - "# How to reorder retrieved results to put most relevant documents not in the middle\n", + "# How to reorder retrieved results to mitigate the \"lost in the middle\" effect\n", "\n", - "No matter the architecture of your model, there is a substantial performance degradation when you include 10+ retrieved documents.\n", - "In brief: When models must access relevant information in the middle of long contexts, they tend to ignore the provided documents.\n", - "See: https://arxiv.org/abs/2307.03172\n", + "Substantial performance degradations in [RAG](/docs/tutorials/rag) applications have been [documented](https://arxiv.org/abs/2307.03172) as the number of retrieved documents grows (e.g., beyond ten). In brief: models are liable to miss relevant information in the middle of long contexts.\n", "\n", - "To avoid this issue you can re-order documents after retrieval to avoid performance degradation." + "By contrast, queries against vector stores will typically return documents in descending order of relevance (e.g., as measured by cosine similarity of [embeddings](/docs/concepts/#embedding-models)).\n", + "\n", + "To mitigate the [\"lost in the middle\"](https://arxiv.org/abs/2307.03172) effect, you can re-order documents after retrieval such that the most relevant documents are positioned at extrema (e.g., the first and last pieces of context), and the least relevant documents are positioned in the middle. In some cases this can help surface the most relevant information to LLMs.\n", + "\n", + "The [LongContextReorder](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html) document transformer implements this re-ordering procedure. Below we demonstrate an example." 
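The re-ordering itself is simple: starting from the relevance-ordered list, documents are placed alternately at the two ends of the result, leaving the weakest matches in the middle. A rough sketch of the idea (the transformer's internals may differ in detail):

```python
def lost_in_the_middle_reorder(docs: list) -> list:
    """Put the most relevant items at the extremes and the least in the middle."""
    docs = list(reversed(docs))  # least relevant first
    reordered: list = []
    for i, doc in enumerate(docs):
        if i % 2 == 1:
            reordered.append(doc)
        else:
            reordered.insert(0, doc)
    return reordered


# With relevance ranks 1 (best) through 5 (worst):
print(lost_in_the_middle_reorder([1, 2, 3, 4, 5]))  # [1, 3, 5, 4, 2]
```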
] }, { "cell_type": "code", "execution_count": null, - "id": "74d1ebe8", + "id": "2074fdaa-edff-468a-970f-6f5f26e93d4a", "metadata": {}, "outputs": [], "source": [ "%pip install --upgrade --quiet sentence-transformers langchain-chroma langchain langchain-openai langchain-huggingface > /dev/null" ] }, + { + "cell_type": "markdown", + "id": "c97eaaf2-34b7-4770-9949-e1abc4ca5226", + "metadata": {}, + "source": [ + "First we embed some artificial documents and index them in an (in-memory) [Chroma](/docs/integrations/providers/chroma/) vector store. We will use [Hugging Face](/docs/integrations/text_embedding/huggingfacehub/) embeddings, but any LangChain vector store or embeddings model will suffice." + ] + }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "id": "49cbcd8e", "metadata": {}, "outputs": [ @@ -45,20 +55,14 @@ " Document(page_content='This is just a random text.')]" ] }, - "execution_count": 3, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain.chains import LLMChain, StuffDocumentsChain\n", "from langchain_chroma import Chroma\n", - "from langchain_community.document_transformers import (\n", - " LongContextReorder,\n", - ")\n", - "from langchain_core.prompts import PromptTemplate\n", "from langchain_huggingface import HuggingFaceEmbeddings\n", - "from langchain_openai import OpenAI\n", "\n", "# Get embeddings.\n", "embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", @@ -83,14 +87,22 @@ "query = \"What can you tell me about the Celtics?\"\n", "\n", "# Get relevant documents ordered by relevance score\n", - "docs = retriever.get_relevant_documents(query)\n", + "docs = retriever.invoke(query)\n", "docs" ] }, + { + "cell_type": "markdown", + "id": "175d031a-43fa-42f4-93c4-2ba52c3c3ee5", + "metadata": {}, + "source": [ + "Note that documents are returned in descending order of relevance to the query. The `LongContextReorder` document transformer will implement the re-ordering described above:" + ] + }, { "cell_type": "code", - "execution_count": 4, - "id": "34fb9d6e", + "execution_count": 3, + "id": "9a1181f2-a3dc-4614-9233-2196ab65939e", "metadata": {}, "outputs": [ { @@ -108,12 +120,14 @@ " Document(page_content='This is a document about the Boston Celtics')]" ] }, - "execution_count": 4, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "from langchain_community.document_transformers import LongContextReorder\n", + "\n", "# Reorder the documents:\n", "# Less relevant document will be at the middle of the list and more\n", "# relevant elements at beginning / end.\n", @@ -124,59 +138,55 @@ "reordered_docs" ] }, + { + "cell_type": "markdown", + "id": "a8d2ef0c-c397-4d8d-8118-3f7acf86d241", + "metadata": {}, + "source": [ + "Below, we show how to incorporate the re-ordered documents into a simple question-answering chain:" + ] + }, { "cell_type": "code", "execution_count": 5, - "id": "ceccab87", + "id": "8bbea705-d5b9-4ed5-9957-e12547283622", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'\\n\\nThe Celtics are referenced in four of the nine text extracts. They are mentioned as the favorite team of the author, the winner of a basketball game, a team with one of the best players, and a team with a specific player. Additionally, the last extract states that the document is about the Boston Celtics. 
This suggests that the Celtics are a basketball team, possibly from Boston, that is well-known and has had successful players and games in the past. '" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "The Celtics are a professional basketball team and one of the most iconic franchises in the NBA. They are highly regarded and have a large fan base. The team has had many successful seasons and is often considered one of the top teams in the league. They have a strong history and have produced many great players, such as Larry Bird and L. Kornet. The team is based in Boston and is often referred to as the Boston Celtics.\n" + ] } ], "source": [ - "# We prepare and run a custom Stuff chain with reordered docs as context.\n", + "from langchain.chains.combine_documents import create_stuff_documents_chain\n", + "from langchain_core.prompts import PromptTemplate\n", + "from langchain_openai import OpenAI\n", "\n", - "# Override prompts\n", - "document_prompt = PromptTemplate(\n", - " input_variables=[\"page_content\"], template=\"{page_content}\"\n", - ")\n", - "document_variable_name = \"context\"\n", "llm = OpenAI()\n", - "stuff_prompt_override = \"\"\"Given this text extracts:\n", + "\n", + "prompt_template = \"\"\"\n", + "Given these texts:\n", "-----\n", "{context}\n", "-----\n", "Please answer the following question:\n", - "{query}\"\"\"\n", + "{query}\n", + "\"\"\"\n", + "\n", "prompt = PromptTemplate(\n", - " template=stuff_prompt_override, input_variables=[\"context\", \"query\"]\n", + " template=prompt_template,\n", + " input_variables=[\"context\", \"query\"],\n", ")\n", "\n", - "# Instantiate the chain\n", - "llm_chain = LLMChain(llm=llm, prompt=prompt)\n", - "chain = StuffDocumentsChain(\n", - " llm_chain=llm_chain,\n", - " document_prompt=document_prompt,\n", - " document_variable_name=document_variable_name,\n", - ")\n", - "chain.run(input_documents=reordered_docs, query=query)" + "# Create and invoke the chain:\n", + "chain = create_stuff_documents_chain(llm, prompt)\n", + "response = chain.invoke({\"context\": reordered_docs, \"query\": query})\n", + "print(response)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4696a97", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -195,7 +205,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.1" + "version": "3.10.4" } }, "nbformat": 4, diff --git a/docs/docs/how_to/multi_vector.ipynb b/docs/docs/how_to/multi_vector.ipynb index 34952b3074d7e..f8733e8b610dc 100644 --- a/docs/docs/how_to/multi_vector.ipynb +++ b/docs/docs/how_to/multi_vector.ipynb @@ -5,33 +5,36 @@ "id": "d9172545", "metadata": {}, "source": [ - "# How to use the MultiVector Retriever\n", + "# How to retrieve using multiple vectors per document\n", "\n", - "It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n", + "It can often be useful to store multiple vectors per document. There are multiple use cases where this is beneficial. 
For example, we can embed multiple chunks of a document and associate those embeddings with the parent document, allowing retriever hits on the chunks to return the larger document.\n", + "\n", + "LangChain implements a base [MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html), which simplifies this process. Much of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n", "\n", "The methods to create multiple vectors per document include:\n", "\n", - "- Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever).\n", + "- Smaller chunks: split a document into smaller chunks, and embed those (this is [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html)).\n", "- Summary: create a summary for each document, embed that along with (or instead of) the document.\n", "- Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document.\n", "\n", + "Note that this also enables another method of adding embeddings - manually. This is useful because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control.\n", "\n", - "Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control." + "Below we walk through an example. First we instantiate some documents. We will index them in an (in-memory) [Chroma](/docs/integrations/providers/chroma/) vector store using [OpenAI](https://python.langchain.com/v0.2/docs/integrations/text_embedding/openai/) embeddings, but any LangChain vector store or embeddings model will suffice." 
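As a preview of the "manual" method mentioned above: the idea is to embed hand-written text (for example, anticipated user questions) whose metadata points at a parent document's identifier, so that a match on the hand-written text surfaces the parent. A sketch, assuming the `doc_id`-keyed retriever wiring developed below; the query text and `parent_document` are placeholders:

```python
import uuid

from langchain_core.documents import Document

parent_id = str(uuid.uuid4())
manual_entry = Document(
    page_content="What did the president say about Justice Breyer?",
    metadata={"doc_id": parent_id},
)
# With a MultiVectorRetriever configured as in the example below:
# retriever.vectorstore.add_documents([manual_entry])
# retriever.docstore.mset([(parent_id, parent_document)])
```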
] }, { "cell_type": "code", - "execution_count": 1, - "id": "eed469be", + "execution_count": null, + "id": "09cecd95-3499-465a-895a-944627ffb77f", "metadata": {}, "outputs": [], "source": [ - "from langchain.retrievers.multi_vector import MultiVectorRetriever" + "%pip install --upgrade --quiet langchain-chroma langchain langchain-openai > /dev/null" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "id": "18c1421a", "metadata": {}, "outputs": [], @@ -40,25 +43,22 @@ "from langchain_chroma import Chroma\n", "from langchain_community.document_loaders import TextLoader\n", "from langchain_openai import OpenAIEmbeddings\n", - "from langchain_text_splitters import RecursiveCharacterTextSplitter" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "6d869496", - "metadata": {}, - "outputs": [], - "source": [ + "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "\n", "loaders = [\n", - " TextLoader(\"../../paul_graham_essay.txt\"),\n", + " TextLoader(\"paul_graham_essay.txt\"),\n", " TextLoader(\"state_of_the_union.txt\"),\n", "]\n", "docs = []\n", "for loader in loaders:\n", " docs.extend(loader.load())\n", "text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)\n", - "docs = text_splitter.split_documents(docs)" + "docs = text_splitter.split_documents(docs)\n", + "\n", + "# The vectorstore to use to index the child chunks\n", + "vectorstore = Chroma(\n", + " collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n", + ")" ] }, { @@ -68,52 +68,54 @@ "source": [ "## Smaller chunks\n", "\n", - "Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. Here we show what is going on under the hood." + "Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html) does. Here we show what is going on under the hood.\n", + "\n", + "We will make a distinction between the vector store, which indexes embeddings of the (sub) documents, and the document store, which houses the \"parent\" documents and associates them with an identifier." 
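The document store itself is just a key-value store over document identifiers. A tiny illustration with `InMemoryStore` (the byte-store variant used below behaves the same way but stores bytes):

```python
from langchain.storage import InMemoryStore

docstore = InMemoryStore()
docstore.mset([("doc-1", "the full parent document lives here")])
print(docstore.mget(["doc-1"]))  # ['the full parent document lives here']
```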
] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "id": "0e7b6b45", "metadata": {}, "outputs": [], "source": [ - "# The vectorstore to use to index the child chunks\n", - "vectorstore = Chroma(\n", - " collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n", - ")\n", + "import uuid\n", + "\n", + "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", + "\n", "# The storage layer for the parent documents\n", "store = InMemoryByteStore()\n", "id_key = \"doc_id\"\n", + "\n", "# The retriever (empty to start)\n", "retriever = MultiVectorRetriever(\n", " vectorstore=vectorstore,\n", " byte_store=store,\n", " id_key=id_key,\n", ")\n", - "import uuid\n", "\n", "doc_ids = [str(uuid.uuid4()) for _ in docs]" ] }, { - "cell_type": "code", - "execution_count": 5, - "id": "72a36491", + "cell_type": "markdown", + "id": "d4feded4-856a-4282-91c3-53aabc62e6ff", "metadata": {}, - "outputs": [], "source": [ - "# The splitter to use to create smaller chunks\n", - "child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)" + "We next generate the \"sub\" documents by splitting the original documents. Note that we store the document identifier in the `metadata` of the corresponding [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 3, "id": "5d23247d", "metadata": {}, "outputs": [], "source": [ + "# The splitter to use to create smaller chunks\n", + "child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n", + "\n", "sub_docs = []\n", "for i, doc in enumerate(docs):\n", " _id = doc_ids[i]\n", @@ -123,9 +125,17 @@ " sub_docs.extend(_sub_docs)" ] }, + { + "cell_type": "markdown", + "id": "8e0634f8-90d5-4250-981a-5257c8a6d455", + "metadata": {}, + "source": [ + "Finally, we index the documents in our vector store and document store:" + ] + }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "id": "92ed5861", "metadata": {}, "outputs": [], @@ -134,31 +144,46 @@ "retriever.docstore.mset(list(zip(doc_ids, docs)))" ] }, + { + "cell_type": "markdown", + "id": "14c48c6d-850c-4317-9b6e-1ade92f2f710", + "metadata": {}, + "source": [ + "The vector store alone will retrieve small chunks:" + ] + }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "id": "8afed60c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '2fd77862-9ed5-4fad-bf76-e487b747b333', 'source': 'state_of_the_union.txt'})" + "Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. 
\\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '064eca46-a4c4-4789-8e3b-583f9597e54f', 'source': 'state_of_the_union.txt'})" ] }, - "execution_count": 8, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Vectorstore alone retrieves the small chunks\n", "retriever.vectorstore.similarity_search(\"justice breyer\")[0]" ] }, + { + "cell_type": "markdown", + "id": "717097c7-61d9-4306-8625-ef8f1940c127", + "metadata": {}, + "source": [ + "Whereas the retriever will return the larger parent document:" + ] + }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "id": "3c9017f1", "metadata": {}, "outputs": [ @@ -168,14 +193,13 @@ "9875" ] }, - "execution_count": 9, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Retriever returns larger chunks\n", - "len(retriever.get_relevant_documents(\"justice breyer\")[0].page_content)" + "len(retriever.invoke(\"justice breyer\")[0].page_content)" ] }, { @@ -183,12 +207,12 @@ "id": "cdef8339-f9fa-4b3b-955f-ad9dbdf2734f", "metadata": {}, "source": [ - "The default search type the retriever performs on the vector database is a similarity search. LangChain Vector Stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search) so if you want this instead you can just set the `search_type` property as follows:" + "The default search type the retriever performs on the vector database is a similarity search. LangChain vector stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search). This can be controlled via the `search_type` parameter of the retriever:" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 7, "id": "36739460-a737-4a8e-b70f-50bf8c8eaae7", "metadata": {}, "outputs": [ { "data": { "text/plain": [ @@ -198,7 +222,7 @@ "9875" ] }, - "execution_count": 10, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], @@ -208,7 +232,7 @@ "\n", "retriever.search_type = SearchType.mmr\n", "\n", - "len(retriever.get_relevant_documents(\"justice breyer\")[0].page_content)" + "len(retriever.invoke(\"justice breyer\")[0].page_content)" ] }, { @@ -216,44 +240,66 @@ "id": "d6a7ae0d", "metadata": {}, "source": [ - "## Summary\n", + "## Associating summaries with a document for retrieval\n", + "\n", + "A summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those.\n", + "\n", + "We construct a simple [chain](/docs/how_to/sequence) that will receive an input [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object and generate a summary using an LLM.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those."
+ "\n", + "```" ] }, { "cell_type": "code", - "execution_count": 11, - "id": "1433dff4", + "execution_count": 8, + "id": "6589291f-55bb-4e9a-b4ff-08f2506ed641", "metadata": {}, "outputs": [], "source": [ - "import uuid\n", + "# | output: false\n", + "# | echo: false\n", "\n", - "from langchain_core.documents import Document\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI" + "from langchain_openai import ChatOpenAI\n", + "\n", + "llm = ChatOpenAI()" ] }, { "cell_type": "code", - "execution_count": 12, - "id": "35b30390", + "execution_count": 9, + "id": "1433dff4", "metadata": {}, "outputs": [], "source": [ + "import uuid\n", + "\n", + "from langchain_core.documents import Document\n", + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "\n", "chain = (\n", " {\"doc\": lambda x: x.page_content}\n", " | ChatPromptTemplate.from_template(\"Summarize the following document:\\n\\n{doc}\")\n", - " | ChatOpenAI(max_retries=0)\n", + " | llm\n", " | StrOutputParser()\n", ")" ] }, + { + "cell_type": "markdown", + "id": "3faa9fde-1b09-4849-a815-8b2e89c30a02", + "metadata": {}, + "source": [ + "Note that we can [batch](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain accross documents:" + ] + }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 10, "id": "41a2a738", "metadata": {}, "outputs": [], @@ -261,9 +307,17 @@ "summaries = chain.batch(docs, {\"max_concurrency\": 5})" ] }, + { + "cell_type": "markdown", + "id": "73ef599e-140b-4905-8b62-6c52cdde1852", + "metadata": {}, + "source": [ + "We can then initialize a `MultiVectorRetriever` as before, indexing the summaries in our vector store, and retaining the original documents in our document store:" + ] + }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 11, "id": "7ac5e4b1", "metadata": {}, "outputs": [], @@ -279,29 +333,13 @@ " byte_store=store,\n", " id_key=id_key,\n", ")\n", - "doc_ids = [str(uuid.uuid4()) for _ in docs]" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "0d93309f", - "metadata": {}, - "outputs": [], - "source": [ + "doc_ids = [str(uuid.uuid4()) for _ in docs]\n", + "\n", "summary_docs = [\n", " Document(page_content=s, metadata={id_key: doc_ids[i]})\n", " for i, s in enumerate(summaries)\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "6d5edf0d", - "metadata": {}, - "outputs": [], - "source": [ + "]\n", + "\n", "retriever.vectorstore.add_documents(summary_docs)\n", "retriever.docstore.mset(list(zip(doc_ids, docs)))" ] @@ -320,50 +358,48 @@ ] }, { - "cell_type": "code", - "execution_count": 18, - "id": "299232d6", + "cell_type": "markdown", + "id": "f0274892-29c1-4616-9040-d23f9d537526", "metadata": {}, - "outputs": [], "source": [ - "sub_docs = vectorstore.similarity_search(\"justice breyer\")" + "Querying the vector store will return summaries:" ] }, { "cell_type": "code", - "execution_count": 19, - "id": "10e404c0", + "execution_count": 12, + "id": "299232d6", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "Document(page_content=\"The document is a speech given by President Biden addressing various issues and outlining his agenda for the nation. 
He highlights the importance of nominating a Supreme Court justice and introduces his nominee, Judge Ketanji Brown Jackson. He emphasizes the need to secure the border and reform the immigration system, including providing a pathway to citizenship for Dreamers and essential workers. The President also discusses the protection of women's rights, including access to healthcare and the right to choose. He calls for the passage of the Equality Act to protect LGBTQ+ rights. Additionally, President Biden discusses the need to address the opioid epidemic, improve mental health services, support veterans, and fight against cancer. He expresses optimism for the future of America and the strength of the American people.\", metadata={'doc_id': '56345bff-3ead-418c-a4ff-dff203f77474'})" + "Document(page_content=\"President Biden recently nominated Judge Ketanji Brown Jackson to serve on the United States Supreme Court, emphasizing her qualifications and broad support. The President also outlined a plan to secure the border, fix the immigration system, protect women's rights, support LGBTQ+ Americans, and advance mental health services. He highlighted the importance of bipartisan unity in passing legislation, such as the Violence Against Women Act. The President also addressed supporting veterans, particularly those impacted by exposure to burn pits, and announced plans to expand benefits for veterans with respiratory cancers. Additionally, he proposed a plan to end cancer as we know it through the Cancer Moonshot initiative. President Biden expressed optimism about the future of America and emphasized the strength of the American people in overcoming challenges.\", metadata={'doc_id': '84015b1b-980e-400a-94d8-cf95d7e079bd'})" ] }, - "execution_count": 19, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "sub_docs = retriever.vectorstore.similarity_search(\"justice breyer\")\n", + "\n", "sub_docs[0]" ] }, { - "cell_type": "code", - "execution_count": 20, - "id": "e4cce5c2", + "cell_type": "markdown", + "id": "e4f77ac5-2926-4f60-aad5-b2067900dff9", "metadata": {}, - "outputs": [], "source": [ - "retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")" + "Whereas the retriever will return the larger source document:" ] }, { "cell_type": "code", - "execution_count": 21, - "id": "c8570dbb", + "execution_count": 13, + "id": "e4cce5c2", "metadata": {}, "outputs": [ { @@ -372,12 +408,14 @@ "9194" ] }, - "execution_count": 21, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "retrieved_docs = retriever.invoke(\"justice breyer\")\n", + "\n", "len(retrieved_docs[0].page_content)" ] }, @@ -388,42 +426,28 @@ "source": [ "## Hypothetical Queries\n", "\n", - "An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document. 
These questions can then be embedded" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "5219b085", - "metadata": {}, - "outputs": [], - "source": [ - "functions = [\n", - " {\n", - " \"name\": \"hypothetical_questions\",\n", - " \"description\": \"Generate hypothetical questions\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"questions\": {\n", - " \"type\": \"array\",\n", - " \"items\": {\"type\": \"string\"},\n", - " },\n", - " },\n", - " \"required\": [\"questions\"],\n", - " },\n", - " }\n", - "]" + "An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document, which might bear close semantic similarity to relevant queries in a [RAG](/docs/tutorials/rag) application. These questions can then be embedded and associated with the documents to improve retrieval.\n", + "\n", + "Below, we use the [with_structured_output](/docs/how_to/structured_output/) method to structure the LLM output into a list of strings." ] }, { "cell_type": "code", - "execution_count": 23, - "id": "523deb92", + "execution_count": 16, + "id": "03d85234-c33a-4a43-861d-47328e1ec2ea", "metadata": {}, "outputs": [], "source": [ - "from langchain_core.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n", + "from typing import List\n", + "\n", + "from langchain_core.pydantic_v1 import BaseModel, Field\n", + "\n", + "\n", + "class HypotheticalQuestions(BaseModel):\n", + " \"\"\"Generate hypothetical questions.\"\"\"\n", + "\n", + " questions: List[str] = Field(..., description=\"List of questions\")\n", + "\n", "\n", "chain = (\n", " {\"doc\": lambda x: x.page_content}\n", @@ -431,28 +455,36 @@ " | ChatPromptTemplate.from_template(\n", " \"Generate a list of exactly 3 hypothetical questions that the below document could be used to answer:\\n\\n{doc}\"\n", " )\n", - " | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(\n", - " functions=functions, function_call={\"name\": \"hypothetical_questions\"}\n", + " | ChatOpenAI(max_retries=0, model=\"gpt-4o\").with_structured_output(\n", + " HypotheticalQuestions\n", " )\n", - " | JsonKeyOutputFunctionsParser(key_name=\"questions\")\n", + " | (lambda x: x.questions)\n", ")" ] }, + { + "cell_type": "markdown", + "id": "6dddc40f-62af-413c-b944-f94a5e1f2f4e", + "metadata": {}, + "source": [ + "Invoking the chain on a single document demonstrates that it outputs a list of questions:" + ] + }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 17, "id": "11d30554", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[\"What was the author's first experience with programming like?\",\n", - " 'Why did the author switch their focus from AI to Lisp during their graduate studies?',\n", - " 'What led the author to contemplate a career in art instead of computer science?']" + "[\"What impact did the IBM 1401 have on the author's early programming experiences?\",\n", + " \"How did the transition from using the IBM 1401 to microcomputers influence the author's programming journey?\",\n", + " \"What role did Lisp play in shaping the author's understanding and approach to AI?\"]" ] }, - "execution_count": 24, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -462,22 +494,24 @@ ] }, { - "cell_type": "code", - "execution_count": 25, - "id": "3eb2e48c", + "cell_type": "markdown", + "id": "dcffc572-7b20-4b77-857a-90ec360a8f7e", "metadata": {}, - "outputs": [], "source": [ - "hypothetical_questions = 
chain.batch(docs, {\"max_concurrency\": 5})" + "We can then batch the chain over all documents and assemble our vector store and document store as before:" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 18, "id": "b2cd6e75", "metadata": {}, "outputs": [], "source": [ + "# Batch chain over documents to generate hypothetical questions\n", + "hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5})\n", + "\n", + "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(\n", " collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings()\n", ")\n", @@ -491,82 +525,67 @@ " byte_store=store,\n", " id_key=id_key,\n", ")\n", - "doc_ids = [str(uuid.uuid4()) for _ in docs]" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "18831b3b", - "metadata": {}, - "outputs": [], - "source": [ + "doc_ids = [str(uuid.uuid4()) for _ in docs]\n", + "\n", + "\n", + "# Generate Document objects from hypothetical questions\n", "question_docs = []\n", "for i, question_list in enumerate(hypothetical_questions):\n", " question_docs.extend(\n", " [Document(page_content=s, metadata={id_key: doc_ids[i]}) for s in question_list]\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "224b24c5", - "metadata": {}, - "outputs": [], - "source": [ + " )\n", + "\n", + "\n", "retriever.vectorstore.add_documents(question_docs)\n", "retriever.docstore.mset(list(zip(doc_ids, docs)))" ] }, { - "cell_type": "code", - "execution_count": 29, - "id": "7b442b90", + "cell_type": "markdown", + "id": "75cba8ab-a06f-4545-85fc-cf49d0204b5e", "metadata": {}, - "outputs": [], "source": [ - "sub_docs = vectorstore.similarity_search(\"justice breyer\")" + "Note that querying the underlying vector store will retrieve hypothetical questions that are semantically similar to the input query:" ] }, { "cell_type": "code", - "execution_count": 30, - "id": "089b5ad0", + "execution_count": 19, + "id": "7b442b90", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[Document(page_content='Who has been nominated to serve on the United States Supreme Court?', metadata={'doc_id': '0b3a349e-c936-4e77-9c40-0a39fc3e07f0'}),\n", - " Document(page_content=\"What was the context and content of Robert Morris' advice to the document's author in 2010?\", metadata={'doc_id': 'b2b2cdca-988a-4af1-ba47-46170770bc8c'}),\n", - " Document(page_content='How did personal circumstances influence the decision to pass on the leadership of Y Combinator?', metadata={'doc_id': 'b2b2cdca-988a-4af1-ba47-46170770bc8c'}),\n", - " Document(page_content='What were the reasons for the author leaving Yahoo in the summer of 1999?', metadata={'doc_id': 'ce4f4981-ca60-4f56-86f0-89466de62325'})]" + "[Document(page_content='What might be the potential benefits of nominating Circuit Court of Appeals Judge Ketanji Brown Jackson to the United States Supreme Court?', metadata={'doc_id': '43292b74-d1b8-4200-8a8b-ea0cb57fbcdb'}),\n", + " Document(page_content='How might the Bipartisan Infrastructure Law impact the economic competition between the U.S. 
and China?', metadata={'doc_id': '66174780-d00c-4166-9791-f0069846e734'}),\n", + " Document(page_content='What factors led to the creation of Y Combinator?', metadata={'doc_id': '72003c4e-4cc9-4f09-a787-0b541a65b38c'}),\n", + " Document(page_content='How did the ability to publish essays online change the landscape for writers and thinkers?', metadata={'doc_id': 'e8d2c648-f245-4bcc-b8d3-14e64a164b64'})]" ] }, - "execution_count": 30, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "sub_docs = retriever.vectorstore.similarity_search(\"justice breyer\")\n", + "\n", "sub_docs" ] }, { - "cell_type": "code", - "execution_count": 31, - "id": "7594b24e", + "cell_type": "markdown", + "id": "63c32e43-5f4a-463b-a0c2-2101986f70e6", "metadata": {}, - "outputs": [], "source": [ - "retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")" + "And invoking the retriever will return the corresponding document:" ] }, { "cell_type": "code", - "execution_count": 32, - "id": "4c120c65", + "execution_count": 20, + "id": "7594b24e", "metadata": {}, "outputs": [ { @@ -575,22 +594,15 @@ "9194" ] }, - "execution_count": 32, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "retrieved_docs = retriever.invoke(\"justice breyer\")\n", "len(retrieved_docs[0].page_content)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "005072b8", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -609,7 +621,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.1" + "version": "3.10.4" } }, "nbformat": 4, From 1bad0ac946f19842d494158ce6763ca81292fe85 Mon Sep 17 00:00:00 2001 From: Erick Friis Date: Fri, 31 May 2024 08:40:48 -0700 Subject: [PATCH 17/54] docs: redirect integration links to 0.2 (#22326) --- docs/vercel.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/vercel.json b/docs/vercel.json index d14a466f4772f..cda463e41185f 100644 --- a/docs/vercel.json +++ b/docs/vercel.json @@ -41,6 +41,10 @@ "source": "/docs/get_started/introduction(/?)", "destination": "/v0.2/docs/introduction/" }, + { + "source": "/docs/integrations/:path(.*/?)*", + "destination": "/v0.2/docs/integrations/:path*" + }, { "source": "/docs/:path(.*/?)*", "destination": "/v0.1/docs/:path*" From 6ffa0acf32772c38d7725a6448d247d8094fa79e Mon Sep 17 00:00:00 2001 From: Erick Friis Date: Fri, 31 May 2024 08:41:05 -0700 Subject: [PATCH 18/54] ai21: fix text-splitters version (#22366) --- libs/partners/ai21/poetry.lock | 378 +++++++++++++++++++----------- libs/partners/ai21/pyproject.toml | 2 +- 2 files changed, 237 insertions(+), 143 deletions(-) diff --git a/libs/partners/ai21/poetry.lock b/libs/partners/ai21/poetry.lock index 32d8702f02ea7..7bbd1f200611a 100644 --- a/libs/partners/ai21/poetry.lock +++ b/libs/partners/ai21/poetry.lock @@ -1,20 +1,21 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. 
[[package]] name = "ai21" -version = "2.2.5" +version = "2.4.0" description = "" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "ai21-2.2.5-py3-none-any.whl", hash = "sha256:ff6ca793b3adc749d984f45414e56c37095f2f3a7ecf6f6971571fe0793774f6"}, - {file = "ai21-2.2.5.tar.gz", hash = "sha256:5940f9e8c225e3e943e1bbb1f7a98cd33225dd98604fb74e6c5febc555c67c32"}, + {file = "ai21-2.4.0-py3-none-any.whl", hash = "sha256:ad140664c57df7906ee28189d30af7a746999ba35a48fe52936c74590829fd20"}, + {file = "ai21-2.4.0.tar.gz", hash = "sha256:f6c1d42796bf279875aa417efd9696c496cd371122e7b33f4ffaab72180089be"}, ] [package.dependencies] ai21-tokenizer = ">=0.9.1,<1.0.0" dataclasses-json = ">=0.6.3,<0.7.0" -requests = ">=2.31.0,<3.0.0" +httpx = ">=0.27.0,<0.28.0" +tenacity = ">=8.3.0,<9.0.0" typing-extensions = ">=4.9.0,<5.0.0" [package.extras] @@ -37,18 +38,40 @@ tokenizers = ">=0.15.0,<1.0.0" [[package]] name = "annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [package.dependencies] typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} +[[package]] +name = "anyio" +version = "4.4.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + [[package]] name = "certifi" version = "2024.2.2" @@ -161,13 +184,13 @@ files = [ [[package]] name = "codespell" -version = "2.2.6" +version = "2.3.0" description = "Codespell" optional = false python-versions = ">=3.8" files = [ - {file = "codespell-2.2.6-py3-none-any.whl", hash = "sha256:9ee9a3e5df0990604013ac2a9f22fa8e57669c827124a2e961fe8a1da4cacc07"}, - {file = "codespell-2.2.6.tar.gz", hash = "sha256:a8c65d8eb3faa03deabab6b3bbe798bea72e1799c7e9e955d57eca4096abcff9"}, + {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"}, + {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"}, ] [package.extras] @@ -284,15 +307,71 @@ test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", 
"dask-expr", "dask[dataframe, test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] tqdm = ["tqdm"] +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httpx" +version = "0.27.0" +description = "The next generation HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + [[package]] name = "huggingface-hub" -version = "0.23.0" +version = "0.23.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.23.0-py3-none-any.whl", hash = "sha256:075c30d48ee7db2bba779190dc526d2c11d422aed6f9044c5e2fdc2c432fdb91"}, - {file = "huggingface_hub-0.23.0.tar.gz", hash = "sha256:7126dedd10a4c6fac796ced4d87a8cf004efc722a5125c2c09299017fa366fa9"}, + {file = "huggingface_hub-0.23.2-py3-none-any.whl", hash = "sha256:48727a16e704d409c4bb5913613308499664f22a99743435dc3a13b23c485827"}, + {file = "huggingface_hub-0.23.2.tar.gz", hash = "sha256:f6829b62d5fdecb452a76fdbec620cba4c1573655a8d710c1df71735fd9edbd2"}, ] [package.dependencies] @@ -367,7 +446,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.2.0rc1" +version = "0.2.2" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -408,7 +487,7 @@ url = "../../standard-tests" [[package]] name = "langchain-text-splitters" -version = "0.0.2" +version = "0.2.0" description = "LangChain text splitting utilities" optional = false python-versions = ">=3.8.1,<4.0" @@ 
-416,7 +495,7 @@ files = [] develop = true [package.dependencies] -langchain-core = ">=0.1.28,<0.3" +langchain-core = "^0.2.0" [package.extras] extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"] @@ -427,13 +506,13 @@ url = "../../text-splitters" [[package]] name = "langsmith" -version = "0.1.58" +version = "0.1.65" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.58-py3-none-any.whl", hash = "sha256:1148cc836ec99d1b2f37cd2fa3014fcac213bb6bad798a2b21bb9111c18c9768"}, - {file = "langsmith-0.1.58.tar.gz", hash = "sha256:a5060933c1fb3006b498ec849677993329d7e6138bdc2ec044068ab806e09c39"}, + {file = "langsmith-0.1.65-py3-none-any.whl", hash = "sha256:ab4487029240e69cca30da1065f1e9138e5a7ca2bbe8c697f0bd7d5839f71cf7"}, + {file = "langsmith-0.1.65.tar.gz", hash = "sha256:d3c2eb2391478bd79989f02652cf66e29a7959d677614b6993a47cef43f7f43b"}, ] [package.dependencies] @@ -604,18 +683,18 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.7.1" +version = "2.7.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, - {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, + {file = "pydantic-2.7.2-py3-none-any.whl", hash = "sha256:834ab954175f94e6e68258537dc49402c4a5e9d0409b9f1b86b7e934a8372de7"}, + {file = "pydantic-2.7.2.tar.gz", hash = "sha256:71b2945998f9c9b7919a45bde9a50397b289937d215ae141c1d0903ba7149fd7"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.2" +pydantic-core = "2.18.3" typing-extensions = ">=4.6.1" [package.extras] @@ -623,90 +702,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.2" +version = "2.18.3" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, - {file = 
"pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, - {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, - {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, - {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, - {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, - {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, - {file = 
"pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, - {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, - {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, - {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, - {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, - {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, - {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, - {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, - {file = 
"pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, - {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, + {file = "pydantic_core-2.18.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:744697428fcdec6be5670460b578161d1ffe34743a5c15656be7ea82b008197c"}, + {file = "pydantic_core-2.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37b40c05ced1ba4218b14986fe6f283d22e1ae2ff4c8e28881a70fb81fbfcda7"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544a9a75622357076efb6b311983ff190fbfb3c12fc3a853122b34d3d358126c"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e2e253af04ceaebde8eb201eb3f3e3e7e390f2d275a88300d6a1959d710539e2"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:855ec66589c68aa367d989da5c4755bb74ee92ccad4fdb6af942c3612c067e34"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d3e42bb54e7e9d72c13ce112e02eb1b3b55681ee948d748842171201a03a98a"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6ac9ffccc9d2e69d9fba841441d4259cb668ac180e51b30d3632cd7abca2b9b"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c56eca1686539fa0c9bda992e7bd6a37583f20083c37590413381acfc5f192d6"}, + {file = "pydantic_core-2.18.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:17954d784bf8abfc0ec2a633108207ebc4fa2df1a0e4c0c3ccbaa9bb01d2c426"}, + {file = "pydantic_core-2.18.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:98ed737567d8f2ecd54f7c8d4f8572ca7c7921ede93a2e52939416170d357812"}, + {file = "pydantic_core-2.18.3-cp310-none-win32.whl", hash = "sha256:9f9e04afebd3ed8c15d67a564ed0a34b54e52136c6d40d14c5547b238390e779"}, + {file = "pydantic_core-2.18.3-cp310-none-win_amd64.whl", hash = "sha256:45e4ffbae34f7ae30d0047697e724e534a7ec0a82ef9994b7913a412c21462a0"}, + {file = "pydantic_core-2.18.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b9ebe8231726c49518b16b237b9fe0d7d361dd221302af511a83d4ada01183ab"}, + {file = "pydantic_core-2.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b8e20e15d18bf7dbb453be78a2d858f946f5cdf06c5072453dace00ab652e2b2"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0d9ff283cd3459fa0bf9b0256a2b6f01ac1ff9ffb034e24457b9035f75587cb"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f7ef5f0ebb77ba24c9970da18b771711edc5feaf00c10b18461e0f5f5949231"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73038d66614d2e5cde30435b5afdced2b473b4c77d4ca3a8624dd3e41a9c19be"}, + {file = 
"pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6afd5c867a74c4d314c557b5ea9520183fadfbd1df4c2d6e09fd0d990ce412cd"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd7df92f28d351bb9f12470f4c533cf03d1b52ec5a6e5c58c65b183055a60106"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:80aea0ffeb1049336043d07799eace1c9602519fb3192916ff525b0287b2b1e4"}, + {file = "pydantic_core-2.18.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:aaee40f25bba38132e655ffa3d1998a6d576ba7cf81deff8bfa189fb43fd2bbe"}, + {file = "pydantic_core-2.18.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9128089da8f4fe73f7a91973895ebf2502539d627891a14034e45fb9e707e26d"}, + {file = "pydantic_core-2.18.3-cp311-none-win32.whl", hash = "sha256:fec02527e1e03257aa25b1a4dcbe697b40a22f1229f5d026503e8b7ff6d2eda7"}, + {file = "pydantic_core-2.18.3-cp311-none-win_amd64.whl", hash = "sha256:58ff8631dbab6c7c982e6425da8347108449321f61fe427c52ddfadd66642af7"}, + {file = "pydantic_core-2.18.3-cp311-none-win_arm64.whl", hash = "sha256:3fc1c7f67f34c6c2ef9c213e0f2a351797cda98249d9ca56a70ce4ebcaba45f4"}, + {file = "pydantic_core-2.18.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f0928cde2ae416a2d1ebe6dee324709c6f73e93494d8c7aea92df99aab1fc40f"}, + {file = "pydantic_core-2.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bee9bb305a562f8b9271855afb6ce00223f545de3d68560b3c1649c7c5295e9"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e862823be114387257dacbfa7d78547165a85d7add33b446ca4f4fae92c7ff5c"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a36f78674cbddc165abab0df961b5f96b14461d05feec5e1f78da58808b97e7"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba905d184f62e7ddbb7a5a751d8a5c805463511c7b08d1aca4a3e8c11f2e5048"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fdd362f6a586e681ff86550b2379e532fee63c52def1c666887956748eaa326"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24b214b7ee3bd3b865e963dbed0f8bc5375f49449d70e8d407b567af3222aae4"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:691018785779766127f531674fa82bb368df5b36b461622b12e176c18e119022"}, + {file = "pydantic_core-2.18.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:60e4c625e6f7155d7d0dcac151edf5858102bc61bf959d04469ca6ee4e8381bd"}, + {file = "pydantic_core-2.18.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4e651e47d981c1b701dcc74ab8fec5a60a5b004650416b4abbef13db23bc7be"}, + {file = "pydantic_core-2.18.3-cp312-none-win32.whl", hash = "sha256:ffecbb5edb7f5ffae13599aec33b735e9e4c7676ca1633c60f2c606beb17efc5"}, + {file = "pydantic_core-2.18.3-cp312-none-win_amd64.whl", hash = "sha256:2c8333f6e934733483c7eddffdb094c143b9463d2af7e6bd85ebcb2d4a1b82c6"}, + {file = "pydantic_core-2.18.3-cp312-none-win_arm64.whl", hash = "sha256:7a20dded653e516a4655f4c98e97ccafb13753987434fe7cf044aa25f5b7d417"}, + {file = "pydantic_core-2.18.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:eecf63195be644b0396f972c82598cd15693550f0ff236dcf7ab92e2eb6d3522"}, + {file = "pydantic_core-2.18.3-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:2c44efdd3b6125419c28821590d7ec891c9cb0dff33a7a78d9d5c8b6f66b9702"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e59fca51ffbdd1638b3856779342ed69bcecb8484c1d4b8bdb237d0eb5a45e2"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70cf099197d6b98953468461d753563b28e73cf1eade2ffe069675d2657ed1d5"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63081a49dddc6124754b32a3774331467bfc3d2bd5ff8f10df36a95602560361"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:370059b7883485c9edb9655355ff46d912f4b03b009d929220d9294c7fd9fd60"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a64faeedfd8254f05f5cf6fc755023a7e1606af3959cfc1a9285744cc711044"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19d2e725de0f90d8671f89e420d36c3dd97639b98145e42fcc0e1f6d492a46dc"}, + {file = "pydantic_core-2.18.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:67bc078025d70ec5aefe6200ef094576c9d86bd36982df1301c758a9fff7d7f4"}, + {file = "pydantic_core-2.18.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:adf952c3f4100e203cbaf8e0c907c835d3e28f9041474e52b651761dc248a3c0"}, + {file = "pydantic_core-2.18.3-cp38-none-win32.whl", hash = "sha256:9a46795b1f3beb167eaee91736d5d17ac3a994bf2215a996aed825a45f897558"}, + {file = "pydantic_core-2.18.3-cp38-none-win_amd64.whl", hash = "sha256:200ad4e3133cb99ed82342a101a5abf3d924722e71cd581cc113fe828f727fbc"}, + {file = "pydantic_core-2.18.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:304378b7bf92206036c8ddd83a2ba7b7d1a5b425acafff637172a3aa72ad7083"}, + {file = "pydantic_core-2.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c826870b277143e701c9ccf34ebc33ddb4d072612683a044e7cce2d52f6c3fef"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e201935d282707394f3668380e41ccf25b5794d1b131cdd96b07f615a33ca4b1"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5560dda746c44b48bf82b3d191d74fe8efc5686a9ef18e69bdabccbbb9ad9442"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b32c2a1f8032570842257e4c19288eba9a2bba4712af542327de9a1204faff8"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:929c24e9dea3990bc8bcd27c5f2d3916c0c86f5511d2caa69e0d5290115344a9"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1a8376fef60790152564b0eab376b3e23dd6e54f29d84aad46f7b264ecca943"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dccf3ef1400390ddd1fb55bf0632209d39140552d068ee5ac45553b556780e06"}, + {file = "pydantic_core-2.18.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:41dbdcb0c7252b58fa931fec47937edb422c9cb22528f41cb8963665c372caf6"}, + {file = "pydantic_core-2.18.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:666e45cf071669fde468886654742fa10b0e74cd0fa0430a46ba6056b24fb0af"}, + {file = "pydantic_core-2.18.3-cp39-none-win32.whl", hash = "sha256:f9c08cabff68704a1b4667d33f534d544b8a07b8e5d039c37067fceb18789e78"}, + {file = "pydantic_core-2.18.3-cp39-none-win_amd64.whl", hash = 
"sha256:4afa5f5973e8572b5c0dcb4e2d4fda7890e7cd63329bd5cc3263a25c92ef0026"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:77319771a026f7c7d29c6ebc623de889e9563b7087911b46fd06c044a12aa5e9"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:df11fa992e9f576473038510d66dd305bcd51d7dd508c163a8c8fe148454e059"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d531076bdfb65af593326ffd567e6ab3da145020dafb9187a1d131064a55f97c"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d33ce258e4e6e6038f2b9e8b8a631d17d017567db43483314993b3ca345dcbbb"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1f9cd7f5635b719939019be9bda47ecb56e165e51dd26c9a217a433e3d0d59a9"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cd4a032bb65cc132cae1fe3e52877daecc2097965cd3914e44fbd12b00dae7c5"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f2718430098bcdf60402136c845e4126a189959d103900ebabb6774a5d9fdb"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c0037a92cf0c580ed14e10953cdd26528e8796307bb8bb312dc65f71547df04d"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b95a0972fac2b1ff3c94629fc9081b16371dad870959f1408cc33b2f78ad347a"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a62e437d687cc148381bdd5f51e3e81f5b20a735c55f690c5be94e05da2b0d5c"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b367a73a414bbb08507da102dc2cde0fa7afe57d09b3240ce82a16d608a7679c"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ecce4b2360aa3f008da3327d652e74a0e743908eac306198b47e1c58b03dd2b"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd4435b8d83f0c9561a2a9585b1de78f1abb17cb0cef5f39bf6a4b47d19bafe3"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:616221a6d473c5b9aa83fa8982745441f6a4a62a66436be9445c65f241b86c94"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7e6382ce89a92bc1d0c0c5edd51e931432202b9080dc921d8d003e616402efd1"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff58f379345603d940e461eae474b6bbb6dab66ed9a851ecd3cb3709bf4dcf6a"}, + {file = "pydantic_core-2.18.3.tar.gz", hash = "sha256:432e999088d85c8f36b9a3f769a8e2b57aabd817bbb729a90d1fe7f18f6f1f39"}, ] [package.dependencies] @@ -823,6 +902,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = 
"PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -859,13 +939,13 @@ files = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.3" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -977,6 +1057,17 @@ files = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + [[package]] name = "syrupy" version = "4.6.1" @@ -1156,13 +1247,13 @@ telegram = ["requests"] [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.0-py3-none-any.whl", hash = "sha256:b349c66bea9016ac22978d800cfff206d5f9816951f12a7d0ec5578b0a819594"}, + {file = "typing_extensions-4.12.0.tar.gz", hash = "sha256:8cbcdc8606ebcb0d95453ad7dc5065e6237b6aa230a31e81d0f440c30fed5fd8"}, ] [[package]] @@ -1199,40 +1290,43 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "watchdog" -version = "4.0.0" +version = "4.0.1" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, - 
{file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, - {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, - {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, - {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, - {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, - {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, - {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, - {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = 
"sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, + {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, + {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, + {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, + {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, ] [package.extras] @@ -1241,4 +1335,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "6e7c3f9239c1a6681051509ad0ca876e846095362c327a71c4a210286d7f3ffc" +content-hash = "8af2236a46bf24cbf18cfb15e7ef4967858e91764cc2f5bb260e59f225fe0047" diff --git a/libs/partners/ai21/pyproject.toml b/libs/partners/ai21/pyproject.toml index 0133cff0925d9..b7a1290c481b0 100644 --- a/libs/partners/ai21/pyproject.toml +++ b/libs/partners/ai21/pyproject.toml @@ -13,7 +13,7 @@ license = "MIT" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" langchain-core = ">=0.1.48,<0.3" -langchain-text-splitters = ">=0.0.1,<0.2" +langchain-text-splitters = "^0.2.0" ai21 = "^2.2.5" [tool.poetry.group.test] From a8098f5ddb50577aaf999491e1a4c40c037275e5 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Fri, 31 May 2024 12:10:22 -0700 Subject: [PATCH 19/54] anthropic[patch]: Release 0.1.15, fix sdk tools break (#22369) --- .../langchain_anthropic/chat_models.py | 13 +++--------- libs/partners/anthropic/poetry.lock | 21 +++++++++---------- libs/partners/anthropic/pyproject.toml | 4 ++-- .../tests/unit_tests/test_chat_models.py | 4 ++-- 4 files changed, 17 insertions(+), 25 deletions(-) diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index 42a9117ab7d1f..ac47322aca936 100644 --- a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -22,7 +22,7 @@ ) import anthropic -from langchain_core._api import beta, deprecated +from langchain_core._api import deprecated from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -522,10 +522,7 @@ def _generate( messages, stop=stop, run_manager=run_manager, **kwargs ) return generate_from_stream(stream_iter) - if _tools_in_params(params): - data = self._client.beta.tools.messages.create(**params) - else: - data = self._client.messages.create(**params) + data = self._client.messages.create(**params) return self._format_output(data, **kwargs) async def _agenerate( @@ -546,13 +543,9 @@ async def _agenerate( messages, stop=stop, run_manager=run_manager, **kwargs ) return await agenerate_from_stream(stream_iter) - if _tools_in_params(params): - data = await self._async_client.beta.tools.messages.create(**params) - else: - data = await self._async_client.messages.create(**params) + data = 
await self._async_client.messages.create(**params) return self._format_output(data, **kwargs) - @beta() def bind_tools( self, tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], diff --git a/libs/partners/anthropic/poetry.lock b/libs/partners/anthropic/poetry.lock index bfbd38aa42565..17cfdd1d97ce2 100644 --- a/libs/partners/anthropic/poetry.lock +++ b/libs/partners/anthropic/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "annotated-types" @@ -16,20 +16,19 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anthropic" -version = "0.26.1" +version = "0.28.0" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.7" files = [ - {file = "anthropic-0.26.1-py3-none-any.whl", hash = "sha256:2812b9b250b551ed8a1f0a7e6ae3f005654098994f45ebca5b5808bd154c9628"}, - {file = "anthropic-0.26.1.tar.gz", hash = "sha256:26680ff781a6f678a30a1dccd0743631e602b23a47719439ffdef5335fa167d8"}, + {file = "anthropic-0.28.0-py3-none-any.whl", hash = "sha256:2b620b21aee3d20c5d8005483c34df239d53ae895687113b26b8a36892a7e20f"}, ] [package.dependencies] anyio = ">=3.5.0,<5" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" -jiter = ">=0.1.0,<1" +jiter = ">=0.4.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tokenizers = ">=0.13.0" @@ -512,7 +511,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.2.2rc1" +version = "0.2.3" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -521,7 +520,7 @@ develop = true [package.dependencies] jsonpatch = "^1.33" -langsmith = "^0.1.0" +langsmith = "^0.1.65" packaging = "^23.2" pydantic = ">=1,<3" PyYAML = ">=5.3" @@ -553,13 +552,13 @@ url = "../../standard-tests" [[package]] name = "langsmith" -version = "0.1.63" +version = "0.1.67" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.63-py3-none-any.whl", hash = "sha256:7810afdf5e3f3b472fc581a29371fb96cd843dde2149e048d1b9610325159d1e"}, - {file = "langsmith-0.1.63.tar.gz", hash = "sha256:a609405b52f6f54df442a142cbf19ab38662d54e532f96028b4c546434d4afdf"}, + {file = "langsmith-0.1.67-py3-none-any.whl", hash = "sha256:7eb2e1c1b375925ff47700ed8071e10c15e942e9d1d634b4a449a9060364071a"}, + {file = "langsmith-0.1.67.tar.gz", hash = "sha256:149558669a2ac4f21471cd964e61072687bba23b7c1ccb51f190a8f59b595b39"}, ] [package.dependencies] @@ -1286,4 +1285,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "b7a5f1c41811ecfc4f87a5261e0d4627abcd79a070355faa60408a3e7a1c691c" +content-hash = "a88c10c902a287792de08135f1c17391a89c7363a30c8d55a185f0c90efc22ac" diff --git a/libs/partners/anthropic/pyproject.toml b/libs/partners/anthropic/pyproject.toml index 859536541da52..2bbb3fdd026c1 100644 --- a/libs/partners/anthropic/pyproject.toml +++ b/libs/partners/anthropic/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-anthropic" -version = "0.1.14rc2" +version = "0.1.15" description = "An integration package connecting AnthropicMessages and LangChain" authors = [] readme = "README.md" @@ -13,7 +13,7 @@ license = "MIT" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" langchain-core = { version = ">=0.2.2rc1,<0.3", allow-prereleases = true } -anthropic = ">=0.26.0,<1" +anthropic = ">=0.28.0,<1" defusedxml = { version = "^0.7.1", optional = true } [tool.poetry.group.test] diff --git a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py index 3c5c5b2691c01..09ae57b542564 100644 --- a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py +++ b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py @@ -4,7 +4,7 @@ from typing import Any, Callable, Dict, Literal, Type, cast import pytest -from anthropic.types import ContentBlock, Message, Usage +from anthropic.types import Message, TextBlock, Usage from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage from langchain_core.outputs import ChatGeneration, ChatResult from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr @@ -79,7 +79,7 @@ def test_anthropic_initialization() -> None: def test__format_output() -> None: anthropic_msg = Message( id="foo", - content=[ContentBlock(type="text", text="bar")], + content=[TextBlock(type="text", text="bar")], model="baz", role="assistant", stop_reason=None, From 4d82cea71fc6e1dd3bebaf64db3c47ddb3762157 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Fri, 31 May 2024 12:37:06 -0700 Subject: [PATCH 20/54] docs: fix llm caches redirect (#22371) --- docs/vercel.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/vercel.json b/docs/vercel.json index cda463e41185f..4338ce40bb952 100644 --- a/docs/vercel.json +++ b/docs/vercel.json @@ -14,8 +14,8 @@ ], "redirects": [ { - "source": "/docs/integrations/llms/llm_caching(/?)", - "destination": "docs/integration/llm_caching/" + "source": "/v0.2/docs/integrations/llms/llm_caching(/?)", + "destination": "/v0.2/docs/integrations/llm_caching/" }, { "source": "/docs/how_to/tool_calls_multi_modal(/?)", From 8a57102918f158fda5325bd9c655d2bc5162e971 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Fri, 31 May 2024 16:37:05 -0700 Subject: [PATCH 21/54] docs[patch]: Fix typo 
(#22377) --- docs/docs/concepts.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/concepts.mdx b/docs/docs/concepts.mdx index b0ce1c8f19769..9a7693b867f58 100644 --- a/docs/docs/concepts.mdx +++ b/docs/docs/concepts.mdx @@ -38,7 +38,7 @@ All dependencies in this package are optional to keep the package as lightweight `langgraph` is an extension of `langchain` aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. -LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for constructing more contr +LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for composing custom flows. ### [`langserve`](/docs/langserve) From 16cce76a684e0c523da3a87ab414b725c085c666 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Sat, 1 Jun 2024 07:36:45 -0700 Subject: [PATCH 22/54] =?UTF-8?q?=F0=9F=91=A5=20Update=20LangChain=20peopl?= =?UTF-8?q?e=20data=20(#22388)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 👥 Update LangChain people data Co-authored-by: github-actions --- docs/data/people.yml | 1187 ++++++++++++++++++++++-------------------- 1 file changed, 621 insertions(+), 566 deletions(-) diff --git a/docs/data/people.yml b/docs/data/people.yml index 0889f978c0491..fa2e74f00386b 100644 --- a/docs/data/people.yml +++ b/docs/data/people.yml @@ -1,270 +1,250 @@ maintainers: +- login: efriis + count: 569 + avatarUrl: https://avatars.githubusercontent.com/u/9557659?u=44391f1f5f5e3a72acc9772ca30f28bfdcc25fac&v=4 + twitterUsername: null + url: https://github.com/efriis +- login: ccurme + count: 124 + avatarUrl: https://avatars.githubusercontent.com/u/26529506?u=528b1df1ba3ba4f21e3e1fb74b12766e5b04c487&v=4 + twitterUsername: null + url: https://github.com/ccurme - login: agola11 count: 77 avatarUrl: https://avatars.githubusercontent.com/u/9536492?u=820809d60f4a720a4e1f507a1bf866dfb5f86614&v=4 twitterUsername: null url: https://github.com/agola11 -- login: hinthornw - count: 273 - avatarUrl: https://avatars.githubusercontent.com/u/13333726?u=82ebf1e0eb0663ebd49ba66f67a43f51bbf11442&v=4 +- login: baskaryan + count: 930 + avatarUrl: https://avatars.githubusercontent.com/u/22008038?u=8e3d6bbd0adbe02f0bd259c44f2ddb8612f90d88&v=4 twitterUsername: null - url: https://github.com/hinthornw + url: https://github.com/baskaryan +- login: vbarda + count: 3 + avatarUrl: https://avatars.githubusercontent.com/u/19161700?u=e76bcd472b51c9f07befd2654783d0a381f49005&v=4 + twitterUsername: vadymbarda + url: https://github.com/vbarda - login: eyurtsev - count: 329 + count: 406 avatarUrl: https://avatars.githubusercontent.com/u/3205522?v=4 twitterUsername: veryboldbagel url: https://github.com/eyurtsev -- login: efriis - count: 462 - avatarUrl: https://avatars.githubusercontent.com/u/9557659?u=44391f1f5f5e3a72acc9772ca30f28bfdcc25fac&v=4 - twitterUsername: null - url: https://github.com/efriis +- login: nfcampos + count: 216 + avatarUrl: https://avatars.githubusercontent.com/u/56902?u=fdb30e802c68bc338dd9c0820f713e4fdac75db7&v=4 + twitterUsername: nfcampos + url: https://github.com/nfcampos - login: rlancemartin count: 124 avatarUrl: https://avatars.githubusercontent.com/u/122662504?u=e88c472fba16a74332c550cc9707fd015738a0da&v=4 twitterUsername: RLanceMartin url: https://github.com/rlancemartin -- login: baskaryan - count: 877 - avatarUrl: 
https://avatars.githubusercontent.com/u/22008038?u=8e3d6bbd0adbe02f0bd259c44f2ddb8612f90d88&v=4 +- login: hinthornw + count: 279 + avatarUrl: https://avatars.githubusercontent.com/u/13333726?u=82ebf1e0eb0663ebd49ba66f67a43f51bbf11442&v=4 twitterUsername: null - url: https://github.com/baskaryan -- login: nfcampos - count: 207 - avatarUrl: https://avatars.githubusercontent.com/u/56902?u=fdb30e802c68bc338dd9c0820f713e4fdac75db7&v=4 - twitterUsername: nfcampos - url: https://github.com/nfcampos + url: https://github.com/hinthornw - login: hwchase17 - count: 1236 + count: 1242 avatarUrl: https://avatars.githubusercontent.com/u/11986836?u=f4c4f21a82b2af6c9f91e1f1d99ea40062f7a101&v=4 twitterUsername: null url: https://github.com/hwchase17 top_recent_contributors: - login: leo-gan - count: 54.71626755195469 + count: 53.651739473156404 avatarUrl: https://avatars.githubusercontent.com/u/2256422?v=4 twitterUsername: null url: https://github.com/leo-gan -- login: ccurme - count: 38.278296661389966 - avatarUrl: https://avatars.githubusercontent.com/u/26529506?u=528b1df1ba3ba4f21e3e1fb74b12766e5b04c487&v=4 - twitterUsername: null - url: https://github.com/ccurme - login: cbornet - count: 36.443879136724554 + count: 26.458403097368777 avatarUrl: https://avatars.githubusercontent.com/u/11633333?u=e13817e11b3fb8c3d209d747c77a0f0742d11138&v=4 twitterUsername: null url: https://github.com/cbornet - login: tomasonjo - count: 19.880992449559265 + count: 19.70641132229249 avatarUrl: https://avatars.githubusercontent.com/u/19948365?v=4 twitterUsername: tb_tomaz url: https://github.com/tomasonjo -- login: liugddx - count: 11.876082540231806 - avatarUrl: https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4 - twitterUsername: null - url: https://github.com/liugddx - login: sepiatone - count: 10.09112072266705 + count: 9.509882757587151 avatarUrl: https://avatars.githubusercontent.com/u/19181718?u=79a9013dea28a7fa654431cd7e89b08dc76434dd&v=4 twitterUsername: null url: https://github.com/sepiatone -- login: lkuligin - count: 7.190159916376656 - avatarUrl: https://avatars.githubusercontent.com/u/11026406?v=4 +- login: liugddx + count: 8.653156240628679 + avatarUrl: https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4 twitterUsername: null - url: https://github.com/lkuligin + url: https://github.com/liugddx - login: maxjakob - count: 6.6805463301528745 + count: 5.680952237883977 avatarUrl: https://avatars.githubusercontent.com/u/851520?u=21c6d8ef697fd32a8020d81269e155a24cb081ac&v=4 twitterUsername: null url: https://github.com/maxjakob -- login: Jibola - count: 6.225098172128444 - avatarUrl: https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4 +- login: lkuligin + count: 5.577921432045148 + avatarUrl: https://avatars.githubusercontent.com/u/11026406?v=4 twitterUsername: null - url: https://github.com/Jibola -- login: averikitsch - count: 5.942848863104591 - avatarUrl: https://avatars.githubusercontent.com/u/6519888?u=fe0b0f093e8683bdac4f205b237d2e48d7c755d4&v=4 - twitterUsername: averikitsch - url: https://github.com/averikitsch + url: https://github.com/lkuligin - login: harry-cohere count: 5.2093225809800625 avatarUrl: https://avatars.githubusercontent.com/u/127103098?v=4 twitterUsername: null url: https://github.com/harry-cohere -- login: MateuszOssGit - count: 4.6750390537043005 - avatarUrl: https://avatars.githubusercontent.com/u/139469471?v=4 - twitterUsername: null - url: 
https://github.com/MateuszOssGit - login: OpenVINO-dev-contest - count: 4.3576319565039485 + count: 4.723903366253619 avatarUrl: https://avatars.githubusercontent.com/u/91237924?u=76e7131a2ebbe9ef35061620286d6d06258e7a61&v=4 twitterUsername: null url: https://github.com/OpenVINO-dev-contest +- login: jhpiedrahitao + count: 4.438284892172325 + avatarUrl: https://avatars.githubusercontent.com/u/14959173?u=87fcb0013440f648fb263168583695258b6dbf1c&v=4 + twitterUsername: null + url: https://github.com/jhpiedrahitao +- login: Jibola + count: 4.120043924181168 + avatarUrl: https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4 + twitterUsername: null + url: https://github.com/Jibola +- login: Adi8885 + count: 3.9793626444660273 + avatarUrl: https://avatars.githubusercontent.com/u/31382824?u=9ce2d58c7c1c9f9a225f1929633b77c24d607d5b&v=4 + twitterUsername: null + url: https://github.com/Adi8885 - login: billytrend-cohere count: 3.8184721208803607 avatarUrl: https://avatars.githubusercontent.com/u/144115527?u=b881a61482b25b543dacd217d18fc5b98c38e7a3&v=4 twitterUsername: null url: https://github.com/billytrend-cohere -- login: charliermarsh - count: 3.357395278037737 - avatarUrl: https://avatars.githubusercontent.com/u/1309177?u=6328c998d93a48eba87c6b039783b8a7644c62c3&v=4 - twitterUsername: charliermarsh - url: https://github.com/charliermarsh -- login: mackong - count: 3.2827236314311636 - avatarUrl: https://avatars.githubusercontent.com/u/2212586?v=4 +- login: MateuszOssGit + count: 3.6831427197363142 + avatarUrl: https://avatars.githubusercontent.com/u/139469471?v=4 twitterUsername: null - url: https://github.com/mackong + url: https://github.com/MateuszOssGit - login: Josephasafg - count: 3.0984880810149624 + count: 3.639619673205832 avatarUrl: https://avatars.githubusercontent.com/u/39553475?u=919fcd626077055164ce97bf6cde0a47c54507de&v=4 twitterUsername: null url: https://github.com/Josephasafg -- login: shane-huang - count: 2.9607559891350683 - avatarUrl: https://avatars.githubusercontent.com/u/1995599?v=4 - twitterUsername: null - url: https://github.com/shane-huang - login: rahul-trip - count: 2.95441257044442 + count: 3.4770653885517837 avatarUrl: https://avatars.githubusercontent.com/u/9318457?u=3dbf765a07fee48e3dd171851b8417c002a41f49&v=4 twitterUsername: null url: https://github.com/rahul-trip -- login: virattt - count: 2.953568130853449 - avatarUrl: https://avatars.githubusercontent.com/u/901795?u=c8cd7391f649623258b5f5ea848550df9407107b&v=4 - twitterUsername: virattt - url: https://github.com/virattt -- login: williamdevena - count: 2.7004779782531836 - avatarUrl: https://avatars.githubusercontent.com/u/60664495?u=ace0011a868848b48cdf9c199110dc8e5be5f433&v=4 - twitterUsername: null - url: https://github.com/williamdevena -- login: k8si - count: 2.6941270848048298 - avatarUrl: https://avatars.githubusercontent.com/u/3207674?v=4 +- login: junkeon + count: 3.2330467404238674 + avatarUrl: https://avatars.githubusercontent.com/u/35945268?u=4379ecd5062eea0f6449c520ddde5fe1e3724500&v=4 twitterUsername: null - url: https://github.com/k8si + url: https://github.com/junkeon +- login: maximeperrindev + count: 3.078790186143875 + avatarUrl: https://avatars.githubusercontent.com/u/63123596?u=ae18d496d5a6ced90d57c147f102f7c5ecf8e63f&v=4 + twitterUsername: maximeperrin_ + url: https://github.com/maximeperrindev - login: keenborder786 - count: 2.681985218393363 + count: 2.812582272135523 avatarUrl: 
https://avatars.githubusercontent.com/u/45242107?u=bf122f1371d59c3ba69a87225255fbd00e894404&v=4 twitterUsername: null url: https://github.com/keenborder786 +- login: sfvaroglu + count: 2.810147933590226 + avatarUrl: https://avatars.githubusercontent.com/u/22965499?u=883e3e34158ff6beadadef0178f83d1200be1acf&v=4 + twitterUsername: null + url: https://github.com/sfvaroglu +- login: zc277584121 + count: 2.7061289569388496 + avatarUrl: https://avatars.githubusercontent.com/u/17022025?u=ceee62d53f1c06bf9a014096b651ca0c42cfea3b&v=4 + twitterUsername: null + url: https://github.com/zc277584121 - login: alexsherstinsky count: 2.6800037077948278 avatarUrl: https://avatars.githubusercontent.com/u/339166?v=4 twitterUsername: null url: https://github.com/alexsherstinsky +- login: Nutlope + count: 2.654167273889662 + avatarUrl: https://avatars.githubusercontent.com/u/63742054?u=befe4ae74b906698be965bad482d0e02fc7707ab&v=4 + twitterUsername: nutlope + url: https://github.com/Nutlope - login: 3coins count: 2.647976526019808 avatarUrl: https://avatars.githubusercontent.com/u/289369?u=80655eb5f9a4d03bf1a526b07a67adc6eacccc6b&v=4 twitterUsername: pjain7 url: https://github.com/3coins -- login: scottnath - count: 2.585132044719414 - avatarUrl: https://avatars.githubusercontent.com/u/216931?u=a8ca27d75e1765295ea9d23c191d8db834951066&v=4 - twitterUsername: null - url: https://github.com/scottnath -- login: gradenr - count: 2.561001940475218 - avatarUrl: https://avatars.githubusercontent.com/u/1074525?v=4 - twitterUsername: null - url: https://github.com/gradenr -- login: volodymyr-memsql - count: 2.516727626028309 - avatarUrl: https://avatars.githubusercontent.com/u/57520563?v=4 - twitterUsername: null - url: https://github.com/volodymyr-memsql -- login: Adi8885 - count: 2.4407451161182507 - avatarUrl: https://avatars.githubusercontent.com/u/31382824?u=9ce2d58c7c1c9f9a225f1929633b77c24d607d5b&v=4 - twitterUsername: null - url: https://github.com/Adi8885 -- login: bhalder - count: 2.406797424794377 - avatarUrl: https://avatars.githubusercontent.com/u/4036753?u=c6732c896b41c1ecec917bfae38aa6900585c632&v=4 - twitterUsername: null - url: https://github.com/bhalder -- login: mattgotteiner - count: 2.3942055369454875 - avatarUrl: https://avatars.githubusercontent.com/u/57731498?u=fec622b37ca3dc04125144116ad5165f37f85823&v=4 - twitterUsername: null - url: https://github.com/mattgotteiner -- login: junkeon - count: 2.3650908830650637 - avatarUrl: https://avatars.githubusercontent.com/u/35945268?u=4379ecd5062eea0f6449c520ddde5fe1e3724500&v=4 +- login: paul-paliychuk + count: 2.642403561566109 + avatarUrl: https://avatars.githubusercontent.com/u/26054637?u=edd1e4f54e91b549f2edb525d43210f4f04d7367&v=4 twitterUsername: null - url: https://github.com/junkeon -- login: rodrigo-f-nogueira - count: 2.352901082077336 - avatarUrl: https://avatars.githubusercontent.com/u/121117945?v=4 + url: https://github.com/paul-paliychuk +- login: dglogo + count: 2.570516492884163 + avatarUrl: https://avatars.githubusercontent.com/u/167348611?v=4 twitterUsername: null - url: https://github.com/rodrigo-f-nogueira -- login: benjibc - count: 2.35166994719905 - avatarUrl: https://avatars.githubusercontent.com/u/1585539?u=654a21985c875f78a20eda7e4884e8d64de86fba&v=4 + url: https://github.com/dglogo +- login: rohanaggarwal7997 + count: 2.5461756978498595 + avatarUrl: https://avatars.githubusercontent.com/u/24482442?u=d6095b9533599b26d16fe6273d8f513206976a62&v=4 twitterUsername: null - url: https://github.com/benjibc -- login: am-kinetica - count: 
2.2163550294910146 - avatarUrl: https://avatars.githubusercontent.com/u/85610855?v=4 + url: https://github.com/rohanaggarwal7997 +- login: charliermarsh + count: 2.5281105277003553 + avatarUrl: https://avatars.githubusercontent.com/u/1309177?u=6328c998d93a48eba87c6b039783b8a7644c62c3&v=4 + twitterUsername: charliermarsh + url: https://github.com/charliermarsh +- login: pcliupc + count: 2.4647930363822415 + avatarUrl: https://avatars.githubusercontent.com/u/5069448?u=6b0ba426b68777f4935399013b7c2c112635c0df&v=4 twitterUsername: null - url: https://github.com/am-kinetica -- login: VKudlay - count: 2.158674899165992 - avatarUrl: https://avatars.githubusercontent.com/u/32310964?u=56cd9386d632a330b8ecb180d7271b3d043c93a3&v=4 + url: https://github.com/pcliupc +- login: averikitsch + count: 2.4267848111627255 + avatarUrl: https://avatars.githubusercontent.com/u/6519888?u=fe0b0f093e8683bdac4f205b237d2e48d7c755d4&v=4 + twitterUsername: averikitsch + url: https://github.com/averikitsch +- login: Dominastorm + count: 2.3639219366492097 + avatarUrl: https://avatars.githubusercontent.com/u/43818888?u=0c01fad081c0abd23d2d49ea4496890ffbc22325&v=4 twitterUsername: null - url: https://github.com/VKudlay -- login: mspronesti - count: 2.122998247755255 - avatarUrl: https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4 + url: https://github.com/Dominastorm +- login: mkorpela + count: 2.2351512926636232 + avatarUrl: https://avatars.githubusercontent.com/u/136885?u=9a42f56ad8055a03a5ae8a0272e66d1ae4ac083c&v=4 twitterUsername: null - url: https://github.com/mspronesti -- login: rigazilla - count: 2.103159619149647 - avatarUrl: https://avatars.githubusercontent.com/u/7080882?u=f985127fd58fa96b886d591ce104f29f3bd7f81f&v=4 + url: https://github.com/mkorpela +- login: Anush008 + count: 2.1596091120323853 + avatarUrl: https://avatars.githubusercontent.com/u/46051506?u=026f5f140e8b7ba4744bf971f9ebdea9ebab67ca&v=4 twitterUsername: null - url: https://github.com/rigazilla + url: https://github.com/Anush008 +- login: Anindyadeep + count: 2.151424980570444 + avatarUrl: https://avatars.githubusercontent.com/u/58508471?u=74423e863298863bf5c7dd7d1bff0aa106a9cc75&v=4 + twitterUsername: AnindyadeepS + url: https://github.com/Anindyadeep +- login: tazarov + count: 2.133949448314687 + avatarUrl: https://avatars.githubusercontent.com/u/1157440?u=2f81a28298c1172e732898a1f8e800342434801d&v=4 + twitterUsername: t_azarov + url: https://github.com/tazarov - login: Tokkiu count: 2.082774991923363 avatarUrl: https://avatars.githubusercontent.com/u/13414571?u=c5490c987e1bcf8d47d7ecc4dca3812a21713f3a&v=4 twitterUsername: null url: https://github.com/Tokkiu -- login: sfvaroglu - count: 1.977334862056892 - avatarUrl: https://avatars.githubusercontent.com/u/22965499?u=883e3e34158ff6beadadef0178f83d1200be1acf&v=4 - twitterUsername: null - url: https://github.com/sfvaroglu -- login: BeatrixCohere - count: 1.8230586961628419 - avatarUrl: https://avatars.githubusercontent.com/u/128378696?v=4 - twitterUsername: null - url: https://github.com/BeatrixCohere -- login: 2jimoo - count: 1.7713839158851945 - avatarUrl: https://avatars.githubusercontent.com/u/107998986?u=70520f8a4ad962c0fc2706649ec401b274681927&v=4 - twitterUsername: null - url: https://github.com/2jimoo -- login: h0rv - count: 1.7513242553691613 - avatarUrl: https://avatars.githubusercontent.com/u/45851384?u=bd70e86b6954fa1663bb5245b585d13d92252f1b&v=4 +- login: shane-huang + count: 2.019357123350569 + avatarUrl: 
https://avatars.githubusercontent.com/u/1995599?v=4 twitterUsername: null - url: https://github.com/h0rv -- login: Dominastorm - count: 1.6862359862359864 - avatarUrl: https://avatars.githubusercontent.com/u/43818888?u=0c01fad081c0abd23d2d49ea4496890ffbc22325&v=4 + url: https://github.com/shane-huang +- login: JuHyung-Son + count: 1.8196656246301637 + avatarUrl: https://avatars.githubusercontent.com/u/20140126?u=d1b9220a46efe488dc3db52e5d92774d85d38dfc&v=4 twitterUsername: null - url: https://github.com/Dominastorm -- login: jackwotherspoon - count: 1.6853550616256832 - avatarUrl: https://avatars.githubusercontent.com/u/32113413?u=069f880e88a96db6ad955e3cc9fc7f9dfcf2beef&v=4 + url: https://github.com/JuHyung-Son +- login: junefish + count: 1.7761113105527058 + avatarUrl: https://avatars.githubusercontent.com/u/19216250?u=85921f52a4be080e3529d87d3e3e75bf83847b24&v=4 twitterUsername: null - url: https://github.com/jackwotherspoon + url: https://github.com/junefish - login: CahidArda count: 1.6406875476333178 avatarUrl: https://avatars.githubusercontent.com/u/57228345?v=4 @@ -280,131 +260,146 @@ top_recent_contributors: avatarUrl: https://avatars.githubusercontent.com/u/2700370?u=421c7cd75c8f7f1a28e6f6c19a5d587a6d478ed0&v=4 twitterUsername: null url: https://github.com/chosh0615 +- login: mspronesti + count: 1.6105999962013011 + avatarUrl: https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4 + twitterUsername: null + url: https://github.com/mspronesti +- login: Jofthomas + count: 1.6045648644592694 + avatarUrl: https://avatars.githubusercontent.com/u/52778543?u=504d8eb452ab2103a86ab469dd793eab49c8a437&v=4 + twitterUsername: null + url: https://github.com/Jofthomas - login: marlenezw count: 1.6044510631256723 avatarUrl: https://avatars.githubusercontent.com/u/57748216?u=e2029e1262ee9c9d9f5825b2d28952758a628f28&v=4 twitterUsername: marlene_zw url: https://github.com/marlenezw -- login: morganda - count: 1.6007772184242772 - avatarUrl: https://avatars.githubusercontent.com/u/1540803?v=4 - twitterUsername: _morgan_adams_ - url: https://github.com/morganda - login: angeligareta count: 1.5915893521715812 avatarUrl: https://avatars.githubusercontent.com/u/32129522?u=a6fc430ee58b3ebe776dec5fce16b686f81c8e12&v=4 twitterUsername: null url: https://github.com/angeligareta -- login: mmajewsk - count: 1.5819994670005337 - avatarUrl: https://avatars.githubusercontent.com/u/5279578?u=ce483437f50a425eab4b1f6f635ac49159f31576&v=4 - twitterUsername: mwmajewsk - url: https://github.com/mmajewsk -- login: pcliupc - count: 1.5716324738063867 - avatarUrl: https://avatars.githubusercontent.com/u/5069448?u=6b0ba426b68777f4935399013b7c2c112635c0df&v=4 +- login: quchuyuan + count: 1.5446707072756323 + avatarUrl: https://avatars.githubusercontent.com/u/40655746?u=3c10115601fd5b032c3f274e79fd68dc5bb03921&v=4 twitterUsername: null - url: https://github.com/pcliupc -- login: IANTHEREAL - count: 1.5347761232195172 - avatarUrl: https://avatars.githubusercontent.com/u/10701973?u=866bdbf25a3759626815099ce480e2ffcff520fb&v=4 + url: https://github.com/quchuyuan +- login: lalanikarim + count: 1.5436999621923326 + avatarUrl: https://avatars.githubusercontent.com/u/1296705?v=4 twitterUsername: null - url: https://github.com/IANTHEREAL -- login: killind-dev + url: https://github.com/lalanikarim +- login: anthonychu + count: 1.4923827481967016 + avatarUrl: https://avatars.githubusercontent.com/u/3982077?u=8bbebac42cb84a25c629f83f212b2d099ffa3964&v=4 + twitterUsername: nthonyChu + url: 
https://github.com/anthonychu +- login: Raj725 + count: 1.4820793241845873 + avatarUrl: https://avatars.githubusercontent.com/u/17705063?v=4 + twitterUsername: Raj__725 + url: https://github.com/Raj725 +- login: akiradev0x count: 1.48131190431695 avatarUrl: https://avatars.githubusercontent.com/u/61808204?v=4 twitterUsername: null - url: https://github.com/killind-dev + url: https://github.com/akiradev0x +- login: fzowl + count: 1.4684274696492066 + avatarUrl: https://avatars.githubusercontent.com/u/160063452?v=4 + twitterUsername: null + url: https://github.com/fzowl - login: cwlacewe count: 1.4440943043884222 avatarUrl: https://avatars.githubusercontent.com/u/33070862?v=4 twitterUsername: null url: https://github.com/cwlacewe -- login: chadj2 - count: 1.3955335745725006 - avatarUrl: https://avatars.githubusercontent.com/u/3045965?u=3d3c34259d50723955dd92d1de5be21236989356&v=4 - twitterUsername: chad_juliano - url: https://github.com/chadj2 -- login: kooyunmo - count: 1.3933145213805078 - avatarUrl: https://avatars.githubusercontent.com/u/17061663?u=bee0295d999ddb902a98872fac6009bb88950132&v=4 - twitterUsername: null - url: https://github.com/kooyunmo - login: donbr count: 1.37621540762902 avatarUrl: https://avatars.githubusercontent.com/u/7340008?u=9473b1cdea8b9929771b32f14a28ad702237900c&v=4 twitterUsername: null url: https://github.com/donbr -- login: Anindyadeep - count: 1.3646328096455973 - avatarUrl: https://avatars.githubusercontent.com/u/58508471?u=74423e863298863bf5c7dd7d1bff0aa106a9cc75&v=4 - twitterUsername: AnindyadeepS - url: https://github.com/Anindyadeep +- login: jdogmcsteezy + count: 1.3757805389158182 + avatarUrl: https://avatars.githubusercontent.com/u/22361806?u=c6b2eec689b859aeb182654e5e67936886d860bb&v=4 + twitterUsername: null + url: https://github.com/jdogmcsteezy - login: pjb157 count: 1.3639301874595993 avatarUrl: https://avatars.githubusercontent.com/u/84070455?v=4 twitterUsername: null url: https://github.com/pjb157 +- login: mishushakov + count: 1.3510511299577161 + avatarUrl: https://avatars.githubusercontent.com/u/10400064?u=581d97314df325c15ec221f64834003d3bba5cc1&v=4 + twitterUsername: null + url: https://github.com/mishushakov - login: jnis23 count: 1.3447433498584394 avatarUrl: https://avatars.githubusercontent.com/u/22690160?u=50f2d8aa99bd7b12c01df29e8ffe519ed1cff1d5&v=4 twitterUsername: null url: https://github.com/jnis23 -- login: fzowl - count: 1.3436805411748465 - avatarUrl: https://avatars.githubusercontent.com/u/160063452?v=4 - twitterUsername: null - url: https://github.com/fzowl - login: cgalo5758 count: 1.3421410050623535 avatarUrl: https://avatars.githubusercontent.com/u/36752715?u=5137581b52bcbb8466b394f3ba40f97f9e273f52&v=4 twitterUsername: null url: https://github.com/cgalo5758 -- login: akashAD98 - count: 1.321482920446413 - avatarUrl: https://avatars.githubusercontent.com/u/62583018?u=965202caa3cfc09516af257f0affdf4aae7cdd43&v=4 +- login: chadj2 + count: 1.3281596327146796 + avatarUrl: https://avatars.githubusercontent.com/u/3045965?u=3d3c34259d50723955dd92d1de5be21236989356&v=4 + twitterUsername: chad_juliano + url: https://github.com/chadj2 +- login: pprados + count: 1.3156103749527968 + avatarUrl: https://avatars.githubusercontent.com/u/204694?u=c42de41cff108d35269dd2e8fac8977f1f4e471d&v=4 twitterUsername: null - url: https://github.com/akashAD98 + url: https://github.com/pprados - login: pmcfadin count: 1.307377864874382 avatarUrl: https://avatars.githubusercontent.com/u/413669?u=25b5563194493db00c227a98e23f460adb13c9ea&v=4 
twitterUsername: PatrickMcFadin url: https://github.com/pmcfadin -- login: lvliang-intel - count: 1.2940236263457956 - avatarUrl: https://avatars.githubusercontent.com/u/104267837?u=762d6b00291c68379d66260d7b644942e3bab891&v=4 +- login: rodrigo-f-nogueira + count: 1.301791813986936 + avatarUrl: https://avatars.githubusercontent.com/u/121117945?v=4 twitterUsername: null - url: https://github.com/lvliang-intel -- login: aymeric-roucher - count: 1.2639370807309738 - avatarUrl: https://avatars.githubusercontent.com/u/69208727?u=132c8ca18143866b79253a6fcbc10f58984f61ab&v=4 - twitterUsername: AymericRoucher - url: https://github.com/aymeric-roucher + url: https://github.com/rodrigo-f-nogueira +- login: robcaulk + count: 1.2779301131969345 + avatarUrl: https://avatars.githubusercontent.com/u/20807672?u=f2efe9788ce26442bb3319da1a56081d64c359e5&v=4 + twitterUsername: null + url: https://github.com/robcaulk +- login: prrao87 + count: 1.2737434720745342 + avatarUrl: https://avatars.githubusercontent.com/u/35005448?u=4b6efd3d2dcdc2acde843cff4183b59087f35a9b&v=4 + twitterUsername: tech_optimist + url: https://github.com/prrao87 - login: miri-bar count: 1.2631796812631206 avatarUrl: https://avatars.githubusercontent.com/u/160584887?v=4 twitterUsername: null url: https://github.com/miri-bar -- login: jhpiedrahitao - count: 1.26173294502719 - avatarUrl: https://avatars.githubusercontent.com/u/14959173?u=87fcb0013440f648fb263168583695258b6dbf1c&v=4 +- login: maang-h + count: 1.261154585526509 + avatarUrl: https://avatars.githubusercontent.com/u/55082429?v=4 twitterUsername: null - url: https://github.com/jhpiedrahitao -- login: ruoccofabrizio - count: 1.2612343572241183 - avatarUrl: https://avatars.githubusercontent.com/u/22171838?u=a7c4ea3fcebeafc5e9857727974bf2a3362dafe4&v=4 + url: https://github.com/maang-h +- login: markcusack + count: 1.256110556110556 + avatarUrl: https://avatars.githubusercontent.com/u/6406557?v=4 twitterUsername: null - url: https://github.com/ruoccofabrizio + url: https://github.com/markcusack - login: Sukitly count: 1.2527418643320098 avatarUrl: https://avatars.githubusercontent.com/u/54905519?u=9818cccb258351fd0abec07b4acfb414a0383823&v=4 twitterUsername: null url: https://github.com/Sukitly -- login: Blaizzy - count: 1.2493534776099087 - avatarUrl: https://avatars.githubusercontent.com/u/23445657?u=84dda94e9330c5538ea94099b5cae699c88586f8&v=4 - twitterUsername: Prince_Canuma - url: https://github.com/Blaizzy +- login: raghavdixit99 + count: 1.2461408510188998 + avatarUrl: https://avatars.githubusercontent.com/u/34462078?u=20243a60ac608142887c14251502c2a975614ba3&v=4 + twitterUsername: null + url: https://github.com/raghavdixit99 - login: jeffkit count: 1.2371919380470702 avatarUrl: https://avatars.githubusercontent.com/u/252377?v=4 @@ -415,86 +410,51 @@ top_recent_contributors: avatarUrl: https://avatars.githubusercontent.com/u/158216624?v=4 twitterUsername: null url: https://github.com/xsai9101 -- login: CogniJT - count: 1.2241630276564774 - avatarUrl: https://avatars.githubusercontent.com/u/131272471?v=4 +- login: Dobiichi-Origami + count: 1.2254803543624495 + avatarUrl: https://avatars.githubusercontent.com/u/56953648?v=4 twitterUsername: null - url: https://github.com/CogniJT -- login: ivyas21 - count: 1.2240107573205916 - avatarUrl: https://avatars.githubusercontent.com/u/87355704?u=e98091da04c6bfe9af8d982938556832f03fb1fb&v=4 + url: https://github.com/Dobiichi-Origami +- login: bhalder + count: 1.222365910925233 + avatarUrl: 
https://avatars.githubusercontent.com/u/4036753?u=c6732c896b41c1ecec917bfae38aa6900585c632&v=4 twitterUsername: null - url: https://github.com/ivyas21 + url: https://github.com/bhalder - login: nithishr count: 1.2059543552080865 avatarUrl: https://avatars.githubusercontent.com/u/12782505?u=a3f1c6e7e68b96bb7be08ecd25f74f2396394597&v=4 twitterUsername: nithishr url: https://github.com/nithishr -- login: paul-paliychuk - count: 1.2023893847098268 - avatarUrl: https://avatars.githubusercontent.com/u/26054637?u=5518e02a40c327a943bf45ff53dcaa9477a8df19&v=4 - twitterUsername: null - url: https://github.com/paul-paliychuk - login: hulitaitai count: 1.1976439549085809 avatarUrl: https://avatars.githubusercontent.com/u/146365078?v=4 twitterUsername: null url: https://github.com/hulitaitai -- login: kylehh - count: 1.1874681298443135 - avatarUrl: https://avatars.githubusercontent.com/u/24217337?u=09d0e274f382e264ef578e93b547fb55a5b179fe&v=4 - twitterUsername: null - url: https://github.com/kylehh -- login: samkhano1 - count: 1.1862665585293322 - avatarUrl: https://avatars.githubusercontent.com/u/152659506?v=4 - twitterUsername: null - url: https://github.com/samkhano1 -- login: thehapyone - count: 1.182513288422728 - avatarUrl: https://avatars.githubusercontent.com/u/8368470?u=1b7aebda11db89d56b90ff89f9b108e3cd8bffe5&v=4 - twitterUsername: thehapyone - url: https://github.com/thehapyone - login: Mikelarg count: 1.1691018897330996 avatarUrl: https://avatars.githubusercontent.com/u/8142467?u=a62a20762c7fd841b470efc0ebdf5e1a01816f87&v=4 twitterUsername: null url: https://github.com/Mikelarg -- login: paulonasc - count: 1.1605414932509663 - avatarUrl: https://avatars.githubusercontent.com/u/37284051?u=6a4bc9b65700fc4835aebec6bf6aab77acdaa233&v=4 - twitterUsername: null - url: https://github.com/paulonasc +- login: 16BitNarwhal + count: 1.1620775294244683 + avatarUrl: https://avatars.githubusercontent.com/u/31218485?u=6ce575b365c0353b5b3d1ea03088f8da36764100&v=4 + twitterUsername: 16bitnarwhal + url: https://github.com/16BitNarwhal - login: tyumentsev4 count: 1.1598017295210115 avatarUrl: https://avatars.githubusercontent.com/u/56769451?u=088102b6160822bc68c25a2a5df170080d0b16a2&v=4 twitterUsername: null url: https://github.com/tyumentsev4 -- login: maximeperrindev - count: 1.1573244745291202 - avatarUrl: https://avatars.githubusercontent.com/u/63123596?u=ae18d496d5a6ced90d57c147f102f7c5ecf8e63f&v=4 - twitterUsername: maximeperrin_ - url: https://github.com/maximeperrindev - login: klaus-xiong count: 1.1549839725022206 avatarUrl: https://avatars.githubusercontent.com/u/71321890?u=71a53f3a743fb8a91733e2a4cfcc05e309e3ef87&v=4 twitterUsername: null url: https://github.com/klaus-xiong -- login: fengjial - count: 1.1534497369095245 - avatarUrl: https://avatars.githubusercontent.com/u/8777479?v=4 - twitterUsername: null - url: https://github.com/fengjial -- login: mkorpela - count: 1.1523809523809523 - avatarUrl: https://avatars.githubusercontent.com/u/136885?u=9a42f56ad8055a03a5ae8a0272e66d1ae4ac083c&v=4 - twitterUsername: null - url: https://github.com/mkorpela -- login: mosheber - count: 1.142195271513252 - avatarUrl: https://avatars.githubusercontent.com/u/22236370?u=289c19bfc89a43a7e0c6956f73305aab3a8bd978&v=4 +- login: BeatrixCohere + count: 1.1370683579985905 + avatarUrl: https://avatars.githubusercontent.com/u/128378696?u=8c818bd39c9cd75b606f3b5b1479787e4e6845d9&v=4 twitterUsername: null - url: https://github.com/mosheber + url: https://github.com/BeatrixCohere - login: sdan count: 1.1341991341991342 
avatarUrl: https://avatars.githubusercontent.com/u/22898443?u=4e6aceb9132747788c4b6aca6c16027ee1109b01&v=4 @@ -505,14 +465,14 @@ top_recent_contributors: avatarUrl: https://avatars.githubusercontent.com/u/3028543?u=5096311a70425e82c9b1a143d29ccd502c155a7f&v=4 twitterUsername: evgeniyzhe url: https://github.com/Randl -- login: benitoThree - count: 1.1274864376130198 - avatarUrl: https://avatars.githubusercontent.com/u/89472452?u=47bcc0d72d51f2f914a759a0fde9ef3d1c677b98&v=4 +- login: WilliamEspegren + count: 1.1222886759609925 + avatarUrl: https://avatars.githubusercontent.com/u/131612909?v=4 twitterUsername: null - url: https://github.com/benitoThree + url: https://github.com/WilliamEspegren - login: Simon-Stone count: 1.1192315309962368 - avatarUrl: https://avatars.githubusercontent.com/u/18614423?u=7a80b88c5fdcd50eaec207bf91e4498fbc5eb2fe&v=4 + avatarUrl: https://avatars.githubusercontent.com/u/18614423?u=1d3dba8e4e87d2a449cc90c204f422327af2d09d&v=4 twitterUsername: null url: https://github.com/Simon-Stone - login: shumway743 @@ -525,46 +485,31 @@ top_recent_contributors: avatarUrl: https://avatars.githubusercontent.com/u/9665243?u=e403da70029d61dbbb9a2f0e03daebc5418974ed&v=4 twitterUsername: null url: https://github.com/jcjc712 -- login: chrispy-snps - count: 1.1072791194742415 - avatarUrl: https://avatars.githubusercontent.com/u/50950969?u=f0c166782c1b8f63eb983383729b5d109d7bed0a&v=4 - twitterUsername: null - url: https://github.com/chrispy-snps +- login: EvilFreelancer + count: 1.1031007751937985 + avatarUrl: https://avatars.githubusercontent.com/u/9089568?u=d2f8bc466003afc3558a96f3266a0e32d5c18c34&v=4 + twitterUsername: EvilFreelancer + url: https://github.com/EvilFreelancer - login: ihpolash count: 1.0992950654582074 avatarUrl: https://avatars.githubusercontent.com/u/11153261?u=a5af26e0bd60a27ba4aba60d15b129fc410fe8cc&v=4 twitterUsername: null url: https://github.com/ihpolash -- login: HeChangHaoGary - count: 1.0990169251038817 - avatarUrl: https://avatars.githubusercontent.com/u/53417823?v=4 - twitterUsername: null - url: https://github.com/HeChangHaoGary -- login: pranava-amzn - count: 1.0956520000145442 - avatarUrl: https://avatars.githubusercontent.com/u/119924780?v=4 - twitterUsername: null - url: https://github.com/pranava-amzn - login: giannis2two count: 1.0935243246456061 avatarUrl: https://avatars.githubusercontent.com/u/145396613?u=f0da33ee8d74a5353a43f8df3332c9cac2bd70f8&v=4 twitterUsername: giannis2two url: https://github.com/giannis2two -- login: anilaltuner - count: 1.0926640926640927 - avatarUrl: https://avatars.githubusercontent.com/u/107621925?u=4a7b06f4c0cac2534521698383f58331c00c093f&v=4 - twitterUsername: anilaltuner - url: https://github.com/anilaltuner -- login: AmineDjeghri - count: 1.088173731030874 - avatarUrl: https://avatars.githubusercontent.com/u/32715913?u=5de749a141259c3fdd8a16c6438aff2b7823fd69&v=4 - twitterUsername: aminedjeghri - url: https://github.com/AmineDjeghri -- login: piizei - count: 1.07012987012987 - avatarUrl: https://avatars.githubusercontent.com/u/191493?u=3e803364d95e760cafa108ab29ee109ba0e0af83&v=4 +- login: ea-open-source + count: 1.072039072039072 + avatarUrl: https://avatars.githubusercontent.com/u/20924562?u=3f61dc32f82124727d7157c0977240770ab82c02&v=4 twitterUsername: null - url: https://github.com/piizei + url: https://github.com/ea-open-source +- login: MacanPN + count: 1.0604834042713411 + avatarUrl: https://avatars.githubusercontent.com/u/1621509?u=e54d671ddef5ac7580003427246fc2247964c9ed&v=4 + twitterUsername: null + url: 
https://github.com/MacanPN - login: abdalrohman count: 1.0589562764456981 avatarUrl: https://avatars.githubusercontent.com/u/20760062?u=422c372863e9c42406db2241e41cc52c522431ef&v=4 @@ -580,21 +525,16 @@ top_recent_contributors: avatarUrl: https://avatars.githubusercontent.com/u/2215597?u=d5558c7d5c1ab6d4a8e5381826abd1f00371a5be&v=4 twitterUsername: null url: https://github.com/s-udhaya -- login: isahers1 - count: 1.048396518735502 - avatarUrl: https://avatars.githubusercontent.com/u/78627776?u=7fd9922950b898ab502666f2cea155cf0200fe5f&v=4 +- login: gradenr + count: 1.0507462686567164 + avatarUrl: https://avatars.githubusercontent.com/u/1074525?v=4 twitterUsername: null - url: https://github.com/isahers1 + url: https://github.com/gradenr - login: JamsheedMistri count: 1.042771583647496 avatarUrl: https://avatars.githubusercontent.com/u/13024750?u=6ae631199ec7c0bb34eb8d56200023cdd94720d3&v=4 twitterUsername: null url: https://github.com/JamsheedMistri -- login: atherfawaz - count: 1.0420221169036334 - avatarUrl: https://avatars.githubusercontent.com/u/42374034?u=cfb14ff1a7c4f0a500cd9c282bc3fbcba170daef&v=4 - twitterUsername: AtherFawaz - url: https://github.com/atherfawaz - login: Hugoberry count: 1.0420221169036334 avatarUrl: https://avatars.githubusercontent.com/u/6012338?u=198f10817236beac03b10bb8f5cc6d7fcb133cc7&v=4 @@ -610,57 +550,52 @@ top_recent_contributors: avatarUrl: https://avatars.githubusercontent.com/u/70274018?u=b6d5fd627cd26f590ed442d4dffa5bdddcb803cc&v=4 twitterUsername: null url: https://github.com/jjovalle99 +- login: am-kinetica + count: 1.037345633090314 + avatarUrl: https://avatars.githubusercontent.com/u/85610855?v=4 + twitterUsername: null + url: https://github.com/am-kinetica - login: spike-spiegel-21 count: 1.0372767684148758 avatarUrl: https://avatars.githubusercontent.com/u/83648453?u=8557d590ff3516d093da32689816e898a08245ce&v=4 twitterUsername: mynksol url: https://github.com/spike-spiegel-21 -- login: kristapratico - count: 1.0338897168451622 - avatarUrl: https://avatars.githubusercontent.com/u/31998003?u=0d91cde56e2c25d8ee7447bc55099e3dad047e99&v=4 +- login: volodymyr-memsql + count: 1.033535557802494 + avatarUrl: https://avatars.githubusercontent.com/u/57520563?v=4 twitterUsername: null - url: https://github.com/kristapratico -- login: tabbyl21 - count: 1.0298311608783133 - avatarUrl: https://avatars.githubusercontent.com/u/29782447?u=a8804de5269d64ef1c2587945e1b40925349c4a0&v=4 + url: https://github.com/volodymyr-memsql +- login: sokolgood + count: 1.0290598290598292 + avatarUrl: https://avatars.githubusercontent.com/u/126395124?u=79cff420daf96b72b14caca0061b57b884139f4f&v=4 twitterUsername: null - url: https://github.com/tabbyl21 + url: https://github.com/sokolgood - login: chyroc count: 1.0269896193771626 avatarUrl: https://avatars.githubusercontent.com/u/15604894?u=420ab32f71fa4a6839da653b5a5d97381b087902&v=4 twitterUsername: null url: https://github.com/chyroc -- login: lalanikarim - count: 1.0225303248536146 - avatarUrl: https://avatars.githubusercontent.com/u/1296705?v=4 - twitterUsername: null - url: https://github.com/lalanikarim -- login: sachinparyani - count: 1.0138494910729183 - avatarUrl: https://avatars.githubusercontent.com/u/16364994?u=d8603567cb87b4f76f0df2f7937252ae040cbebf&v=4 +- login: kartheekyakkala + count: 1.008710587657956 + avatarUrl: https://avatars.githubusercontent.com/u/50788154?u=f924ef4e8d2b47be96f7a4b4357d17b6fafaea80&v=4 twitterUsername: null - url: https://github.com/sachinparyani + url: https://github.com/kartheekyakkala 
top_contributors: - login: leo-gan - count: 207.38403140360873 + count: 219.10330905186854 avatarUrl: https://avatars.githubusercontent.com/u/2256422?v=4 twitterUsername: null url: https://github.com/leo-gan - login: cbornet - count: 53.605409926776204 + count: 59.739515496800784 avatarUrl: https://avatars.githubusercontent.com/u/11633333?u=e13817e11b3fb8c3d209d747c77a0f0742d11138&v=4 twitterUsername: null url: https://github.com/cbornet - login: tomasonjo - count: 41.43286169866708 + count: 45.704548151944536 avatarUrl: https://avatars.githubusercontent.com/u/19948365?v=4 twitterUsername: tb_tomaz url: https://github.com/tomasonjo -- login: ccurme - count: 39.58459444792489 - avatarUrl: https://avatars.githubusercontent.com/u/26529506?u=528b1df1ba3ba4f21e3e1fb74b12766e5b04c487&v=4 - twitterUsername: null - url: https://github.com/ccurme - login: lkuligin count: 30.280190954283917 avatarUrl: https://avatars.githubusercontent.com/u/11026406?v=4 @@ -677,7 +612,7 @@ top_contributors: twitterUsername: LukawskiKacper url: https://github.com/kacperlukawski - login: hemidactylus - count: 14.782161383778044 + count: 15.83937565797345 avatarUrl: https://avatars.githubusercontent.com/u/14221764?u=47a1405343b4d92caed3744e82dda1d28d01a251&v=4 twitterUsername: null url: https://github.com/hemidactylus @@ -707,20 +642,20 @@ top_contributors: twitterUsername: null url: https://github.com/danielchalef - login: liugddx - count: 11.876082540231806 + count: 12.462789950468627 avatarUrl: https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4 twitterUsername: null url: https://github.com/liugddx +- login: mspronesti + count: 11.752469751839076 + avatarUrl: https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4 + twitterUsername: null + url: https://github.com/mspronesti - login: chyroc count: 11.572248609597397 avatarUrl: https://avatars.githubusercontent.com/u/15604894?u=420ab32f71fa4a6839da653b5a5d97381b087902&v=4 twitterUsername: null url: https://github.com/chyroc -- login: mspronesti - count: 11.434777711519299 - avatarUrl: https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4 - twitterUsername: null - url: https://github.com/mspronesti - login: eavanvalkenburg count: 11.086680217792539 avatarUrl: https://avatars.githubusercontent.com/u/13749212?u=b58700c3bd236e880223bccba53b7ad0dd4d7003&v=4 @@ -737,10 +672,15 @@ top_contributors: twitterUsername: HoltSkinner12 url: https://github.com/holtskinner - login: sepiatone - count: 10.09112072266705 + count: 10.148640760385108 avatarUrl: https://avatars.githubusercontent.com/u/19181718?u=79a9013dea28a7fa654431cd7e89b08dc76434dd&v=4 twitterUsername: null url: https://github.com/sepiatone +- login: MateuszOssGit + count: 10.097777784303783 + avatarUrl: https://avatars.githubusercontent.com/u/139469471?v=4 + twitterUsername: null + url: https://github.com/MateuszOssGit - login: fpingham count: 9.643938109747804 avatarUrl: https://avatars.githubusercontent.com/u/24279597?u=05e329b5fa4f95223f9fbb1daa07118f72e4a071&v=4 @@ -764,8 +704,18 @@ top_contributors: - login: nickscamara count: 8.580958404078633 avatarUrl: https://avatars.githubusercontent.com/u/20311743?u=29bf2391ae34297a12a88d813731b0bdf289e4a5&v=4 - twitterUsername: null + twitterUsername: nickscamara_ url: https://github.com/nickscamara +- login: maxjakob + count: 8.580433940570003 + avatarUrl: https://avatars.githubusercontent.com/u/851520?u=21c6d8ef697fd32a8020d81269e155a24cb081ac&v=4 
+ twitterUsername: null + url: https://github.com/maxjakob +- login: keenborder786 + count: 8.540827511105405 + avatarUrl: https://avatars.githubusercontent.com/u/45242107?u=bf122f1371d59c3ba69a87225255fbd00e894404&v=4 + twitterUsername: null + url: https://github.com/keenborder786 - login: maks-operlejn-ds count: 8.50624637439208 avatarUrl: https://avatars.githubusercontent.com/u/142261444?u=23524d34d4d0dfce963a24131a3c28e89daa9fc7&v=4 @@ -776,31 +726,21 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/1823547?u=ea9246b84dbc3886d96ba171aabb64d2470c8d60&v=4 twitterUsername: ofermend url: https://github.com/ofermend -- login: MateuszOssGit - count: 8.345974440750936 - avatarUrl: https://avatars.githubusercontent.com/u/139469471?v=4 - twitterUsername: null - url: https://github.com/MateuszOssGit -- login: maxjakob - count: 7.977259337395401 - avatarUrl: https://avatars.githubusercontent.com/u/851520?u=21c6d8ef697fd32a8020d81269e155a24cb081ac&v=4 - twitterUsername: null - url: https://github.com/maxjakob - login: sergerdn count: 7.43609256642621 avatarUrl: https://avatars.githubusercontent.com/u/64213648?u=a9a3c39e0277dcb74d102e73511df929d2a1ecc6&v=4 twitterUsername: null url: https://github.com/sergerdn -- login: keenborder786 - count: 7.247019942527196 - avatarUrl: https://avatars.githubusercontent.com/u/45242107?u=bf122f1371d59c3ba69a87225255fbd00e894404&v=4 - twitterUsername: null - url: https://github.com/keenborder786 - login: volodymyr-memsql count: 7.100738539635195 avatarUrl: https://avatars.githubusercontent.com/u/57520563?v=4 twitterUsername: null url: https://github.com/volodymyr-memsql +- login: Jibola + count: 6.828251208705393 + avatarUrl: https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4 + twitterUsername: null + url: https://github.com/Jibola - login: averikitsch count: 6.691292200589024 avatarUrl: https://avatars.githubusercontent.com/u/6519888?u=fe0b0f093e8683bdac4f205b237d2e48d7c755d4&v=4 @@ -839,13 +779,13 @@ top_contributors: - login: michaelfeil count: 6.30450671251487 avatarUrl: https://avatars.githubusercontent.com/u/63565275?u=08a65e589a3045dad9c13218858c8a91d16528fc&v=4 - twitterUsername: null + twitterUsername: feilsystem url: https://github.com/michaelfeil -- login: Jibola - count: 6.225098172128444 - avatarUrl: https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4 +- login: Adi8885 + count: 6.2983081850953955 + avatarUrl: https://avatars.githubusercontent.com/u/31382824?u=9ce2d58c7c1c9f9a225f1929633b77c24d607d5b&v=4 twitterUsername: null - url: https://github.com/Jibola + url: https://github.com/Adi8885 - login: blob42 count: 6.106082378665331 avatarUrl: https://avatars.githubusercontent.com/u/210457?u=3f6ac4dcc1ec9f1b98cc62fd7095120da2accbc4&v=4 @@ -886,6 +826,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/62768671?u=279f772a5b8325a191a1a8bb623aa40f32a01856&v=4 twitterUsername: null url: https://github.com/skcoirz +- login: OpenVINO-dev-contest + count: 5.577090409095835 + avatarUrl: https://avatars.githubusercontent.com/u/91237924?u=76e7131a2ebbe9ef35061620286d6d06258e7a61&v=4 + twitterUsername: null + url: https://github.com/OpenVINO-dev-contest - login: jamesbraza count: 5.440868933802869 avatarUrl: https://avatars.githubusercontent.com/u/8990777?u=9f7c4ab36aa10d7594748fdc9ddba6ff3f0a2f77&v=4 @@ -906,11 +851,6 @@ top_contributors: avatarUrl: 
https://avatars.githubusercontent.com/u/60956360?u=5678f015273d23e2cbdacbe172bcf154de0f4f86&v=4 twitterUsername: null url: https://github.com/outday29 -- login: OpenVINO-dev-contest - count: 5.210818999346164 - avatarUrl: https://avatars.githubusercontent.com/u/91237924?u=76e7131a2ebbe9ef35061620286d6d06258e7a61&v=4 - twitterUsername: null - url: https://github.com/OpenVINO-dev-contest - login: harry-cohere count: 5.2093225809800625 avatarUrl: https://avatars.githubusercontent.com/u/127103098?v=4 @@ -926,6 +866,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/15918167?v=4 twitterUsername: null url: https://github.com/ljeagle +- login: Anush008 + count: 5.0946770349601 + avatarUrl: https://avatars.githubusercontent.com/u/46051506?u=026f5f140e8b7ba4744bf971f9ebdea9ebab67ca&v=4 + twitterUsername: null + url: https://github.com/Anush008 - login: joemcelroy count: 5.072750830720205 avatarUrl: https://avatars.githubusercontent.com/u/49480?u=4a9b7c8820211aae14da7f72f617d88019a06569&v=4 @@ -951,16 +896,16 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/20971593?u=1574196bb286044d23a04aa5aa34203ada8f4309&v=4 twitterUsername: jonzluo url: https://github.com/jzluo +- login: Josephasafg + count: 4.872146554926262 + avatarUrl: https://avatars.githubusercontent.com/u/39553475?u=919fcd626077055164ce97bf6cde0a47c54507de&v=4 + twitterUsername: null + url: https://github.com/Josephasafg - login: IANTHEREAL count: 4.80835368092392 avatarUrl: https://avatars.githubusercontent.com/u/10701973?u=866bdbf25a3759626815099ce480e2ffcff520fb&v=4 twitterUsername: null url: https://github.com/IANTHEREAL -- login: Adi8885 - count: 4.759690656747619 - avatarUrl: https://avatars.githubusercontent.com/u/31382824?u=9ce2d58c7c1c9f9a225f1929633b77c24d607d5b&v=4 - twitterUsername: null - url: https://github.com/Adi8885 - login: mateusz-wosinski-ds count: 4.729385171126772 avatarUrl: https://avatars.githubusercontent.com/u/142883372?u=45481f472f5f89c4d8ca8788617ffac47c5ebd88&v=4 @@ -991,6 +936,16 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/17039389?u=796226152becf82c4d7fd5cc49a24e58a73ce66f&v=4 twitterUsername: null url: https://github.com/harupy +- login: lalanikarim + count: 4.447786538280393 + avatarUrl: https://avatars.githubusercontent.com/u/1296705?v=4 + twitterUsername: null + url: https://github.com/lalanikarim +- login: jhpiedrahitao + count: 4.438284892172325 + avatarUrl: https://avatars.githubusercontent.com/u/14959173?u=87fcb0013440f648fb263168583695258b6dbf1c&v=4 + twitterUsername: null + url: https://github.com/jhpiedrahitao - login: kylehh count: 4.402950642669773 avatarUrl: https://avatars.githubusercontent.com/u/24217337?u=09d0e274f382e264ef578e93b547fb55a5b179fe&v=4 @@ -1031,16 +986,26 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/10434946?u=6e20682a9c48909576b6ecc2fc93da3dbb90a52a&v=4 twitterUsername: yakigac url: https://github.com/yakigac +- login: pprados + count: 3.9232523309517795 + avatarUrl: https://avatars.githubusercontent.com/u/204694?u=c42de41cff108d35269dd2e8fac8977f1f4e471d&v=4 + twitterUsername: null + url: https://github.com/pprados +- login: zc277584121 + count: 3.8755569524538016 + avatarUrl: https://avatars.githubusercontent.com/u/17022025?u=ceee62d53f1c06bf9a014096b651ca0c42cfea3b&v=4 + twitterUsername: null + url: https://github.com/zc277584121 - login: HunterGerlach count: 3.8651978890968324 avatarUrl: 
https://avatars.githubusercontent.com/u/5001050?u=d5d0c24dc9566cec4b8e3cd376150c05b42c5210&v=4 twitterUsername: HunterGerlach url: https://github.com/HunterGerlach -- login: lalanikarim - count: 3.8593900101853724 - avatarUrl: https://avatars.githubusercontent.com/u/1296705?v=4 - twitterUsername: null - url: https://github.com/lalanikarim +- login: eltociear + count: 3.8452875586615916 + avatarUrl: https://avatars.githubusercontent.com/u/22633385?u=29190f6c8aed91fa9574b064a9995f1e49944acf&v=4 + twitterUsername: eltociear + url: https://github.com/eltociear - login: gkorland count: 3.8281796403497044 avatarUrl: https://avatars.githubusercontent.com/u/753206?u=911ac7819a0dcf86bd5fd8ad8e4f986e22b8579b&v=4 @@ -1061,11 +1026,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/8893086?u=220ec6df446248eeb09a59230c017a2c57bf8e61&v=4 twitterUsername: null url: https://github.com/saginawj -- login: eltociear - count: 3.6436068863926834 - avatarUrl: https://avatars.githubusercontent.com/u/22633385?u=29190f6c8aed91fa9574b064a9995f1e49944acf&v=4 - twitterUsername: eltociear - url: https://github.com/eltociear - login: filip-halt count: 3.6276674483672173 avatarUrl: https://avatars.githubusercontent.com/u/81822489?u=07badfd993685a278b1f929c1500a58837a6621d&v=4 @@ -1086,6 +1046,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/3032459?u=590f1489107c91803bbe75de26cfeeeb77b25f8d&v=4 twitterUsername: null url: https://github.com/nelly-hateva +- login: rahul-trip + count: 3.4770653885517837 + avatarUrl: https://avatars.githubusercontent.com/u/9318457?u=3dbf765a07fee48e3dd171851b8417c002a41f49&v=4 + twitterUsername: null + url: https://github.com/rahul-trip - login: wemysschen count: 3.4513780719164755 avatarUrl: https://avatars.githubusercontent.com/u/38650638?u=2b526137f18a7c41934c8da0722f1fedb74c3422&v=4 @@ -1116,11 +1081,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/1309177?u=6328c998d93a48eba87c6b039783b8a7644c62c3&v=4 twitterUsername: charliermarsh url: https://github.com/charliermarsh -- login: Anush008 - count: 3.3192340488199914 - avatarUrl: https://avatars.githubusercontent.com/u/46051506?u=026f5f140e8b7ba4744bf971f9ebdea9ebab67ca&v=4 - twitterUsername: null - url: https://github.com/Anush008 +- login: maximeperrindev + count: 3.3315671590019837 + avatarUrl: https://avatars.githubusercontent.com/u/63123596?u=ae18d496d5a6ced90d57c147f102f7c5ecf8e63f&v=4 + twitterUsername: maximeperrin_ + url: https://github.com/maximeperrindev - login: mackong count: 3.2827236314311636 avatarUrl: https://avatars.githubusercontent.com/u/2212586?v=4 @@ -1131,21 +1096,26 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/3760?u=1dfde576ef286346afcc2a71eaf1fdb2857fb547&v=4 twitterUsername: brunotorious url: https://github.com/bborn +- login: junkeon + count: 3.2330467404238674 + avatarUrl: https://avatars.githubusercontent.com/u/35945268?u=4379ecd5062eea0f6449c520ddde5fe1e3724500&v=4 + twitterUsername: null + url: https://github.com/junkeon - login: jj701 count: 3.1101083172010284 avatarUrl: https://avatars.githubusercontent.com/u/129657162?u=353d87b0e8d4c628536e2e40a34a7622dc3c18ab&v=4 twitterUsername: null url: https://github.com/jj701 -- login: Josephasafg - count: 3.0984880810149624 - avatarUrl: https://avatars.githubusercontent.com/u/39553475?u=919fcd626077055164ce97bf6cde0a47c54507de&v=4 - twitterUsername: null - url: https://github.com/Josephasafg - login: cauwulixuan count: 3.0880825394837608 avatarUrl: 
https://avatars.githubusercontent.com/u/26039352?v=4 twitterUsername: null url: https://github.com/cauwulixuan +- login: markcusack + count: 3.0859525884440933 + avatarUrl: https://avatars.githubusercontent.com/u/6406557?v=4 + twitterUsername: null + url: https://github.com/markcusack - login: delip count: 3.0537599741527597 avatarUrl: https://avatars.githubusercontent.com/u/347398?v=4 @@ -1176,11 +1146,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/1995599?v=4 twitterUsername: null url: https://github.com/shane-huang -- login: rahul-trip - count: 2.95441257044442 - avatarUrl: https://avatars.githubusercontent.com/u/9318457?u=3dbf765a07fee48e3dd171851b8417c002a41f49&v=4 - twitterUsername: null - url: https://github.com/rahul-trip - login: cbh123 count: 2.9447261983889454 avatarUrl: https://avatars.githubusercontent.com/u/14149230?u=ca710ca2a64391470163ddef6b5ea7633ab26872&v=4 @@ -1206,6 +1171,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/75213811?v=4 twitterUsername: null url: https://github.com/kitrak-rev +- login: tazarov + count: 2.8446304023416493 + avatarUrl: https://avatars.githubusercontent.com/u/1157440?u=2f81a28298c1172e732898a1f8e800342434801d&v=4 + twitterUsername: t_azarov + url: https://github.com/tazarov - login: parambharat count: 2.818821939860283 avatarUrl: https://avatars.githubusercontent.com/u/12809212?u=8c1f0baf8a29f3007e3a51f5cf7b4a8e04c5ca8d&v=4 @@ -1216,6 +1186,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/139942740?u=fa99ca083ccdc7322c7b24f8a3c001e71be347b4&v=4 twitterUsername: null url: https://github.com/baichuan-assistant +- login: sfvaroglu + count: 2.810147933590226 + avatarUrl: https://avatars.githubusercontent.com/u/22965499?u=883e3e34158ff6beadadef0178f83d1200be1acf&v=4 + twitterUsername: null + url: https://github.com/sfvaroglu - login: sfc-gh-jcarroll count: 2.808430635233632 avatarUrl: https://avatars.githubusercontent.com/u/116604821?u=ec1518c27a7a15f33a138cf0b956ef1758edbaff&v=4 @@ -1228,7 +1203,7 @@ top_contributors: url: https://github.com/jeffzwang - login: BeatrixCohere count: 2.804179427283573 - avatarUrl: https://avatars.githubusercontent.com/u/128378696?v=4 + avatarUrl: https://avatars.githubusercontent.com/u/128378696?u=8c818bd39c9cd75b606f3b5b1479787e4e6845d9&v=4 twitterUsername: null url: https://github.com/BeatrixCohere - login: P-E-B @@ -1236,6 +1211,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/38215315?u=3985b6a3ecb0e8338c5912ea9e20787152d0ad7a&v=4 twitterUsername: null url: https://github.com/P-E-B +- login: chadj2 + count: 2.7236932072871802 + avatarUrl: https://avatars.githubusercontent.com/u/3045965?u=3d3c34259d50723955dd92d1de5be21236989356&v=4 + twitterUsername: chad_juliano + url: https://github.com/chadj2 - login: sam-h-bean count: 2.7168104401694806 avatarUrl: https://avatars.githubusercontent.com/u/43734688?u=78f139fa940620e301361a58821c9f56128f71d9&v=4 @@ -1266,6 +1246,16 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/7287580?u=5fe01002eec3d9df91ce3cef0016916554379efd&v=4 twitterUsername: null url: https://github.com/edwardzjl +- login: Nutlope + count: 2.654167273889662 + avatarUrl: https://avatars.githubusercontent.com/u/63742054?u=befe4ae74b906698be965bad482d0e02fc7707ab&v=4 + twitterUsername: nutlope + url: https://github.com/Nutlope +- login: paul-paliychuk + count: 2.642403561566109 + avatarUrl: 
https://avatars.githubusercontent.com/u/26054637?u=edd1e4f54e91b549f2edb525d43210f4f04d7367&v=4 + twitterUsername: null + url: https://github.com/paul-paliychuk - login: gregnr count: 2.6178326657395794 avatarUrl: https://avatars.githubusercontent.com/u/4133076?u=f3f783e0364abe955dbde6af80445ea27d948fdd&v=4 @@ -1281,11 +1271,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/12044110?v=4 twitterUsername: null url: https://github.com/sudranga -- login: pprados - count: 2.6076419559989827 - avatarUrl: https://avatars.githubusercontent.com/u/204694?u=c42de41cff108d35269dd2e8fac8977f1f4e471d&v=4 - twitterUsername: null - url: https://github.com/pprados - login: sseide count: 2.6011997659477375 avatarUrl: https://avatars.githubusercontent.com/u/5168949?v=4 @@ -1306,11 +1291,21 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/32453863?v=4 twitterUsername: null url: https://github.com/BeautyyuYanli +- login: dglogo + count: 2.570516492884163 + avatarUrl: https://avatars.githubusercontent.com/u/167348611?v=4 + twitterUsername: null + url: https://github.com/dglogo - login: gradenr count: 2.561001940475218 avatarUrl: https://avatars.githubusercontent.com/u/1074525?v=4 twitterUsername: null url: https://github.com/gradenr +- login: rohanaggarwal7997 + count: 2.5461756978498595 + avatarUrl: https://avatars.githubusercontent.com/u/24482442?u=d6095b9533599b26d16fe6273d8f513206976a62&v=4 + twitterUsername: null + url: https://github.com/rohanaggarwal7997 - login: zhaoshengbo count: 2.5359631155826756 avatarUrl: https://avatars.githubusercontent.com/u/4787922?u=dd4c7a18d86a6ad56455aa13e66daedbbbcf31b7&v=4 @@ -1331,6 +1326,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/3469711?u=6962798c0280caa0d0260ccb8be1b18fb3ea44b2&v=4 twitterUsername: jtolgyesi url: https://github.com/mrtj +- login: pcliupc + count: 2.4647930363822415 + avatarUrl: https://avatars.githubusercontent.com/u/5069448?u=6b0ba426b68777f4935399013b7c2c112635c0df&v=4 + twitterUsername: null + url: https://github.com/pcliupc - login: alvarobartt count: 2.437030974584336 avatarUrl: https://avatars.githubusercontent.com/u/36760800?u=12735f9035294180cb0b83446bdf7d8ac1a3fef9&v=4 @@ -1371,11 +1371,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/48101485?u=dcf140777416a7d86a450964fc53ec5b17668603&v=4 twitterUsername: null url: https://github.com/nikhilkjha -- login: junkeon - count: 2.3650908830650637 - avatarUrl: https://avatars.githubusercontent.com/u/35945268?u=4379ecd5062eea0f6449c520ddde5fe1e3724500&v=4 +- login: Dominastorm + count: 2.3639219366492097 + avatarUrl: https://avatars.githubusercontent.com/u/43818888?u=0c01fad081c0abd23d2d49ea4496890ffbc22325&v=4 twitterUsername: null - url: https://github.com/junkeon + url: https://github.com/Dominastorm - login: raunakshrivastava7 count: 2.360449738616422 avatarUrl: https://avatars.githubusercontent.com/u/13537446?v=4 @@ -1446,6 +1446,16 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/7953259?u=a451fad7ad197a8920651cf89aaf5d950734d0a8&v=4 twitterUsername: mongomike url: https://github.com/mikelambert +- login: nicoloboschi + count: 2.244301734320702 + avatarUrl: https://avatars.githubusercontent.com/u/23314389?u=2014e20e246530fa89bd902fe703b6f9e6ecf833&v=4 + twitterUsername: nicoloboschi + url: https://github.com/nicoloboschi +- login: mkorpela + count: 2.2351512926636232 + avatarUrl: 
https://avatars.githubusercontent.com/u/136885?u=9a42f56ad8055a03a5ae8a0272e66d1ae4ac083c&v=4 + twitterUsername: null + url: https://github.com/mkorpela - login: linancn count: 2.2329019681960856 avatarUrl: https://avatars.githubusercontent.com/u/31125281?u=1bc56191c789906c2a11a4183c108b2784609015&v=4 @@ -1486,6 +1496,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/3300000?v=4 twitterUsername: null url: https://github.com/ruze00 +- login: raghavdixit99 + count: 2.161551236278531 + avatarUrl: https://avatars.githubusercontent.com/u/34462078?u=20243a60ac608142887c14251502c2a975614ba3&v=4 + twitterUsername: null + url: https://github.com/raghavdixit99 - login: HeChangHaoGary count: 2.159505313803356 avatarUrl: https://avatars.githubusercontent.com/u/53417823?v=4 @@ -1501,6 +1516,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/15706966?u=f6dd024f1fc955b7d411eb13ebcae7334b527063&v=4 twitterUsername: null url: https://github.com/jerwelborn +- login: Anindyadeep + count: 2.151424980570444 + avatarUrl: https://avatars.githubusercontent.com/u/58508471?u=74423e863298863bf5c7dd7d1bff0aa106a9cc75&v=4 + twitterUsername: AnindyadeepS + url: https://github.com/Anindyadeep - login: vairodp count: 2.148470520810946 avatarUrl: https://avatars.githubusercontent.com/u/65446134?u=a292659bc2611825b65a56a7ee6bfe6fdbfa033b&v=4 @@ -1626,11 +1646,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/72488598?u=98dc24a63369cbae14913caff5f379f80f305aab&v=4 twitterUsername: null url: https://github.com/Undertone0809 -- login: sfvaroglu - count: 1.977334862056892 - avatarUrl: https://avatars.githubusercontent.com/u/22965499?u=883e3e34158ff6beadadef0178f83d1200be1acf&v=4 - twitterUsername: null - url: https://github.com/sfvaroglu - login: hetaoBackend count: 1.9761810838733918 avatarUrl: https://avatars.githubusercontent.com/u/45447813?u=6d1f8b455599848e6cd9c2410ba5f4f02d2d368c&v=4 @@ -1676,6 +1691,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/81988348?v=4 twitterUsername: null url: https://github.com/rithwik-db +- login: kartheekyakkala + count: 1.9352282943736827 + avatarUrl: https://avatars.githubusercontent.com/u/50788154?u=f924ef4e8d2b47be96f7a4b4357d17b6fafaea80&v=4 + twitterUsername: null + url: https://github.com/kartheekyakkala - login: jiayini1119 count: 1.9324094755524448 avatarUrl: https://avatars.githubusercontent.com/u/105399924?u=e69e8f1af87a33af3ecbdd5b5d4327c6dc254df6&v=4 @@ -1706,11 +1726,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/119620994?u=ac3dfad90764c69144f593023fce93080586702e&v=4 twitterUsername: weeeetard url: https://github.com/Honkware -- login: nicoloboschi - count: 1.9167514220375652 - avatarUrl: https://avatars.githubusercontent.com/u/23314389?u=2014e20e246530fa89bd902fe703b6f9e6ecf833&v=4 - twitterUsername: nicoloboschi - url: https://github.com/nicoloboschi - login: dwhitena count: 1.9161229841519185 avatarUrl: https://avatars.githubusercontent.com/u/4524535?u=6a41acd9f233fa9e62294d5534d1f2f52faa6b78&v=4 @@ -1741,11 +1756,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/110841617?u=e473cda5a87ca1dae11082c11db9c1ed1f4c7032&v=4 twitterUsername: ecardenas300 url: https://github.com/erika-cardenas -- login: raghavdixit99 - count: 1.871507946235241 - avatarUrl: https://avatars.githubusercontent.com/u/34462078?u=20243a60ac608142887c14251502c2a975614ba3&v=4 - twitterUsername: null - url: https://github.com/raghavdixit99 - login: 
Ayan-Bandyopadhyay count: 1.8648072277486105 avatarUrl: https://avatars.githubusercontent.com/u/13636019?v=4 @@ -1761,16 +1771,16 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/13009163?u=c2b3a11cceaadbc9415f545b971250c9e2b2078b&v=4 twitterUsername: sampartee url: https://github.com/Spartee -- login: markcusack - count: 1.8298420323335374 - avatarUrl: https://avatars.githubusercontent.com/u/6406557?v=4 - twitterUsername: null - url: https://github.com/markcusack - login: Jflick58 count: 1.8203673348621032 avatarUrl: https://avatars.githubusercontent.com/u/22459070?u=c541f86a16a5b46ae138a7bf1efdce36dd413f24&v=4 twitterUsername: null url: https://github.com/Jflick58 +- login: JuHyung-Son + count: 1.8196656246301637 + avatarUrl: https://avatars.githubusercontent.com/u/20140126?u=d1b9220a46efe488dc3db52e5d92774d85d38dfc&v=4 + twitterUsername: null + url: https://github.com/JuHyung-Son - login: stewartjarod count: 1.8194306782542078 avatarUrl: https://avatars.githubusercontent.com/u/949393?u=66d8768dc44519c956069acd88cfb1b0dca646f8&v=4 @@ -1811,6 +1821,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/13447955?v=4 twitterUsername: null url: https://github.com/lesterpjy +- login: junefish + count: 1.7761113105527058 + avatarUrl: https://avatars.githubusercontent.com/u/19216250?u=85921f52a4be080e3529d87d3e3e75bf83847b24&v=4 + twitterUsername: null + url: https://github.com/junefish - login: 2jimoo count: 1.7713839158851945 avatarUrl: https://avatars.githubusercontent.com/u/107998986?u=70520f8a4ad962c0fc2706649ec401b274681927&v=4 @@ -1833,7 +1848,7 @@ top_contributors: url: https://github.com/shoelsch - login: h0rv count: 1.7513242553691613 - avatarUrl: https://avatars.githubusercontent.com/u/45851384?u=bd70e86b6954fa1663bb5245b585d13d92252f1b&v=4 + avatarUrl: https://avatars.githubusercontent.com/u/45851384?u=c9c158b6040b1fd8ae5543bad513260e157d5892&v=4 twitterUsername: null url: https://github.com/h0rv - login: JoanFM @@ -1851,6 +1866,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/3195154?u=baa3820b95103662bc2aca01959e41aa651764b5&v=4 twitterUsername: mgoin_ url: https://github.com/mgoin +- login: Blaizzy + count: 1.7414601733094188 + avatarUrl: https://avatars.githubusercontent.com/u/23445657?u=84dda94e9330c5538ea94099b5cae699c88586f8&v=4 + twitterUsername: Prince_Canuma + url: https://github.com/Blaizzy - login: akmhmgc count: 1.7341341774861867 avatarUrl: https://avatars.githubusercontent.com/u/38002468?u=dd6ba12322fa2ee0d88e83a3773c8abc13ec37af&v=4 @@ -1886,6 +1906,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/115371133?u=a032d8cc4a47b9a25bc7a1699a73506bdb752ea2&v=4 twitterUsername: null url: https://github.com/fserv +- login: seanmavley + count: 1.707977209106108 + avatarUrl: https://avatars.githubusercontent.com/u/5289083?u=d663551cd0b6e74091abd6272c35c9e02e82d6c0&v=4 + twitterUsername: nkansahrexford + url: https://github.com/seanmavley - login: cloudscool count: 1.7022103473402963 avatarUrl: https://avatars.githubusercontent.com/u/37284105?u=be61bf8a5cef1060aeeb63a9bdd0a18f2edfe8d1&v=4 @@ -1916,11 +1941,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/43149077?u=26d40f875b701db58f54af0441501c12e86dec6f&v=4 twitterUsername: danielking36 url: https://github.com/dakinggg -- login: Dominastorm - count: 1.6862359862359864 - avatarUrl: https://avatars.githubusercontent.com/u/43818888?u=0c01fad081c0abd23d2d49ea4496890ffbc22325&v=4 - twitterUsername: null - url: 
https://github.com/Dominastorm - login: jackwotherspoon count: 1.6853550616256832 avatarUrl: https://avatars.githubusercontent.com/u/32113413?u=069f880e88a96db6ad955e3cc9fc7f9dfcf2beef&v=4 @@ -1936,6 +1956,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/2644049?v=4 twitterUsername: null url: https://github.com/wnleao +- login: hmasdev + count: 1.6650962791115989 + avatarUrl: https://avatars.githubusercontent.com/u/73353463?u=b07dac98e10a359f1a21dc08e61144e3671ca22f&v=4 + twitterUsername: hmdev3 + url: https://github.com/hmasdev - login: kdcokenny count: 1.663856041888594 avatarUrl: https://avatars.githubusercontent.com/u/99611484?u=f421fe8a2917ae3ea24d83f056646055a00d3174&v=4 @@ -1996,6 +2021,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/39889?u=bd28816c18beaddc4da762d61d842547fdb271d9&v=4 twitterUsername: null url: https://github.com/yarikoptic +- login: Jofthomas + count: 1.6045648644592694 + avatarUrl: https://avatars.githubusercontent.com/u/52778543?u=504d8eb452ab2103a86ab469dd793eab49c8a437&v=4 + twitterUsername: null + url: https://github.com/Jofthomas - login: marlenezw count: 1.6044510631256723 avatarUrl: https://avatars.githubusercontent.com/u/57748216?u=e2029e1262ee9c9d9f5825b2d28952758a628f28&v=4 @@ -2041,11 +2071,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/3480154?u=f69c138e15366ba9c15cafd3c753a7ba7da44ad5&v=4 twitterUsername: null url: https://github.com/wangwei1237 -- login: pcliupc - count: 1.5716324738063867 - avatarUrl: https://avatars.githubusercontent.com/u/5069448?u=6b0ba426b68777f4935399013b7c2c112635c0df&v=4 - twitterUsername: null - url: https://github.com/pcliupc - login: nimimeht count: 1.568257261793327 avatarUrl: https://avatars.githubusercontent.com/u/116048415?v=4 @@ -2076,6 +2101,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/16047967?v=4 twitterUsername: null url: https://github.com/StankoKuveljic +- login: quchuyuan + count: 1.5446707072756323 + avatarUrl: https://avatars.githubusercontent.com/u/40655746?u=3c10115601fd5b032c3f274e79fd68dc5bb03921&v=4 + twitterUsername: null + url: https://github.com/quchuyuan - login: serena-ruan count: 1.5418066453855992 avatarUrl: https://avatars.githubusercontent.com/u/82044803?u=451c2955f0862cccf64cac30e062570d208d6903&v=4 @@ -2121,6 +2151,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/7935430?v=4 twitterUsername: null url: https://github.com/rc19 +- login: anthonychu + count: 1.4923827481967016 + avatarUrl: https://avatars.githubusercontent.com/u/3982077?u=8bbebac42cb84a25c629f83f212b2d099ffa3964&v=4 + twitterUsername: nthonyChu + url: https://github.com/anthonychu - login: h3l count: 1.487215875493744 avatarUrl: https://avatars.githubusercontent.com/u/1664952?u=38196f73e9e69e2cc4f6d2e1207647af87bc440a&v=4 @@ -2131,11 +2166,21 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/6726111?u=57f5f48085f552366bc8cf19ecd1d4ad0c66cd48&v=4 twitterUsername: null url: https://github.com/JensMadsen -- login: killind-dev +- login: Raj725 + count: 1.4820793241845873 + avatarUrl: https://avatars.githubusercontent.com/u/17705063?v=4 + twitterUsername: Raj__725 + url: https://github.com/Raj725 +- login: akiradev0x count: 1.48131190431695 avatarUrl: https://avatars.githubusercontent.com/u/61808204?v=4 twitterUsername: null - url: https://github.com/killind-dev + url: https://github.com/akiradev0x +- login: fzowl + count: 1.4684274696492066 + avatarUrl: 
https://avatars.githubusercontent.com/u/160063452?v=4 + twitterUsername: null + url: https://github.com/fzowl - login: mlejva count: 1.4650246693128453 avatarUrl: https://avatars.githubusercontent.com/u/5136688?u=471ef01a31cc054f84abbe1b9e77ce07b2ac6853&v=4 @@ -2156,6 +2201,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/120141355?u=c114874e969ef4e38c54d042fe1b9a69bc634483&v=4 twitterUsername: null url: https://github.com/j-space-b +- login: chrispy-snps + count: 1.4574474696425916 + avatarUrl: https://avatars.githubusercontent.com/u/50950969?u=f0c166782c1b8f63eb983383729b5d109d7bed0a&v=4 + twitterUsername: null + url: https://github.com/chrispy-snps - login: amosjyng count: 1.4556202653081833 avatarUrl: https://avatars.githubusercontent.com/u/1863868?u=b00a9408d1433919780ea3248b3fc21258172152&v=4 @@ -2231,11 +2281,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/11441526?u=bbd26dd43cf43212b0b05601ed5aaf29727f5d9f&v=4 twitterUsername: _feiwang url: https://github.com/Fei-Wang -- login: chadj2 - count: 1.3955335745725006 - avatarUrl: https://avatars.githubusercontent.com/u/3045965?u=3d3c34259d50723955dd92d1de5be21236989356&v=4 - twitterUsername: chad_juliano - url: https://github.com/chadj2 - login: jupyterjazz count: 1.3949792989874013 avatarUrl: https://avatars.githubusercontent.com/u/45267439?u=d2ad5da7ef06e928644321e7a1cfd16842a897db&v=4 @@ -2251,6 +2296,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/7340008?u=9473b1cdea8b9929771b32f14a28ad702237900c&v=4 twitterUsername: null url: https://github.com/donbr +- login: jdogmcsteezy + count: 1.3757805389158182 + avatarUrl: https://avatars.githubusercontent.com/u/22361806?u=c6b2eec689b859aeb182654e5e67936886d860bb&v=4 + twitterUsername: null + url: https://github.com/jdogmcsteezy - login: borisdev count: 1.3742130723862958 avatarUrl: https://avatars.githubusercontent.com/u/367522?u=2b439b16d48aaea7f17d1b3b0b24a9cb0b8712ed&v=4 @@ -2271,11 +2321,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/46003469?u=4f64d04035d962af0f72d20bffd6ea61635e728e&v=4 twitterUsername: null url: https://github.com/yilmaz-burak -- login: Anindyadeep - count: 1.3646328096455973 - avatarUrl: https://avatars.githubusercontent.com/u/58508471?u=74423e863298863bf5c7dd7d1bff0aa106a9cc75&v=4 - twitterUsername: AnindyadeepS - url: https://github.com/Anindyadeep - login: yessenzhar count: 1.364200374938888 avatarUrl: https://avatars.githubusercontent.com/u/8552242?v=4 @@ -2296,6 +2341,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/8673939?v=4 twitterUsername: null url: https://github.com/NickL77 +- login: mishushakov + count: 1.3510511299577161 + avatarUrl: https://avatars.githubusercontent.com/u/10400064?u=581d97314df325c15ec221f64834003d3bba5cc1&v=4 + twitterUsername: null + url: https://github.com/mishushakov - login: flash1293 count: 1.345843724238366 avatarUrl: https://avatars.githubusercontent.com/u/1508364?u=e75aca2de6de1a1e57329fc0c6430e1341904318&v=4 @@ -2311,11 +2361,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/22690160?u=50f2d8aa99bd7b12c01df29e8ffe519ed1cff1d5&v=4 twitterUsername: null url: https://github.com/jnis23 -- login: fzowl - count: 1.3436805411748465 - avatarUrl: https://avatars.githubusercontent.com/u/160063452?v=4 - twitterUsername: null - url: https://github.com/fzowl - login: cgalo5758 count: 1.3421410050623535 avatarUrl: 
https://avatars.githubusercontent.com/u/36752715?u=5137581b52bcbb8466b394f3ba40f97f9e273f52&v=4 @@ -2383,7 +2428,7 @@ top_contributors: url: https://github.com/tricktreat - login: fzliu count: 1.306437366336383 - avatarUrl: https://avatars.githubusercontent.com/u/6334158?u=5e69f8c8d469e7bd03802d0e44bb63e082bdde0c&v=4 + avatarUrl: https://avatars.githubusercontent.com/u/6334158?u=1d02d8cc173b20c7d18e11ac20a6f40081025fc3&v=4 twitterUsername: frankzliu url: https://github.com/fzliu - login: dongreenberg @@ -2436,11 +2481,21 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/30035387?u=38717fe5778531ee96e5fc6e4a350668b5024d1c&v=4 twitterUsername: null url: https://github.com/MikeMcGarry +- login: robcaulk + count: 1.2779301131969345 + avatarUrl: https://avatars.githubusercontent.com/u/20807672?u=f2efe9788ce26442bb3319da1a56081d64c359e5&v=4 + twitterUsername: null + url: https://github.com/robcaulk - login: jagilley count: 1.27740286427827 avatarUrl: https://avatars.githubusercontent.com/u/37783831?u=5697294c9a0c5bcca4df1aafd22cf8ab64081f2f&v=4 twitterUsername: null url: https://github.com/jagilley +- login: prrao87 + count: 1.2737434720745342 + avatarUrl: https://avatars.githubusercontent.com/u/35005448?u=4b6efd3d2dcdc2acde843cff4183b59087f35a9b&v=4 + twitterUsername: tech_optimist + url: https://github.com/prrao87 - login: lujingxuansc count: 1.2709328769156623 avatarUrl: https://avatars.githubusercontent.com/u/31956487?u=4693ce4d533d97386b62851f6790881306cb88bc&v=4 @@ -2506,11 +2561,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/19657350?u=9847c9919a636e9d7022803e829ffd80008cb2d3&v=4 twitterUsername: berkedilekoglu url: https://github.com/berkedilekoglu -- login: jhpiedrahitao - count: 1.26173294502719 - avatarUrl: https://avatars.githubusercontent.com/u/14959173?u=87fcb0013440f648fb263168583695258b6dbf1c&v=4 +- login: maang-h + count: 1.261154585526509 + avatarUrl: https://avatars.githubusercontent.com/u/55082429?v=4 twitterUsername: null - url: https://github.com/jhpiedrahitao + url: https://github.com/maang-h - login: rodrigo-clickup count: 1.2584506916235707 avatarUrl: https://avatars.githubusercontent.com/u/141281053?u=e3ff32e9ae51ff0cca84b482fc1e6c80c28ab0c6&v=4 @@ -2541,11 +2596,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/709022?v=4 twitterUsername: null url: https://github.com/tconkling -- login: Blaizzy - count: 1.2493534776099087 - avatarUrl: https://avatars.githubusercontent.com/u/23445657?u=84dda94e9330c5538ea94099b5cae699c88586f8&v=4 - twitterUsername: Prince_Canuma - url: https://github.com/Blaizzy - login: toshish count: 1.2488074903523754 avatarUrl: https://avatars.githubusercontent.com/u/986859?u=54d240cfd5355bb0cfdaf4ac0a9589963ae9ccab&v=4 @@ -2591,6 +2641,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/38943595?v=4 twitterUsername: null url: https://github.com/issam9 +- login: Dobiichi-Origami + count: 1.2254803543624495 + avatarUrl: https://avatars.githubusercontent.com/u/56953648?v=4 + twitterUsername: null + url: https://github.com/Dobiichi-Origami - login: CogniJT count: 1.2241630276564774 avatarUrl: https://avatars.githubusercontent.com/u/131272471?v=4 @@ -2618,7 +2673,7 @@ top_contributors: url: https://github.com/samching - login: lukestanley count: 1.2191793713532844 - avatarUrl: https://avatars.githubusercontent.com/u/306671?u=bc2b6ddd7f12284d0321ef84f194956d7aa19991&v=4 + avatarUrl: 
https://avatars.githubusercontent.com/u/306671?u=27f910f1bdcdf18622fcccc138274be885cf1058&v=4 twitterUsername: lukestanley url: https://github.com/lukestanley - login: IlyaKIS1 @@ -2666,11 +2721,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/88005863?v=4 twitterUsername: null url: https://github.com/matiasjacob25 -- login: hmasdev - count: 1.2070985806075711 - avatarUrl: https://avatars.githubusercontent.com/u/73353463?u=b07dac98e10a359f1a21dc08e61144e3671ca22f&v=4 - twitterUsername: hmdev3 - url: https://github.com/hmasdev - login: IlyaMichlin count: 1.2064362614648567 avatarUrl: https://avatars.githubusercontent.com/u/1222232?v=4 @@ -2696,11 +2746,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/4441850?u=532666e949309d38a33cda7b1e8b5f30fee0ef7c&v=4 twitterUsername: null url: https://github.com/rsharath -- login: paul-paliychuk - count: 1.2023893847098268 - avatarUrl: https://avatars.githubusercontent.com/u/26054637?u=5518e02a40c327a943bf45ff53dcaa9477a8df19&v=4 - twitterUsername: null - url: https://github.com/paul-paliychuk - login: izapolsk count: 1.2019335109006608 avatarUrl: https://avatars.githubusercontent.com/u/21039333?u=bba2c2d18d3a5ef41360778a7679662565f326d2&v=4 @@ -2821,11 +2866,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/9324867?v=4 twitterUsername: null url: https://github.com/mhavey -- login: zc277584121 - count: 1.169427995514952 - avatarUrl: https://avatars.githubusercontent.com/u/17022025?u=ceee62d53f1c06bf9a014096b651ca0c42cfea3b&v=4 - twitterUsername: null - url: https://github.com/zc277584121 - login: praveenv count: 1.168353485594865 avatarUrl: https://avatars.githubusercontent.com/u/4526224?u=3a47513ee686870ddcbecaa70756e3e8224732af&v=4 @@ -2836,6 +2876,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/1734012?u=105d7344bcd5c0dee1a293d2740cefa05cc46b9b&v=4 twitterUsername: srics url: https://github.com/srics +- login: 16BitNarwhal + count: 1.1620775294244683 + avatarUrl: https://avatars.githubusercontent.com/u/31218485?u=6ce575b365c0353b5b3d1ea03088f8da36764100&v=4 + twitterUsername: 16bitnarwhal + url: https://github.com/16BitNarwhal - login: zhangch9 count: 1.1606921459759583 avatarUrl: https://avatars.githubusercontent.com/u/12967560?v=4 @@ -2856,11 +2901,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/37992436?u=21693d9e841c3b7f9f091a210fbeee7e415a0751&v=4 twitterUsername: null url: https://github.com/izzymsft -- login: maximeperrindev - count: 1.1573244745291202 - avatarUrl: https://avatars.githubusercontent.com/u/63123596?u=ae18d496d5a6ced90d57c147f102f7c5ecf8e63f&v=4 - twitterUsername: maximeperrin_ - url: https://github.com/maximeperrindev - login: richarda23 count: 1.1572395798187012 avatarUrl: https://avatars.githubusercontent.com/u/22676399?u=6b46c5acfe16b722badbfa6845516c1627171bbe&v=4 @@ -2896,11 +2936,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/8777479?v=4 twitterUsername: null url: https://github.com/fengjial -- login: mkorpela - count: 1.1523809523809523 - avatarUrl: https://avatars.githubusercontent.com/u/136885?u=9a42f56ad8055a03a5ae8a0272e66d1ae4ac083c&v=4 - twitterUsername: null - url: https://github.com/mkorpela - login: simon824 count: 1.152116979484941 avatarUrl: https://avatars.githubusercontent.com/u/18065113?u=6ea1812de26ecb108c18e50b719a109049d93ce2&v=4 @@ -2931,6 +2966,11 @@ top_contributors: avatarUrl: 
https://avatars.githubusercontent.com/u/1917451?u=f0d78c43c1f2d4bed080f9a8c46905d3c22a28c7&v=4 twitterUsername: null url: https://github.com/mariokostelac +- login: samnoyes + count: 1.1441430952522442 + avatarUrl: https://avatars.githubusercontent.com/u/6432132?v=4 + twitterUsername: null + url: https://github.com/samnoyes - login: mosheber count: 1.142195271513252 avatarUrl: https://avatars.githubusercontent.com/u/22236370?u=289c19bfc89a43a7e0c6956f73305aab3a8bd978&v=4 @@ -2996,6 +3036,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/30344258?u=51c169c8996024b68e9b3ec0bfe93465940dc8b4&v=4 twitterUsername: null url: https://github.com/LMC117 +- login: WilliamEspegren + count: 1.1222886759609925 + avatarUrl: https://avatars.githubusercontent.com/u/131612909?v=4 + twitterUsername: null + url: https://github.com/WilliamEspegren - login: sunbc0120 count: 1.1207709562525592 avatarUrl: https://avatars.githubusercontent.com/u/7380988?u=ba9beadb7fd3bcd6d8439154bedbd32d5fdbd4d8&v=4 @@ -3003,7 +3048,7 @@ top_contributors: url: https://github.com/sunbc0120 - login: Simon-Stone count: 1.1192315309962368 - avatarUrl: https://avatars.githubusercontent.com/u/18614423?u=7a80b88c5fdcd50eaec207bf91e4498fbc5eb2fe&v=4 + avatarUrl: https://avatars.githubusercontent.com/u/18614423?u=1d3dba8e4e87d2a449cc90c204f422327af2d09d&v=4 twitterUsername: null url: https://github.com/Simon-Stone - login: Amyh102 @@ -3051,11 +3096,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/9665243?u=e403da70029d61dbbb9a2f0e03daebc5418974ed&v=4 twitterUsername: null url: https://github.com/jcjc712 -- login: chrispy-snps - count: 1.1072791194742415 - avatarUrl: https://avatars.githubusercontent.com/u/50950969?u=f0c166782c1b8f63eb983383729b5d109d7bed0a&v=4 - twitterUsername: null - url: https://github.com/chrispy-snps +- login: EvilFreelancer + count: 1.1031007751937985 + avatarUrl: https://avatars.githubusercontent.com/u/9089568?u=d2f8bc466003afc3558a96f3266a0e32d5c18c34&v=4 + twitterUsername: EvilFreelancer + url: https://github.com/EvilFreelancer - login: zywilliamli count: 1.1028989292243405 avatarUrl: https://avatars.githubusercontent.com/u/32046231?u=db454b8e6da48120d78d3397006928cc86f01019&v=4 @@ -3166,11 +3211,6 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/134934501?u=167199ff0bff447057fc5e291be0225ad5260111&v=4 twitterUsername: null url: https://github.com/vrushankportkey -- login: samnoyes - count: 1.0769162044959417 - avatarUrl: https://avatars.githubusercontent.com/u/6432132?v=4 - twitterUsername: null - url: https://github.com/samnoyes - login: jxnl count: 1.0761732546629572 avatarUrl: https://avatars.githubusercontent.com/u/4852235?u=69b6d23a20085d57e304196e304cfd06f3393f3d&v=4 @@ -3191,6 +3231,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/1555858?v=4 twitterUsername: null url: https://github.com/prakul +- login: ea-open-source + count: 1.072039072039072 + avatarUrl: https://avatars.githubusercontent.com/u/20924562?u=3f61dc32f82124727d7157c0977240770ab82c02&v=4 + twitterUsername: null + url: https://github.com/ea-open-source - login: constantinmusca count: 1.0716813430993335 avatarUrl: https://avatars.githubusercontent.com/u/1473079?v=4 @@ -3236,6 +3281,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/291370?u=5802ab31e0feb7ae15465dedaa48ba646f0a4127&v=4 twitterUsername: null url: https://github.com/sanzgiri +- login: MacanPN + count: 1.0604834042713411 + avatarUrl: 
https://avatars.githubusercontent.com/u/1621509?u=e54d671ddef5ac7580003427246fc2247964c9ed&v=4 + twitterUsername: null + url: https://github.com/MacanPN - login: wlleiiwang count: 1.0603351955307263 avatarUrl: https://avatars.githubusercontent.com/u/6872942?v=4 @@ -3401,6 +3451,11 @@ top_contributors: avatarUrl: https://avatars.githubusercontent.com/u/38180263?u=d514276e558f3f3aaba4844fdeb14eb84e9c8cc2&v=4 twitterUsername: namanmodii url: https://github.com/naman-modi +- login: sokolgood + count: 1.0290598290598292 + avatarUrl: https://avatars.githubusercontent.com/u/126395124?u=79cff420daf96b72b14caca0061b57b884139f4f&v=4 + twitterUsername: null + url: https://github.com/sokolgood - login: harelix count: 1.0272601794340925 avatarUrl: https://avatars.githubusercontent.com/u/2310608?u=1e5009aa6681eed766a14cfb8849d820821dddce&v=4 @@ -3454,7 +3509,7 @@ top_contributors: - login: rotemweiss57 count: 1.0179108360406797 avatarUrl: https://avatars.githubusercontent.com/u/91344214?u=5c34c21b464a6bbffd83a07aafac2cf9076856db&v=4 - twitterUsername: null + twitterUsername: weiss_rotem url: https://github.com/rotemweiss57 - login: hmilkovi count: 1.0178506375227687 @@ -3464,7 +3519,7 @@ top_contributors: - login: vreyespue count: 1.0171240910157167 avatarUrl: https://avatars.githubusercontent.com/u/42059733?u=502e381ca0e17491298e90ac3c5db019dd484efc&v=4 - twitterUsername: vreyespue + twitterUsername: null url: https://github.com/vreyespue - login: deepblue count: 1.0166320166320166 @@ -3568,32 +3623,32 @@ top_contributors: url: https://github.com/jwbeck97 top_reviewers: - login: leo-gan - count: 137 + count: 141 avatarUrl: https://avatars.githubusercontent.com/u/2256422?v=4 twitterUsername: null url: https://github.com/leo-gan -- login: ccurme - count: 97 - avatarUrl: https://avatars.githubusercontent.com/u/26529506?u=528b1df1ba3ba4f21e3e1fb74b12766e5b04c487&v=4 - twitterUsername: null - url: https://github.com/ccurme - login: lkuligin count: 45 avatarUrl: https://avatars.githubusercontent.com/u/11026406?v=4 twitterUsername: null url: https://github.com/lkuligin - login: cbornet - count: 31 + count: 34 avatarUrl: https://avatars.githubusercontent.com/u/11633333?u=e13817e11b3fb8c3d209d747c77a0f0742d11138&v=4 twitterUsername: null url: https://github.com/cbornet - login: 3coins - count: 27 + count: 28 avatarUrl: https://avatars.githubusercontent.com/u/289369?u=80655eb5f9a4d03bf1a526b07a67adc6eacccc6b&v=4 twitterUsername: pjain7 url: https://github.com/3coins +- login: liugddx + count: 18 + avatarUrl: https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4 + twitterUsername: null + url: https://github.com/liugddx - login: joemcelroy - count: 16 + count: 17 avatarUrl: https://avatars.githubusercontent.com/u/49480?u=4a9b7c8820211aae14da7f72f617d88019a06569&v=4 twitterUsername: phoey1 url: https://github.com/joemcelroy @@ -3602,11 +3657,11 @@ top_reviewers: avatarUrl: https://avatars.githubusercontent.com/u/67427?v=4 twitterUsername: mesirii url: https://github.com/jexp -- login: liugddx +- login: mspronesti count: 15 - avatarUrl: https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4 + avatarUrl: https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4 twitterUsername: null - url: https://github.com/liugddx + url: https://github.com/mspronesti - login: JohnNay count: 14 avatarUrl: https://avatars.githubusercontent.com/u/8429627?u=d28653fbd93c966ac840f93a05f0ef949495851f&v=4 @@ -3632,21 
+3687,16 @@ top_reviewers: avatarUrl: https://avatars.githubusercontent.com/u/13262395?u=430eff10dfbb7d3f27a35f1ea2c9ea6a61067c88&v=4 twitterUsername: HoltSkinner12 url: https://github.com/holtskinner +- login: tomasonjo + count: 12 + avatarUrl: https://avatars.githubusercontent.com/u/19948365?v=4 + twitterUsername: tb_tomaz + url: https://github.com/tomasonjo - login: skcoirz count: 11 avatarUrl: https://avatars.githubusercontent.com/u/62768671?u=279f772a5b8325a191a1a8bb623aa40f32a01856&v=4 twitterUsername: null url: https://github.com/skcoirz -- login: mspronesti - count: 11 - avatarUrl: https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4 - twitterUsername: null - url: https://github.com/mspronesti -- login: tomasonjo - count: 11 - avatarUrl: https://avatars.githubusercontent.com/u/19948365?v=4 - twitterUsername: tb_tomaz - url: https://github.com/tomasonjo - login: tylerhutcherson count: 10 avatarUrl: https://avatars.githubusercontent.com/u/20304844?u=f00461bcedad6ba384a4e234a44c906802448b4e&v=4 @@ -3687,6 +3737,21 @@ top_reviewers: avatarUrl: https://avatars.githubusercontent.com/u/25930426?v=4 twitterUsername: null url: https://github.com/pranjaldoshi96 +- login: Anush008 + count: 7 + avatarUrl: https://avatars.githubusercontent.com/u/46051506?u=026f5f140e8b7ba4744bf971f9ebdea9ebab67ca&v=4 + twitterUsername: null + url: https://github.com/Anush008 +- login: nicoloboschi + count: 7 + avatarUrl: https://avatars.githubusercontent.com/u/23314389?u=2014e20e246530fa89bd902fe703b6f9e6ecf833&v=4 + twitterUsername: nicoloboschi + url: https://github.com/nicoloboschi +- login: ShaneHarvey + count: 7 + avatarUrl: https://avatars.githubusercontent.com/u/5015933?u=80e339672a321cde25f4b484129bbddfefb2356d&v=4 + twitterUsername: null + url: https://github.com/ShaneHarvey - login: eavanvalkenburg count: 6 avatarUrl: https://avatars.githubusercontent.com/u/13749212?u=b58700c3bd236e880223bccba53b7ad0dd4d7003&v=4 @@ -3717,16 +3782,11 @@ top_reviewers: avatarUrl: https://avatars.githubusercontent.com/u/2096628?u=2a4822ff8dc6b4f1162c58716d48fdfac08c8601&v=4 twitterUsername: null url: https://github.com/blink1073 -- login: Anush008 +- login: hemidactylus count: 6 - avatarUrl: https://avatars.githubusercontent.com/u/46051506?u=026f5f140e8b7ba4744bf971f9ebdea9ebab67ca&v=4 + avatarUrl: https://avatars.githubusercontent.com/u/14221764?u=47a1405343b4d92caed3744e82dda1d28d01a251&v=4 twitterUsername: null - url: https://github.com/Anush008 -- login: nicoloboschi - count: 6 - avatarUrl: https://avatars.githubusercontent.com/u/23314389?u=2014e20e246530fa89bd902fe703b6f9e6ecf833&v=4 - twitterUsername: nicoloboschi - url: https://github.com/nicoloboschi + url: https://github.com/hemidactylus - login: andersenchen count: 5 avatarUrl: https://avatars.githubusercontent.com/u/101075607?v=4 @@ -3740,7 +3800,7 @@ top_reviewers: - login: nickscamara count: 5 avatarUrl: https://avatars.githubusercontent.com/u/20311743?u=29bf2391ae34297a12a88d813731b0bdf289e4a5&v=4 - twitterUsername: null + twitterUsername: nickscamara_ url: https://github.com/nickscamara - login: naveentatikonda count: 5 @@ -3757,18 +3817,13 @@ top_reviewers: avatarUrl: https://avatars.githubusercontent.com/u/6162415?u=82e86c06ae37add3750f9db9ad9d7dfa250ddae7&v=4 twitterUsername: null url: https://github.com/navneet1v -- login: hemidactylus - count: 5 - avatarUrl: https://avatars.githubusercontent.com/u/14221764?u=47a1405343b4d92caed3744e82dda1d28d01a251&v=4 - twitterUsername: null - url: 
https://github.com/hemidactylus - login: maxjakob count: 5 avatarUrl: https://avatars.githubusercontent.com/u/851520?u=21c6d8ef697fd32a8020d81269e155a24cb081ac&v=4 twitterUsername: null url: https://github.com/maxjakob -- login: ShaneHarvey +- login: Jibola count: 5 - avatarUrl: https://avatars.githubusercontent.com/u/5015933?u=80e339672a321cde25f4b484129bbddfefb2356d&v=4 + avatarUrl: https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4 twitterUsername: null - url: https://github.com/ShaneHarvey + url: https://github.com/Jibola From acaf214a4516a2ffbd2817f553f4d48e6a908695 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sat, 1 Jun 2024 08:28:32 -0700 Subject: [PATCH 23/54] update agent docs (#22370) to use create_react_agent --------- Co-authored-by: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> --- docs/docs/how_to/qa_chat_history_how_to.ipynb | 144 +++++++++------- docs/docs/tutorials/agents.ipynb | 152 ++++++++--------- docs/docs/tutorials/qa_chat_history.ipynb | 160 ++++++++++-------- docs/docs/tutorials/sql_qa.ipynb | 24 +-- 4 files changed, 247 insertions(+), 233 deletions(-) diff --git a/docs/docs/how_to/qa_chat_history_how_to.ipynb b/docs/docs/how_to/qa_chat_history_how_to.ipynb index 52d1c311f8cda..64e3f737a66d1 100644 --- a/docs/docs/how_to/qa_chat_history_how_to.ipynb +++ b/docs/docs/how_to/qa_chat_history_how_to.ipynb @@ -36,12 +36,13 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "ede7fdc0-ef31-483d-bd67-32e4b5c5d527", "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-community langchainhub langchain-chroma bs4" + "%%capture --no-stderr\n", + "%pip install --upgrade --quiet langchain langchain-community langchain-chroma bs4" ] }, { @@ -54,7 +55,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "143787ca-d8e6-4dc9-8281-4374f4d71720", "metadata": {}, "outputs": [], @@ -62,7 +63,8 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", + "if not os.environ.get(\"OPENAI_API_KEY\"):\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", "\n", "# import dotenv\n", "\n", @@ -83,13 +85,14 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "07411adb-3722-4f65-ab7f-8f6f57663d11", "metadata": {}, "outputs": [], "source": [ "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", - "os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" + "if not os.environ.get(\"LANGCHAIN_API_KEY\"):\n", + " os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" ] }, { @@ -126,7 +129,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 4, "id": "cb58f273-2111-4a9b-8932-9b64c95030c8", "metadata": {}, "outputs": [], @@ -157,13 +160,12 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 5, "id": "820244ae-74b4-4593-b392-822979dd91b8", "metadata": {}, "outputs": [], "source": [ "import bs4\n", - "from langchain import hub\n", "from langchain.chains import create_retrieval_chain\n", "from langchain.chains.combine_documents import create_stuff_documents_chain\n", "from langchain_chroma import Chroma\n", @@ -202,7 +204,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 6, "id": "2b685428-8b82-4af1-be4f-7232c5d55b73", "metadata": {}, "outputs": [], @@ -239,7 +241,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "id": 
"4c4b1695-6217-4ee8-abaf-7cc26366d988", "metadata": {}, "outputs": [], @@ -265,7 +267,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "id": "afef4385-f571-4874-8f52-3d475642f579", "metadata": {}, "outputs": [], @@ -314,7 +316,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72", "metadata": {}, "outputs": [], @@ -343,17 +345,17 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 10, "id": "1046c92f-21b3-4214-907d-92878d8cba23", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in thinking step by step or exploring multiple reasoning possibilities at each step. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.'" + "'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.'" ] }, - "execution_count": 7, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -369,17 +371,17 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 11, "id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down complex tasks into smaller steps. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to guide the decomposition process effectively.'" + "'Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions, or human inputs to break down complex tasks into smaller and more manageable steps. Additionally, task decomposition can involve utilizing resources like internet access for information gathering, long-term memory management, and GPT-3.5 powered agents for delegation of simple tasks.'" ] }, - "execution_count": 8, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -401,7 +403,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, "id": "7686b874-3a85-499f-82b5-28a85c4c768c", "metadata": {}, "outputs": [ @@ -411,11 +413,11 @@ "text": [ "User: What is Task Decomposition?\n", "\n", - "AI: Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in thinking step by step or exploring multiple reasoning possibilities at each step. 
Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.\n", + "AI: Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.\n", "\n", "User: What are common ways of doing it?\n", "\n", - "AI: Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down complex tasks into smaller steps. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to guide the decomposition process effectively.\n", + "AI: Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions, or human inputs to break down complex tasks into smaller and more manageable steps. Additionally, task decomposition can involve utilizing resources like internet access for information gathering, long-term memory management, and GPT-3.5 powered agents for delegation of simple tasks.\n", "\n" ] } @@ -452,7 +454,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 13, "id": "71c32048-1a41-465f-a9e2-c4affc332fd9", "metadata": {}, "outputs": [], @@ -552,17 +554,17 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 14, "id": "6d0a7a73-d151-47d9-9e99-b4f3291c0322", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable. This process helps agents or models tackle difficult tasks by dividing them into more easily achievable subgoals. Task decomposition can be done through techniques like Chain of Thought or Tree of Thoughts, which guide the model in thinking step by step or exploring multiple reasoning possibilities at each step.'" + "'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help in decomposing hard tasks into multiple manageable tasks by instructing models to think step by step and explore multiple reasoning possibilities at each step. Task decomposition can be achieved through various methods such as using prompting techniques, task-specific instructions, or human inputs.'" ] }, - "execution_count": 2, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -578,17 +580,17 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 15, "id": "17021822-896a-4513-a17d-1d20b1c5381c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "\"Common ways of task decomposition include using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide models in breaking down complex tasks into smaller steps. 
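Because `RunnableWithMessageHistory` keys transcripts by `session_id`, switching the identifier starts a clean conversation without touching the first one. A short sketch, assuming the `conversational_rag_chain` and `store` defined in the cell above; the id "def234" is an arbitrary example value:

```python
# A brand-new session id has no stored history, so the follow-up question
# arrives without its referent and must be answered from scratch.
conversational_rag_chain.invoke(
    {"input": "What are common ways of doing it?"},
    config={"configurable": {"session_id": "def234"}},
)["answer"]

# The original session's transcript is still intact in the in-memory store.
print(len(store["abc123"].messages), "messages recorded for abc123")
```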
This can be achieved through simple prompting with LLMs, task-specific instructions, or human inputs to help the model understand and navigate the task effectively. Task decomposition aims to enhance model performance on complex tasks by utilizing more test-time computation and shedding light on the model's thinking process.\"" + "'Task decomposition can be done in common ways such as using prompting techniques like Chain of Thought (CoT) or Tree of Thoughts, which instruct models to think step by step and explore multiple reasoning possibilities at each step. Another way is to provide task-specific instructions, such as asking to \"Write a story outline\" for writing a novel, to guide the decomposition process. Additionally, task decomposition can also involve human inputs to break down complex tasks into smaller and simpler steps.'" ] }, - "execution_count": 3, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -618,7 +620,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 16, "id": "809cc747-2135-40a2-8e73-e4556343ee64", "metadata": {}, "outputs": [], @@ -646,14 +648,14 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 17, "id": "1726d151-4653-4c72-a187-a14840add526", "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import chat_agent_executor\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(llm, tools)" + "agent_executor = create_react_agent(llm, tools)" ] }, { @@ -666,19 +668,26 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 18, "id": "52ae46d9-43f7-481b-96d5-df750be3ad65", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 5cd28d13-88dd-4eac-a465-3770ac27eff6, but expected {'tool'} run.\")\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_wxRrUmNbaNny8wh9JIb5uCRB', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-57ee0d12-6142-4957-a002-cce0093efe07-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_wxRrUmNbaNny8wh9JIb5uCRB'}])]}}\n", + "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_TbhPPPN05GKi36HLeaN4QM90', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2e60d910-879a-4a2a-b1e9-6a6c5c7d7ebc-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_TbhPPPN05GKi36HLeaN4QM90'}])]}}\n", "----\n", - "{'action': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. 
An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='9c3a17f7-653c-47fa-b4e4-fa3d8d24c85d', tool_call_id='call_wxRrUmNbaNny8wh9JIb5uCRB')]}}\n", + "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. 
CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_TbhPPPN05GKi36HLeaN4QM90')]}}\n", "----\n", - "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps agents in planning and executing tasks more effectively. One common method for task decomposition is the Chain of Thought (CoT) technique, where models are instructed to think step by step to decompose hard tasks into manageable steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of thought steps.\\n\\nTask decomposition can be achieved through various methods, such as using language models with simple prompting, task-specific instructions, or human inputs. By breaking down tasks into smaller components, agents can better plan and execute tasks efficiently.\\n\\nIf you would like more detailed information or examples on task decomposition, feel free to ask!', response_metadata={'token_usage': {'completion_tokens': 154, 'prompt_tokens': 588, 'total_tokens': 742}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-8991fa20-c527-4f9e-a058-fc6264fe6259-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in transforming big tasks into multiple manageable tasks, making it easier for autonomous agents to handle and interpret the thinking process. One common method for task decomposition is the Chain of Thought (CoT) technique, where models are instructed to \"think step by step\" to decompose hard tasks. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of multiple thoughts per step. 
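Streaming surfaces every intermediate update, but a plain `invoke` is often enough once the graph is trusted. A minimal sketch, assuming the `agent_executor` created above; the final answer is simply the last message in the returned state:

```python
from langchain_core.messages import HumanMessage

response = agent_executor.invoke(
    {"messages": [HumanMessage(content="What is Task Decomposition?")]}
)
# The graph returns the full message list; the answer is the last entry.
print(response["messages"][-1].content)
```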
Task decomposition can be facilitated through various methods such as using simple prompts, task-specific instructions, or human inputs.', response_metadata={'token_usage': {'completion_tokens': 130, 'prompt_tokens': 636, 'total_tokens': 766}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-3ef17638-65df-4030-a7fe-795e6da91c69-0')]}}\n", "----\n" ] } @@ -707,7 +716,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "id": "837a401e-9757-4d0e-a0da-24fa097d887e", "metadata": {}, "outputs": [], @@ -716,9 +725,7 @@ "\n", "memory = SqliteSaver.from_conn_string(\":memory:\")\n", "\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(\n", - " llm, tools, checkpointer=memory\n", - ")" + "agent_executor = create_react_agent(llm, tools, checkpointer=memory)" ] }, { @@ -733,7 +740,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 20, "id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8", "metadata": {}, "outputs": [ @@ -741,7 +748,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-1451e59b-b135-4776-985d-4759338ffee5-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1cd17562-18aa-4839-b41b-403b17a0fc20-0')]}}\n", "----\n" ] } @@ -766,19 +773,26 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 21, "id": "e2c570ae-dd91-402c-8693-ae746de63b16", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID c54381c0-c5d9-495a-91a0-aca4ae755663, but expected {'tool'} run.\")\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ab2x4iUPSWDAHS5txL7PspSK', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-f76b5813-b41c-4d0d-9ed2-667b988d885e-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_ab2x4iUPSWDAHS5txL7PspSK'}])]}}\n", + "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_rg7zKTE5e0ICxVSslJ1u9LMg', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-122bf097-7ff1-49aa-b430-e362b51354ad-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 
'call_rg7zKTE5e0ICxVSslJ1u9LMg'}])]}}\n", "----\n", - "{'action': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='e0895fa5-5d41-4be0-98db-10a83d42fc2f', tool_call_id='call_ab2x4iUPSWDAHS5txL7PspSK')]}}\n", + "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 
2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_rg7zKTE5e0ICxVSslJ1u9LMg')]}}\n", "----\n", - "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used in complex tasks where the task is broken down into smaller and simpler steps. This approach helps in managing and solving difficult tasks by dividing them into more manageable components. One common method for task decomposition is the Chain of Thought (CoT) technique, which prompts the model to think step by step and decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of thought steps.\\n\\nTask decomposition can be achieved through various methods, such as using language models with simple prompting, task-specific instructions, or human inputs. By breaking down tasks into smaller components, agents can better plan and execute complex tasks effectively.\\n\\nIf you would like more detailed information or examples related to task decomposition, feel free to ask!', response_metadata={'token_usage': {'completion_tokens': 165, 'prompt_tokens': 611, 'total_tokens': 776}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-13296566-8577-4d65-982b-a39718988ca3-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in managing and solving intricate problems by dividing them into more manageable components. By decomposing tasks, agents or models can better understand the steps involved and plan their actions accordingly. 
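The `thread_id` plays the same role for the agent that `session_id` played for the chain earlier: it scopes what the checkpointer saves and restores. A sketch, assuming the `agent_executor` built with `checkpointer=memory` above; "xyz789" is an arbitrary fresh thread:

```python
from langchain_core.messages import HumanMessage

# Same thread: the agent can still see the earlier turns of this conversation.
agent_executor.invoke(
    {"messages": [HumanMessage(content="What did I just ask you about?")]},
    config={"configurable": {"thread_id": "abc123"}},
)

# Fresh thread: no saved checkpoints, so the agent starts from a blank history.
agent_executor.invoke(
    {"messages": [HumanMessage(content="What did I just ask you about?")]},
    config={"configurable": {"thread_id": "xyz789"}},
)
```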
Techniques like Chain of Thought (CoT) and Tree of Thoughts are examples of methods that enhance model performance on complex tasks by breaking them down into smaller steps.', response_metadata={'token_usage': {'completion_tokens': 87, 'prompt_tokens': 659, 'total_tokens': 746}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-b9166386-83e5-4b82-9a4b-590e5fa76671-0')]}}\n", "----\n" ] } @@ -805,7 +819,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 22, "id": "570d8c68-136e-4ba5-969a-03ba195f6118", "metadata": {}, "outputs": [ @@ -813,11 +827,24 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_KvoiamnLfGEzMeEMlV3u0TJ7', 'function': {'arguments': '{\"query\":\"common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 930, 'total_tokens': 951}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-dd842071-6dbd-4b68-8657-892eaca58638-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'common ways of task decomposition'}, 'id': 'call_KvoiamnLfGEzMeEMlV3u0TJ7'}])]}}\n", - "----\n", - "{'action': {'messages': [ToolMessage(content='Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. 
The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.', name='blog_post_retriever', id='c749bb8e-c8e0-4fa3-bc11-3e2e0651880b', tool_call_id='call_KvoiamnLfGEzMeEMlV3u0TJ7')]}}\n", + "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6kbxTU5CDWLmF9mrvR7bWSkI', 'function': {'arguments': '{\"query\":\"Common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 769, 'total_tokens': 790}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2d2c8327-35cd-484a-b8fd-52436657c2d8-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Common ways of task decomposition'}, 'id': 'call_6kbxTU5CDWLmF9mrvR7bWSkI'}])]}}\n", + "----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 29553415-e0f4-41a9-8921-ba489e377f68, but expected {'tool'} run.\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 
2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_6kbxTU5CDWLmF9mrvR7bWSkI')]}}\n", "----\n", - "{'agent': {'messages': [AIMessage(content='According to the blog post, common ways of task decomposition include:\\n\\n1. Using language models with simple prompting like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\"\\n2. Utilizing task-specific instructions, for example, using \"Write a story outline\" for writing a novel.\\n3. Involving human inputs in the task decomposition process.\\n\\nThese methods help in breaking down complex tasks into smaller and more manageable steps, facilitating better planning and execution of the overall task.', response_metadata={'token_usage': {'completion_tokens': 100, 'prompt_tokens': 1475, 'total_tokens': 1575}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-98b765b3-f1a6-4c9a-ad0f-2db7950b900f-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Common ways of task decomposition include:\\n1. Using LLM with simple prompting like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\"\\n2. Using task-specific instructions, for example, \"Write a story outline\" for writing a novel.\\n3. 
Involving human inputs in the task decomposition process.', response_metadata={'token_usage': {'completion_tokens': 67, 'prompt_tokens': 1339, 'total_tokens': 1406}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ad14cde-ca75-4238-a868-f865e0fc50dd-0')]}}\n", "----\n" ] } @@ -852,20 +879,15 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 23, "id": "b1d2b4d4-e604-497d-873d-d345b808578e", "metadata": {}, "outputs": [], "source": [ "import bs4\n", - "from langchain.agents import AgentExecutor, create_tool_calling_agent\n", "from langchain.tools.retriever import create_retriever_tool\n", "from langchain_chroma import Chroma\n", - "from langchain_community.chat_message_histories import ChatMessageHistory\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_core.chat_history import BaseChatMessageHistory\n", - "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory\n", "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", "from langgraph.checkpoint.sqlite import SqliteSaver\n", @@ -900,9 +922,7 @@ "tools = [tool]\n", "\n", "\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(\n", - " llm, tools, checkpointer=memory\n", - ")" + "agent_executor = create_react_agent(llm, tools, checkpointer=memory)" ] }, { @@ -941,7 +961,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.11.2" } }, "nbformat": 4, diff --git a/docs/docs/tutorials/agents.ipynb b/docs/docs/tutorials/agents.ipynb index 9c882cbab9d53..5943e07b26589 100644 --- a/docs/docs/tutorials/agents.ipynb +++ b/docs/docs/tutorials/agents.ipynb @@ -43,26 +43,39 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "a79bb782", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'agent': {'messages': [AIMessage(content=\"Hello Bob! Since you didn't ask a specific question, I don't need to use any tools to respond. It's nice to meet you. San Francisco is a wonderful city with lots to see and do. I hope you're enjoying living there. 
Please let me know if you have any other questions!\", response_metadata={'id': 'msg_01Mmfzfs9m4XMgVzsCZYMWqH', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 271, 'output_tokens': 65}}, id='run-44c57f9c-a637-4888-b7d9-6d985031ae48-0', usage_metadata={'input_tokens': 271, 'output_tokens': 65, 'total_tokens': 336})]}}\n", + "----\n", + "{'agent': {'messages': [AIMessage(content=[{'text': 'To get current weather information for your location in San Francisco, let me invoke the search tool:', 'type': 'text'}, {'id': 'toolu_01BGEyQaSz3pTq8RwUUHSRoo', 'input': {'query': 'san francisco weather'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}], response_metadata={'id': 'msg_013AVSVsRLKYZjduLpJBY4us', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 347, 'output_tokens': 80}}, id='run-de7923b6-5ee2-4ebe-bd95-5aed4933d0e3-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'san francisco weather'}, 'id': 'toolu_01BGEyQaSz3pTq8RwUUHSRoo'}], usage_metadata={'input_tokens': 347, 'output_tokens': 80, 'total_tokens': 427})]}}\n", + "----\n", + "{'tools': {'messages': [ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1717238643, \\'localtime\\': \\'2024-06-01 3:44\\'}, \\'current\\': {\\'last_updated_epoch\\': 1717237800, \\'last_updated\\': \\'2024-06-01 03:30\\', \\'temp_c\\': 12.0, \\'temp_f\\': 53.6, \\'is_day\\': 0, \\'condition\\': {\\'text\\': \\'Mist\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/night/143.png\\', \\'code\\': 1030}, \\'wind_mph\\': 5.6, \\'wind_kph\\': 9.0, \\'wind_degree\\': 310, \\'wind_dir\\': \\'NW\\', \\'pressure_mb\\': 1013.0, \\'pressure_in\\': 29.92, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 88, \\'cloud\\': 100, \\'feelslike_c\\': 10.5, \\'feelslike_f\\': 50.8, \\'windchill_c\\': 9.3, \\'windchill_f\\': 48.7, \\'heatindex_c\\': 11.1, \\'heatindex_f\\': 51.9, \\'dewpoint_c\\': 8.8, \\'dewpoint_f\\': 47.8, \\'vis_km\\': 6.4, \\'vis_miles\\': 3.0, \\'uv\\': 1.0, \\'gust_mph\\': 12.5, \\'gust_kph\\': 20.1}}\"}, {\"url\": \"https://www.timeanddate.com/weather/usa/san-francisco/historic\", \"content\": \"Past Weather in San Francisco, California, USA \\\\u2014 Yesterday and Last 2 Weeks. Time/General. Weather. Time Zone. DST Changes. Sun & Moon. Weather Today Weather Hourly 14 Day Forecast Yesterday/Past Weather Climate (Averages) Currently: 68 \\\\u00b0F. Passing clouds.\"}]', name='tavily_search_results_json', tool_call_id='toolu_01BGEyQaSz3pTq8RwUUHSRoo')]}}\n", + "----\n", + "{'agent': {'messages': [AIMessage(content='Based on the search results, the current weather in San Francisco is:\\n\\nTemperature: 53.6°F (12°C)\\nConditions: Misty\\nWind: 5.6 mph (9 kph) from the Northwest\\nHumidity: 88%\\nCloud Cover: 100% \\n\\nThe results provide detailed information like wind chill, heat index, visibility and more. It looks like a typical cool, foggy morning in San Francisco. 
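Each dictionary yielded by `stream` is keyed by the graph node that produced it, which is why the printed output alternates between `'agent'` and `'tools'` entries. A small sketch of leaning on that structure, assuming the `agent_executor` and `config` from the quickstart cell:

```python
from langchain_core.messages import HumanMessage

for step in agent_executor.stream(
    {"messages": [HumanMessage(content="whats the weather in sf?")]}, config
):
    # One update per node; report which node ran and what it emitted.
    for node, update in step.items():
        print(f"[{node}] emitted a {type(update['messages'][-1]).__name__}")
```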
Let me know if you need any other details about the weather where you live!', response_metadata={'id': 'msg_019WGLbaojuNdbCnqac7zaGW', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 1035, 'output_tokens': 120}}, id='run-1bb68bf3-b212-4ef4-8a31-10c830421c78-0', usage_metadata={'input_tokens': 1035, 'output_tokens': 120, 'total_tokens': 1155})]}}\n", "----\n" ] } ], "source": [ "# Import relevant functionality\n", "from langchain_anthropic import ChatAnthropic\n", "from langchain_community.tools.tavily_search import TavilySearchResults\n", "from langchain_core.messages import HumanMessage\n", "from langgraph.checkpoint.sqlite import SqliteSaver\n", - "from langgraph.prebuilt import chat_agent_executor\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "# Create the agent\n", "memory = SqliteSaver.from_conn_string(\":memory:\")\n", "model = ChatAnthropic(model_name=\"claude-3-sonnet-20240229\")\n", "search = TavilySearchResults(max_results=2)\n", "tools = [search]\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(\n", - "    model, tools, checkpointer=memory\n", - ")\n", + "agent_executor = create_react_agent(model, tools, checkpointer=memory)\n", "\n", "# Use the agent\n", "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n", @@ -104,7 +117,7 @@ "metadata": {}, "outputs": [], "source": [ - "% pip install -U langchain-community langgraph langchain-anthropic" + "% pip install -U langchain-community langgraph langchain-anthropic tavily-python" ] }, { @@ -166,39 +179,19 @@ "We first need to create the tools we want to use. Our main tool of choice will be [Tavily](/docs/integrations/tools/tavily_search) - a search engine. We have a built-in tool in LangChain to easily use the Tavily search engine as a tool.\n" ] }, - { - "cell_type": "code", - "execution_count": 1, - "id": "482ce13d", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.tools.tavily_search import TavilySearchResults" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "9cc86c0b", - "metadata": {}, - "outputs": [], - "source": [ - "search = TavilySearchResults(max_results=2)" - ] - }, - { "cell_type": "code", "execution_count": 3, - "id": "e593bbf6", + "id": "482ce13d", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'url': 'https://www.weatherapi.com/',\n", " 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1716929532, 'localtime': '2024-05-28 13:52'}, 'current': {'last_updated_epoch': 1716929100, 'last_updated': '2024-05-28 13:45', 'temp_c': 16.7, 'temp_f': 62.1, 'is_day': 1, 'condition': {'text': 'Partly cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 12.5, 'wind_kph': 20.2, 'wind_degree': 270, 'wind_dir': 'W', 'pressure_mb': 1019.0, 'pressure_in': 30.09, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 62, 'cloud': 25, 'feelslike_c': 16.7, 'feelslike_f': 62.1, 'windchill_c': 13.1, 'windchill_f': 55.6, 'heatindex_c': 14.5, 'heatindex_f': 58.2, 'dewpoint_c': 9.1, 'dewpoint_f': 48.4, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 4.0, 'gust_mph': 14.4, 'gust_kph': 23.2}}\"},\n", - " {'url': 'https://weatherspark.com/h/m/557/2024/5/Historical-Weather-in-May-2024-in-San-Francisco-California-United-States',\n", - " 'content': 'San Francisco Temperature History May 2024. 
The daily range of reported temperatures (gray bars) and 24-hour highs (red ticks) and lows (blue ticks), placed over the daily average high (faint red line) and low (faint blue line) temperature, with 25th to 75th and 10th to 90th percentile bands.'}]" + " 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1717238703, 'localtime': '2024-06-01 3:45'}, 'current': {'last_updated_epoch': 1717237800, 'last_updated': '2024-06-01 03:30', 'temp_c': 12.0, 'temp_f': 53.6, 'is_day': 0, 'condition': {'text': 'Mist', 'icon': '//cdn.weatherapi.com/weather/64x64/night/143.png', 'code': 1030}, 'wind_mph': 5.6, 'wind_kph': 9.0, 'wind_degree': 310, 'wind_dir': 'NW', 'pressure_mb': 1013.0, 'pressure_in': 29.92, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 88, 'cloud': 100, 'feelslike_c': 10.5, 'feelslike_f': 50.8, 'windchill_c': 9.3, 'windchill_f': 48.7, 'heatindex_c': 11.1, 'heatindex_f': 51.9, 'dewpoint_c': 8.8, 'dewpoint_f': 47.8, 'vis_km': 6.4, 'vis_miles': 3.0, 'uv': 1.0, 'gust_mph': 12.5, 'gust_kph': 20.1}}\"},\n", + " {'url': 'https://www.wunderground.com/hourly/us/ca/san-francisco/date/2024-01-06',\n", + " 'content': 'Current Weather for Popular Cities . San Francisco, CA 58 ° F Partly Cloudy; Manhattan, NY warning 51 ° F Cloudy; Schiller Park, IL (60176) warning 51 ° F Fair; Boston, MA warning 41 ° F ...'}]" ] }, "execution_count": 3, @@ -207,24 +200,13 @@ } ], "source": [ - "search.invoke(\"what is the weather in SF\")" - ] - }, - { - "cell_type": "markdown", - "id": "c3b47c1d", - "metadata": {}, - "source": [ - "If we want, we can create other tools. Once we have all the tools we want, we can put them in a list that we will reference later." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "b8e8e710", - "metadata": {}, - "outputs": [], - "source": [ + "from langchain_community.tools.tavily_search import TavilySearchResults\n", + "\n", + "search = TavilySearchResults(max_results=2)\n", + "search_results = search.invoke(\"what is the weather in SF\")\n", + "print(search_results)\n", + "# If we want, we can create other tools.\n", + "# Once we have all the tools we want, we can put them in a list that we will reference later.\n", "tools = [search]" ] }, @@ -246,7 +228,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "id": "69185491", "metadata": {}, "outputs": [], @@ -269,17 +251,17 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "id": "c96c960b", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Hello! How can I assist you today?'" + "'Hi there!'" ] }, - "execution_count": 6, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -301,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "id": "ba692a74", "metadata": {}, "outputs": [], @@ -319,7 +301,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "id": "b6a7e925", "metadata": {}, "outputs": [ @@ -327,7 +309,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "ContentString: Hello! 
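The presence or absence of `tool_calls` is the signal an executor branches on when deciding whether to run a tool. A minimal sketch of that branch, assuming the `model_with_tools` bound above:

```python
from langchain_core.messages import HumanMessage

response = model_with_tools.invoke([HumanMessage(content="whats the weather in sf?")])

if response.tool_calls:
    # Each requested call carries a tool name, parsed args, and an id that the
    # eventual ToolMessage must echo back.
    for call in response.tool_calls:
        print(f"model requested {call['name']} with args {call['args']}")
else:
    print(response.content)
```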
How can I assist you today?\n", + "ContentString: Hello!\n", "ToolCalls: []\n" ] } @@ -349,7 +331,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "id": "688b465d", "metadata": {}, "outputs": [ @@ -357,8 +339,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "ContentString: \n", - "ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in San Francisco'}, 'id': 'call_BjPOvStlyv61w24VkHQ4pqFG'}]\n" + "ContentString: [{'id': 'toolu_01TSdZjtqppPVYyvrYvsok6d', 'input': {'query': 'san francisco weather'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]\n", + "ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'san francisco weather'}, 'id': 'toolu_01TSdZjtqppPVYyvrYvsok6d'}]\n" ] } ], @@ -402,14 +384,14 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "id": "89cf72b4-6046-4b47-8f27-5522d8cb8036", "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import chat_agent_executor\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(model, tools)" + "agent_executor = create_react_agent(model, tools)" ] }, { @@ -426,18 +408,18 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 10, "id": "114ba50d", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[HumanMessage(content='hi!', id='acd18479-7e70-4114-a293-c5233736c1d5'),\n", - " AIMessage(content='Hello! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 83, 'total_tokens': 93}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-ebfca269-5cb2-47c1-8987-a24acf0b5015-0', usage_metadata={'input_tokens': 83, 'output_tokens': 10, 'total_tokens': 93})]" + "[HumanMessage(content='hi!', id='a820fcc5-9b87-457a-9af0-f21768143ee3'),\n", + " AIMessage(content='Hello!', response_metadata={'id': 'msg_01VbC493X1VEDyusgttiEr1z', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 264, 'output_tokens': 5}}, id='run-0e0ddae8-a85b-4bd6-947c-c36c857a4698-0', usage_metadata={'input_tokens': 264, 'output_tokens': 5, 'total_tokens': 269})]" ] }, - "execution_count": 11, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -460,20 +442,20 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 11, "id": "77c2f769", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[HumanMessage(content='whats the weather in sf?', id='880db162-5d1c-476c-82dd-b125caee1656'),\n", - " AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_i3ZKnTDPB1RxqwE6PWmgz5TQ', 'function': {'arguments': '{\\n \"query\": \"current weather in San Francisco\"\\n}', 'name': 'tavily_search_results_json'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 88, 'total_tokens': 111}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-93b6be79-c981-4b7b-8f0a-252255f23961-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in San Francisco'}, 'id': 'call_i3ZKnTDPB1RxqwE6PWmgz5TQ'}], usage_metadata={'input_tokens': 88, 'output_tokens': 23, 'total_tokens': 111}),\n", - " ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San 
Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1716929532, \\'localtime\\': \\'2024-05-28 13:52\\'}, \\'current\\': {\\'last_updated_epoch\\': 1716929100, \\'last_updated\\': \\'2024-05-28 13:45\\', \\'temp_c\\': 16.7, \\'temp_f\\': 62.1, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Partly cloudy\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/116.png\\', \\'code\\': 1003}, \\'wind_mph\\': 12.5, \\'wind_kph\\': 20.2, \\'wind_degree\\': 270, \\'wind_dir\\': \\'W\\', \\'pressure_mb\\': 1019.0, \\'pressure_in\\': 30.09, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 62, \\'cloud\\': 25, \\'feelslike_c\\': 16.7, \\'feelslike_f\\': 62.1, \\'windchill_c\\': 13.1, \\'windchill_f\\': 55.6, \\'heatindex_c\\': 14.5, \\'heatindex_f\\': 58.2, \\'dewpoint_c\\': 9.1, \\'dewpoint_f\\': 48.4, \\'vis_km\\': 16.0, \\'vis_miles\\': 9.0, \\'uv\\': 4.0, \\'gust_mph\\': 14.4, \\'gust_kph\\': 23.2}}\"}, {\"url\": \"https://forecast.weather.gov/MapClick.php?lat=37.7772&lon=-122.4168\", \"content\": \"Current conditions at SAN FRANCISCO DOWNTOWN (SFOC1) Lat: 37.77056\\\\u00b0NLon: 122.42694\\\\u00b0WElev: 150.0ft. NA. 52\\\\u00b0F. 11\\\\u00b0C. Humidity: 90%: ... 2am PDT May 28, 2024-6pm PDT Jun 3, 2024 . ... Radar & Satellite Image. Hourly Weather Forecast. National Digital Forecast Database. High Temperature. Chance of Precipitation. ACTIVE ALERTS Toggle menu ...\"}]', name='tavily_search_results_json', id='302dfc48-60bc-4db5-815a-2e97b8a95607', tool_call_id='call_i3ZKnTDPB1RxqwE6PWmgz5TQ'),\n", - " AIMessage(content='The current weather in San Francisco, California is partly cloudy with a temperature of 16.7°C (62.1°F). The wind is coming from the west at a speed of 20.2 kph (12.5 mph). The humidity is at 62%. 
[Source](https://www.weatherapi.com/)', response_metadata={'token_usage': {'completion_tokens': 67, 'prompt_tokens': 691, 'total_tokens': 758}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-953864dd-9af6-48aa-bc61-8b63388fca03-0', usage_metadata={'input_tokens': 691, 'output_tokens': 67, 'total_tokens': 758})]" + "[HumanMessage(content='whats the weather in sf?', id='1d6c96bb-4ddb-415c-a579-a07d5264de0d'),\n", + " AIMessage(content=[{'id': 'toolu_01Y5EK4bw2LqsQXeaUv8iueF', 'input': {'query': 'weather in san francisco'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}], response_metadata={'id': 'msg_0132wQUcEduJ8UKVVVqwJzM4', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 269, 'output_tokens': 61}}, id='run-26d5e5e8-d4fd-46d2-a197-87b95b10e823-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'weather in san francisco'}, 'id': 'toolu_01Y5EK4bw2LqsQXeaUv8iueF'}], usage_metadata={'input_tokens': 269, 'output_tokens': 61, 'total_tokens': 330}),\n", + " ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1717238703, \\'localtime\\': \\'2024-06-01 3:45\\'}, \\'current\\': {\\'last_updated_epoch\\': 1717237800, \\'last_updated\\': \\'2024-06-01 03:30\\', \\'temp_c\\': 12.0, \\'temp_f\\': 53.6, \\'is_day\\': 0, \\'condition\\': {\\'text\\': \\'Mist\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/night/143.png\\', \\'code\\': 1030}, \\'wind_mph\\': 5.6, \\'wind_kph\\': 9.0, \\'wind_degree\\': 310, \\'wind_dir\\': \\'NW\\', \\'pressure_mb\\': 1013.0, \\'pressure_in\\': 29.92, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 88, \\'cloud\\': 100, \\'feelslike_c\\': 10.5, \\'feelslike_f\\': 50.8, \\'windchill_c\\': 9.3, \\'windchill_f\\': 48.7, \\'heatindex_c\\': 11.1, \\'heatindex_f\\': 51.9, \\'dewpoint_c\\': 8.8, \\'dewpoint_f\\': 47.8, \\'vis_km\\': 6.4, \\'vis_miles\\': 3.0, \\'uv\\': 1.0, \\'gust_mph\\': 12.5, \\'gust_kph\\': 20.1}}\"}, {\"url\": \"https://www.timeanddate.com/weather/usa/san-francisco/hourly\", \"content\": \"Sun & Moon. Weather Today Weather Hourly 14 Day Forecast Yesterday/Past Weather Climate (Averages) Currently: 59 \\\\u00b0F. Passing clouds. (Weather station: San Francisco International Airport, USA). See more current weather.\"}]', name='tavily_search_results_json', id='37aa1fd9-b232-4a02-bd22-bc5b9b44a22c', tool_call_id='toolu_01Y5EK4bw2LqsQXeaUv8iueF'),\n", + " AIMessage(content='Based on the search results, here is a summary of the current weather in San Francisco:\\n\\nThe weather in San Francisco is currently misty with a temperature of around 53°F (12°C). There is complete cloud cover and moderate winds from the northwest around 5-9 mph (9-14 km/h). Humidity is high at 88%. Visibility is around 3 miles (6.4 km). \\n\\nThe results provide an hourly forecast as well as current conditions from a couple different weather sources. 
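The returned message list interleaves the model's tool request, the raw tool result, and the final answer, so the intermediate evidence is easy to pull back out. A sketch, assuming the `response` returned by the invoke call above:

```python
from langchain_core.messages import ToolMessage

for message in response["messages"]:
    if isinstance(message, ToolMessage):
        # The raw search payload the agent reasoned over, truncated for display.
        print(message.content[:300], "...")
```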
Let me know if you need any additional details about the San Francisco weather!', response_metadata={'id': 'msg_01BRX9mrT19nBDdHYtR7wJ92', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 920, 'output_tokens': 132}}, id='run-d0325583-3ddc-4432-b2b2-d023eb97660f-0', usage_metadata={'input_tokens': 920, 'output_tokens': 132, 'total_tokens': 1052})]" ] }, - "execution_count": 12, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -505,7 +487,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "id": "532d6557", "metadata": {}, "outputs": [ @@ -547,7 +529,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "id": "a3fb262c", "metadata": {}, "outputs": [ @@ -615,7 +597,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "id": "c4073e35", "metadata": {}, "outputs": [], @@ -627,21 +609,19 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 12, "id": "e64a944e-f9ac-43cf-903c-d3d28d765377", "metadata": {}, "outputs": [], "source": [ - "agent_executor = chat_agent_executor.create_tool_calling_executor(\n", - " model, tools, checkpointer=memory\n", - ")\n", + "agent_executor = create_react_agent(model, tools, checkpointer=memory)\n", "\n", "config = {\"configurable\": {\"thread_id\": \"abc123\"}}" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 13, "id": "a13462d0-2d02-4474-921e-15a1ba1fa274", "metadata": {}, "outputs": [ @@ -649,7 +629,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 131, 'total_tokens': 142}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-607733e3-4b8d-4137-ae66-8a4b8ccc8d40-0')]}}\n", + "{'agent': {'messages': [AIMessage(content=\"Hello Bob! It's nice to meet you again.\", response_metadata={'id': 'msg_013C1z2ZySagEFwmU1EsysR2', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 1162, 'output_tokens': 14}}, id='run-f878acfd-d195-44e8-9166-e2796317e3f8-0', usage_metadata={'input_tokens': 1162, 'output_tokens': 14, 'total_tokens': 1176})]}}\n", "----\n" ] } @@ -664,7 +644,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 14, "id": "56d8028b-5dbc-40b2-86f5-ed60631d86a3", "metadata": {}, "outputs": [ @@ -672,7 +652,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='Your name is Bob. How can I assist you further?', response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 154, 'total_tokens': 167}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-e1181ba6-732d-4564-b479-9f1ab6bf01f6-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='You mentioned your name is Bob when you introduced yourself earlier. 
So your name is Bob.', response_metadata={'id': 'msg_01WNwnRNGwGDRw6vRdivt6i1', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 1184, 'output_tokens': 21}}, id='run-f5c0b957-8878-405a-9d4b-a7cd38efe81f-0', usage_metadata={'input_tokens': 1184, 'output_tokens': 21, 'total_tokens': 1205})]}}\n", "----\n" ] } @@ -703,7 +683,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 15, "id": "24460239", "metadata": {}, "outputs": [ @@ -711,7 +691,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content=\"As an AI, I don't have access to personal data about individuals unless it has been shared with me in the course of our conversation. I am designed to respect user privacy and confidentiality. So, I don't know your name.\", response_metadata={'token_usage': {'completion_tokens': 48, 'prompt_tokens': 86, 'total_tokens': 134}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-b3c8d577-fdbf-4f0f-8fd8-ecb3a5ac8920-0', usage_metadata={'input_tokens': 86, 'output_tokens': 48, 'total_tokens': 134})]}}\n", + "{'agent': {'messages': [AIMessage(content=\"I'm afraid I don't actually know your name. As an AI assistant without personal information about you, I don't have a specific name associated with our conversation.\", response_metadata={'id': 'msg_01NoaXNNYZKSoBncPcLkdcbo', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 267, 'output_tokens': 36}}, id='run-c9f7df3d-525a-4d8f-bbcf-a5b4a5d2e4b0-0', usage_metadata={'input_tokens': 267, 'output_tokens': 36, 'total_tokens': 303})]}}\n", "----\n" ] } @@ -765,7 +745,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.1" + "version": "3.11.2" } }, "nbformat": 4, diff --git a/docs/docs/tutorials/qa_chat_history.ipynb b/docs/docs/tutorials/qa_chat_history.ipynb index 208ba0dca00b1..c86980907177b 100644 --- a/docs/docs/tutorials/qa_chat_history.ipynb +++ b/docs/docs/tutorials/qa_chat_history.ipynb @@ -45,11 +45,12 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "ede7fdc0-ef31-483d-bd67-32e4b5c5d527", "metadata": {}, "outputs": [], "source": [ + "%%capture --no-stderr\n", "%pip install --upgrade --quiet langchain langchain-community langchainhub langchain-chroma bs4" ] }, @@ -63,7 +64,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "143787ca-d8e6-4dc9-8281-4374f4d71720", "metadata": {}, "outputs": [], @@ -71,7 +72,8 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", + "if not os.environ.get(\"OPENAI_API_KEY\"):\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", "\n", "# import dotenv\n", "\n", @@ -92,13 +94,14 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "07411adb-3722-4f65-ab7f-8f6f57663d11", "metadata": {}, "outputs": [], "source": [ "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", - "os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" + "if not os.environ.get(\"LANGCHAIN_API_KEY\"):\n", + " os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" ] }, { @@ -125,7 +128,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 4, "id": "cb58f273-2111-4a9b-8932-9b64c95030c8", "metadata": {}, "outputs": [], @@ -140,8 +143,8 @@ }, { "cell_type": "code", - 
"execution_count": 2, - "id": "d8a913b1-0eea-442a-8a64-ec73333f104b", + "execution_count": 6, + "id": "820244ae-74b4-4593-b392-822979dd91b8", "metadata": {}, "outputs": [], "source": [ @@ -151,20 +154,10 @@ "from langchain.chains.combine_documents import create_stuff_documents_chain\n", "from langchain_chroma import Chroma\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables import RunnablePassthrough\n", "from langchain_openai import OpenAIEmbeddings\n", - "from langchain_text_splitters import RecursiveCharacterTextSplitter" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "820244ae-74b4-4593-b392-822979dd91b8", - "metadata": {}, - "outputs": [], - "source": [ + "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "\n", "# 1. Load, chunk and index the contents of the blog to create a retriever.\n", "loader = WebBaseLoader(\n", " web_paths=(\"https://lilianweng.github.io/posts/2023-06-23-agent/\",),\n", @@ -206,17 +199,17 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "id": "bf55faaf-0d17-4b74-925d-c478b555f7b2", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. This process can be achieved through techniques like Chain of Thought (CoT) or Tree of Thoughts, which help agents plan and execute tasks effectively by dividing them into sequential subgoals. Task decomposition can be facilitated by using prompting techniques, task-specific instructions, or human inputs to guide the agent through the steps required to accomplish a task.'" + "\"Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model. This process helps in guiding the agent through the various subgoals required to achieve the overall task efficiently. Different techniques like Chain of Thought and Tree of Thoughts can be used to decompose tasks into step-by-step processes, enhancing performance and understanding of the model's thinking process.\"" ] }, - "execution_count": 4, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -278,7 +271,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "id": "2b685428-8b82-4af1-be4f-7232c5d55b73", "metadata": {}, "outputs": [], @@ -322,7 +315,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "id": "66f275f3-ddef-4678-b90d-ee64576878f9", "metadata": {}, "outputs": [], @@ -354,7 +347,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 10, "id": "0005810b-1b95-4666-a795-08d80e478b83", "metadata": {}, "outputs": [ @@ -362,7 +355,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Task decomposition can be done in several common ways, such as using Language Model (LLM) with simple prompting like \"Steps for XYZ\" or asking for subgoals to achieve a specific task. Task-specific instructions can also be provided, like requesting a story outline for writing a novel. 
Additionally, human inputs can be utilized to decompose tasks into smaller components effectively.\n" + "Task decomposition can be achieved through various methods such as using techniques like Chain of Thought (CoT) or Tree of Thoughts to break down complex tasks into smaller steps. Common ways include prompting the model with simple instructions like \"Steps for XYZ\" or task-specific instructions like \"Write a story outline.\" Human inputs can also be used to guide the task decomposition process effectively.\n" ] } ], @@ -421,7 +414,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 11, "id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72", "metadata": {}, "outputs": [], @@ -450,17 +443,17 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 12, "id": "1046c92f-21b3-4214-907d-92878d8cba23", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model. This process helps in guiding the agent through the various subgoals required to achieve the overall task efficiently. Different techniques like Chain of Thought and Tree of Thoughts can be used to decompose tasks into manageable components.'" + "'Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help models decompose hard tasks into multiple manageable subtasks. This process allows agents to plan ahead and tackle intricate tasks effectively.'" ] }, - "execution_count": 9, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -476,17 +469,17 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 13, "id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Task decomposition can be achieved through various methods such as using prompting techniques like \"Steps for XYZ\" to guide the model through subgoals, providing task-specific instructions like \"Write a story outline\" for specific tasks, or incorporating human inputs to break down complex tasks. These approaches help in dividing a large task into smaller, more manageable components for better understanding and execution.'" + "'Task decomposition can be achieved through various methods such as using Language Model (LLM) with simple prompting, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to break down the task into smaller components. These approaches help in guiding agents to think step by step and decompose complex tasks into more manageable subgoals.'" ] }, - "execution_count": 10, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -508,7 +501,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 14, "id": "7686b874-3a85-499f-82b5-28a85c4c768c", "metadata": {}, "outputs": [ @@ -518,11 +511,11 @@ "text": [ "User: What is Task Decomposition?\n", "\n", - "AI: Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model. This process helps in guiding the agent through the various subgoals required to achieve the overall task efficiently. 
Different techniques like Chain of Thought and Tree of Thoughts can be used to decompose tasks into manageable components.\n", + "AI: Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help models decompose hard tasks into multiple manageable subtasks. This process allows agents to plan ahead and tackle intricate tasks effectively.\n", "\n", "User: What are common ways of doing it?\n", "\n", - "AI: Task decomposition can be achieved through various methods such as using prompting techniques like \"Steps for XYZ\" to guide the model through subgoals, providing task-specific instructions like \"Write a story outline\" for specific tasks, or incorporating human inputs to break down complex tasks. These approaches help in dividing a large task into smaller, more manageable components for better understanding and execution.\n", + "AI: Task decomposition can be achieved through various methods such as using Language Model (LLM) with simple prompting, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to break down the task into smaller components. These approaches help in guiding agents to think step by step and decompose complex tasks into more manageable subgoals.\n", "\n" ] } @@ -557,7 +550,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 15, "id": "71c32048-1a41-465f-a9e2-c4affc332fd9", "metadata": {}, "outputs": [], @@ -657,17 +650,17 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 16, "id": "6d0a7a73-d151-47d9-9e99-b4f3291c0322", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable. This process helps agents or models tackle difficult tasks by dividing them into more easily achievable subgoals. Task decomposition can be done through techniques like Chain of Thought or Tree of Thoughts, which guide the model in thinking step by step or exploring multiple reasoning possibilities at each step.'" + "'Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It involves transforming big tasks into multiple manageable tasks to facilitate problem-solving. Different methods like Chain of Thought and Tree of Thoughts can be employed to decompose tasks effectively.'" ] }, - "execution_count": 2, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -683,17 +676,17 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 17, "id": "17021822-896a-4513-a17d-1d20b1c5381c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "\"Common ways of task decomposition include using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide models in breaking down complex tasks into smaller steps. This can be achieved through simple prompting with LLMs, task-specific instructions, or human inputs to help the model understand and navigate the task effectively. 
Task decomposition aims to enhance model performance on complex tasks by utilizing more test-time computation and shedding light on the model's thinking process.\"" + "'Task decomposition can be achieved through various methods such as using prompting techniques like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\", providing task-specific instructions like \"Write a story outline,\" or incorporating human inputs to break down complex tasks into smaller components. These approaches help in organizing thoughts and planning ahead for successful task completion.'" ] }, - "execution_count": 3, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -724,7 +717,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 18, "id": "809cc747-2135-40a2-8e73-e4556343ee64", "metadata": {}, "outputs": [], @@ -749,17 +742,24 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 19, "id": "931c4fe3-c603-4efb-9b37-5f7cbbb1cbbd", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 0ec120e2-b1fc-4593-9fee-2dd4f4cae256, but expected {'tool'} run.\")\n" + ] + }, { "data": { "text/plain": [ - "'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. 
They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:'" + "'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. 
CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.'" ] }, - "execution_count": 13, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -781,14 +781,14 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 20, "id": "1726d151-4653-4c72-a187-a14840add526", "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import chat_agent_executor\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(llm, tools)" + "agent_executor = create_react_agent(llm, tools)" ] }, { @@ -801,19 +801,26 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 21, "id": "170403a2-c914-41db-85d8-a2c381da112d", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 1a50f4da-34a7-44af-8cbb-c67c90c9619e, but expected {'tool'} run.\")\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_demTlnha4vYA1IH6CByYupBQ', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d1c3f3da-be18-46a5-b3a8-4621ba1f7f2a-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_demTlnha4vYA1IH6CByYupBQ'}])]}}\n", + "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_1ZkTWsLYIlKZ1uMyIQGUuyJx', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-dddbe2d2-2355-4ca5-9961-1ceb39d78cf9-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_1ZkTWsLYIlKZ1uMyIQGUuyJx'}])]}}\n", "----\n", - "{'action': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. 
The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='e83e4002-33d2-46ff-82f4-fddb3035fb6a', tool_call_id='call_demTlnha4vYA1IH6CByYupBQ')]}}\n", + "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. 
\"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_1ZkTWsLYIlKZ1uMyIQGUuyJx')]}}\n", "----\n", - "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used in autonomous agent systems to break down complex tasks into smaller and simpler steps. This approach helps agents better understand and plan for the various steps involved in completing a task. One common method for task decomposition is the Chain of Thought (CoT) technique, where models are prompted to \"think step by step\" to decompose hard tasks into manageable steps. Another approach, known as Tree of Thoughts, extends CoT by exploring multiple reasoning possibilities at each step and creating a tree structure of tasks.\\n\\nTask decomposition can be achieved through various methods, such as using simple prompts for language models, task-specific instructions, or human inputs. By breaking down tasks into smaller components, agents can effectively plan and execute tasks with greater efficiency.\\n\\nIn summary, task decomposition is a valuable strategy for autonomous agents to tackle complex tasks by breaking them down into smaller, more manageable steps.', response_metadata={'token_usage': {'completion_tokens': 177, 'prompt_tokens': 588, 'total_tokens': 765}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-808f32b9-ae61-4f31-a55a-f30643594282-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in managing and solving difficult tasks by dividing them into more manageable components. One common method of task decomposition is the Chain of Thought (CoT) technique, where models are instructed to think step by step to decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step and generates multiple thoughts per step, creating a tree structure. 
Task decomposition can be facilitated by using simple prompts, task-specific instructions, or human inputs.', response_metadata={'token_usage': {'completion_tokens': 119, 'prompt_tokens': 636, 'total_tokens': 755}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-4a701854-97f2-4ec2-b6e1-73410911fa72-0')]}}\n", "----\n" ] } @@ -838,7 +845,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 22, "id": "04a3a664-3c3f-4cd1-9995-26662a52da7c", "metadata": {}, "outputs": [], @@ -847,9 +854,7 @@ "\n", "memory = SqliteSaver.from_conn_string(\":memory:\")\n", "\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(\n", - " llm, tools, checkpointer=memory\n", - ")" + "agent_executor = create_react_agent(llm, tools, checkpointer=memory)" ] }, { @@ -864,7 +869,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 23, "id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8", "metadata": {}, "outputs": [ @@ -872,7 +877,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-1451e59b-b135-4776-985d-4759338ffee5-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-022806f0-eb26-4c87-9132-ed2fcc6c21ea-0')]}}\n", "----\n" ] } @@ -897,7 +902,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 24, "id": "e2c570ae-dd91-402c-8693-ae746de63b16", "metadata": {}, "outputs": [ @@ -905,11 +910,22 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ab2x4iUPSWDAHS5txL7PspSK', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-f76b5813-b41c-4d0d-9ed2-667b988d885e-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_ab2x4iUPSWDAHS5txL7PspSK'}])]}}\n", - "----\n", - "{'action': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. 
It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='e0895fa5-5d41-4be0-98db-10a83d42fc2f', tool_call_id='call_ab2x4iUPSWDAHS5txL7PspSK')]}}\n", - "----\n", - "{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used in complex tasks where the task is broken down into smaller and simpler steps. This approach helps in managing and solving difficult tasks by dividing them into more manageable components. One common method for task decomposition is the Chain of Thought (CoT) technique, which prompts the model to think step by step and decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of thought steps.\\n\\nTask decomposition can be achieved through various methods, such as using language models with simple prompting, task-specific instructions, or human inputs. 
By breaking down tasks into smaller components, agents can better plan and execute complex tasks effectively.\\n\\nIf you would like more detailed information or examples related to task decomposition, feel free to ask!', response_metadata={'token_usage': {'completion_tokens': 165, 'prompt_tokens': 611, 'total_tokens': 776}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-13296566-8577-4d65-982b-a39718988ca3-0')]}}\n", + "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_DdAAJJgGIQOZQgKVE4duDyML', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-acc3c903-4f6f-48dd-8b36-f6f3b80d0856-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_DdAAJJgGIQOZQgKVE4duDyML'}])]}}\n", + "----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 9a7ba580-ec91-412d-9649-1b5cbf5ae7bc, but expected {'tool'} run.\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. 
It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_DdAAJJgGIQOZQgKVE4duDyML')]}}\n", "----\n" ] } @@ -936,7 +952,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "570d8c68-136e-4ba5-969a-03ba195f6118", "metadata": {}, "outputs": [ @@ -983,7 +999,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, "id": "b1d2b4d4-e604-497d-873d-d345b808578e", "metadata": {}, "outputs": [], @@ -1031,9 +1047,7 @@ "tools = [tool]\n", "\n", "\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(\n", - " llm, tools, checkpointer=memory\n", - ")" + "agent_executor = create_react_agent(llm, tools, checkpointer=memory)" ] }, { @@ -1080,7 +1094,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.11.2" } }, "nbformat": 4, diff --git a/docs/docs/tutorials/sql_qa.ipynb b/docs/docs/tutorials/sql_qa.ipynb index 3b7e32b773f13..e21c13f3d45b6 100644 --- a/docs/docs/tutorials/sql_qa.ipynb +++ b/docs/docs/tutorials/sql_qa.ipynb @@ -36,6 +36,7 @@ "metadata": {}, "outputs": [], "source": [ + "%%capture --no-stderr\n", "%pip install --upgrade --quiet langchain langchain-community langchain-openai" ] }, @@ -55,11 +56,13 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", + "if not os.environ.get(\"OPENAI_API_KEY\"):\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", "\n", - "# Uncomment the below to use LangSmith. Not required.\n", - "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n", - "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"" + "# Comment out the below to opt-out of using LangSmith in this notebook. 
Not required.\n", + "if not os.environ.get(\"LANGCHAIN_API_KEY\"):\n", + " os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n", + " os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"" ] }, { @@ -467,6 +470,7 @@ "metadata": {}, "outputs": [], "source": [ + "%%capture --no-stderr\n", "%pip install --upgrade --quiet langgraph" ] }, @@ -484,11 +488,9 @@ "outputs": [], "source": [ "from langchain_core.messages import HumanMessage\n", - "from langgraph.prebuilt import chat_agent_executor\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", - "agent_executor = chat_agent_executor.create_tool_calling_executor(\n", - " llm, tools, messages_modifier=system_message\n", - ")" + "agent_executor = create_react_agent(llm, tools, messages_modifier=system_message)" ] }, { @@ -725,9 +727,7 @@ "\n", "system_message = SystemMessage(content=system)\n", "\n", - "agent = chat_agent_executor.create_tool_calling_executor(\n", - " llm, tools, messages_modifier=system_message\n", - ")" + "agent = create_react_agent(llm, tools, messages_modifier=system_message)" ] }, { @@ -780,7 +780,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.1" + "version": "3.10.1" } }, "nbformat": 4, From eabcfaa3d676c4f31133f17fe6de51fc083bae0b Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Mon, 3 Jun 2024 07:17:35 -0700 Subject: [PATCH 24/54] Update Ollama instructions (#22394) --- docs/docs/integrations/providers/ollama.mdx | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docs/docs/integrations/providers/ollama.mdx b/docs/docs/integrations/providers/ollama.mdx index 5e8e81ca23014..704b02ab15f23 100644 --- a/docs/docs/integrations/providers/ollama.mdx +++ b/docs/docs/integrations/providers/ollama.mdx @@ -1,6 +1,6 @@ # Ollama ->[Ollama](https://ollama.ai/) is a python library. It allows you to run open-source large language models, +>[Ollama](https://ollama.com/) allows you to run open-source large language models, > such as LLaMA2, locally. > >`Ollama` bundles model weights, configuration, and data into a single package, defined by a Modelfile. @@ -12,11 +12,8 @@ on how to use `Ollama` with LangChain. ## Installation and Setup -Follow [these instructions](https://github.com/jmorganca/ollama?tab=readme-ov-file#ollama) +Follow [these instructions](https://github.com/ollama/ollama?tab=readme-ov-file#ollama) to set up and run a local Ollama instance. -To use, you should set up the environment variables `ANYSCALE_API_BASE` and -`ANYSCALE_API_KEY`. - ## LLM From ed8e9c437a62e305bb38a2624282152262b014d4 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Mon, 3 Jun 2024 07:18:10 -0700 Subject: [PATCH 25/54] core: In RunnableSequence pass kwargs to the first step (#22393) - This is a pattern that shows up occasionally in langgraph questions, people chain a graph to something else after, and want to pass the graph some kwargs (eg. 
stream_mode) --- libs/core/langchain_core/runnables/base.py | 69 +++++---- .../unit_tests/runnables/test_runnable.py | 131 ++++++++++++++++-- 2 files changed, 161 insertions(+), 39 deletions(-) diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index b51cb5d89fc87..43b025ad441c5 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -2379,7 +2379,9 @@ def __ror__( name=self.name, ) - def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output: + def invoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: from langchain_core.beta.runnables.context import config_with_context # setup callbacks and context @@ -2396,13 +2398,14 @@ def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Outpu # invoke all steps in sequence try: for i, step in enumerate(self.steps): - input = step.invoke( - input, - # mark each step as a child run - patch_config( - config, callbacks=run_manager.get_child(f"seq:step:{i+1}") - ), + # mark each step as a child run + config = patch_config( + config, callbacks=run_manager.get_child(f"seq:step:{i+1}") ) + if i == 0: + input = step.invoke(input, config, **kwargs) + else: + input = step.invoke(input, config) # finish the root run except BaseException as e: run_manager.on_chain_error(e) @@ -2433,13 +2436,14 @@ async def ainvoke( # invoke all steps in sequence try: for i, step in enumerate(self.steps): - input = await step.ainvoke( - input, - # mark each step as a child run - patch_config( - config, callbacks=run_manager.get_child(f"seq:step:{i+1}") - ), + # mark each step as a child run + config = patch_config( + config, callbacks=run_manager.get_child(f"seq:step:{i+1}") ) + if i == 0: + input = await step.ainvoke(input, config, **kwargs) + else: + input = await step.ainvoke(input, config) # finish the root run except BaseException as e: await run_manager.on_chain_error(e) @@ -2519,7 +2523,7 @@ def batch( if i not in failed_inputs_map ], return_exceptions=return_exceptions, - **kwargs, + **(kwargs if stepidx == 0 else {}), ) # If an input failed, add it to the map for i, inp in zip(remaining_idxs, inputs): @@ -2549,6 +2553,8 @@ def batch( ) for rm, config in zip(run_managers, configs) ], + return_exceptions=return_exceptions, + **(kwargs if i == 0 else {}), ) # finish the root runs @@ -2646,7 +2652,7 @@ async def abatch( if i not in failed_inputs_map ], return_exceptions=return_exceptions, - **kwargs, + **(kwargs if stepidx == 0 else {}), ) # If an input failed, add it to the map for i, inp in zip(remaining_idxs, inputs): @@ -2676,6 +2682,8 @@ async def abatch( ) for rm, config in zip(run_managers, configs) ], + return_exceptions=return_exceptions, + **(kwargs if i == 0 else {}), ) # finish the root runs except BaseException as e: @@ -2704,6 +2712,7 @@ def _transform( input: Iterator[Input], run_manager: CallbackManagerForChainRun, config: RunnableConfig, + **kwargs: Any, ) -> Iterator[Output]: from langchain_core.beta.runnables.context import config_with_context @@ -2714,14 +2723,14 @@ def _transform( # steps that don't natively support transforming an input stream will # buffer input in memory until all available, and then start emitting output final_pipeline = cast(Iterator[Output], input) - for step in steps: - final_pipeline = step.transform( - final_pipeline, - patch_config( - config, - callbacks=run_manager.get_child(f"seq:step:{steps.index(step)+1}"), - ), + for idx, step in 
enumerate(steps): + config = patch_config( + config, callbacks=run_manager.get_child(f"seq:step:{idx+1}") ) + if idx == 0: + final_pipeline = step.transform(final_pipeline, config, **kwargs) + else: + final_pipeline = step.transform(final_pipeline, config) for output in final_pipeline: yield output @@ -2731,6 +2740,7 @@ async def _atransform( input: AsyncIterator[Input], run_manager: AsyncCallbackManagerForChainRun, config: RunnableConfig, + **kwargs: Any, ) -> AsyncIterator[Output]: from langchain_core.beta.runnables.context import aconfig_with_context @@ -2742,14 +2752,15 @@ async def _atransform( # steps that don't natively support transforming an input stream will # buffer input in memory until all available, and then start emitting output final_pipeline = cast(AsyncIterator[Output], input) - for step in steps: - final_pipeline = step.atransform( - final_pipeline, - patch_config( - config, - callbacks=run_manager.get_child(f"seq:step:{steps.index(step)+1}"), - ), + for idx, step in enumerate(steps): + config = patch_config( + config, + callbacks=run_manager.get_child(f"seq:step:{idx+1}"), ) + if idx == 0: + final_pipeline = step.atransform(final_pipeline, config, **kwargs) + else: + final_pipeline = step.atransform(final_pipeline, config) async for output in final_pipeline: yield output diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py index 72a9494a8078a..2672d17c5bc36 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable.py @@ -182,6 +182,7 @@ def invoke( self, input: str, config: Optional[RunnableConfig] = None, + **kwargs: Any, ) -> int: return len(input) @@ -1409,26 +1410,136 @@ async def test_passthrough_tap_async(mocker: MockerFixture) -> None: fake = FakeRunnable() mock = mocker.Mock() - seq: Runnable = fake | RunnablePassthrough(mock) + seq: Runnable = RunnablePassthrough(mock) | fake | RunnablePassthrough(mock) + + assert await seq.ainvoke("hello", my_kwarg="value") == 5 + assert mock.call_args_list == [ + mocker.call("hello", my_kwarg="value"), + mocker.call(5), + ] + mock.reset_mock() + + assert await seq.abatch(["hello", "byebye"], my_kwarg="value") == [5, 6] + assert len(mock.call_args_list) == 4 + for call in [ + mocker.call("hello", my_kwarg="value"), + mocker.call("byebye", my_kwarg="value"), + mocker.call(5), + mocker.call(6), + ]: + assert call in mock.call_args_list + mock.reset_mock() + + assert await seq.abatch( + ["hello", "byebye"], my_kwarg="value", return_exceptions=True + ) == [ + 5, + 6, + ] + assert len(mock.call_args_list) == 4 + for call in [ + mocker.call("hello", my_kwarg="value"), + mocker.call("byebye", my_kwarg="value"), + mocker.call(5), + mocker.call(6), + ]: + assert call in mock.call_args_list + mock.reset_mock() - assert await seq.ainvoke("hello") == 5 - assert mock.call_args_list == [mocker.call(5)] + assert sorted( + [ + a + async for a in seq.abatch_as_completed( + ["hello", "byebye"], my_kwarg="value", return_exceptions=True + ) + ] + ) == [ + (0, 5), + (1, 6), + ] + assert len(mock.call_args_list) == 4 + for call in [ + mocker.call("hello", my_kwarg="value"), + mocker.call("byebye", my_kwarg="value"), + mocker.call(5), + mocker.call(6), + ]: + assert call in mock.call_args_list mock.reset_mock() assert [ - part async for part in seq.astream("hello", dict(metadata={"key": "value"})) + part + async for part in seq.astream( + "hello", dict(metadata={"key": "value"}), my_kwarg="value" + ) ] == [5] - 
assert mock.call_args_list == [mocker.call(5)] + assert mock.call_args_list == [ + mocker.call("hello", my_kwarg="value"), + mocker.call(5), + ] + mock.reset_mock() + + assert seq.invoke("hello", my_kwarg="value") == 5 # type: ignore[call-arg] + assert mock.call_args_list == [ + mocker.call("hello", my_kwarg="value"), + mocker.call(5), + ] + mock.reset_mock() + + assert seq.batch(["hello", "byebye"], my_kwarg="value") == [5, 6] + assert len(mock.call_args_list) == 4 + for call in [ + mocker.call("hello", my_kwarg="value"), + mocker.call("byebye", my_kwarg="value"), + mocker.call(5), + mocker.call(6), + ]: + assert call in mock.call_args_list + mock.reset_mock() + + assert seq.batch(["hello", "byebye"], my_kwarg="value", return_exceptions=True) == [ + 5, + 6, + ] + assert len(mock.call_args_list) == 4 + for call in [ + mocker.call("hello", my_kwarg="value"), + mocker.call("byebye", my_kwarg="value"), + mocker.call(5), + mocker.call(6), + ]: + assert call in mock.call_args_list mock.reset_mock() - assert seq.invoke("hello") == 5 - assert mock.call_args_list == [mocker.call(5)] + assert sorted( + a + for a in seq.batch_as_completed( + ["hello", "byebye"], my_kwarg="value", return_exceptions=True + ) + ) == [ + (0, 5), + (1, 6), + ] + assert len(mock.call_args_list) == 4 + for call in [ + mocker.call("hello", my_kwarg="value"), + mocker.call("byebye", my_kwarg="value"), + mocker.call(5), + mocker.call(6), + ]: + assert call in mock.call_args_list mock.reset_mock() - assert [part for part in seq.stream("hello", dict(metadata={"key": "value"}))] == [ - 5 + assert [ + part + for part in seq.stream( + "hello", dict(metadata={"key": "value"}), my_kwarg="value" + ) + ] == [5] + assert mock.call_args_list == [ + mocker.call("hello", my_kwarg="value"), + mocker.call(5), ] - assert mock.call_args_list == [mocker.call(5)] mock.reset_mock() From 3e92ed805659c60b0168b1ed13e0e30fa7661dbe Mon Sep 17 00:00:00 2001 From: Qingchuan Hao Date: Mon, 3 Jun 2024 22:19:00 +0800 Subject: [PATCH 26/54] docs: add Microsoft Azure to ChatModelTabs (#22367) Co-authored-by: Chester Curme --- docs/docs/tutorials/rag.ipynb | 2 +- docs/src/theme/ChatModelTabs.js | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/docs/docs/tutorials/rag.ipynb b/docs/docs/tutorials/rag.ipynb index 86cfd43d56c37..88110e7630da7 100644 --- a/docs/docs/tutorials/rag.ipynb +++ b/docs/docs/tutorials/rag.ipynb @@ -104,7 +104,7 @@ "```\n", "## Preview\n", "\n", - "In this guide we’ll build a QA app over as website. The specific website we will use isthe [LLM Powered Autonomous\n", + "In this guide we’ll build a QA app over as website. The specific website we will use is the [LLM Powered Autonomous\n", "Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post\n", "by Lilian Weng, which allows us to ask questions about the contents of\n", "the post.\n", diff --git a/docs/src/theme/ChatModelTabs.js b/docs/src/theme/ChatModelTabs.js index e1218acbb2435..8424c6d1c5248 100644 --- a/docs/src/theme/ChatModelTabs.js +++ b/docs/src/theme/ChatModelTabs.js @@ -20,6 +20,7 @@ import CodeBlock from "@theme-original/CodeBlock"; * @property {boolean} [hideMistral] - Whether or not to hide Mistral chat model. * @property {boolean} [hideGoogle] - Whether or not to hide Google VertexAI chat model. * @property {boolean} [hideTogether] - Whether or not to hide Together chat model. + * @property {boolean} [hideAzure] - Whether or not to hide Microsoft Azure OpenAI chat model. 
* @property {string} [customVarName] - Custom variable name for the model. Defaults to `model`. */ @@ -35,6 +36,7 @@ export default function ChatModelTabs(props) { mistralParams, googleParams, togetherParams, + azureParams, hideOpenai, hideAnthropic, hideCohere, @@ -42,6 +44,7 @@ export default function ChatModelTabs(props) { hideMistral, hideGoogle, hideTogether, + hideAzure, customVarName, } = props; @@ -57,7 +60,10 @@ export default function ChatModelTabs(props) { const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`; const togetherParamsOrDefault = togetherParams ?? - `\n base_url="https://api.together.xyz/v1",\n api_key=os.environ["TOGETHER_API_KEY"],\n model="mistralai/Mixtral-8x7B-Instruct-v0.1",`; + `\n base_url="https://api.together.xyz/v1",\n api_key=os.environ["TOGETHER_API_KEY"],\n model="mistralai/Mixtral-8x7B-Instruct-v0.1",\n`; + const azureParamsOrDefault = + azureParams ?? + `\n azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],\n azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],\n openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],\n`; const llmVarName = customVarName ?? "model"; @@ -80,6 +86,15 @@ export default function ChatModelTabs(props) { default: false, shouldHide: hideAnthropic, }, + { + value: "Azure", + label: "Azure", + text: `from langchain_openai import AzureChatOpenAI\n\n${llmVarName} = AzureChatOpenAI(${azureParamsOrDefault})`, + apiKeyName: "AZURE_OPENAI_API_KEY", + packageName: "langchain-openai", + default: false, + shouldHide: hideAzure, + }, { value: "Google", label: "Google", From a7ae16f9120a5107018c106778690c0de78d59a9 Mon Sep 17 00:00:00 2001 From: Joan Fontanals Date: Mon, 3 Jun 2024 16:23:37 +0200 Subject: [PATCH 27/54] add `embed_image` API to JinaEmbedding (#22416) - **Description:** Add `embed_image` to JinaEmbedding to embed images - **Twitter handle:** https://x.com/JinaAI_ --- .../langchain_community/embeddings/jina.py | 34 +++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/libs/community/langchain_community/embeddings/jina.py b/libs/community/langchain_community/embeddings/jina.py index dc66409e60ca0..7c50faf46b1e3 100644 --- a/libs/community/langchain_community/embeddings/jina.py +++ b/libs/community/langchain_community/embeddings/jina.py @@ -1,4 +1,7 @@ +import base64 +from os.path import exists from typing import Any, Dict, List, Optional +from urllib.parse import urlparse import requests from langchain_core.embeddings import Embeddings @@ -8,6 +11,18 @@ JINA_API_URL: str = "https://api.jina.ai/v1/embeddings" +def is_local(url: str) -> bool: + url_parsed = urlparse(url) + if url_parsed.scheme in ("file", ""): # Possibly a local file + return exists(url_parsed.path) + return False + + +def get_bytes_str(file_path: str) -> str: + with open(file_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode("utf-8") + + class JinaEmbeddings(BaseModel, Embeddings): """Jina embedding models.""" @@ -40,10 +55,10 @@ def validate_environment(cls, values: Dict) -> Dict: values["session"] = session return values - def _embed(self, texts: List[str]) -> List[List[float]]: + def _embed(self, input: Any) -> List[List[float]]: # Call Jina AI Embedding API resp = self.session.post( # type: ignore - JINA_API_URL, json={"input": texts, "model": self.model_name} + JINA_API_URL, json={"input": input, "model": self.model_name} ).json() if "data" not in resp: raise RuntimeError(resp["detail"]) @@ -73,3 +88,18 @@ def embed_query(self, text: str) -> List[float]: Embeddings for the 
text. """ return self._embed([text])[0] + + def embed_images(self, uris: List[str]) -> List[List[float]]: + """Call out to Jina's image embedding endpoint. + Args: + uris: The list of uris to embed. + Returns: + List of embeddings, one for each text. + """ + input = [] + for uri in uris: + if is_local(uri): + input.append({"bytes": get_bytes_str(uri)}) + else: + input.append({"url": uri}) + return self._embed(input) From dac355fc62fdebd1fbe6991162c71abc6da78024 Mon Sep 17 00:00:00 2001 From: Klaudia Lemiec <79466298+klaudialemiec@users.noreply.github.com> Date: Mon, 3 Jun 2024 16:26:28 +0200 Subject: [PATCH 28/54] docs: notebook loader: change .html to .ipynb (#22407) Co-authored-by: Erick Friis --- .../integrations/document_loaders/jupyter_notebook.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/docs/integrations/document_loaders/jupyter_notebook.ipynb b/docs/docs/integrations/document_loaders/jupyter_notebook.ipynb index e8533803a7b04..57b65fb3be72c 100644 --- a/docs/docs/integrations/document_loaders/jupyter_notebook.ipynb +++ b/docs/docs/integrations/document_loaders/jupyter_notebook.ipynb @@ -8,7 +8,7 @@ "\n", ">[Jupyter Notebook](https://en.wikipedia.org/wiki/Project_Jupyter#Applications) (formerly `IPython Notebook`) is a web-based interactive computational environment for creating notebook documents.\n", "\n", - "This notebook covers how to load data from a `Jupyter notebook (.html)` into a format suitable by LangChain." + "This notebook covers how to load data from a `Jupyter notebook (.ipynb)` into a format suitable by LangChain." ] }, { @@ -31,7 +31,7 @@ "outputs": [], "source": [ "loader = NotebookLoader(\n", - " \"example_data/notebook.html\",\n", + " \"example_data/notebook.ipynb\",\n", " include_outputs=True,\n", " max_output_length=20,\n", " remove_newline=True,\n", @@ -42,7 +42,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "`NotebookLoader.load()` loads the `.html` notebook file into a `Document` object.\n", + "`NotebookLoader.load()` loads the `.ipynb` notebook file into a `Document` object.\n", "\n", "**Parameters**:\n", "\n", From 2d81a72884c46744ba3ac764efe7f37ece24452a Mon Sep 17 00:00:00 2001 From: Charles John <1017170+charl3sj@users.noreply.github.com> Date: Mon, 3 Jun 2024 20:02:57 +0530 Subject: [PATCH 29/54] community: fix missing `apify_api_token` field in ApifyWrapper (#22421) - **Description:** The `ApifyWrapper` class expects `apify_api_token` to be passed as a named parameter or set as an environment variable. But the corresponding field was missing in the class definition causing the argument to be ignored when passed as a named param. This patch fixes that. --- libs/community/langchain_community/utilities/apify.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libs/community/langchain_community/utilities/apify.py b/libs/community/langchain_community/utilities/apify.py index 04b893d460d88..efcae49e2d1f6 100644 --- a/libs/community/langchain_community/utilities/apify.py +++ b/libs/community/langchain_community/utilities/apify.py @@ -17,6 +17,7 @@ class ApifyWrapper(BaseModel): apify_client: Any apify_client_async: Any + apify_api_token: Optional[str] = None @root_validator() def validate_environment(cls, values: Dict) -> Dict: From 1ad1dc5303e060f5ffa47fee52e67cdde0cc659d Mon Sep 17 00:00:00 2001 From: Zheng Robert Jia Date: Mon, 3 Jun 2024 09:34:24 -0500 Subject: [PATCH 30/54] docs: resolve minor syntax error. (#22375) Used the correct magic command. 
Changed from `% pip...` to `%pip` Co-authored-by: Erick Friis --- docs/docs/tutorials/agents.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/tutorials/agents.ipynb b/docs/docs/tutorials/agents.ipynb index 5943e07b26589..76d77d7cb1b2a 100644 --- a/docs/docs/tutorials/agents.ipynb +++ b/docs/docs/tutorials/agents.ipynb @@ -117,7 +117,7 @@ "metadata": {}, "outputs": [], "source": [ - "% pip install -U langchain-community langgraph langchain-anthropic tavily-python" + "%pip install -U langchain-community langgraph langchain-anthropic tavily-python" ] }, { From ceb73ad06fc1d7004db2d3af89bfa200d6f6c1a7 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Mon, 3 Jun 2024 07:34:53 -0700 Subject: [PATCH 31/54] core: In BaseRetriever make get_relevant_docs delegate to invoke (#22434) - This fixes all the tracing issues with people still using get_relevant_docs, and a change we need for 0.3 anyway
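A minimal sketch of the behavior this change is after (the `ToyRetriever` below is a made-up stand-in, not part of this patch): both entry points now funnel through `invoke`, so tags, metadata, and callbacks passed to the deprecated method are traced the same way as config passed to `invoke`.

```python
from typing import List

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class ToyRetriever(BaseRetriever):
    """Returns a canned document; stands in for any real retriever."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return [Document(page_content=f"stub result for: {query}")]


retriever = ToyRetriever()

# Deprecated entry point: still works (with a deprecation warning), but now
# delegates to invoke under the hood, so tracing callbacks fire either way.
docs_old = retriever.get_relevant_documents("my query", tags=["qa"])

# Preferred entry point: callbacks/tags/metadata travel in the config dict.
docs_new = retriever.invoke("my query", config={"tags": ["qa"]})

assert docs_old == docs_new
```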
--- libs/core/langchain_core/retrievers.py | 160 +++++++++++++------------ 1 file changed, 82 insertions(+), 78 deletions(-) diff --git a/libs/core/langchain_core/retrievers.py b/libs/core/langchain_core/retrievers.py index 897efbef86783..c42311a44280a 100644 --- a/libs/core/langchain_core/retrievers.py +++ b/libs/core/langchain_core/retrievers.py @@ -190,15 +190,40 @@ def invoke( retriever.invoke("query") """ + from langchain_core.callbacks.manager import CallbackManager + config = ensure_config(config) - return self.get_relevant_documents( + callback_manager = CallbackManager.configure( + config.get("callbacks"), + None, + verbose=kwargs.get("verbose", False), + inheritable_tags=config.get("tags"), + local_tags=self.tags, + inheritable_metadata=config.get("metadata"), + local_metadata=self.metadata, + ) + run_manager = callback_manager.on_retriever_start( + dumpd(self), input, - callbacks=config.get("callbacks"), - tags=config.get("tags"), - metadata=config.get("metadata"), - run_name=config.get("run_name"), - **kwargs, + name=config.get("run_name"), + run_id=kwargs.pop("run_id", None), ) + try: + _kwargs = kwargs if self._expects_other_args else {} + if self._new_arg_supported: + result = self._get_relevant_documents( + input, run_manager=run_manager, **_kwargs + ) + else: + result = self._get_relevant_documents(input, **_kwargs) + except Exception as e: + run_manager.on_retriever_error(e) + raise e + else: + run_manager.on_retriever_end( + result, + ) + return result async def ainvoke( self, @@ -224,15 +249,40 @@ async def ainvoke( await retriever.ainvoke("query") """ + from langchain_core.callbacks.manager import AsyncCallbackManager + config = ensure_config(config) - return await self.aget_relevant_documents( + callback_manager = AsyncCallbackManager.configure( + config.get("callbacks"), + None, + verbose=kwargs.get("verbose", False), + inheritable_tags=config.get("tags"), + local_tags=self.tags, + inheritable_metadata=config.get("metadata"), + local_metadata=self.metadata, + ) + run_manager = await callback_manager.on_retriever_start( + dumpd(self), input, - callbacks=config.get("callbacks"), - tags=config.get("tags"), - metadata=config.get("metadata"), - run_name=config.get("run_name"), - **kwargs, + name=config.get("run_name"), + run_id=kwargs.pop("run_id", None), ) + try: + _kwargs = kwargs if self._expects_other_args else {} + if self._new_arg_supported: + result = await self._aget_relevant_documents( + input, run_manager=run_manager, **_kwargs + ) + else: + result = await self._aget_relevant_documents(input, **_kwargs) + except Exception as e: + await run_manager.on_retriever_error(e) + raise e + else: + await run_manager.on_retriever_end( + result, + ) + return result @abstractmethod def _get_relevant_documents( @@ -293,39 +343,16 @@ def get_relevant_documents( Returns: List of relevant documents """ - from langchain_core.callbacks.manager import CallbackManager - - callback_manager = CallbackManager.configure( - callbacks, - None, - verbose=kwargs.get("verbose", False), - inheritable_tags=tags, - local_tags=self.tags, - inheritable_metadata=metadata, - local_metadata=self.metadata, - ) - run_manager = callback_manager.on_retriever_start( - dumpd(self), - query, - name=run_name, - run_id=kwargs.pop("run_id", None), - ) - try: - _kwargs = kwargs if self._expects_other_args else {} - if self._new_arg_supported: - result = self._get_relevant_documents( - query, run_manager=run_manager, **_kwargs - ) - else: - result = self._get_relevant_documents(query, **_kwargs) - except 
Exception as e: - run_manager.on_retriever_error(e) - raise e - else: - run_manager.on_retriever_end( - result, - ) - return result + config: RunnableConfig = {} + if callbacks: + config["callbacks"] = callbacks + if tags: + config["tags"] = tags + if metadata: + config["metadata"] = metadata + if run_name: + config["run_name"] = run_name + return self.invoke(query, config, **kwargs) @deprecated(since="0.1.46", alternative="ainvoke", removal="0.3.0") async def aget_relevant_documents( @@ -357,36 +384,13 @@ async def aget_relevant_documents( Returns: List of relevant documents """ - from langchain_core.callbacks.manager import AsyncCallbackManager - - callback_manager = AsyncCallbackManager.configure( - callbacks, - None, - verbose=kwargs.get("verbose", False), - inheritable_tags=tags, - local_tags=self.tags, - inheritable_metadata=metadata, - local_metadata=self.metadata, - ) - run_manager = await callback_manager.on_retriever_start( - dumpd(self), - query, - name=run_name, - run_id=kwargs.pop("run_id", None), - ) - try: - _kwargs = kwargs if self._expects_other_args else {} - if self._new_arg_supported: - result = await self._aget_relevant_documents( - query, run_manager=run_manager, **_kwargs - ) - else: - result = await self._aget_relevant_documents(query, **_kwargs) - except Exception as e: - await run_manager.on_retriever_error(e) - raise e - else: - await run_manager.on_retriever_end( - result, - ) - return result + config: RunnableConfig = {} + if callbacks: + config["callbacks"] = callbacks + if tags: + config["tags"] = tags + if metadata: + config["metadata"] = metadata + if run_name: + config["run_name"] = run_name + return await self.ainvoke(query, config, **kwargs) From 678a19a5f7166e52ff57667247376ce125e0672d Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Mon, 3 Jun 2024 08:21:55 -0700 Subject: [PATCH 32/54] infra: bump anthropic mypy 1 (#22373) --- .../langchain_anthropic/chat_models.py | 8 +- libs/partners/anthropic/poetry.lock | 69 +++++++------- libs/partners/anthropic/pyproject.toml | 3 +- .../integration_tests/test_chat_models.py | 40 ++++---- .../integration_tests/test_experimental.py | 20 ++-- .../tests/integration_tests/test_llms.py | 6 +- .../tests/unit_tests/test_chat_models.py | 92 +++++++++---------- .../tests/unit_tests/test_output_parsers.py | 6 +- 8 files changed, 120 insertions(+), 124 deletions(-) diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index ac47322aca936..9988f38b1234b 100644 --- a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -104,7 +104,7 @@ def _merge_messages( curr = curr.copy(deep=True) if isinstance(curr, ToolMessage): if isinstance(curr.content, str): - curr = HumanMessage( + curr = HumanMessage( # type: ignore[misc] [ { "type": "tool_result", @@ -114,7 +114,7 @@ def _merge_messages( ] ) else: - curr = HumanMessage(curr.content) + curr = HumanMessage(curr.content) # type: ignore[misc] last = merged[-1] if merged else None if isinstance(last, HumanMessage) and isinstance(curr, HumanMessage): if isinstance(last.content, str): @@ -425,7 +425,7 @@ def _stream( ] message_chunk = AIMessageChunk( content=message.content, - tool_call_chunks=tool_call_chunks, + tool_call_chunks=tool_call_chunks, # type: ignore[arg-type] ) yield ChatGenerationChunk(message=message_chunk) else: @@ -464,7 +464,7 @@ async def _astream( ] message_chunk = 
AIMessageChunk( content=message.content, - tool_call_chunks=tool_call_chunks, + tool_call_chunks=tool_call_chunks, # type: ignore[arg-type] ) yield ChatGenerationChunk(message=message_chunk) else: diff --git a/libs/partners/anthropic/poetry.lock b/libs/partners/anthropic/poetry.lock index 17cfdd1d97ce2..42664fe60e4a4 100644 --- a/libs/partners/anthropic/poetry.lock +++ b/libs/partners/anthropic/poetry.lock @@ -568,52 +568,49 @@ requests = ">=2,<3" [[package]] name = "mypy" -version = "0.991" +version = "1.10.0" description = "Optional static typing for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "mypy-0.991-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab"}, - {file = "mypy-0.991-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d"}, - {file = "mypy-0.991-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6"}, - {file = "mypy-0.991-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb"}, - {file = "mypy-0.991-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305"}, - {file = "mypy-0.991-cp310-cp310-win_amd64.whl", hash = "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c"}, - {file = "mypy-0.991-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372"}, - {file = "mypy-0.991-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f"}, - {file = "mypy-0.991-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33"}, - {file = "mypy-0.991-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05"}, - {file = "mypy-0.991-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad"}, - {file = "mypy-0.991-cp311-cp311-win_amd64.whl", hash = "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297"}, - {file = "mypy-0.991-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813"}, - {file = "mypy-0.991-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711"}, - {file = "mypy-0.991-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd"}, - {file = "mypy-0.991-cp37-cp37m-win_amd64.whl", hash = "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"}, - {file = "mypy-0.991-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a"}, - {file = "mypy-0.991-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93"}, - {file = "mypy-0.991-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf"}, - {file = "mypy-0.991-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135"}, - {file = "mypy-0.991-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70"}, - {file = "mypy-0.991-cp38-cp38-win_amd64.whl", hash = "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243"}, - {file = "mypy-0.991-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d"}, - {file = "mypy-0.991-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5"}, - {file = "mypy-0.991-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3"}, - {file = "mypy-0.991-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648"}, - {file = "mypy-0.991-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476"}, - {file = "mypy-0.991-cp39-cp39-win_amd64.whl", hash = "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461"}, - {file = "mypy-0.991-py3-none-any.whl", hash = "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb"}, - {file = "mypy-0.991.tar.gz", hash = "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, + {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, + {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, + {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, + {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, + {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, + {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, + {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, + {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, + {file = 
"mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, + {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, + {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, + {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, + {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, + {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, + {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, + {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, + {file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, ] [package.dependencies] -mypy-extensions = ">=0.4.3" +mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=3.10" +typing-extensions = ">=4.1.0" [package.extras] dmypy = ["psutil (>=4.0)"] install-types = ["pip"] -python2 = ["typed-ast (>=1.4.0,<2)"] +mypyc = ["setuptools (>=50)"] reports = ["lxml"] [[package]] @@ -1285,4 +1282,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "a88c10c902a287792de08135f1c17391a89c7363a30c8d55a185f0c90efc22ac" +content-hash = "e1cff75b89d41dd6b5bf1fc13f2a8c777f3820936773ed5ebaecae185db28249" diff --git a/libs/partners/anthropic/pyproject.toml b/libs/partners/anthropic/pyproject.toml index 2bbb3fdd026c1..22bacd1c8ca3d 100644 --- a/libs/partners/anthropic/pyproject.toml +++ b/libs/partners/anthropic/pyproject.toml @@ -41,10 +41,9 @@ optional = true [tool.poetry.group.lint.dependencies] ruff = ">=0.2.2,<1" -mypy = "^0.991" [tool.poetry.group.typing.dependencies] -mypy = "^0.991" +mypy = "^1" langchain-core = { path = "../../core", develop = true } [tool.poetry.group.dev] diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py index bc60d73777898..cee2cf70cf8ba 100644 --- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py +++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py @@ -26,7 +26,7 @@ def test_stream() -> None: """Test streaming tokens from Anthropic.""" - llm = ChatAnthropicMessages(model_name=MODEL_NAME) + llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] for token in 
llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str) @@ -34,7 +34,7 @@ def test_stream() -> None: async def test_astream() -> None: """Test streaming tokens from Anthropic.""" - llm = ChatAnthropicMessages(model_name=MODEL_NAME) + llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] async for token in llm.astream("I'm Pickle Rick"): assert isinstance(token.content, str) @@ -42,7 +42,7 @@ async def test_astream() -> None: async def test_abatch() -> None: """Test streaming tokens from ChatAnthropicMessages.""" - llm = ChatAnthropicMessages(model_name=MODEL_NAME) + llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: @@ -51,7 +51,7 @@ async def test_abatch() -> None: async def test_abatch_tags() -> None: """Test batch tokens from ChatAnthropicMessages.""" - llm = ChatAnthropicMessages(model_name=MODEL_NAME) + llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = await llm.abatch( ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]} @@ -62,7 +62,7 @@ async def test_abatch_tags() -> None: def test_batch() -> None: """Test batch tokens from ChatAnthropicMessages.""" - llm = ChatAnthropicMessages(model_name=MODEL_NAME) + llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: @@ -71,7 +71,7 @@ def test_batch() -> None: async def test_ainvoke() -> None: """Test invoke tokens from ChatAnthropicMessages.""" - llm = ChatAnthropicMessages(model_name=MODEL_NAME) + llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]}) assert isinstance(result.content, str) @@ -79,7 +79,7 @@ async def test_ainvoke() -> None: def test_invoke() -> None: """Test invoke tokens from ChatAnthropicMessages.""" - llm = ChatAnthropicMessages(model_name=MODEL_NAME) + llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"])) assert isinstance(result.content, str) @@ -87,7 +87,7 @@ def test_invoke() -> None: def test_system_invoke() -> None: """Test invoke tokens with a system message""" - llm = ChatAnthropicMessages(model_name=MODEL_NAME) + llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] prompt = ChatPromptTemplate.from_messages( [ @@ -108,7 +108,7 @@ def test_system_invoke() -> None: def test_anthropic_call() -> None: """Test valid call to anthropic.""" - chat = ChatAnthropic(model="test") + chat = ChatAnthropic(model="test") # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, AIMessage) @@ -117,7 +117,7 @@ def test_anthropic_call() -> None: def test_anthropic_generate() -> None: """Test generate method of anthropic.""" - chat = ChatAnthropic(model="test") + chat = ChatAnthropic(model="test") # type: ignore[call-arg] chat_messages: List[List[BaseMessage]] = [ [HumanMessage(content="How many toes do dogs have?")] ] @@ -133,7 +133,7 @@ def test_anthropic_generate() -> None: def test_anthropic_streaming() -> None: """Test streaming tokens from anthropic.""" - chat = ChatAnthropic(model="test") + chat = ChatAnthropic(model="test") # type: ignore[call-arg] message = 
HumanMessage(content="Hello") response = chat.stream([message]) for token in response: @@ -145,7 +145,7 @@ def test_anthropic_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = ChatAnthropic( + chat = ChatAnthropic( # type: ignore[call-arg] model="test", callback_manager=callback_manager, verbose=True, @@ -161,7 +161,7 @@ async def test_anthropic_async_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = ChatAnthropic( + chat = ChatAnthropic( # type: ignore[call-arg] model="test", callback_manager=callback_manager, verbose=True, @@ -177,7 +177,7 @@ async def test_anthropic_async_streaming_callback() -> None: def test_anthropic_multimodal() -> None: """Test that multimodal inputs are handled correctly.""" - chat = ChatAnthropic(model=MODEL_NAME) + chat = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg] messages = [ HumanMessage( content=[ @@ -202,7 +202,7 @@ def test_streaming() -> None: callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - llm = ChatAnthropicMessages( + llm = ChatAnthropicMessages( # type: ignore[call-arg, call-arg] model_name=MODEL_NAME, streaming=True, callback_manager=callback_manager ) @@ -216,7 +216,7 @@ async def test_astreaming() -> None: callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - llm = ChatAnthropicMessages( + llm = ChatAnthropicMessages( # type: ignore[call-arg, call-arg] model_name=MODEL_NAME, streaming=True, callback_manager=callback_manager ) @@ -226,7 +226,7 @@ async def test_astreaming() -> None: def test_tool_use() -> None: - llm = ChatAnthropic( + llm = ChatAnthropic( # type: ignore[call-arg] model="claude-3-opus-20240229", ) @@ -277,7 +277,7 @@ def type_letter(letter: str) -> str: """Type the given letter.""" return "OK" - model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0).bind_tools( + model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0).bind_tools( # type: ignore[call-arg] [type_letter] ) @@ -314,7 +314,7 @@ def type_letter(letter: str) -> str: def test_with_structured_output() -> None: - llm = ChatAnthropic( + llm = ChatAnthropic( # type: ignore[call-arg] model="claude-3-opus-20240229", ) @@ -341,7 +341,7 @@ class GetWeather(BaseModel): @pytest.mark.parametrize("tool_choice", ["GetWeather", "auto", "any"]) def test_anthropic_bind_tools_tool_choice(tool_choice: str) -> None: - chat_model = ChatAnthropic( + chat_model = ChatAnthropic( # type: ignore[call-arg] model="claude-3-sonnet-20240229", ) chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice=tool_choice) diff --git a/libs/partners/anthropic/tests/integration_tests/test_experimental.py b/libs/partners/anthropic/tests/integration_tests/test_experimental.py index 4e23e0f4422e5..54cb5378757dc 100644 --- a/libs/partners/anthropic/tests/integration_tests/test_experimental.py +++ b/libs/partners/anthropic/tests/integration_tests/test_experimental.py @@ -18,7 +18,7 @@ def test_stream() -> None: """Test streaming tokens from Anthropic.""" - llm = ChatAnthropicTools(model_name=MODEL_NAME) + llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, 
str) @@ -26,7 +26,7 @@ def test_stream() -> None: async def test_astream() -> None: """Test streaming tokens from Anthropic.""" - llm = ChatAnthropicTools(model_name=MODEL_NAME) + llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] async for token in llm.astream("I'm Pickle Rick"): assert isinstance(token.content, str) @@ -34,7 +34,7 @@ async def test_astream() -> None: async def test_abatch() -> None: """Test streaming tokens from ChatAnthropicTools.""" - llm = ChatAnthropicTools(model_name=MODEL_NAME) + llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: @@ -43,7 +43,7 @@ async def test_abatch() -> None: async def test_abatch_tags() -> None: """Test batch tokens from ChatAnthropicTools.""" - llm = ChatAnthropicTools(model_name=MODEL_NAME) + llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = await llm.abatch( ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]} @@ -54,7 +54,7 @@ async def test_abatch_tags() -> None: def test_batch() -> None: """Test batch tokens from ChatAnthropicTools.""" - llm = ChatAnthropicTools(model_name=MODEL_NAME) + llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: @@ -63,7 +63,7 @@ def test_batch() -> None: async def test_ainvoke() -> None: """Test invoke tokens from ChatAnthropicTools.""" - llm = ChatAnthropicTools(model_name=MODEL_NAME) + llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]}) assert isinstance(result.content, str) @@ -71,7 +71,7 @@ async def test_ainvoke() -> None: def test_invoke() -> None: """Test invoke tokens from ChatAnthropicTools.""" - llm = ChatAnthropicTools(model_name=MODEL_NAME) + llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"])) assert isinstance(result.content, str) @@ -79,7 +79,7 @@ def test_invoke() -> None: def test_system_invoke() -> None: """Test invoke tokens with a system message""" - llm = ChatAnthropicTools(model_name=MODEL_NAME) + llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg] prompt = ChatPromptTemplate.from_messages( [ @@ -108,7 +108,7 @@ class Person(BaseModel): name: str age: int - chain = ChatAnthropicTools( + chain = ChatAnthropicTools( # type: ignore[call-arg, call-arg] model_name=BIG_MODEL_NAME, temperature=0, default_headers={"anthropic-beta": "tools-2024-04-04"}, @@ -153,7 +153,7 @@ class Email(BaseModel): ] ) - llm = ChatAnthropicTools( + llm = ChatAnthropicTools( # type: ignore[call-arg, call-arg] temperature=0, model_name=BIG_MODEL_NAME, default_headers={"anthropic-beta": "tools-2024-04-04"}, diff --git a/libs/partners/anthropic/tests/integration_tests/test_llms.py b/libs/partners/anthropic/tests/integration_tests/test_llms.py index 35bbf89377201..26cfac8a2ec33 100644 --- a/libs/partners/anthropic/tests/integration_tests/test_llms.py +++ b/libs/partners/anthropic/tests/integration_tests/test_llms.py @@ -18,20 +18,20 @@ def test_anthropic_model_name_param() -> None: @pytest.mark.requires("anthropic") def test_anthropic_model_param() -> None: - llm = Anthropic(model="foo") + llm = Anthropic(model="foo") # type: ignore[call-arg] assert llm.model == "foo" def 
test_anthropic_call() -> None: """Test valid call to anthropic.""" - llm = Anthropic(model="claude-instant-1") + llm = Anthropic(model="claude-instant-1") # type: ignore[call-arg] output = llm.invoke("Say foo:") assert isinstance(output, str) def test_anthropic_streaming() -> None: """Test streaming tokens from anthropic.""" - llm = Anthropic(model="claude-instant-1") + llm = Anthropic(model="claude-instant-1") # type: ignore[call-arg] generator = llm.stream("I'm Pickle Rick") assert isinstance(generator, Generator) diff --git a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py index 09ae57b542564..1b8968d1a177a 100644 --- a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py +++ b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py @@ -25,8 +25,8 @@ def test_initialization() -> None: """Test chat model initialization.""" for model in [ - ChatAnthropic(model_name="claude-instant-1.2", api_key="xyz", timeout=2), - ChatAnthropic( + ChatAnthropic(model_name="claude-instant-1.2", api_key="xyz", timeout=2), # type: ignore[arg-type] + ChatAnthropic( # type: ignore[call-arg, call-arg, call-arg] model="claude-instant-1.2", anthropic_api_key="xyz", default_request_timeout=2, @@ -39,32 +39,32 @@ def test_initialization() -> None: @pytest.mark.requires("anthropic") def test_anthropic_model_name_param() -> None: - llm = ChatAnthropic(model_name="foo") + llm = ChatAnthropic(model_name="foo") # type: ignore[call-arg, call-arg] assert llm.model == "foo" @pytest.mark.requires("anthropic") def test_anthropic_model_param() -> None: - llm = ChatAnthropic(model="foo") + llm = ChatAnthropic(model="foo") # type: ignore[call-arg] assert llm.model == "foo" @pytest.mark.requires("anthropic") def test_anthropic_model_kwargs() -> None: - llm = ChatAnthropic(model_name="foo", model_kwargs={"foo": "bar"}) + llm = ChatAnthropic(model_name="foo", model_kwargs={"foo": "bar"}) # type: ignore[call-arg, call-arg] assert llm.model_kwargs == {"foo": "bar"} @pytest.mark.requires("anthropic") def test_anthropic_invalid_model_kwargs() -> None: with pytest.raises(ValueError): - ChatAnthropic(model="foo", model_kwargs={"max_tokens_to_sample": 5}) + ChatAnthropic(model="foo", model_kwargs={"max_tokens_to_sample": 5}) # type: ignore[call-arg] @pytest.mark.requires("anthropic") def test_anthropic_incorrect_field() -> None: with pytest.warns(match="not default parameter"): - llm = ChatAnthropic(model="foo", foo="bar") + llm = ChatAnthropic(model="foo", foo="bar") # type: ignore[call-arg, call-arg] assert llm.model_kwargs == {"foo": "bar"} @@ -73,7 +73,7 @@ def test_anthropic_initialization() -> None: """Test anthropic initialization.""" # Verify that chat anthropic can be initialized using a secret key provided # as a parameter rather than an environment variable. 
- ChatAnthropic(model="test", anthropic_api_key="test") + ChatAnthropic(model="test", anthropic_api_key="test") # type: ignore[call-arg, call-arg] def test__format_output() -> None: @@ -90,7 +90,7 @@ def test__format_output() -> None: expected = ChatResult( generations=[ ChatGeneration( - message=AIMessage( + message=AIMessage( # type: ignore[misc] "bar", usage_metadata={ "input_tokens": 2, @@ -108,16 +108,16 @@ def test__format_output() -> None: "usage": {"input_tokens": 2, "output_tokens": 1}, }, ) - llm = ChatAnthropic(model="test", anthropic_api_key="test") + llm = ChatAnthropic(model="test", anthropic_api_key="test") # type: ignore[call-arg, call-arg] actual = llm._format_output(anthropic_msg) assert expected == actual def test__merge_messages() -> None: messages = [ - SystemMessage("foo"), - HumanMessage("bar"), - AIMessage( + SystemMessage("foo"), # type: ignore[misc] + HumanMessage("bar"), # type: ignore[misc] + AIMessage( # type: ignore[misc] [ {"text": "baz", "type": "text"}, { @@ -137,14 +137,14 @@ def test__merge_messages() -> None: }, ] ), - ToolMessage("buz output", tool_call_id="1"), - ToolMessage("blah output", tool_call_id="2"), - HumanMessage("next thing"), + ToolMessage("buz output", tool_call_id="1"), # type: ignore[misc] + ToolMessage("blah output", tool_call_id="2"), # type: ignore[misc] + HumanMessage("next thing"), # type: ignore[misc] ] expected = [ - SystemMessage("foo"), - HumanMessage("bar"), - AIMessage( + SystemMessage("foo"), # type: ignore[misc] + HumanMessage("bar"), # type: ignore[misc] + AIMessage( # type: ignore[misc] [ {"text": "baz", "type": "text"}, { @@ -164,7 +164,7 @@ def test__merge_messages() -> None: }, ] ), - HumanMessage( + HumanMessage( # type: ignore[misc] [ {"type": "tool_result", "content": "buz output", "tool_use_id": "1"}, {"type": "tool_result", "content": "blah output", "tool_use_id": "2"}, @@ -178,15 +178,15 @@ def test__merge_messages() -> None: def test__merge_messages_mutation() -> None: original_messages = [ - HumanMessage([{"type": "text", "text": "bar"}]), - HumanMessage("next thing"), + HumanMessage([{"type": "text", "text": "bar"}]), # type: ignore[misc] + HumanMessage("next thing"), # type: ignore[misc] ] messages = [ - HumanMessage([{"type": "text", "text": "bar"}]), - HumanMessage("next thing"), + HumanMessage([{"type": "text", "text": "bar"}]), # type: ignore[misc] + HumanMessage("next thing"), # type: ignore[misc] ] expected = [ - HumanMessage( + HumanMessage( # type: ignore[misc] [{"type": "text", "text": "bar"}, {"type": "text", "text": "next thing"}] ), ] @@ -305,13 +305,13 @@ def test_convert_to_anthropic_tool( def test__format_messages_with_tool_calls() -> None: - system = SystemMessage("fuzz") - human = HumanMessage("foo") - ai = AIMessage( + system = SystemMessage("fuzz") # type: ignore[misc] + human = HumanMessage("foo") # type: ignore[misc] + ai = AIMessage( # type: ignore[misc] "", tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}], ) - tool = ToolMessage( + tool = ToolMessage( # type: ignore[misc] "blurb", tool_call_id="1", ) @@ -344,15 +344,15 @@ def test__format_messages_with_tool_calls() -> None: def test__format_messages_with_str_content_and_tool_calls() -> None: - system = SystemMessage("fuzz") - human = HumanMessage("foo") + system = SystemMessage("fuzz") # type: ignore[misc] + human = HumanMessage("foo") # type: ignore[misc] # If content and tool_calls are specified and content is a string, then both are # included with content first. 
- ai = AIMessage( + ai = AIMessage( # type: ignore[misc] "thought", tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}], ) - tool = ToolMessage("blurb", tool_call_id="1") + tool = ToolMessage("blurb", tool_call_id="1") # type: ignore[misc] messages = [system, human, ai, tool] expected = ( "fuzz", @@ -383,15 +383,15 @@ def test__format_messages_with_str_content_and_tool_calls() -> None: def test__format_messages_with_list_content_and_tool_calls() -> None: - system = SystemMessage("fuzz") - human = HumanMessage("foo") + system = SystemMessage("fuzz") # type: ignore[misc] + human = HumanMessage("foo") # type: ignore[misc] # If content and tool_calls are specified and content is a list, then content is # preferred. - ai = AIMessage( + ai = AIMessage( # type: ignore[misc] [{"type": "text", "text": "thought"}], tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}], ) - tool = ToolMessage( + tool = ToolMessage( # type: ignore[misc] "blurb", tool_call_id="1", ) @@ -418,10 +418,10 @@ def test__format_messages_with_list_content_and_tool_calls() -> None: def test__format_messages_with_tool_use_blocks_and_tool_calls() -> None: """Show that tool_calls are preferred to tool_use blocks when both have same id.""" - system = SystemMessage("fuzz") - human = HumanMessage("foo") + system = SystemMessage("fuzz") # type: ignore[misc] + human = HumanMessage("foo") # type: ignore[misc] # NOTE: tool_use block in contents and tool_calls have different arguments. - ai = AIMessage( + ai = AIMessage( # type: ignore[misc] [ {"type": "text", "text": "thought"}, { @@ -433,7 +433,7 @@ def test__format_messages_with_tool_use_blocks_and_tool_calls() -> None: ], tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "BUZZ"}}], ) - tool = ToolMessage("blurb", tool_call_id="1") + tool = ToolMessage("blurb", tool_call_id="1") # type: ignore[misc] messages = [system, human, ai, tool] expected = ( "fuzz", @@ -465,7 +465,7 @@ def test__format_messages_with_tool_use_blocks_and_tool_calls() -> None: def test_anthropic_api_key_is_secret_string() -> None: """Test that the API key is stored as a SecretStr.""" - chat_model = ChatAnthropic( + chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg] model="claude-3-opus-20240229", anthropic_api_key="secret-api-key", ) @@ -477,7 +477,7 @@ def test_anthropic_api_key_masked_when_passed_from_env( ) -> None: """Test that the API key is masked when passed from an environment variable.""" monkeypatch.setenv("ANTHROPIC_API_KEY ", "secret-api-key") - chat_model = ChatAnthropic( + chat_model = ChatAnthropic( # type: ignore[call-arg] model="claude-3-opus-20240229", ) print(chat_model.anthropic_api_key, end="") # noqa: T201 @@ -490,7 +490,7 @@ def test_anthropic_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test that the API key is masked when passed via the constructor.""" - chat_model = ChatAnthropic( + chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg] model="claude-3-opus-20240229", anthropic_api_key="secret-api-key", ) @@ -502,7 +502,7 @@ def test_anthropic_api_key_masked_when_passed_via_constructor( def test_anthropic_uses_actual_secret_value_from_secretstr() -> None: """Test that the actual secret value is correctly retrieved.""" - chat_model = ChatAnthropic( + chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg] model="claude-3-opus-20240229", anthropic_api_key="secret-api-key", ) @@ -519,7 +519,7 @@ class GetWeather(BaseModel): def test_anthropic_bind_tools_tool_choice() -> None: - chat_model = 
ChatAnthropic( + chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg] model="claude-3-opus-20240229", anthropic_api_key="secret-api-key", ) diff --git a/libs/partners/anthropic/tests/unit_tests/test_output_parsers.py b/libs/partners/anthropic/tests/unit_tests/test_output_parsers.py index 1a8ee7d3e97ef..8f8814b34458e 100644 --- a/libs/partners/anthropic/tests/unit_tests/test_output_parsers.py +++ b/libs/partners/anthropic/tests/unit_tests/test_output_parsers.py @@ -19,7 +19,7 @@ {"type": "tool_use", "input": {"baz": "a"}, "id": "2", "name": "_Foo2"}, ] -_RESULT: List = [ChatGeneration(message=AIMessage(_CONTENT))] +_RESULT: List = [ChatGeneration(message=AIMessage(_CONTENT))] # type: ignore[misc] class _Foo1(BaseModel): @@ -50,7 +50,7 @@ def test_tools_output_parser_args_only() -> None: assert expected == actual expected = [] - actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) + actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc] assert expected == actual @@ -61,7 +61,7 @@ def test_tools_output_parser_first_tool_only() -> None: assert expected == actual expected = None - actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) + actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc] assert expected == actual From 8fad2e209a9843fad04c4e94d214a4a8f28cd4ca Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Mon, 3 Jun 2024 08:48:26 -0700 Subject: [PATCH 33/54] fix error message (#22437) The error message was confusing when a language was in the Enum but not yet implemented --- libs/text-splitters/langchain_text_splitters/character.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libs/text-splitters/langchain_text_splitters/character.py b/libs/text-splitters/langchain_text_splitters/character.py index 6783f98363ac8..a492bb01b38c7 100644 --- a/libs/text-splitters/langchain_text_splitters/character.py +++ b/libs/text-splitters/langchain_text_splitters/character.py @@ -635,6 +635,8 @@ def get_separators_for_language(language: Language) -> List[str]: " ", "", ] + elif language in Language._value2member_map_: + raise ValueError(f"Language {language} is not implemented yet!") else: raise ValueError( f"Language {language} is not supported! " From 86509161b0250964051e6075427db4ed4a935304 Mon Sep 17 00:00:00 2001 From: Dan <38852336+SebanDan@users.noreply.github.com> Date: Mon, 3 Jun 2024 17:55:06 +0200 Subject: [PATCH 34/54] community: fix AzureSearch delete documents (#22315) **Description:** Fix the AzureSearch delete documents method by using the FIELDS_ID variable instead of the hard-coded "id" value **Issue:** https://github.com/langchain-ai/langchain/issues/22314 Co-authored-by: dseban --- libs/community/langchain_community/vectorstores/azuresearch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/community/langchain_community/vectorstores/azuresearch.py b/libs/community/langchain_community/vectorstores/azuresearch.py index 1ba6bbf1b805c..29177043cf31a 100644 --- a/libs/community/langchain_community/vectorstores/azuresearch.py +++ b/libs/community/langchain_community/vectorstores/azuresearch.py @@ -401,7 +401,7 @@ def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool: False otherwise.
""" if ids: - res = self.client.delete_documents([{"id": i} for i in ids]) + res = self.client.delete_documents([{FIELDS_ID: i} for i in ids]) return len(res) > 0 else: return False From c01467b1f4f9beae8f1edb105b17aa4f36bf6573 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 3 Jun 2024 09:46:40 -0700 Subject: [PATCH 35/54] core[patch]: RFC: Allow concatenation of messages with multi part content (#22002) Anthropic's streaming treats tool calls as different content parts (streamed back with a different index) from normal content in the `content`. This means that we need to update our chunk-merging logic to handle chunks with multi-part content. The alternative is coerceing Anthropic's responses into a string, but we generally like to preserve model provider responses faithfully when we can. This will also likely be useful for multimodal outputs in the future. This current PR does unfortunately make `index` a magic field within content parts, but Anthropic and OpenAI both use it at the moment to determine order anyway. To avoid cases where we have content arrays with holes and to simplify the logic, I've also restricted merging to chunks in order. TODO: tests CC @baskaryan @ccurme @efriis --- libs/core/langchain_core/messages/base.py | 9 +++-- libs/core/tests/unit_tests/test_messages.py | 42 +++++++++++++++++++++ 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/libs/core/langchain_core/messages/base.py b/libs/core/langchain_core/messages/base.py index 60c57220033e0..d73b4d526b3f4 100644 --- a/libs/core/langchain_core/messages/base.py +++ b/libs/core/langchain_core/messages/base.py @@ -1,11 +1,11 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union, cast from langchain_core.load.serializable import Serializable from langchain_core.pydantic_v1 import Extra, Field from langchain_core.utils import get_bolded_text -from langchain_core.utils._merge import merge_dicts +from langchain_core.utils._merge import merge_dicts, merge_lists from langchain_core.utils.interactive_env import is_interactive_env if TYPE_CHECKING: @@ -95,9 +95,10 @@ def merge_content( else: return_list: List[Union[str, Dict]] = [first_content] return return_list + second_content - # If both are lists, merge them naively elif isinstance(second_content, List): - return first_content + second_content + # If both are lists + merged_list = merge_lists(first_content, second_content) + return cast(list, merged_list) # If the first content is a list, and the second content is a string else: # If the last element of the first content is a string diff --git a/libs/core/tests/unit_tests/test_messages.py b/libs/core/tests/unit_tests/test_messages.py index 21884cf1e835d..c893f8ced57ae 100644 --- a/libs/core/tests/unit_tests/test_messages.py +++ b/libs/core/tests/unit_tests/test_messages.py @@ -162,6 +162,48 @@ def test_chat_message_chunks() -> None: ), "Other MessageChunk + ChatMessageChunk should be a MessageChunk as the left side" +def test_complex_ai_message_chunks() -> None: + assert AIMessageChunk(content=["I am"], id="ai4") + AIMessageChunk( + content=[" indeed."] + ) == AIMessageChunk( + id="ai4", content=["I am", " indeed."] + ), "Content concatenation with arrays of strings should naively combine" + + assert AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + AIMessageChunk( + content=" indeed." 
+ ) == AIMessageChunk( + content=[{"index": 0, "text": "I am"}, " indeed."] + ), "Concatenating mixed content arrays should naively combine them" + + assert ( + AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + + AIMessageChunk(content=[{"index": 0, "text": " indeed."}]) + == AIMessageChunk(content=[{"index": 0, "text": "I am indeed."}]) + ), "Concatenating when both content arrays are dicts with the same index should merge" # noqa: E501 + + assert AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + AIMessageChunk( + content=[{"text": " indeed."}] + ) == AIMessageChunk( + content=[{"index": 0, "text": "I am"}, {"text": " indeed."}] + ), "Concatenating when one chunk is missing an index should not merge or throw" # noqa: E501 + + assert ( + AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + + AIMessageChunk(content=[{"index": 2, "text": " indeed."}]) + == AIMessageChunk( + content=[{"index": 0, "text": "I am"}, {"index": 2, "text": " indeed."}] + ) + ), "Concatenating when both content arrays are dicts with a gap between indexes should not result in a holey array" # noqa: E501 + + assert ( + AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + + AIMessageChunk(content=[{"index": 1, "text": " indeed."}]) + == AIMessageChunk( + content=[{"index": 0, "text": "I am"}, {"index": 1, "text": " indeed."}] + ) + ), "Concatenating when both content arrays are dicts with separate indexes should not merge" # noqa: E501 + + def test_function_message_chunks() -> None: assert FunctionMessageChunk( name="hello", content="I am", id="ai5" From ba0dca46d7fdd76b12cef4ad1cbab1526f1bd253 Mon Sep 17 00:00:00 2001 From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com> Date: Tue, 4 Jun 2024 03:37:10 +0800 Subject: [PATCH 36/54] community[minor]: Add IPEX-LLM BGE embedding support on both Intel CPU and GPU (#22226) **Description:** [IPEX-LLM](https://github.com/intel-analytics/ipex-llm) is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency. This PR adds ipex-llm integrations to langchain for BGE embedding support on both Intel CPU and GPU. 
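A rough usage sketch, condensed from the docs notebooks added in this PR (the model name below is just the BGE checkpoint those notebooks use; pass `model_kwargs={"device": "xpu"}` instead to target an Intel GPU):

```python
from langchain_community.embeddings import IpexLLMBgeEmbeddings

# device defaults to CPU; swap in model_kwargs={"device": "xpu"} for Intel GPU.
embedding_model = IpexLLMBgeEmbeddings(
    model_name="BAAI/bge-large-en-v1.5",
    model_kwargs={},
    encode_kwargs={"normalize_embeddings": True},
)

doc_vectors = embedding_model.embed_documents(
    ["IPEX-LLM runs LLMs on Intel CPU and GPU."]
)
query_vector = embedding_model.embed_query("What is IPEX-LLM?")
```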
**Dependencies:** `ipex-llm`, `sentence-transformers` **Contribution maintainer**: @Oscilloscope98 **tests and docs**: - langchain/docs/docs/integrations/text_embedding/ipex_llm.ipynb - langchain/docs/docs/integrations/text_embedding/ipex_llm_gpu.ipynb - langchain/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py --------- Co-authored-by: Shengsheng Huang --- .../text_embedding/ipex_llm.ipynb | 101 +++++++++++ .../text_embedding/ipex_llm_gpu.ipynb | 164 ++++++++++++++++++ .../embeddings/__init__.py | 3 + .../embeddings/ipex_llm.py | 140 +++++++++++++++ .../embeddings/test_ipex_llm.py | 52 ++++++ .../unit_tests/embeddings/test_imports.py | 1 + 6 files changed, 461 insertions(+) create mode 100644 docs/docs/integrations/text_embedding/ipex_llm.ipynb create mode 100644 docs/docs/integrations/text_embedding/ipex_llm_gpu.ipynb create mode 100644 libs/community/langchain_community/embeddings/ipex_llm.py create mode 100644 libs/community/tests/integration_tests/embeddings/test_ipex_llm.py diff --git a/docs/docs/integrations/text_embedding/ipex_llm.ipynb b/docs/docs/integrations/text_embedding/ipex_llm.ipynb new file mode 100644 index 0000000000000..ef13acbb9d868 --- /dev/null +++ b/docs/docs/integrations/text_embedding/ipex_llm.ipynb @@ -0,0 +1,101 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Local BGE Embeddings with IPEX-LLM on Intel CPU\n", + "\n", + "> [IPEX-LLM](https://github.com/intel-analytics/ipex-llm) is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency.\n", + "\n", + "This example goes over how to use LangChain to conduct embedding tasks with `ipex-llm` optimizations on Intel CPU. This would be helpful in applications such as RAG, document QA, etc.\n", + "\n", + "## Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -qU langchain langchain-community" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Install IPEX-LLM for optimizations on Intel CPU, as well as `sentence-transformers`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu\n", + "%pip install sentence-transformers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **Note**\n", + ">\n", + "> For Windows users, `--extra-index-url https://download.pytorch.org/whl/cpu` is not required when installing `ipex-llm`.\n", + "\n", + "## Basic Usage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.embeddings import IpexLLMBgeEmbeddings\n", + "\n", + "embedding_model = IpexLLMBgeEmbeddings(\n", + "    model_name=\"BAAI/bge-large-en-v1.5\",\n", + "    model_kwargs={},\n", + "    encode_kwargs={\"normalize_embeddings\": True},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "API Reference\n", + "- [IpexLLMBgeEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.ipex_llm.IpexLLMBgeEmbeddings.html)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sentence = \"IPEX-LLM is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency.\"\n", + "query = \"What is IPEX-LLM?\"\n", + "\n", + "text_embeddings = embedding_model.embed_documents([sentence, query])\n", + "print(f\"text_embeddings[0][:10]: {text_embeddings[0][:10]}\")\n", + "print(f\"text_embeddings[1][:10]: {text_embeddings[1][:10]}\")\n", + "\n", + "query_embedding = embedding_model.embed_query(query)\n", + "print(f\"query_embedding[:10]: {query_embedding[:10]}\")" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/docs/integrations/text_embedding/ipex_llm_gpu.ipynb b/docs/docs/integrations/text_embedding/ipex_llm_gpu.ipynb new file mode 100644 index 0000000000000..3bfe477b296a7 --- /dev/null +++ b/docs/docs/integrations/text_embedding/ipex_llm_gpu.ipynb @@ -0,0 +1,164 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Local BGE Embeddings with IPEX-LLM on Intel GPU\n", + "\n", + "> [IPEX-LLM](https://github.com/intel-analytics/ipex-llm) is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency.\n", + "\n", + "This example goes over how to use LangChain to conduct embedding tasks with `ipex-llm` optimizations on Intel GPU. This would be helpful in applications such as RAG, document QA, etc.\n", + "\n", + "> **Note**\n", + ">\n", + "> It is recommended that only Windows users with Intel Arc A-Series GPU (except for Intel Arc A300-Series or Pro A60) run this Jupyter notebook directly. For other cases (e.g.
Linux users, Intel iGPU, etc.), it is recommended to run the code as a Python script in a terminal for the best experience.\n", + "\n", + "## Install Prerequisites\n", + "To benefit from IPEX-LLM on Intel GPUs, there are several prerequisite steps for tools installation and environment preparation.\n", + "\n", + "If you are a Windows user, visit the [Install IPEX-LLM on Windows with Intel GPU Guide](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_windows_gpu.html), and follow [Install Prerequisites](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_windows_gpu.html#install-prerequisites) to update GPU driver (optional) and install Conda.\n", + "\n", + "If you are a Linux user, visit the [Install IPEX-LLM on Linux with Intel GPU](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_linux_gpu.html), and follow [**Install Prerequisites**](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_linux_gpu.html#install-prerequisites) to install GPU driver, Intel® oneAPI Base Toolkit 2024.0, and Conda.\n", + "\n", + "## Setup\n", + "\n", + "After installing the prerequisites, you should have created a conda environment with all prerequisites installed. **Start the jupyter service in this conda environment**:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -qU langchain langchain-community" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Install IPEX-LLM for optimizations on Intel GPU, as well as `sentence-transformers`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/\n", + "%pip install sentence-transformers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **Note**\n", + ">\n", + "> You can also use `https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/` as the extra-index-url.\n", + "\n", + "## Runtime Configuration\n", + "\n", + "For optimal performance, it is recommended to set several environment variables based on your device:\n", + "\n", + "### For Windows Users with Intel Core Ultra integrated GPU" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"SYCL_CACHE_PERSISTENT\"] = \"1\"\n", + "os.environ[\"BIGDL_LLM_XMX_DISABLED\"] = \"1\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### For Windows Users with Intel Arc A-Series GPU" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"SYCL_CACHE_PERSISTENT\"] = \"1\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **Note**\n", + ">\n", + "> The first time each model runs on Intel iGPU/Intel Arc A300-Series or Pro A60, it may take several minutes to compile.\n", + ">\n", + "> For other GPU types, please refer to [here](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Overview/install_gpu.html#runtime-configuration) for Windows users, and [here](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Overview/install_gpu.html#id5) for Linux users.\n", + "\n", + "\n", + "## Basic Usage\n", + "\n", + "Setting `device` to `\"xpu\"` in `model_kwargs` when initializing `IpexLLMBgeEmbeddings` 
will put the embedding model on Intel GPU and benefit from IPEX-LLM optimizations:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.embeddings import IpexLLMBgeEmbeddings\n", + "\n", + "embedding_model = IpexLLMBgeEmbeddings(\n", + " model_name=\"BAAI/bge-large-en-v1.5\",\n", + " model_kwargs={\"device\": \"xpu\"},\n", + " encode_kwargs={\"normalize_embeddings\": True},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "API Reference\n", + "- [IpexLLMBgeEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.ipex_llm.IpexLLMBgeEmbeddings.html)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sentence = \"IPEX-LLM is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency.\"\n", + "query = \"What is IPEX-LLM?\"\n", + "\n", + "text_embeddings = embedding_model.embed_documents([sentence, query])\n", + "print(f\"text_embeddings[0][:10]: {text_embeddings[0][:10]}\")\n", + "print(f\"text_embeddings[1][:10]: {text_embeddings[1][:10]}\")\n", + "\n", + "query_embedding = embedding_model.embed_query(query)\n", + "print(f\"query_embedding[:10]: {query_embedding[:10]}\")" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/libs/community/langchain_community/embeddings/__init__.py b/libs/community/langchain_community/embeddings/__init__.py index 5f72f232997ca..4905f879cd09e 100644 --- a/libs/community/langchain_community/embeddings/__init__.py +++ b/libs/community/langchain_community/embeddings/__init__.py @@ -104,6 +104,7 @@ from langchain_community.embeddings.infinity_local import ( InfinityEmbeddingsLocal, ) + from langchain_community.embeddings.ipex_llm import IpexLLMBgeEmbeddings from langchain_community.embeddings.itrex import ( QuantizedBgeEmbeddings, ) @@ -258,6 +259,7 @@ "HuggingFaceInstructEmbeddings", "InfinityEmbeddings", "InfinityEmbeddingsLocal", + "IpexLLMBgeEmbeddings", "JavelinAIGatewayEmbeddings", "JinaEmbeddings", "JohnSnowLabsEmbeddings", @@ -336,6 +338,7 @@ "HuggingFaceInstructEmbeddings": "langchain_community.embeddings.huggingface", "InfinityEmbeddings": "langchain_community.embeddings.infinity", "InfinityEmbeddingsLocal": "langchain_community.embeddings.infinity_local", + "IpexLLMBgeEmbeddings": "langchain_community.embeddings.ipex_llm", "JavelinAIGatewayEmbeddings": "langchain_community.embeddings.javelin_ai_gateway", "JinaEmbeddings": "langchain_community.embeddings.jina", "JohnSnowLabsEmbeddings": "langchain_community.embeddings.johnsnowlabs", diff --git a/libs/community/langchain_community/embeddings/ipex_llm.py b/libs/community/langchain_community/embeddings/ipex_llm.py new file mode 100644 index 0000000000000..8935d80a1e777 --- /dev/null +++ b/libs/community/langchain_community/embeddings/ipex_llm.py @@ -0,0 +1,140 @@ +# This file is adapted from +# https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/embeddings/huggingface.py + +from typing import Any, Dict, List, Optional + +from langchain_core.embeddings import Embeddings +from langchain_core.pydantic_v1 import BaseModel, Extra, Field + +DEFAULT_BGE_MODEL = "BAAI/bge-small-en-v1.5" +DEFAULT_QUERY_BGE_INSTRUCTION_EN = ( + "Represent this question for searching relevant passages: " +) 
+DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章:" + + +class IpexLLMBgeEmbeddings(BaseModel, Embeddings): + """Wrapper around the BGE embedding model + with IPEX-LLM optimizations on Intel CPUs and GPUs. + + To use, you should have the ``ipex-llm`` + and ``sentence_transformers`` packages installed. Refer to + `here <https://python.langchain.com/v0.1/docs/integrations/text_embedding/ipex_llm/>`_ + for installation on Intel CPU. + + Example on Intel CPU: + .. code-block:: python + + from langchain_community.embeddings import IpexLLMBgeEmbeddings + + embedding_model = IpexLLMBgeEmbeddings( + model_name="BAAI/bge-large-en-v1.5", + model_kwargs={}, + encode_kwargs={"normalize_embeddings": True}, + ) + + Refer to + `here <https://python.langchain.com/v0.1/docs/integrations/text_embedding/ipex_llm_gpu/>`_ + for installation on Intel GPU. + + Example on Intel GPU: + .. code-block:: python + + from langchain_community.embeddings import IpexLLMBgeEmbeddings + + embedding_model = IpexLLMBgeEmbeddings( + model_name="BAAI/bge-large-en-v1.5", + model_kwargs={"device": "xpu"}, + encode_kwargs={"normalize_embeddings": True}, + ) + """ + + client: Any #: :meta private: + model_name: str = DEFAULT_BGE_MODEL + """Model name to use.""" + cache_folder: Optional[str] = None + """Path to store models. + Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.""" + model_kwargs: Dict[str, Any] = Field(default_factory=dict) + """Keyword arguments to pass to the model.""" + encode_kwargs: Dict[str, Any] = Field(default_factory=dict) + """Keyword arguments to pass when calling the `encode` method of the model.""" + query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN + """Instruction to use for embedding query.""" + embed_instruction: str = "" + """Instruction to use for embedding document.""" + + def __init__(self, **kwargs: Any): + """Initialize the sentence_transformer.""" + super().__init__(**kwargs) + try: + import sentence_transformers + from ipex_llm.transformers.convert import _optimize_post, _optimize_pre + + except ImportError as exc: + base_url = ( + "https://python.langchain.com/v0.1/docs/integrations/text_embedding/" + ) + raise ImportError( + "Could not import ipex_llm or sentence_transformers. " + f"Please refer to {base_url}/ipex_llm/ " + "to install required packages on Intel CPU. " + f"And refer to {base_url}/ipex_llm_gpu/ " + "to install required packages on Intel GPU. " + ) from exc + + # Set "cpu" as default device + if "device" not in self.model_kwargs: + self.model_kwargs["device"] = "cpu" + + if self.model_kwargs["device"] not in ["cpu", "xpu"]: + raise ValueError( + "IpexLLMBgeEmbeddings currently only supports device to be " + f"'cpu' or 'xpu', but you have: {self.model_kwargs['device']}." + ) + + self.client = sentence_transformers.SentenceTransformer( + self.model_name, cache_folder=self.cache_folder, **self.model_kwargs + ) + + # Add ipex-llm optimizations + self.client = _optimize_pre(self.client) + self.client = _optimize_post(self.client) + if self.model_kwargs["device"] == "xpu": + self.client = self.client.half().to("xpu") + + if "-zh" in self.model_name: + self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH + + class Config: + """Configuration for this pydantic object.""" + + extra = Extra.forbid + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + """Compute doc embeddings using a HuggingFace transformer model. + + Args: + texts: The list of texts to embed. + + Returns: + List of embeddings, one for each text.
+ """ + texts = [self.embed_instruction + t.replace("\n", " ") for t in texts] + embeddings = self.client.encode(texts, **self.encode_kwargs) + return embeddings.tolist() + + def embed_query(self, text: str) -> List[float]: + """Compute query embeddings using a HuggingFace transformer model. + + Args: + text: The text to embed. + + Returns: + Embeddings for the text. + """ + text = text.replace("\n", " ") + embedding = self.client.encode( + self.query_instruction + text, **self.encode_kwargs + ) + return embedding.tolist() diff --git a/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py b/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py new file mode 100644 index 0000000000000..30a7c96d70047 --- /dev/null +++ b/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py @@ -0,0 +1,52 @@ +"""Test IPEX LLM""" + +import os + +import pytest + +from langchain_community.embeddings import IpexLLMBgeEmbeddings + +model_ids_to_test = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS") or "" +skip_if_no_model_ids = pytest.mark.skipif( + not model_ids_to_test, + reason="TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS environment variable not set.", +) +model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")] # type: ignore + +device = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_DEVICE") or "cpu" + +sentence = "IPEX-LLM is a PyTorch library for running LLM on Intel CPU and GPU (e.g., \ +local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency." +query = "What is IPEX-LLM?" + + +@skip_if_no_model_ids +@pytest.mark.parametrize( + "model_id", + model_ids_to_test, +) +def test_embed_documents(model_id: str) -> None: + """Test IpexLLMBgeEmbeddings embed_documents""" + embedding_model = IpexLLMBgeEmbeddings( + model_name=model_id, + model_kwargs={"device": device}, + encode_kwargs={"normalize_embeddings": True}, + ) + output = embedding_model.embed_documents([sentence, query]) + assert len(output) == 2 + + +@skip_if_no_model_ids +@pytest.mark.parametrize( + "model_id", + model_ids_to_test, +) +def test_embed_query(model_id: str) -> None: + """Test IpexLLMBgeEmbeddings embed_documents""" + embedding_model = IpexLLMBgeEmbeddings( + model_name=model_id, + model_kwargs={"device": device}, + encode_kwargs={"normalize_embeddings": True}, + ) + output = embedding_model.embed_query(query) + assert isinstance(output, list) diff --git a/libs/community/tests/unit_tests/embeddings/test_imports.py b/libs/community/tests/unit_tests/embeddings/test_imports.py index fbf40de973b4c..3dd123ae79ad0 100644 --- a/libs/community/tests/unit_tests/embeddings/test_imports.py +++ b/libs/community/tests/unit_tests/embeddings/test_imports.py @@ -55,6 +55,7 @@ "LocalAIEmbeddings", "AwaEmbeddings", "HuggingFaceBgeEmbeddings", + "IpexLLMBgeEmbeddings", "ErnieEmbeddings", "JavelinAIGatewayEmbeddings", "OllamaEmbeddings", From 13140dc4ffbad618212a34174b5a569149f2d2ba Mon Sep 17 00:00:00 2001 From: maang-h <55082429+maang-h@users.noreply.github.com> Date: Tue, 4 Jun 2024 03:38:11 +0800 Subject: [PATCH 37/54] community[patch]: Update the default api_url and reqeust_body of sparkllm embedding (#22136) - **Description:** When I was running the SparkLLMTextEmbeddings, app_id, api_key and api_secret are all correct, but it cannot run normally using the current URL. 
```python # example from langchain_community.embeddings import SparkLLMTextEmbeddings embedding = SparkLLMTextEmbeddings( spark_app_id="my-app-id", spark_api_key="my-api-key", spark_api_secret="my-api-secret" ) text1 = "hello" print(embedding.embed_query(text1)) ``` ![sparkembedding](https://github.com/langchain-ai/langchain/assets/55082429/11daa853-4f67-45b2-aae2-c95caa14e38c) So I updated the url and request body parameters according to [Embedding_api](https://www.xfyun.cn/doc/spark/Embedding_api.html), now it is runnable. --- .../embeddings/sparkllm.py | 96 ++++++++++++++----- .../unit_tests/embeddings/test_sparkllm.py | 47 +++++++++ 2 files changed, 118 insertions(+), 25 deletions(-) create mode 100644 libs/community/tests/unit_tests/embeddings/test_sparkllm.py diff --git a/libs/community/langchain_community/embeddings/sparkllm.py b/libs/community/langchain_community/embeddings/sparkllm.py index 44a6b9a7fdad3..fe82f9b3126f2 100644 --- a/libs/community/langchain_community/embeddings/sparkllm.py +++ b/libs/community/langchain_community/embeddings/sparkllm.py @@ -5,25 +5,20 @@ import logging from datetime import datetime from time import mktime -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Literal, Optional from urllib.parse import urlencode from wsgiref.handlers import format_date_time import numpy as np import requests from langchain_core.embeddings import Embeddings -from langchain_core.pydantic_v1 import BaseModel, SecretStr, root_validator +from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env from numpy import ndarray -# Used for document and knowledge embedding -EMBEDDING_P_API_URL: str = "https://cn-huabei-1.xf-yun.com/v1/private/sa8a05c27" -# Used for user questions embedding -EMBEDDING_Q_API_URL: str = "https://cn-huabei-1.xf-yun.com/v1/private/s50d55a16" - # SparkLLMTextEmbeddings is an embedding model provided by iFLYTEK Co., Ltd.. (https://iflytek.com/en/). -# Official Website: https://www.xfyun.cn/doc/spark/Embedding_new_api.html +# Official Website: https://www.xfyun.cn/doc/spark/Embedding_api.html # Developers need to create an application in the console first, use the appid, APIKey, # and APISecret provided in the application for authentication, # and generate an authentication URL for handshake. @@ -43,39 +38,89 @@ def __init__(self, host: str, path: str, schema: str) -> None: class SparkLLMTextEmbeddings(BaseModel, Embeddings): - """SparkLLM Text Embedding models.""" + """SparkLLM Text Embedding models. + + To use, you should have the environment variables "SPARK_APP_ID", "SPARK_API_KEY" + and "SPARK_API_SECRET" set to your APP_ID, API_KEY and API_SECRET, or pass them + as named parameters to the constructor. + + Example: + .. code-block:: python + + from langchain_community.embeddings import SparkLLMTextEmbeddings + + embeddings = SparkLLMTextEmbeddings( + spark_app_id="your-app-id", + spark_api_key="your-api-key", + spark_api_secret="your-api-secret" + ) + text = "This is a test query."
+ query_result = embeddings.embed_query(text) + + """ + + spark_app_id: Optional[SecretStr] = Field(default=None, alias="app_id") + """Automatically inferred from env var `SPARK_APP_ID` if not provided.""" + spark_api_key: Optional[SecretStr] = Field(default=None, alias="api_key") + """Automatically inferred from env var `SPARK_API_KEY` if not provided.""" + spark_api_secret: Optional[SecretStr] = Field(default=None, alias="api_secret") + """Automatically inferred from env var `SPARK_API_SECRET` if not provided.""" + base_url: str = Field(default="https://emb-cn-huabei-1.xf-yun.com/") + """Base URL path for API requests""" + domain: Literal["para", "query"] = Field(default="para") + """Specifies which type of embedding this request is for. + If "para" (default), it is a document embedding. + If "query", it is a query embedding.""" + + class Config: + """Configuration for this pydantic object""" - spark_app_id: SecretStr - spark_api_key: SecretStr - spark_api_secret: SecretStr + allow_population_by_field_name = True @root_validator(allow_reuse=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that auth token exists in environment.""" - cls.spark_app_id = convert_to_secret_str( + values["spark_app_id"] = convert_to_secret_str( get_from_dict_or_env(values, "spark_app_id", "SPARK_APP_ID") ) - cls.spark_api_key = convert_to_secret_str( + values["spark_api_key"] = convert_to_secret_str( get_from_dict_or_env(values, "spark_api_key", "SPARK_API_KEY") ) - cls.spark_api_secret = convert_to_secret_str( + values["spark_api_secret"] = convert_to_secret_str( get_from_dict_or_env(values, "spark_api_secret", "SPARK_API_SECRET") ) return values def _embed(self, texts: List[str], host: str) -> Optional[List[List[float]]]: + """Internal method to call Spark Embedding API and return embeddings. + + Args: + texts: A list of texts to embed. + host: Base URL path for API requests + + Returns: + A list of lists of floats representing the embeddings, + or a list containing None if an error occurs. + """ + app_id = "" + api_key = "" + api_secret = "" + if self.spark_app_id: + app_id = self.spark_app_id.get_secret_value() + if self.spark_api_key: + api_key = self.spark_api_key.get_secret_value() + if self.spark_api_secret: + api_secret = self.spark_api_secret.get_secret_value() url = self._assemble_ws_auth_url( request_url=host, method="POST", - api_key=self.spark_api_key.get_secret_value(), - api_secret=self.spark_api_secret.get_secret_value(), + api_key=api_key, + api_secret=api_secret, ) embed_result: list = [] for text in texts: query_context = {"messages": [{"content": text, "role": "user"}]} - content = self._get_body( - self.spark_app_id.get_secret_value(), query_context - ) + content = self._get_body(app_id, query_context) response = requests.post( url, json=content, headers={"content-type": "application/json"} ).text @@ -95,7 +140,7 @@ def embed_documents(self, texts: List[str]) -> Optional[List[List[float]]]: # t Returns: A list of embeddings, one for each text, or None if an error occurs. """ - return self._embed(texts, EMBEDDING_P_API_URL) + return self._embed(texts, self.base_url) def embed_query(self, text: str) -> Optional[List[float]]: # type: ignore[override] """Public method to get embedding for a single query text. Args: text: The text to embed. Returns: Embeddings for the text, or None if an error occurs. 
""" - result = self._embed([text], EMBEDDING_Q_API_URL) + result = self._embed([text], self.base_url) return result[0] if result is not None else None @staticmethod @@ -151,11 +196,12 @@ def _parse_url(request_url: str) -> Url: u = Url(host, path, schema) return u - @staticmethod - def _get_body(appid: str, text: dict) -> Dict[str, Any]: + def _get_body(self, appid: str, text: dict) -> Dict[str, Any]: body = { "header": {"app_id": appid, "uid": "39769795890", "status": 3}, - "parameter": {"emb": {"feature": {"encoding": "utf8"}}}, + "parameter": { + "emb": {"domain": self.domain, "feature": {"encoding": "utf8"}} + }, "payload": { "messages": { "text": base64.b64encode(json.dumps(text).encode("utf-8")).decode() diff --git a/libs/community/tests/unit_tests/embeddings/test_sparkllm.py b/libs/community/tests/unit_tests/embeddings/test_sparkllm.py new file mode 100644 index 0000000000000..d318035106e23 --- /dev/null +++ b/libs/community/tests/unit_tests/embeddings/test_sparkllm.py @@ -0,0 +1,47 @@ +import os +from typing import cast + +import pytest +from langchain_core.pydantic_v1 import SecretStr, ValidationError + +from langchain_community.embeddings import SparkLLMTextEmbeddings + + +def test_sparkllm_initialization_by_alias() -> None: + # Effective initialization + embeddings = SparkLLMTextEmbeddings( + app_id="your-app-id", # type: ignore[arg-type] + api_key="your-api-key", # type: ignore[arg-type] + api_secret="your-api-secret", # type: ignore[arg-type] + ) + assert cast(SecretStr, embeddings.spark_app_id).get_secret_value() == "your-app-id" + assert ( + cast(SecretStr, embeddings.spark_api_key).get_secret_value() == "your-api-key" + ) + assert ( + cast(SecretStr, embeddings.spark_api_secret).get_secret_value() + == "your-api-secret" + ) + + +def test_initialization_parameters_from_env() -> None: + # Setting environment variable + os.environ["SPARK_APP_ID"] = "your-app-id" + os.environ["SPARK_API_KEY"] = "your-api-key" + os.environ["SPARK_API_SECRET"] = "your-api-secret" + + # Effective initialization + embeddings = SparkLLMTextEmbeddings() + assert cast(SecretStr, embeddings.spark_app_id).get_secret_value() == "your-app-id" + assert ( + cast(SecretStr, embeddings.spark_api_key).get_secret_value() == "your-api-key" + ) + assert ( + cast(SecretStr, embeddings.spark_api_secret).get_secret_value() + == "your-api-secret" + ) + + # Environment variable missing + del os.environ["SPARK_APP_ID"] + with pytest.raises(ValidationError): + SparkLLMTextEmbeddings() From 1f751343e25e9679ac5861ec03fff02b86817af4 Mon Sep 17 00:00:00 2001 From: Harichandan Roy Date: Mon, 3 Jun 2024 14:38:51 -0500 Subject: [PATCH 38/54] community[patch]: update embeddings/oracleai.py (#22240) Thank you for contributing to LangChain! - [ ] **PR title**: "package: description" - Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes. - Example: "community: add foobar LLM" "community/embeddings: update oracleai.py" - [ ] **PR message**: ***Delete this entire checklist*** and replace with - **Description:** a description of the change - **Issue:** the issue # it fixes, if applicable - **Dependencies:** any dependencies required for this change - **Twitter handle:** if your PR gets announced, and you'd like a mention, we'll gladly shout you out! Adding oracle VECTOR_ARRAY_T support. - [ ] **Add tests and docs**: If you're adding a new integration, please include 1. 
a test for the integration, preferably unit tests that do not rely on network access, 2. an example notebook showing its use. It lives in `docs/docs/integrations` directory. Tests are not impacted. - [ ] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/ Done. Additional guidelines: - Make sure optional dependencies are imported within a function. - Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests. - Most PRs should not touch more than one package. - Changes should be backwards compatible. - If you are adding something to community, do not re-import it in langchain. If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17. --- cookbook/oracleai_demo.ipynb | 2 - .../text_embedding/oracleai.ipynb | 7 --- .../embeddings/oracleai.py | 59 +++++++++++-------- 3 files changed, 36 insertions(+), 32 deletions(-) diff --git a/cookbook/oracleai_demo.ipynb b/cookbook/oracleai_demo.ipynb index ad0a6385cb75e..8d67e122833c5 100644 --- a/cookbook/oracleai_demo.ipynb +++ b/cookbook/oracleai_demo.ipynb @@ -526,8 +526,6 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "***Note:*** Currently, OracleEmbeddings processes each embedding generation request individually, without batching, by calling REST endpoints separately for each request. This method could potentially lead to exceeding the maximum request per minute quota set by some providers. However, we are actively working to enhance this process by implementing request batching, which will allow multiple embedding requests to be combined into fewer API calls, thereby optimizing our use of provider resources and adhering to their request limits. This update is expected to be rolled out soon, eliminating the current limitation.\n", - "\n", "***Note:*** Users may need to configure a proxy to utilize third-party embedding generation providers, excluding the 'database' provider that utilizes an ONNX model." ] }, diff --git a/docs/docs/integrations/text_embedding/oracleai.ipynb b/docs/docs/integrations/text_embedding/oracleai.ipynb index cfda80026ba76..1cb2c2adca763 100644 --- a/docs/docs/integrations/text_embedding/oracleai.ipynb +++ b/docs/docs/integrations/text_embedding/oracleai.ipynb @@ -193,13 +193,6 @@ "Oracle AI Vector Search provides multiple methods for generating embeddings, utilizing either locally hosted ONNX models or third-party APIs. For comprehensive instructions on configuring these alternatives, please refer to the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-C6439E94-4E86-4ECD-954E-4B73D53579DE)." ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "***Note:*** Currently, OracleEmbeddings processes each embedding generation request individually, without batching, by calling REST endpoints separately for each request. This method could potentially lead to exceeding the maximum request per minute quota set by some providers. However, we are actively working to enhance this process by implementing request batching, which will allow multiple embedding requests to be combined into fewer API calls, thereby optimizing our use of provider resources and adhering to their request limits. 
This update is expected to be rolled out soon, eliminating the current limitation." - ] - }, { "cell_type": "markdown", "metadata": {}, diff --git a/libs/community/langchain_community/embeddings/oracleai.py b/libs/community/langchain_community/embeddings/oracleai.py index ca2dc7f5b733c..24105cf020447 100644 --- a/libs/community/langchain_community/embeddings/oracleai.py +++ b/libs/community/langchain_community/embeddings/oracleai.py @@ -118,23 +118,29 @@ def embed_documents(self, texts: List[str]) -> List[List[float]]: "begin utl_http.set_proxy(:proxy); end;", proxy=self.proxy ) - for text in texts: - cursor.execute( - "select t.* " - + "from dbms_vector_chain.utl_to_embeddings(:content, " - + "json(:params)) t", - content=text, - params=json.dumps(self.params), - ) + chunks = [] + for i, text in enumerate(texts, start=1): + chunk = {"chunk_id": i, "chunk_data": text} + chunks.append(json.dumps(chunk)) + + vector_array_type = self.conn.gettype("SYS.VECTOR_ARRAY_T") + inputs = vector_array_type.newobject(chunks) + cursor.execute( + "select t.* " + + "from dbms_vector_chain.utl_to_embeddings(:content, " + + "json(:params)) t", + content=inputs, + params=json.dumps(self.params), + ) - for row in cursor: - if row is None: - embeddings.append([]) - else: - rdata = json.loads(row[0]) - # dereference string as array - vec = json.loads(rdata["embed_vector"]) - embeddings.append(vec) + for row in cursor: + if row is None: + embeddings.append([]) + else: + rdata = json.loads(row[0]) + # dereference string as array + vec = json.loads(rdata["embed_vector"]) + embeddings.append(vec) cursor.close() return embeddings @@ -159,20 +165,27 @@ def embed_query(self, text: str) -> List[float]: """ # A sample unit test. -''' get the Oracle connection ''' +import oracledb +# get the Oracle connection conn = oracledb.connect( - user="", - password="", - dsn="") + user="", + password="", + dsn="/", +) print("Oracle connection is established...") -''' params ''' -embedder_params = {"provider":"database", "model":"demo_model"} +# params +embedder_params = {"provider": "database", "model": "demo_model"} proxy = "" -''' instance ''' +# instance embedder = OracleEmbeddings(conn=conn, params=embedder_params, proxy=proxy) +docs = ["hello world!", "hi everyone!", "greetings!"] +embeds = embedder.embed_documents(docs) +print(f"Total Embeddings: {len(embeds)}") +print(f"Embedding generated by OracleEmbeddings: {embeds[0]}\n") + embed = embedder.embed_query("Hello World!") print(f"Embedding generated by OracleEmbeddings: {embed}") From 56e5aa4dd9fbd5efcf7836e7323a42867cdad3fc Mon Sep 17 00:00:00 2001 From: Brandon Sharp <8883217+Reverendheat@users.noreply.github.com> Date: Mon, 3 Jun 2024 16:05:56 -0400 Subject: [PATCH 39/54] community[patch]: Airtable to allow for addtl params (#22092) - [X] **PR title**: "community: added optional params to Airtable table.all()" - [X] **PR message**: - **Description:** Add's **kwargs to AirtableLoader to allow for kwargs: https://pyairtable.readthedocs.io/en/latest/api.html#pyairtable.Table.all - **Issue:** N/A - **Dependencies:** N/A - **Twitter handle:** parakoopa88 - [X] **Add tests and docs**: If you're adding a new integration, please include 1. a test for the integration, preferably unit tests that do not rely on network access, 2. an example notebook showing its use. It lives in `docs/docs/integrations` directory. - [X] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. 
See contribution guidelines for more: https://python.langchain.com/docs/contributing/ If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17. --------- Co-authored-by: Bagatur Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com> --- .../document_loaders/airtable.ipynb | 5 ++- .../document_loaders/airtable.py | 42 +++++++++++-------- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/docs/docs/integrations/document_loaders/airtable.ipynb b/docs/docs/integrations/document_loaders/airtable.ipynb index 48080be9ffb96..fb59c3bbb0a47 100644 --- a/docs/docs/integrations/document_loaders/airtable.ipynb +++ b/docs/docs/integrations/document_loaders/airtable.ipynb @@ -47,7 +47,8 @@ "source": [ "api_key = \"xxx\"\n", "base_id = \"xxx\"\n", - "table_id = \"xxx\"" + "table_id = \"xxx\"\n", + "view = \"xxx\" # optional" ] }, { @@ -57,7 +58,7 @@ "metadata": {}, "outputs": [], "source": [ - "loader = AirtableLoader(api_key, table_id, base_id)\n", + "loader = AirtableLoader(api_key, table_id, base_id, view=view)\n", "docs = loader.load()" ] }, diff --git a/libs/community/langchain_community/document_loaders/airtable.py b/libs/community/langchain_community/document_loaders/airtable.py index c39ed1fe630cb..45487ba243173 100644 --- a/libs/community/langchain_community/document_loaders/airtable.py +++ b/libs/community/langchain_community/document_loaders/airtable.py @@ -1,21 +1,29 @@ -from typing import Iterator +from typing import Any, Iterator +from langchain_core.document_loaders import BaseLoader from langchain_core.documents import Document -from langchain_community.document_loaders.base import BaseLoader - class AirtableLoader(BaseLoader): """Load the `Airtable` tables.""" - def __init__(self, api_token: str, table_id: str, base_id: str): - """Initialize with API token and the IDs for table and base""" + def __init__( + self, api_token: str, table_id: str, base_id: str, **kwargs: Any + ) -> None: + """Initialize with API token and the IDs for table and base. + + Args: + api_token: Airtable API token. + table_id: Airtable table ID. + base_id: Airtable base ID. + **kwargs: Additional parameters to pass to Table.all().
Refer to the + pyairtable documentation for available options: + https://pyairtable.readthedocs.io/en/latest/api.html#pyairtable.Table.all + """ # noqa: E501 self.api_token = api_token - """Airtable API token.""" self.table_id = table_id - """Airtable table ID.""" self.base_id = base_id - """Airtable base ID.""" + self.kwargs = kwargs def lazy_load(self) -> Iterator[Document]: """Lazy load Documents from table.""" @@ -23,14 +31,14 @@ def lazy_load(self) -> Iterator[Document]: from pyairtable import Table table = Table(self.api_token, self.base_id, self.table_id) - records = table.all() + records = table.all(**self.kwargs) for record in records: + metadata = { + "source": self.base_id + "_" + self.table_id, + "base_id": self.base_id, + "table_id": self.table_id, + } + if "view" in self.kwargs: + metadata["view"] = self.kwargs["view"] # Need to convert record from dict to str - yield Document( - page_content=str(record), - metadata={ - "source": self.base_id + "_" + self.table_id, - "base_id": self.base_id, - "table_id": self.table_id, - }, - ) + yield Document(page_content=str(record), metadata=metadata) From 01352bb55f52015885162b13fff1e02219b39822 Mon Sep 17 00:00:00 2001 From: maang-h <55082429+maang-h@users.noreply.github.com> Date: Tue, 4 Jun 2024 04:22:38 +0800 Subject: [PATCH 40/54] community[minor]: Implement MiniMaxChat interface (#22391) - **Description:** Implement MiniMaxChat interface, include: - No longer inherits the LLM class (like other chat model) - Update request parameters (v1 -> v2) - update `base url` - update message role (system, user, assistant) - add `stream` function - no longer use `group id` - Implement the `_stream`, `_agenerate`, and `_astream` interfaces [minimax v2 api document](https://platform.minimaxi.com/document/guides/chat-model/V2?id=65e0736ab2845de20908e2dd) --- .../chat_models/minimax.py | 344 ++++++++++++++++-- 1 file changed, 311 insertions(+), 33 deletions(-) diff --git a/libs/community/langchain_community/chat_models/minimax.py b/libs/community/langchain_community/chat_models/minimax.py index 2b8419b9d1f8a..d79e3499a6a13 100644 --- a/libs/community/langchain_community/chat_models/minimax.py +++ b/libs/community/langchain_community/chat_models/minimax.py @@ -1,61 +1,212 @@ """Wrapper around Minimax chat models.""" +import json import logging -from typing import Any, Dict, List, Optional, cast +from contextlib import asynccontextmanager, contextmanager +from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Type, Union from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) -from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.language_models.chat_models import ( + BaseChatModel, + agenerate_from_stream, + generate_from_stream, +) from langchain_core.messages import ( AIMessage, + AIMessageChunk, BaseMessage, + BaseMessageChunk, + ChatMessage, + ChatMessageChunk, HumanMessage, + SystemMessage, ) -from langchain_core.outputs import ChatGeneration, ChatResult - -from langchain_community.llms.minimax import MinimaxCommon -from langchain_community.llms.utils import enforce_stop_tokens +from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult +from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator +from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env logger = logging.getLogger(__name__) -def _parse_message(msg_type: str, text: str) -> Dict: - return {"sender_type": msg_type, "text": 
text} +@contextmanager +def connect_httpx_sse(client: Any, method: str, url: str, **kwargs: Any) -> Iterator: + from httpx_sse import EventSource + + with client.stream(method, url, **kwargs) as response: + yield EventSource(response) + + +@asynccontextmanager +async def aconnect_httpx_sse( + client: Any, method: str, url: str, **kwargs: Any +) -> AsyncIterator: + from httpx_sse import EventSource + + async with client.stream(method, url, **kwargs) as response: + yield EventSource(response) + +def _convert_message_to_dict(message: BaseMessage) -> Dict[str, Any]: + """Convert a LangChain messages to Dict.""" + message_dict: Dict[str, Any] + if isinstance(message, HumanMessage): + message_dict = {"role": "user", "content": message.content} + elif isinstance(message, AIMessage): + message_dict = {"role": "assistant", "content": message.content} + elif isinstance(message, SystemMessage): + message_dict = {"role": "system", "content": message.content} + else: + raise TypeError(f"Got unknown type '{message.__class__.__name__}'.") + return message_dict -def _parse_chat_history(history: List[BaseMessage]) -> List: - """Parse a sequence of messages into history.""" - chat_history = [] - for message in history: - content = cast(str, message.content) - if isinstance(message, HumanMessage): - chat_history.append(_parse_message("USER", content)) - if isinstance(message, AIMessage): - chat_history.append(_parse_message("BOT", content)) - return chat_history +def _convert_dict_to_message(dct: Dict[str, Any]) -> BaseMessage: + """Convert a dict to LangChain message.""" + role = dct.get("role") + content = dct.get("content", "") + if role == "assistant": + additional_kwargs = {} + tool_calls = dct.get("tool_calls", None) + if tool_calls is not None: + additional_kwargs["tool_calls"] = tool_calls + return AIMessage(content=content, additional_kwargs=additional_kwargs) + return ChatMessage(role=role, content=content) # type: ignore[arg-type] -class MiniMaxChat(MinimaxCommon, BaseChatModel): + +def _convert_delta_to_message_chunk( + dct: Dict[str, Any], default_class: Type[BaseMessageChunk] +) -> BaseMessageChunk: + role = dct.get("role") + content = dct.get("content", "") + additional_kwargs = {} + tool_calls = dct.get("tool_call", None) + if tool_calls is not None: + additional_kwargs["tool_calls"] = tool_calls + + if role == "assistant" or default_class == AIMessageChunk: + return AIMessageChunk(content=content, additional_kwargs=additional_kwargs) + if role or default_class == ChatMessageChunk: + return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type] + return default_class(content=content) # type: ignore[call-arg] + + +class MiniMaxChat(BaseChatModel): """MiniMax large language models. - To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and - ``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to - the constructor. + To use, you should have the environment variable``MINIMAX_API_KEY`` set with + your API token, or pass it as a named parameter to the constructor. Example: .. 
code-block:: python from langchain_community.chat_models import MiniMaxChat - llm = MiniMaxChat(model_name="abab5-chat") + llm = MiniMaxChat(model="abab5-chat") """ + @property + def _identifying_params(self) -> Dict[str, Any]: + """Get the identifying parameters.""" + return {**{"model": self.model}, **self._default_params} + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "minimax" + + @property + def _default_params(self) -> Dict[str, Any]: + """Get the default parameters for calling OpenAI API.""" + return { + "model": self.model, + "max_tokens": self.max_tokens, + "temperature": self.temperature, + "top_p": self.top_p, + **self.model_kwargs, + } + + _client: Any + model: str = "abab6.5-chat" + """Model name to use.""" + max_tokens: int = 256 + """Denotes the number of tokens to predict per generation.""" + temperature: float = 0.7 + """A non-negative float that tunes the degree of randomness in generation.""" + top_p: float = 0.95 + """Total probability mass of tokens to consider at each step.""" + model_kwargs: Dict[str, Any] = Field(default_factory=dict) + """Holds any model parameters valid for `create` call not explicitly specified.""" + minimax_api_host: str = Field( + default="https://api.minimax.chat/v1/text/chatcompletion_v2", alias="base_url" + ) + minimax_group_id: Optional[str] = Field(default=None, alias="group_id") + """[DEPRECATED, keeping it for for backward compatibility] Group Id""" + minimax_api_key: Optional[SecretStr] = Field(default=None, alias="api_key") + """Minimax API Key""" + streaming: bool = False + """Whether to stream the results or not.""" + + class Config: + """Configuration for this pydantic object.""" + + allow_population_by_field_name = True + + @root_validator(allow_reuse=True) + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + values["minimax_api_key"] = convert_to_secret_str( + get_from_dict_or_env(values, "minimax_api_key", "MINIMAX_API_KEY") + ) + values["minimax_group_id"] = get_from_dict_or_env( + values, "minimax_group_id", "MINIMAX_GROUP_ID" + ) + # Get custom api url from environment. 
+ values["minimax_api_host"] = get_from_dict_or_env( + values, + "minimax_api_host", + "MINIMAX_API_HOST", + values["minimax_api_host"], + ) + return values + + def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult: + generations = [] + if not isinstance(response, dict): + response = response.dict() + for res in response["choices"]: + message = _convert_dict_to_message(res["message"]) + generation_info = dict(finish_reason=res.get("finish_reason")) + generations.append( + ChatGeneration(message=message, generation_info=generation_info) + ) + token_usage = response.get("usage", {}) + llm_output = { + "token_usage": token_usage, + "model_name": self.model, + } + return ChatResult(generations=generations, llm_output=llm_output) + + def _create_payload_parameters( # type: ignore[no-untyped-def] + self, messages: List[BaseMessage], is_stream: bool = False, **kwargs + ) -> Dict[str, Any]: + """Create API request body parameters.""" + message_dicts = [_convert_message_to_dict(m) for m in messages] + payload = self._default_params + payload["messages"] = message_dicts + payload.update(**kwargs) + if is_stream: + payload["stream"] = True + + return payload + def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: """Generate next turn in the conversation. @@ -64,6 +215,7 @@ def _generate( does not support context. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. + stream: Whether to stream the results or not. Returns: The ChatResult that contains outputs generated by the model. @@ -75,22 +227,148 @@ def _generate( raise ValueError( "You should provide at least one message to start the chat!" 
) - history = _parse_chat_history(messages) - payload = self._default_params - payload["messages"] = history - text = self._client.post(payload) + is_stream = stream if stream is not None else self.streaming + if is_stream: + stream_iter = self._stream( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + return generate_from_stream(stream_iter) + payload = self._create_payload_parameters(messages, **kwargs) + api_key = "" + if self.minimax_api_key is not None: + api_key = self.minimax_api_key.get_secret_value() + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + import httpx - # This is required since the stop are not enforced by the model parameters - text = text if stop is None else enforce_stop_tokens(text, stop) - return ChatResult(generations=[ChatGeneration(message=AIMessage(text))]) # type: ignore[misc] + with httpx.Client(headers=headers, timeout=60) as client: + response = client.post(self.minimax_api_host, json=payload) + response.raise_for_status() + + return self._create_chat_result(response.json()) + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + """Stream the chat response in chunks.""" + payload = self._create_payload_parameters(messages, is_stream=True, **kwargs) + api_key = "" + if self.minimax_api_key is not None: + api_key = self.minimax_api_key.get_secret_value() + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + import httpx + + with httpx.Client(headers=headers, timeout=60) as client: + with connect_httpx_sse( + client, "POST", self.minimax_api_host, json=payload + ) as event_source: + for sse in event_source.iter_sse(): + chunk = json.loads(sse.data) + if len(chunk["choices"]) == 0: + continue + choice = chunk["choices"][0] + chunk = _convert_delta_to_message_chunk( + choice["delta"], AIMessageChunk + ) + finish_reason = choice.get("finish_reason", None) + + generation_info = ( + {"finish_reason": finish_reason} + if finish_reason is not None + else None + ) + chunk = ChatGenerationChunk( + message=chunk, generation_info=generation_info + ) + yield chunk + if run_manager: + run_manager.on_llm_new_token(chunk.text, chunk=chunk) + if finish_reason is not None: + break async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: - raise NotImplementedError( - """Minimax AI doesn't support async requests at the moment.""" - ) + if not messages: + raise ValueError( + "You should provide at least one message to start the chat!" 
+ ) + is_stream = stream if stream is not None else self.streaming + if is_stream: + stream_iter = self._astream( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + return await agenerate_from_stream(stream_iter) + payload = self._create_payload_parameters(messages, **kwargs) + api_key = "" + if self.minimax_api_key is not None: + api_key = self.minimax_api_key.get_secret_value() + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + import httpx + + async with httpx.AsyncClient(headers=headers, timeout=60) as client: + response = await client.post(self.minimax_api_host, json=payload) + response.raise_for_status() + return self._create_chat_result(response.json()) + + async def _astream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[ChatGenerationChunk]: + payload = self._create_payload_parameters(messages, is_stream=True, **kwargs) + api_key = "" + if self.minimax_api_key is not None: + api_key = self.minimax_api_key.get_secret_value() + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + import httpx + + async with httpx.AsyncClient(headers=headers, timeout=60) as client: + async with aconnect_httpx_sse( + client, "POST", self.minimax_api_host, json=payload + ) as event_source: + async for sse in event_source.aiter_sse(): + chunk = json.loads(sse.data) + if len(chunk["choices"]) == 0: + continue + choice = chunk["choices"][0] + chunk = _convert_delta_to_message_chunk( + choice["delta"], AIMessageChunk + ) + finish_reason = choice.get("finish_reason", None) + + generation_info = ( + {"finish_reason": finish_reason} + if finish_reason is not None + else None + ) + chunk = ChatGenerationChunk( + message=chunk, generation_info=generation_info + ) + yield chunk + if run_manager: + await run_manager.on_llm_new_token(chunk.text, chunk=chunk) + if finish_reason is not None: + break From c599732e1add8d86f887e47d83a8a3e831fdb4e7 Mon Sep 17 00:00:00 2001 From: Tom Clelford Date: Mon, 3 Jun 2024 21:26:59 +0100 Subject: [PATCH 41/54] text-splitters[patch]: fix HTMLSectionSplitter parsing of xslt paths (#22176) ## Description This PR allows passing the HTMLSectionSplitter paths to xslt files. It does so by fixing two trivial bugs with how passed paths were being handled. It also changes the default value of the param `xslt_path` to `None` so the special case where the file was part of the langchain package could be handled. 
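For reference, a minimal sketch of how a caller might now pass a custom stylesheet (the header mapping and file path below are illustrative, not taken from this PR):

```python
from langchain_text_splitters import HTMLSectionSplitter

# Default behavior: with xslt_path=None, the splitter falls back to the
# converting_to_header.xslt file bundled with the package.
default_splitter = HTMLSectionSplitter(
    headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")],
)

# With this fix, a relative or absolute path to a custom xslt file also works.
custom_splitter = HTMLSectionSplitter(
    headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")],
    xslt_path="stylesheets/my_stylesheet.xslt",  # hypothetical path
)
sections = custom_splitter.split_text(
    "<html><body><h1>Foo</h1><p>Some text.</p></body></html>"
)
```

Both relative and absolute paths are normalized to absolute paths internally, so the lxml transform no longer depends on the current working directory.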
## Issue #22175 --- .../langchain_text_splitters/html.py | 20 ++++++------ .../tests/test_data/test_splitter.xslt | 9 ++++++ .../tests/unit_tests/test_text_splitters.py | 31 +++++++++++++++++++ 3 files changed, 50 insertions(+), 10 deletions(-) create mode 100644 libs/text-splitters/tests/test_data/test_splitter.xslt diff --git a/libs/text-splitters/langchain_text_splitters/html.py b/libs/text-splitters/langchain_text_splitters/html.py index 6ad27314c02b8..89113313967e8 100644 --- a/libs/text-splitters/langchain_text_splitters/html.py +++ b/libs/text-splitters/langchain_text_splitters/html.py @@ -1,7 +1,6 @@ from __future__ import annotations import copy -import os import pathlib from io import BytesIO, StringIO from typing import Any, Dict, Iterable, List, Optional, Tuple, TypedDict, cast @@ -173,7 +172,7 @@ class HTMLSectionSplitter: def __init__( self, headers_to_split_on: List[Tuple[str, str]], - xslt_path: str = "xsl/converting_to_header.xslt", + xslt_path: Optional[str] = None, **kwargs: Any, ) -> None: """Create a new HTMLSectionSplitter. @@ -183,10 +182,17 @@ def __init__( (arbitrary) keys for metadata. Allowed header values: h1, h2, h3, h4, h5, h6 e.g. [("h1", "Header 1"), ("h2", "Header 2"]. xslt_path: path to xslt file for document transformation. + Uses a default if not passed. Needed for html contents that using different format and layouts. """ self.headers_to_split_on = dict(headers_to_split_on) - self.xslt_path = xslt_path + + if xslt_path is None: + self.xslt_path = ( + pathlib.Path(__file__).parent / "xsl/converting_to_header.xslt" + ).absolute() + else: + self.xslt_path = pathlib.Path(xslt_path).absolute() self.kwargs = kwargs def split_documents(self, documents: Iterable[Document]) -> List[Document]: @@ -284,13 +290,7 @@ def convert_possible_tags_to_header(self, html_content: str) -> str: parser = etree.HTMLParser() tree = etree.parse(StringIO(html_content), parser) - # document transformation for "structure-aware" chunking is handled with xsl. - # this is needed for htmls files that using different font sizes and layouts - # check to see if self.xslt_path is a relative path or absolute path - if not os.path.isabs(self.xslt_path): - xslt_path = pathlib.Path(__file__).parent / self.xslt_path - - xslt_tree = etree.parse(xslt_path) + xslt_tree = etree.parse(self.xslt_path) transform = etree.XSLT(xslt_tree) result = transform(tree) return str(result) diff --git a/libs/text-splitters/tests/test_data/test_splitter.xslt b/libs/text-splitters/tests/test_data/test_splitter.xslt new file mode 100644 index 0000000000000..cbb5828bf1242 --- /dev/null +++ b/libs/text-splitters/tests/test_data/test_splitter.xslt @@ -0,0 +1,9 @@ + + + + + + + + \ No newline at end of file diff --git a/libs/text-splitters/tests/unit_tests/test_text_splitters.py b/libs/text-splitters/tests/unit_tests/test_text_splitters.py index 062f4d089d145..9f9c76b98d74d 100644 --- a/libs/text-splitters/tests/unit_tests/test_text_splitters.py +++ b/libs/text-splitters/tests/unit_tests/test_text_splitters.py @@ -1619,6 +1619,37 @@ def test_happy_path_splitting_based_on_header_with_whitespace_chars() -> None: assert docs[2].metadata["Header 2"] == "Baz" +@pytest.mark.requires("lxml") +@pytest.mark.requires("bs4") +def test_section_splitter_accepts_a_relative_path() -> None: + html_string = """

<html><body>
<h1>Foo</h1>
</body></html>

""" + test_file = Path("tests/test_data/test_splitter.xslt") + assert test_file.is_file() + + sec_splitter = HTMLSectionSplitter( + headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")], + xslt_path=test_file.as_posix(), + ) + + sec_splitter.split_text(html_string) + + +@pytest.mark.requires("lxml") +@pytest.mark.requires("bs4") +def test_section_splitter_accepts_an_absolute_path() -> None: + html_string = """

<html><body>
<h1>Foo</h1>
</body></html>

""" + test_file = Path("tests/test_data/test_splitter.xslt").absolute() + assert test_file.is_absolute() + assert test_file.is_file() + + sec_splitter = HTMLSectionSplitter( + headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")], + xslt_path=test_file.as_posix(), + ) + + sec_splitter.split_text(html_string) + + def test_split_json() -> None: """Test json text splitter""" max_chunk = 800 From 52da6a160d9930900dc14144c21df414614dcdfc Mon Sep 17 00:00:00 2001 From: Ethan Yang Date: Tue, 4 Jun 2024 04:27:17 +0800 Subject: [PATCH 42/54] community[patch]: Update OpenVINO embedding and reranker to support static input shape (#22171) It can help to deploy embedding models on NPU device --- .../document_compressors/openvino_rerank.py | 16 +++++++++++++--- .../langchain_community/embeddings/openvino.py | 17 ++++++++++++++--- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/libs/community/langchain_community/document_compressors/openvino_rerank.py b/libs/community/langchain_community/document_compressors/openvino_rerank.py index b5222a2dcd25e..24fb92673010d 100644 --- a/libs/community/langchain_community/document_compressors/openvino_rerank.py +++ b/libs/community/langchain_community/document_compressors/openvino_rerank.py @@ -114,9 +114,19 @@ def rerank(self, request: Any) -> Any: passages = request.passages query_passage_pairs = [[query, passage["text"]] for passage in passages] - input_tensors = self.tokenizer( - query_passage_pairs, padding=True, truncation=True, return_tensors="pt" - ) + length = self.ov_model.request.inputs[0].get_partial_shape()[1] + if length.is_dynamic: + input_tensors = self.tokenizer( + query_passage_pairs, padding=True, truncation=True, return_tensors="pt" + ) + else: + input_tensors = self.tokenizer( + query_passage_pairs, + padding="max_length", + max_length=length.get_length(), + truncation=True, + return_tensors="pt", + ) outputs = self.ov_model(**input_tensors, return_dict=True) if outputs[0].shape[1] > 1: diff --git a/libs/community/langchain_community/embeddings/openvino.py b/libs/community/langchain_community/embeddings/openvino.py index 7e4d52fd59212..dd4e939a89d23 100644 --- a/libs/community/langchain_community/embeddings/openvino.py +++ b/libs/community/langchain_community/embeddings/openvino.py @@ -210,9 +210,20 @@ def run_mean_pooling(model_output: Any, attention_mask: Any) -> Any: 0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar ): sentences_batch = sentences_sorted[start_index : start_index + batch_size] - features = self.tokenizer( - sentences_batch, padding=True, truncation=True, return_tensors="pt" - ) + + length = self.ov_model.request.inputs[0].get_partial_shape()[1] + if length.is_dynamic: + features = self.tokenizer( + sentences_batch, padding=True, truncation=True, return_tensors="pt" + ) + else: + features = self.tokenizer( + sentences_batch, + padding="max_length", + max_length=length.get_length(), + truncation=True, + return_tensors="pt", + ) out_features = self.ov_model(**features) if mean_pooling: From 5119ab2fb99fce80191ccd4c0474c3fdbc76f980 Mon Sep 17 00:00:00 2001 From: Isaac Francisco <78627776+isahers1@users.noreply.github.com> Date: Mon, 3 Jun 2024 14:40:01 -0700 Subject: [PATCH 43/54] docs: agents tutorial wording (#22447) --- docs/docs/tutorials/agents.ipynb | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/docs/tutorials/agents.ipynb b/docs/docs/tutorials/agents.ipynb index 76d77d7cb1b2a..c06fcc71d90f5 100644 --- 
a/docs/docs/tutorials/agents.ipynb +++ b/docs/docs/tutorials/agents.ipynb @@ -181,7 +181,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "id": "482ce13d", "metadata": {}, "outputs": [ @@ -228,7 +228,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "id": "69185491", "metadata": {}, "outputs": [], @@ -251,7 +251,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "id": "c96c960b", "metadata": {}, "outputs": [ @@ -283,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "id": "ba692a74", "metadata": {}, "outputs": [], @@ -301,7 +301,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "id": "b6a7e925", "metadata": {}, "outputs": [ @@ -331,7 +331,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "id": "688b465d", "metadata": {}, "outputs": [ @@ -339,8 +339,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "ContentString: [{'id': 'toolu_01TSdZjtqppPVYyvrYvsok6d', 'input': {'query': 'san francisco weather'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]\n", - "ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'san francisco weather'}, 'id': 'toolu_01TSdZjtqppPVYyvrYvsok6d'}]\n" + "ContentString: [{'id': 'toolu_01VTP7DUvSfgtYxsq9x4EwMp', 'input': {'query': 'weather san francisco'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]\n", + "ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'weather san francisco'}, 'id': 'toolu_01VTP7DUvSfgtYxsq9x4EwMp'}]\n" ] } ], @@ -356,7 +356,7 @@ "id": "83c4bcd3", "metadata": {}, "source": [ - "We can see that there's now no content, but there is a tool call! It wants us to call the Tavily Search tool.\n", + "We can see that there's now no text content, but there is a tool call! It wants us to call the Tavily Search tool.\n", "\n", "This isn't calling that tool yet - it's just telling us to. In order to actually call it, we'll want to create our agent."
] @@ -745,7 +745,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.3" } }, "nbformat": 4, From afe89a141107f0af79446533614386a4459510bc Mon Sep 17 00:00:00 2001 From: ccurme Date: Mon, 3 Jun 2024 17:45:03 -0400 Subject: [PATCH 44/54] community: add standard chat model params to Ollama (#22446) --- .../langchain_community/chat_models/ollama.py | 19 +++++++++- .../unit_tests/chat_models/test_ollama.py | 35 +++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 libs/community/tests/unit_tests/chat_models/test_ollama.py diff --git a/libs/community/langchain_community/chat_models/ollama.py b/libs/community/langchain_community/chat_models/ollama.py index 5e5a98f8ae079..b0fd0944ee38e 100644 --- a/libs/community/langchain_community/chat_models/ollama.py +++ b/libs/community/langchain_community/chat_models/ollama.py @@ -6,7 +6,7 @@ AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) -from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.language_models.chat_models import BaseChatModel, LangSmithParams from langchain_core.messages import ( AIMessage, AIMessageChunk, @@ -69,6 +69,23 @@ def is_lc_serializable(cls) -> bool: """Return whether this model can be serialized by Langchain.""" return False + def _get_ls_params( + self, stop: Optional[List[str]] = None, **kwargs: Any + ) -> LangSmithParams: + """Get standard params for tracing.""" + params = self._get_invocation_params(stop=stop, **kwargs) + ls_params = LangSmithParams( + ls_provider="ollama", + ls_model_name=self.model, + ls_model_type="chat", + ls_temperature=params.get("temperature", self.temperature), + ) + if ls_max_tokens := params.get("num_predict", self.num_predict): + ls_params["ls_max_tokens"] = ls_max_tokens + if ls_stop := stop or params.get("stop", None) or self.stop: + ls_params["ls_stop"] = ls_stop + return ls_params + @deprecated("0.0.3", alternative="_convert_messages_to_ollama_messages") def _format_message_as_text(self, message: BaseMessage) -> str: if isinstance(message, ChatMessage): diff --git a/libs/community/tests/unit_tests/chat_models/test_ollama.py b/libs/community/tests/unit_tests/chat_models/test_ollama.py new file mode 100644 index 0000000000000..a99049345acdb --- /dev/null +++ b/libs/community/tests/unit_tests/chat_models/test_ollama.py @@ -0,0 +1,35 @@ +from typing import List, Literal, Optional + +import pytest +from langchain_core.pydantic_v1 import BaseModel, ValidationError + +from langchain_community.chat_models import ChatOllama + + +def test_standard_params() -> None: + class ExpectedParams(BaseModel): + ls_provider: str + ls_model_name: str + ls_model_type: Literal["chat"] + ls_temperature: Optional[float] + ls_max_tokens: Optional[int] + ls_stop: Optional[List[str]] + + model = ChatOllama(model="llama3") + ls_params = model._get_ls_params() + try: + ExpectedParams(**ls_params) + except ValidationError as e: + pytest.fail(f"Validation error: {e}") + assert ls_params["ls_model_name"] == "llama3" + + # Test optional params + model = ChatOllama(num_predict=10, stop=["test"], temperature=0.33) + ls_params = model._get_ls_params() + try: + ExpectedParams(**ls_params) + except ValidationError as e: + pytest.fail(f"Validation error: {e}") + assert ls_params["ls_max_tokens"] == 10 + assert ls_params["ls_stop"] == ["test"] + assert ls_params["ls_temperature"] == 0.33 From f397a84a599c2c3c2d9d9c72bbee5589b4348475 Mon Sep 17 00:00:00 2001 From: bhardwaj-vipul 
<80268362+bhardwaj-vipul@users.noreply.github.com>
Date: Tue, 4 Jun 2024 03:40:15 +0530
Subject: [PATCH 45/54] langchain[patch]: Fix MongoDBAtlasVectorSearch reference in self query retriever (#22401)

**Description:** SelfQuery Retriever with MongoDBAtlasVectorSearch (from
langchain_mongodb import MongoDBAtlasVectorSearch) and Chroma (from
langchain_chroma import Chroma) are not supported. The imports in the
[builtin translators](https://github.com/langchain-ai/langchain/blob/8cbce684d4ec861cfd45edc4585365db81b93afd/libs/langchain/langchain/retrievers/self_query/base.py#L73)
point to the
[deprecated](https://github.com/langchain-ai/langchain/blob/acaf214a4516a2ffbd2817f553f4d48e6a908695/libs/community/langchain_community/vectorstores/mongodb_atlas.py#L36)
vectorstores.
**Issue:** #22272

---------

Co-authored-by: Bagatur
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
---
 .../langchain/retrievers/self_query/base.py | 28 ++++++++++++++++---
 1 file changed, 24 insertions(+), 4 deletions(-)

diff --git a/libs/langchain/langchain/retrievers/self_query/base.py b/libs/langchain/langchain/retrievers/self_query/base.py
index ce6dc6b68d8f5..092478f840bc1 100644
--- a/libs/langchain/langchain/retrievers/self_query/base.py
+++ b/libs/langchain/langchain/retrievers/self_query/base.py
@@ -64,13 +64,11 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
     from langchain_community.query_constructors.weaviate import WeaviateTranslator
     from langchain_community.vectorstores import (
         AstraDB,
-        Chroma,
         DashVector,
         DatabricksVectorSearch,
         DeepLake,
         Dingo,
         Milvus,
-        MongoDBAtlasVectorSearch,
         MyScale,
         OpenSearchVectorSearch,
         PGVector,
@@ -82,9 +80,15 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
         Vectara,
         Weaviate,
     )
+    from langchain_community.vectorstores import (
+        Chroma as CommunityChroma,
+    )
     from langchain_community.vectorstores import (
         ElasticsearchStore as ElasticsearchStoreCommunity,
     )
+    from langchain_community.vectorstores import (
+        MongoDBAtlasVectorSearch as CommunityMongoDBAtlasVectorSearch,
+    )
     from langchain_community.vectorstores import (
         Pinecone as CommunityPinecone,
     )
@@ -93,7 +97,7 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
         AstraDB: AstraDBTranslator,
         PGVector: PGVectorTranslator,
         CommunityPinecone: PineconeTranslator,
-        Chroma: ChromaTranslator,
+        CommunityChroma: ChromaTranslator,
         DashVector: DashvectorTranslator,
         Dingo: DingoDBTranslator,
         Weaviate: WeaviateTranslator,
@@ -106,7 +110,7 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
         SupabaseVectorStore: SupabaseVectorTranslator,
         TimescaleVector: TimescaleVectorTranslator,
         OpenSearchVectorSearch: OpenSearchTranslator,
-        MongoDBAtlasVectorSearch: MongoDBAtlasTranslator,
+        CommunityMongoDBAtlasVectorSearch: MongoDBAtlasTranslator,
     }
     if isinstance(vectorstore, DatabricksVectorSearch):
         return DatabricksVectorSearchTranslator()
@@ -148,6 +152,22 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
     if isinstance(vectorstore, PineconeVectorStore):
         return PineconeTranslator()
 
+    try:
+        from langchain_mongodb import MongoDBAtlasVectorSearch
+    except ImportError:
+        pass
+    else:
+        if isinstance(vectorstore, MongoDBAtlasVectorSearch):
+            return MongoDBAtlasTranslator()
+
+    try:
+        from langchain_chroma import Chroma
+    except ImportError:
+        pass
+    else:
+        if isinstance(vectorstore, Chroma):
+            return ChromaTranslator()
+
    raise ValueError(
        f"Self query retriever with Vector Store type {vectorstore.__class__}"
        f" not supported."
From f78ae1d932edbf9bea1d65c369ca362691d0f2c7 Mon Sep 17 00:00:00 2001 From: Stefano Lottini Date: Tue, 4 Jun 2024 00:13:57 +0200 Subject: [PATCH 46/54] docs: Astra DB vectorstore, add automatic-embedding example (#22350) Description: Adding an example showcasing the newly-introduced API-side embedding computation option for the Astra DB vector store --- .../integrations/vectorstores/astradb.ipynb | 131 +++++++++++++----- 1 file changed, 98 insertions(+), 33 deletions(-) diff --git a/docs/docs/integrations/vectorstores/astradb.ipynb b/docs/docs/integrations/vectorstores/astradb.ipynb index b185df23f1cda..a4f0c7c1bc811 100644 --- a/docs/docs/integrations/vectorstores/astradb.ipynb +++ b/docs/docs/integrations/vectorstores/astradb.ipynb @@ -23,8 +23,6 @@ "id": "d2d6ca14-fb7e-4172-9aa0-a3119a064b96", "metadata": {}, "source": [ - "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", - "\n", "_Note: in addition to access to the database, an OpenAI API Key is required to run the full example._" ] }, @@ -51,7 +49,7 @@ "metadata": {}, "outputs": [], "source": [ - "pip install --upgrade langchain-astradb" + "pip install -qU langchain-astradb" ] }, { @@ -59,7 +57,7 @@ "id": "2453d83a-bc8f-41e1-a692-befe4dd90156", "metadata": {}, "source": [ - "_**Note.** the following are all packages required to run the full demo on this page. Depending on your LangChain setup, some of them may need to be installed:_" + "_Make sure you have installed the packages required to run all of this demo:_" ] }, { @@ -69,7 +67,7 @@ "metadata": {}, "outputs": [], "source": [ - "pip install langchain langchain-openai datasets pypdf" + "pip install -qU langchain langchain-community langchain-openai datasets pypdf" ] }, { @@ -90,9 +88,8 @@ "import os\n", "from getpass import getpass\n", "\n", - "from datasets import (\n", - " load_dataset,\n", - ")\n", + "from astrapy.info import CollectionVectorServiceOptions\n", + "from datasets import load_dataset\n", "from langchain_community.document_loaders import PyPDFLoader\n", "from langchain_core.documents import Document\n", "from langchain_core.output_parsers import StrOutputParser\n", @@ -102,26 +99,6 @@ "from langchain_text_splitters import RecursiveCharacterTextSplitter" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "1983f1da-0ae7-4a9b-bf4c-4ade328f7a3a", - "metadata": {}, - "outputs": [], - "source": [ - "os.environ[\"OPENAI_API_KEY\"] = getpass(\"OPENAI_API_KEY = \")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c656df06-e938-4bc5-b570-440b8b7a0189", - "metadata": {}, - "outputs": [], - "source": [ - "embe = OpenAIEmbeddings()" - ] - }, { "cell_type": "markdown", "id": "22866f09-e10d-4f05-a24b-b9420129462e", @@ -145,7 +122,7 @@ "id": "68f61b01-3e09-47c1-9d67-5d6915c86626", "metadata": {}, "source": [ - "## Connection parameters\n", + "## DB Connection parameters\n", "\n", "These are found on your Astra DB dashboard:\n", "\n", @@ -173,7 +150,53 @@ }, { "cell_type": "markdown", - "id": "196268bd-a950-41c3-bede-f5b55f6a0804", + "id": "84a1fe85-a42c-4f15-92e1-f79f1dd43ea2", + "metadata": {}, + "source": [ + "## Create the vector store\n", + "\n", + "There are two ways to create an Astra DB vector store, which differ in how the embeddings are computed.\n", + "\n", + "*Explicit embeddings*. 
You can separately instantiate a `langchain_core.embeddings.Embeddings` class and pass it to the `AstraDBVectorStore` constructor, just like with most other LangChain vector stores.\n", + "\n", + "*Integrated embedding computation*. Alternatively, you can use the [Vectorize](https://www.datastax.com/blog/simplifying-vector-embedding-generation-with-astra-vectorize) feature of Astra DB and simply specify the name of a supported embedding model when creating the store. The embedding computations are entirely handled within the database. (To proceed with this method, you must have enabled the desired embedding integration for your database, as described [in the docs](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html).)\n", + "\n", + "**Please choose one method and run the corresponding cells only.**" + ] + }, + { + "cell_type": "markdown", + "id": "8c435386-e8d5-41f4-a9e5-7b609ef781f9", + "metadata": {}, + "source": [ + "### Method 1: provide embeddings explicitly\n", + "\n", + "This demo will use an OpenAI embedding model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dfa5c005-9738-4c53-b8a8-8540fcbb8bad", + "metadata": {}, + "outputs": [], + "source": [ + "os.environ[\"OPENAI_API_KEY\"] = getpass(\"OPENAI_API_KEY = \")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3accae6f-73e2-483a-83f7-76eb33558a1f", + "metadata": {}, + "outputs": [], + "source": [ + "my_embeddings = OpenAIEmbeddings()" + ] + }, + { + "cell_type": "markdown", + "id": "465b1b16-5363-4c4f-9917-a49e02a86c14", "metadata": {}, "source": [ "Now you can create the vector store:" @@ -187,7 +210,7 @@ "outputs": [], "source": [ "vstore = AstraDBVectorStore(\n", - " embedding=embe,\n", + " embedding=my_embeddings,\n", " collection_name=\"astra_vector_demo\",\n", " api_endpoint=ASTRA_DB_API_ENDPOINT,\n", " token=ASTRA_DB_APPLICATION_TOKEN,\n", @@ -195,6 +218,46 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "5d5d2bfa-c071-4a5b-8b6e-3daa1b6de164", + "metadata": {}, + "source": [ + "### Method 2: use Astra Vectorize (embeddings integrated in Astra DB)\n", + "\n", + "Here it is assumed that you have\n", + "\n", + "- enabled the OpenAI integration in your Astra DB organization,\n", + "- added an API Key named `\"MY_OPENAI_API_KEY\"` to the integration, and\n", + "- scoped it to the database you are using.\n", + "\n", + "For more details please consult the [documentation](https://docs.datastax.com/en/astra-db-serverless/integrations/embedding-providers/openai.html)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d18455d-3fa6-4f9e-b687-3a2bc71c9a23", + "metadata": {}, + "outputs": [], + "source": [ + "openai_vectorize_options = CollectionVectorServiceOptions(\n", + " provider=\"openai\",\n", + " model_name=\"text-embedding-3-small\",\n", + " authentication={\n", + " \"providerKey\": \"MY_OPENAI_API_KEY.providerKey\",\n", + " },\n", + ")\n", + "\n", + "vstore = AstraDBVectorStore(\n", + " collection_name=\"astra_vectorize_demo\",\n", + " api_endpoint=ASTRA_DB_API_ENDPOINT,\n", + " token=ASTRA_DB_APPLICATION_TOKEN,\n", + " namespace=ASTRA_DB_KEYSPACE,\n", + " collection_vector_service_options=openai_vectorize_options,\n", + ")" + ] + }, { "cell_type": "markdown", "id": "9a348678-b2f6-46ca-9a0d-2eb4cc6b66b1", @@ -334,7 +397,9 @@ "id": "b14ea558-bfbe-41ce-807e-d70670060ada", "metadata": {}, "source": [ - "### MMR (Maximal-marginal-relevance) search" + "### MMR (Maximal-marginal-relevance) search\n", + "\n", + "_Note: the MMR search method is not (yet) supported for vector stores built with Astra Vectorize._" ] }, { @@ -537,7 +602,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.18" + "version": "3.11.2" } }, "nbformat": 4, From cbd5720011e1b07306f6941861c5527a058677bb Mon Sep 17 00:00:00 2001 From: Miroslav Date: Mon, 3 Jun 2024 17:20:32 -0500 Subject: [PATCH 47/54] huggingface[patch]: Skip Login to HuggingFaceHub when token is not set (#22365) --- .../llms/huggingface_endpoint.py | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py index f9bde4bd49955..27decd5374e32 100644 --- a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py +++ b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py @@ -1,5 +1,6 @@ import json import logging +import os from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional from langchain_core.callbacks import ( @@ -9,7 +10,7 @@ from langchain_core.language_models.llms import LLM from langchain_core.outputs import GenerationChunk from langchain_core.pydantic_v1 import Extra, Field, root_validator -from langchain_core.utils import get_from_dict_or_env, get_pydantic_field_names +from langchain_core.utils import get_pydantic_field_names logger = logging.getLogger(__name__) @@ -167,16 +168,17 @@ def validate_environment(cls, values: Dict) -> Dict: "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) - try: - huggingfacehub_api_token = get_from_dict_or_env( - values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" - ) - login(token=huggingfacehub_api_token) - except Exception as e: - raise ValueError( - "Could not authenticate with huggingface_hub. " - "Please check your API token." - ) from e + huggingfacehub_api_token = values["huggingfacehub_api_token"] or os.getenv( + "HUGGINGFACEHUB_API_TOKEN" + ) + if huggingfacehub_api_token is not None: + try: + login(token=huggingfacehub_api_token) + except Exception as e: + raise ValueError( + "Could not authenticate with huggingface_hub. " + "Please check your API token." 
+                ) from e

 from huggingface_hub import AsyncInferenceClient, InferenceClient

From ef3df45d9da9dd7597266fb765672e9268700199 Mon Sep 17 00:00:00 2001
From: Dristy Srivastava <58721149+dristysrivastava@users.noreply.github.com>
Date: Tue, 4 Jun 2024 04:06:17 +0530
Subject: [PATCH 48/54] community[minor]: Updating payload for pebblo discover API (#22309)

**Description:** Updating response handling for the pebblo discover API. Also updating field name casing.
**Documentation:** N/A
**Unit tests:** N/A

---
 .../langchain_community/document_loaders/pebblo.py | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/libs/community/langchain_community/document_loaders/pebblo.py b/libs/community/langchain_community/document_loaders/pebblo.py
index 8a710a9a5b2c4..ed203b2c88d75 100644
--- a/libs/community/langchain_community/document_loaders/pebblo.py
+++ b/libs/community/langchain_community/document_loaders/pebblo.py
@@ -327,17 +327,16 @@ def _send_discover(self) -> None:
         try:
             headers.update({"x-api-key": self.api_key})
             if pebblo_resp:
-                pebblo_resp_docs = json.loads(pebblo_resp.text).get("ai_apps_data")
+                pebblo_server_version = json.loads(pebblo_resp.text).get(
+                    "pebblo_server_version"
+                )
                 payload.update(
                     {
-                        "pebblo_server_version": pebblo_resp_docs.get(
-                            "pebbloServerVersion"
-                        ),
-                        "pebblo_client_version": pebblo_resp_docs.get(
-                            "pebbloClientVersion"
-                        ),
+                        "pebblo_server_version": pebblo_server_version,
+                        "pebblo_client_version": payload["plugin_version"],
                     }
                 )
+                payload.pop("plugin_version")
                 pebblo_cloud_url = f"{PEBBLO_CLOUD_URL}{APP_DISCOVER_URL}"
                 pebblo_cloud_response = requests.post(
                     pebblo_cloud_url, headers=headers, json=payload, timeout=20

From f2dd31b9e81c33941a1ad9ba55eed6b8aa142bc7 Mon Sep 17 00:00:00 2001
From: Martin Kolb
Date: Tue, 4 Jun 2024 00:53:43 +0200
Subject: [PATCH 49/54] docs: Fix doc issue for HANA Cloud Vector Engine (#22260)

- **Description:** This PR fixes a rendering issue in the docs (Python notebook) of HANA Cloud Vector Engine.
- **Issue:** N/A
- **Dependencies:** no new dependencies added

File of the fixed notebook:
`docs/docs/integrations/vectorstores/hanavector.ipynb`

---
 docs/docs/integrations/vectorstores/sap_hanavector.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/docs/integrations/vectorstores/sap_hanavector.ipynb b/docs/docs/integrations/vectorstores/sap_hanavector.ipynb
index e6f7c0da455d8..1e8dc1b55295b 100644
--- a/docs/docs/integrations/vectorstores/sap_hanavector.ipynb
+++ b/docs/docs/integrations/vectorstores/sap_hanavector.ipynb
@@ -584,9 +584,9 @@
    "You are an expert in state of the union topics.
You are provided multiple context items that are related to the prompt you have to answer.\n",
    "Use the following pieces of context to answer the question at the end.\n",
    "\n",
-    "```\n",
+    "'''\n",
    "{context}\n",
-    "```\n",
+    "'''\n",
    "\n",
    "Question: {question}\n",
    "\"\"\"\n",

From bc7e32f315bc9d75531eb01d9c6cb155c464bc91 Mon Sep 17 00:00:00 2001
From: Guangdong Liu
Date: Tue, 4 Jun 2024 07:22:42 +0800
Subject: [PATCH 50/54] core(patch): fix partial_variables not working with SystemMessagePromptTemplate (#20711)

- **Issue:** close #17560
- @baskaryan, @eyurtsev

---
 libs/core/langchain_core/prompts/chat.py     | 11 ++-
 .../tests/unit_tests/prompts/test_chat.py    | 73 +++++++++++++++++++
 2 files changed, 83 insertions(+), 1 deletion(-)

diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py
index eea59202cfc70..b43c4b07e1860 100644
--- a/libs/core/langchain_core/prompts/chat.py
+++ b/libs/core/langchain_core/prompts/chat.py
@@ -407,6 +407,8 @@ def from_template(
         cls: Type[_StringImageMessagePromptTemplateT],
         template: Union[str, List[Union[str, _TextTemplateParam, _ImageTemplateParam]]],
         template_format: str = "f-string",
+        *,
+        partial_variables: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> _StringImageMessagePromptTemplateT:
         """Create a class from a string template.
@@ -414,6 +416,7 @@ def from_template(
         Args:
             template: a template.
             template_format: format of the template.
+            partial_variables: A dictionary of variables that can be used to partially fill the template.
             **kwargs: keyword arguments to pass to the constructor.
 
         Returns:
@@ -421,10 +424,16 @@ def from_template(
         """
         if isinstance(template, str):
             prompt: Union[StringPromptTemplate, List] = PromptTemplate.from_template(
-                template, template_format=template_format
+                template,
+                template_format=template_format,
+                partial_variables=partial_variables,
             )
             return cls(prompt=prompt, **kwargs)
         elif isinstance(template, list):
+            if (partial_variables is not None) and len(partial_variables) > 0:
+                raise ValueError(
+                    "Partial variables are not supported for list of templates."
+ ) prompt = [] for tmpl in template: if isinstance(tmpl, str) or isinstance(tmpl, dict) and "text" in tmpl: diff --git a/libs/core/tests/unit_tests/prompts/test_chat.py b/libs/core/tests/unit_tests/prompts/test_chat.py index 2cb19695e4ea1..e5fddbb4854d5 100644 --- a/libs/core/tests/unit_tests/prompts/test_chat.py +++ b/libs/core/tests/unit_tests/prompts/test_chat.py @@ -99,6 +99,79 @@ def test_create_chat_prompt_template_from_template_partial() -> None: assert output_prompt.prompt == expected_prompt +def test_create_system_message_prompt_template_from_template_partial() -> None: + """Create a system message prompt template with partials.""" + + graph_creator_content = """ + Your instructions are: + {instructions} + History: + {history} + """ + json_prompt_instructions: dict = {} + graph_analyst_template = SystemMessagePromptTemplate.from_template( + template=graph_creator_content, + input_variables=["history"], + partial_variables={"instructions": json_prompt_instructions}, + ) + assert graph_analyst_template.format(history="history") == SystemMessage( + content="\n Your instructions are:\n " + " {}\n History:\n " + "history\n " + ) + + +def test_create_system_message_prompt_list_template() -> None: + graph_creator_content1 = """ + This is the prompt for the first test: + {variables} + """ + graph_creator_content2 = """ + This is the prompt for the second test: + {variables} + """ + graph_analyst_template = SystemMessagePromptTemplate.from_template( + template=[graph_creator_content1, graph_creator_content2], + input_variables=["variables"], + ) + assert graph_analyst_template.format(variables="foo") == SystemMessage( + content=[ + { + "type": "text", + "text": "\n This is the prompt for the first test:\n foo\n ", + }, + { + "type": "text", + "text": "\n This is the prompt for " + "the second test:\n foo\n ", + }, + ] + ) + + +def test_create_system_message_prompt_list_template_partial_variables_not_null() -> ( + None +): + graph_creator_content1 = """ + This is the prompt for the first test: + {variables} + """ + graph_creator_content2 = """ + This is the prompt for the second test: + {variables} + """ + + try: + graph_analyst_template = SystemMessagePromptTemplate.from_template( + template=[graph_creator_content1, graph_creator_content2], + input_variables=["variables"], + partial_variables={"variables": "foo"}, + ) + graph_analyst_template.format(variables="foo") + except ValueError as e: + assert str(e) == "Partial variables are not supported for list of templates." 
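+    else:
+        # fail explicitly when no ValueError is raised, so the test
+        # cannot pass vacuously if from_template stops raising
+        raise AssertionError(
+            "from_template should raise ValueError when partial_variables "
+            "is provided with a list of templates."
+        )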
+ + def test_message_prompt_template_from_template_file() -> None: expected = ChatMessagePromptTemplate( prompt=PromptTemplate( From b0f014666dbd5c155ba6899027595992054094a9 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 3 Jun 2024 16:30:17 -0700 Subject: [PATCH 51/54] docs[patch]: Adds search keywords for common queries (#22449) CC @baskaryan @efriis @ccurme --- docs/docs/concepts.mdx | 4 ++++ docs/docs/how_to/character_text_splitter.ipynb | 14 ++++++++++++++ docs/docs/how_to/dynamic_chain.ipynb | 10 ---------- docs/docs/how_to/message_history.ipynb | 14 ++++++++++++++ docs/docs/how_to/recursive_text_splitter.ipynb | 14 ++++++++++++++ docs/docs/how_to/sequence.ipynb | 9 ++++++--- docs/docs/how_to/streaming.ipynb | 8 ++++++-- docs/docs/integrations/platforms/microsoft.mdx | 4 ++++ docs/docs/integrations/platforms/openai.mdx | 4 ++++ docs/docs/integrations/providers/pinecone.mdx | 4 ++++ docs/docs/integrations/text_embedding/openai.ipynb | 14 ++++++++++++++ docs/docs/tutorials/agents.ipynb | 8 ++++++-- docs/docs/tutorials/chatbot.ipynb | 7 ++++++- docs/docs/versions/v0_2/deprecations.mdx | 1 + 14 files changed, 97 insertions(+), 18 deletions(-) diff --git a/docs/docs/concepts.mdx b/docs/docs/concepts.mdx index 9a7693b867f58..253ed2b17470c 100644 --- a/docs/docs/concepts.mdx +++ b/docs/docs/concepts.mdx @@ -1,3 +1,7 @@ +--- +keywords: [prompt, documents, chatprompttemplate, prompttemplate, invoke, lcel, tool, tools, embedding, embeddings, vector, vectorstore, llm, loader, retriever, retrievers] +--- + # Conceptual guide import ThemedImage from '@theme/ThemedImage'; diff --git a/docs/docs/how_to/character_text_splitter.ipynb b/docs/docs/how_to/character_text_splitter.ipynb index 3f79bb5ca5484..ae5b712598e67 100644 --- a/docs/docs/how_to/character_text_splitter.ipynb +++ b/docs/docs/how_to/character_text_splitter.ipynb @@ -1,5 +1,19 @@ { "cells": [ + { + "cell_type": "raw", + "id": "f781411d", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [charactertextsplitter]\n", + "---" + ] + }, { "cell_type": "markdown", "id": "c3ee8d00", diff --git a/docs/docs/how_to/dynamic_chain.ipynb b/docs/docs/how_to/dynamic_chain.ipynb index b513d7ee3b876..2f8c26efd2a21 100644 --- a/docs/docs/how_to/dynamic_chain.ipynb +++ b/docs/docs/how_to/dynamic_chain.ipynb @@ -1,15 +1,5 @@ { "cells": [ - { - "cell_type": "raw", - "id": "77bf57fb-e990-45f2-8b5f-c76388b05966", - "metadata": {}, - "source": [ - "---\n", - "keywords: [LCEL]\n", - "---" - ] - }, { "cell_type": "markdown", "id": "50d57bf2-7104-4570-b3e5-90fd71e1bea1", diff --git a/docs/docs/how_to/message_history.ipynb b/docs/docs/how_to/message_history.ipynb index fbc88b00b22f2..aa89349b5274e 100644 --- a/docs/docs/how_to/message_history.ipynb +++ b/docs/docs/how_to/message_history.ipynb @@ -1,5 +1,19 @@ { "cells": [ + { + "cell_type": "raw", + "id": "8165bd4c", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [memory]\n", + "---" + ] + }, { "cell_type": "markdown", "id": "f47033eb", diff --git a/docs/docs/how_to/recursive_text_splitter.ipynb b/docs/docs/how_to/recursive_text_splitter.ipynb index 8d48ce2ff289d..6d0dc6c68473e 100644 --- a/docs/docs/how_to/recursive_text_splitter.ipynb +++ b/docs/docs/how_to/recursive_text_splitter.ipynb @@ -1,5 +1,19 @@ { "cells": [ + { + "cell_type": "raw", + "id": "52976910", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [recursivecharactertextsplitter]\n", + "---" + ] + 
}, { "cell_type": "markdown", "id": "a678d550", diff --git a/docs/docs/how_to/sequence.ipynb b/docs/docs/how_to/sequence.ipynb index 85caf9fa46c0b..45733ce52bd20 100644 --- a/docs/docs/how_to/sequence.ipynb +++ b/docs/docs/how_to/sequence.ipynb @@ -2,11 +2,14 @@ "cells": [ { "cell_type": "raw", - "metadata": {}, + "metadata": { + "vscode": { + "languageId": "raw" + } + }, "source": [ "---\n", - "sidebar_position: 0\n", - "keywords: [Runnable, Runnables, LCEL]\n", + "keywords: [Runnable, Runnables, LCEL, chain, chains, chaining]\n", "---" ] }, diff --git a/docs/docs/how_to/streaming.ipynb b/docs/docs/how_to/streaming.ipynb index 13a06637e6dbb..3ee7283bbcab2 100644 --- a/docs/docs/how_to/streaming.ipynb +++ b/docs/docs/how_to/streaming.ipynb @@ -3,10 +3,14 @@ { "cell_type": "raw", "id": "0bdb3b97-4989-4237-b43b-5943dbbd8302", - "metadata": {}, + "metadata": { + "vscode": { + "languageId": "raw" + } + }, "source": [ "---\n", - "sidebar_position: 1.5\n", + "keywords: [stream]\n", "---" ] }, diff --git a/docs/docs/integrations/platforms/microsoft.mdx b/docs/docs/integrations/platforms/microsoft.mdx index 11ef1cdd7fe58..232bf3b735fcb 100644 --- a/docs/docs/integrations/platforms/microsoft.mdx +++ b/docs/docs/integrations/platforms/microsoft.mdx @@ -1,3 +1,7 @@ +--- +keywords: [azure] +--- + # Microsoft All functionality related to `Microsoft Azure` and other `Microsoft` products. diff --git a/docs/docs/integrations/platforms/openai.mdx b/docs/docs/integrations/platforms/openai.mdx index bbcc0e46a33af..4c6520c47bbe1 100644 --- a/docs/docs/integrations/platforms/openai.mdx +++ b/docs/docs/integrations/platforms/openai.mdx @@ -1,3 +1,7 @@ +--- +keywords: [openai] +--- + # OpenAI All functionality related to OpenAI diff --git a/docs/docs/integrations/providers/pinecone.mdx b/docs/docs/integrations/providers/pinecone.mdx index decb5147fc363..6a56785d5b2b9 100644 --- a/docs/docs/integrations/providers/pinecone.mdx +++ b/docs/docs/integrations/providers/pinecone.mdx @@ -1,3 +1,7 @@ +--- +keywords: [pinecone] +--- + # Pinecone >[Pinecone](https://docs.pinecone.io/docs/overview) is a vector database with broad functionality. 
diff --git a/docs/docs/integrations/text_embedding/openai.ipynb b/docs/docs/integrations/text_embedding/openai.ipynb index effb05cd99a57..7d71663e53365 100644 --- a/docs/docs/integrations/text_embedding/openai.ipynb +++ b/docs/docs/integrations/text_embedding/openai.ipynb @@ -1,5 +1,19 @@ { "cells": [ + { + "cell_type": "raw", + "id": "ae8077b8", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [openaiembeddings]\n", + "---" + ] + }, { "cell_type": "markdown", "id": "278b6c63", diff --git a/docs/docs/tutorials/agents.ipynb b/docs/docs/tutorials/agents.ipynb index c06fcc71d90f5..7187cfbdbc0a7 100644 --- a/docs/docs/tutorials/agents.ipynb +++ b/docs/docs/tutorials/agents.ipynb @@ -3,10 +3,14 @@ { "cell_type": "raw", "id": "17546ebb", - "metadata": {}, + "metadata": { + "vscode": { + "languageId": "raw" + } + }, "source": [ "---\n", - "sidebar_position: 4\n", + "keywords: [agent, agents]\n", "---" ] }, diff --git a/docs/docs/tutorials/chatbot.ipynb b/docs/docs/tutorials/chatbot.ipynb index b340b26b5d6fb..2c92b18a6d203 100644 --- a/docs/docs/tutorials/chatbot.ipynb +++ b/docs/docs/tutorials/chatbot.ipynb @@ -2,10 +2,15 @@ "cells": [ { "cell_type": "raw", - "metadata": {}, + "metadata": { + "vscode": { + "languageId": "raw" + } + }, "source": [ "---\n", "sidebar_position: 1\n", + "keywords: [conversationchain]\n", "---" ] }, diff --git a/docs/docs/versions/v0_2/deprecations.mdx b/docs/docs/versions/v0_2/deprecations.mdx index 42025f9b5ebcf..06c37605874b9 100644 --- a/docs/docs/versions/v0_2/deprecations.mdx +++ b/docs/docs/versions/v0_2/deprecations.mdx @@ -1,6 +1,7 @@ --- sidebar_position: 3 sidebar_label: Changes +keywords: [retrievalqa, llmchain, conversationalretrievalchain] --- # Deprecations and Breaking Changes From 25cf1a74d5cec79d4cc4a7c9f4199d77459de60f Mon Sep 17 00:00:00 2001 From: Isaac Francisco <78627776+isahers1@users.noreply.github.com> Date: Mon, 3 Jun 2024 17:16:54 -0700 Subject: [PATCH 52/54] docs: rag tutorial small fixes (#22450) --- docs/docs/tutorials/rag.ipynb | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/docs/tutorials/rag.ipynb b/docs/docs/tutorials/rag.ipynb index 88110e7630da7..554a9c64091b5 100644 --- a/docs/docs/tutorials/rag.ipynb +++ b/docs/docs/tutorials/rag.ipynb @@ -424,8 +424,7 @@ "`TextSplitter`: Object that splits a list of `Document`s into smaller\n", "chunks. 
Subclass of `DocumentTransformer`s.\n",
    "\n",
-    "- Explore [context-aware splitters](/docs/how_to#text-splitters), which keep the location (“context”) of each\n",
-    "  split in the original `Document`\n",
+    "- Learn more about splitting text using different methods by reading the [how-to docs](/docs/how_to#text-splitters)\n",
    "- [Code (py or js)](/docs/integrations/document_loaders/source_code)\n",
    "- [Scientific papers](/docs/integrations/document_loaders/grobid)\n",
    "- [Interface](https://api.python.langchain.com/en/latest/base/langchain_text_splitters.base.TextSplitter.html): API reference for the base interface.\n",
@@ -577,7 +576,7 @@
    "  - `MultiQueryRetriever` [generates variants of the input\n",
    "    question](/docs/how_to/MultiQueryRetriever)\n",
    "    to improve retrieval hit rate.\n",
-    "  - `MultiVectorRetriever` (diagram below) instead generates\n",
+    "  - `MultiVectorRetriever` instead generates\n",
    "    [variants of the\n",
    "    embeddings](/docs/how_to/multi_vector),\n",
    "    also in order to improve retrieval hit rate.\n",

From 0061ded002924be9fb7e2229b5de32c13bcf1ff0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fahreddin=20=C3=96zcan?= <88107904+fahreddinozcan@users.noreply.github.com>
Date: Tue, 4 Jun 2024 02:30:56 +0200
Subject: [PATCH 53/54] community[patch]: Upstash Vector Store Namespace Support (#22251)

This PR introduces namespace support for Upstash Vector Store, which allows users to partition their data in the vector index.

---------

Co-authored-by: Bagatur
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
---
 docs/docs/integrations/providers/upstash.mdx |  16 ++
 .../vectorstores/upstash.py                  | 157 +++++++++++++++---
 2 files changed, 154 insertions(+), 19 deletions(-)

diff --git a/docs/docs/integrations/providers/upstash.mdx b/docs/docs/integrations/providers/upstash.mdx
index 0b619dcde9526..d1bfa783c230c 100644
--- a/docs/docs/integrations/providers/upstash.mdx
+++ b/docs/docs/integrations/providers/upstash.mdx
@@ -61,6 +61,22 @@ store = UpstashVectorStore(
 
 See [Upstash Vector documentation](https://upstash.com/docs/vector/features/embeddingmodels)
 for more detail on embedding models.
 
+## Namespaces
+You can use namespaces to partition your data in the index. Namespaces are useful when you want to query over a huge amount of data, and you want to partition the data to make the queries faster. When you use namespaces, there won't be post-filtering on the results, which will make the query results more precise.
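+
+A default namespace can be set when constructing the store, as shown in the example below. As a minimal sketch (assuming the `store` built in that example), the query and upsert methods added in this PR also accept a per-call `namespace` override:
+
+```python
+# hypothetical follow-up call: target a namespace other than the store's default
+results = store.similarity_search("a query about my data", k=4, namespace="another_namespace")
+```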
+
+```python
+from langchain_community.vectorstores.upstash import UpstashVectorStore
+import os
+
+os.environ["UPSTASH_VECTOR_REST_URL"] = ""
+os.environ["UPSTASH_VECTOR_REST_TOKEN"] = ""
+
+store = UpstashVectorStore(
+    embedding=embeddings,
+    namespace="my_namespace"
+)
+```
+
 ### Inserting Vectors
 
 ```python
diff --git a/libs/community/langchain_community/vectorstores/upstash.py b/libs/community/langchain_community/vectorstores/upstash.py
index 96bcc6e4e5132..23aeec0cc3cfb 100644
--- a/libs/community/langchain_community/vectorstores/upstash.py
+++ b/libs/community/langchain_community/vectorstores/upstash.py
@@ -2,7 +2,7 @@
 import logging
 import uuid
-from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union, cast
 
 import numpy as np
 from langchain_core.documents import Document
@@ -64,6 +64,8 @@ def __init__(
         index_url: Optional[str] = None,
         index_token: Optional[str] = None,
         embedding: Optional[Union[Embeddings, bool]] = None,
+        *,
+        namespace: str = "",
     ):
         """
         Constructor for UpstashVectorStore.
@@ -83,6 +85,7 @@ def __init__(
             is applied. If true, Upstash embeddings are used. When Upstash embeddings are used,
             text is sent directly to Upstash and embedding is applied there instead of embedding in Langchain.
+            namespace: Namespace to use from the index.
 
         Example:
             .. code-block:: python
@@ -94,7 +97,8 @@ def __init__(
                 vectorstore = UpstashVectorStore(
                     embedding=embeddings,
                     index_url="...",
-                    index_token="..."
+                    index_token="...",
+                    namespace="..."
                 )
 
                 # With an existing index
@@ -103,7 +107,8 @@ def __init__(
                 index = Index(url="...", token="...")
                 vectorstore = UpstashVectorStore(
                     embedding=embeddings,
-                    index=index
+                    index=index,
+                    namespace="..."
                 )
         """
 
@@ -145,6 +150,7 @@ def __init__(
 
         self._embeddings = embedding
         self._text_key = text_key
+        self._namespace = namespace
 
     @property
     def embeddings(self) -> Optional[Union[Embeddings, bool]]:  # type: ignore
@@ -187,6 +193,8 @@ def add_documents(
         ids: Optional[List[str]] = None,
         batch_size: int = 32,
         embedding_chunk_size: int = 1000,
+        *,
+        namespace: Optional[str] = None,
         **kwargs: Any,
     ) -> List[str]:
         """
         Get the embeddings for the documents and add them to the vectorstore.
 
         Documents are sent to the embeddings object
         in batches of size `embedding_chunk_size`.
         The embeddings are then upserted into the vectorstore
         in batches of size `batch_size`.
 
         Args:
             documents: Iterable of Documents to add to the vectorstore.
             batch_size: Batch size to use when upserting the embeddings.
             Upstash supports at max 1000 vectors per request.
             embedding_batch_size: Chunk size to use when embedding the texts.
+            namespace: Namespace to use from the index.
 
         Returns:
             List of ids from adding the texts into the vectorstore.
@@ -254,6 +267,7 @@ async def aadd_documents( ids=ids, batch_size=batch_size, embedding_chunk_size=embedding_chunk_size, + namespace=namespace, **kwargs, ) @@ -264,6 +278,8 @@ def add_texts( ids: Optional[List[str]] = None, batch_size: int = 32, embedding_chunk_size: int = 1000, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[str]: """ @@ -281,11 +297,15 @@ def add_texts( batch_size: Batch size to use when upserting the embeddings. Upstash supports at max 1000 vectors per request. embedding_batch_size: Chunk size to use when embedding the texts. + namespace: Namespace to use from the index. Returns: List of ids from adding the texts into the vectorstore. """ + if namespace is None: + namespace = self._namespace + texts = list(texts) ids = ids or [str(uuid.uuid4()) for _ in texts] @@ -308,7 +328,9 @@ def add_texts( for batch in batch_iterate( batch_size, zip(chunk_ids, embeddings, chunk_metadatas) ): - self._index.upsert(vectors=batch, **kwargs) + self._index.upsert( + vectors=batch, namespace=cast(str, namespace), **kwargs + ) return ids @@ -319,6 +341,8 @@ async def aadd_texts( ids: Optional[List[str]] = None, batch_size: int = 32, embedding_chunk_size: int = 1000, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[str]: """ @@ -336,11 +360,15 @@ async def aadd_texts( batch_size: Batch size to use when upserting the embeddings. Upstash supports at max 1000 vectors per request. embedding_batch_size: Chunk size to use when embedding the texts. + namespace: Namespace to use from the index. Returns: List of ids from adding the texts into the vectorstore. """ + if namespace is None: + namespace = self._namespace + texts = list(texts) ids = ids or [str(uuid.uuid4()) for _ in texts] @@ -363,7 +391,9 @@ async def aadd_texts( for batch in batch_iterate( batch_size, zip(chunk_ids, embeddings, chunk_metadatas) ): - await self._async_index.upsert(vectors=batch, **kwargs) + await self._async_index.upsert( + vectors=batch, namespace=cast(str, namespace), **kwargs + ) return ids @@ -372,6 +402,8 @@ def similarity_search_with_score( query: str, k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Retrieve texts most similar to query and @@ -381,12 +413,13 @@ def similarity_search_with_score( query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. Returns: List of Documents most similar to the query and score for each """ return self.similarity_search_by_vector_with_score( - self._embed_query(query), k=k, filter=filter, **kwargs + self._embed_query(query), k=k, filter=filter, namespace=namespace, **kwargs ) async def asimilarity_search_with_score( @@ -394,6 +427,8 @@ async def asimilarity_search_with_score( query: str, k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Retrieve texts most similar to query and @@ -403,12 +438,13 @@ async def asimilarity_search_with_score( query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. 
Returns: List of Documents most similar to the query and score for each """ return await self.asimilarity_search_by_vector_with_score( - self._embed_query(query), k=k, filter=filter, **kwargs + self._embed_query(query), k=k, filter=filter, namespace=namespace, **kwargs ) def _process_results(self, results: List) -> List[Tuple[Document, float]]: @@ -430,15 +466,25 @@ def similarity_search_by_vector_with_score( embedding: Union[List[float], str], k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return texts whose embedding is closest to the given embedding""" filter = filter or "" + if namespace is None: + namespace = self._namespace + if isinstance(embedding, str): results = self._index.query( - data=embedding, top_k=k, include_metadata=True, filter=filter, **kwargs + data=embedding, + top_k=k, + include_metadata=True, + filter=filter, + namespace=namespace, + **kwargs, ) else: results = self._index.query( @@ -446,6 +492,7 @@ def similarity_search_by_vector_with_score( top_k=k, include_metadata=True, filter=filter, + namespace=namespace, **kwargs, ) @@ -456,15 +503,25 @@ async def asimilarity_search_by_vector_with_score( embedding: Union[List[float], str], k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return texts whose embedding is closest to the given embedding""" filter = filter or "" + if namespace is None: + namespace = self._namespace + if isinstance(embedding, str): results = await self._async_index.query( - data=embedding, top_k=k, include_metadata=True, filter=filter, **kwargs + data=embedding, + top_k=k, + include_metadata=True, + filter=filter, + namespace=namespace, + **kwargs, ) else: results = await self._async_index.query( @@ -472,6 +529,7 @@ async def asimilarity_search_by_vector_with_score( top_k=k, include_metadata=True, filter=filter, + namespace=namespace, **kwargs, ) @@ -482,6 +540,8 @@ def similarity_search( query: str, k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return documents most similar to query. @@ -490,12 +550,13 @@ def similarity_search( query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. Returns: List of Documents most similar to the query and score for each """ docs_and_scores = self.similarity_search_with_score( - query, k=k, filter=filter, **kwargs + query, k=k, filter=filter, namespace=namespace, **kwargs ) return [doc for doc, _ in docs_and_scores] @@ -504,6 +565,8 @@ async def asimilarity_search( query: str, k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return documents most similar to query. @@ -512,12 +575,13 @@ async def asimilarity_search( query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. 
Returns: List of Documents most similar to the query """ docs_and_scores = await self.asimilarity_search_with_score( - query, k=k, filter=filter, **kwargs + query, k=k, filter=filter, namespace=namespace, **kwargs ) return [doc for doc, _ in docs_and_scores] @@ -526,6 +590,8 @@ def similarity_search_by_vector( embedding: Union[List[float], str], k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return documents closest to the given embedding. @@ -534,12 +600,13 @@ def similarity_search_by_vector( embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_by_vector_with_score( - embedding, k=k, filter=filter, **kwargs + embedding, k=k, filter=filter, namespace=namespace, **kwargs ) return [doc for doc, _ in docs_and_scores] @@ -548,6 +615,8 @@ async def asimilarity_search_by_vector( embedding: Union[List[float], str], k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return documents closest to the given embedding. @@ -556,12 +625,13 @@ async def asimilarity_search_by_vector( embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. Returns: List of Documents most similar to the query """ docs_and_scores = await self.asimilarity_search_by_vector_with_score( - embedding, k=k, filter=filter, **kwargs + embedding, k=k, filter=filter, namespace=namespace, **kwargs ) return [doc for doc, _ in docs_and_scores] @@ -570,25 +640,31 @@ def _similarity_search_with_relevance_scores( query: str, k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Since Upstash always returns relevance scores, default implementation is used. """ - return self.similarity_search_with_score(query, k=k, filter=filter, **kwargs) + return self.similarity_search_with_score( + query, k=k, filter=filter, namespace=namespace, **kwargs + ) async def _asimilarity_search_with_relevance_scores( self, query: str, k: int = 4, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Since Upstash always returns relevance scores, default implementation is used. """ return await self.asimilarity_search_with_score( - query, k=k, filter=filter, **kwargs + query, k=k, filter=filter, namespace=namespace, **kwargs ) def max_marginal_relevance_search_by_vector( @@ -598,6 +674,8 @@ def max_marginal_relevance_search_by_vector( fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. @@ -614,10 +692,14 @@ def max_marginal_relevance_search_by_vector( to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. Returns: List of Documents selected by maximal marginal relevance. 
""" + if namespace is None: + namespace = self._namespace + assert isinstance(self.embeddings, Embeddings) if isinstance(embedding, str): results = self._index.query( @@ -626,6 +708,7 @@ def max_marginal_relevance_search_by_vector( include_vectors=True, include_metadata=True, filter=filter or "", + namespace=namespace, **kwargs, ) else: @@ -635,6 +718,7 @@ def max_marginal_relevance_search_by_vector( include_vectors=True, include_metadata=True, filter=filter or "", + namespace=namespace, **kwargs, ) @@ -657,6 +741,8 @@ async def amax_marginal_relevance_search_by_vector( fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. @@ -673,10 +759,15 @@ async def amax_marginal_relevance_search_by_vector( to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. Returns: List of Documents selected by maximal marginal relevance. """ + + if namespace is None: + namespace = self._namespace + assert isinstance(self.embeddings, Embeddings) if isinstance(embedding, str): results = await self._async_index.query( @@ -685,6 +776,7 @@ async def amax_marginal_relevance_search_by_vector( include_vectors=True, include_metadata=True, filter=filter or "", + namespace=namespace, **kwargs, ) else: @@ -694,6 +786,7 @@ async def amax_marginal_relevance_search_by_vector( include_vectors=True, include_metadata=True, filter=filter or "", + namespace=namespace, **kwargs, ) @@ -716,6 +809,8 @@ def max_marginal_relevance_search( fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. @@ -732,6 +827,7 @@ def max_marginal_relevance_search( to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. Returns: List of Documents selected by maximal marginal relevance. @@ -743,6 +839,7 @@ def max_marginal_relevance_search( fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, + namespace=namespace, **kwargs, ) @@ -753,6 +850,8 @@ async def amax_marginal_relevance_search( fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[str] = None, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. @@ -769,6 +868,7 @@ async def amax_marginal_relevance_search( to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Optional metadata filter in str format + namespace: Namespace to use from the index. Returns: List of Documents selected by maximal marginal relevance. @@ -780,6 +880,7 @@ async def amax_marginal_relevance_search( fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, + namespace=namespace, **kwargs, ) @@ -797,6 +898,8 @@ def from_texts( async_index: Optional[AsyncIndex] = None, index_url: Optional[str] = None, index_token: Optional[str] = None, + *, + namespace: str = "", **kwargs: Any, ) -> UpstashVectorStore: """Create a new UpstashVectorStore from a list of texts. 
@@ -819,6 +922,7 @@ def from_texts( async_index=async_index, index_url=index_url, index_token=index_token, + namespace=namespace, **kwargs, ) @@ -828,6 +932,7 @@ def from_texts( ids=ids, batch_size=batch_size, embedding_chunk_size=embedding_chunk_size, + namespace=namespace, ) return vector_store @@ -845,6 +950,8 @@ async def afrom_texts( async_index: Optional[AsyncIndex] = None, index_url: Optional[str] = None, index_token: Optional[str] = None, + *, + namespace: str = "", **kwargs: Any, ) -> UpstashVectorStore: """Create a new UpstashVectorStore from a list of texts. @@ -865,6 +972,7 @@ async def afrom_texts( text_key=text_key, index=index, async_index=async_index, + namespace=namespace, index_url=index_url, index_token=index_token, **kwargs, @@ -875,6 +983,7 @@ async def afrom_texts( metadatas=metadatas, ids=ids, batch_size=batch_size, + namespace=namespace, embedding_chunk_size=embedding_chunk_size, ) return vector_store @@ -884,6 +993,8 @@ def delete( ids: Optional[List[str]] = None, delete_all: Optional[bool] = None, batch_size: Optional[int] = 1000, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> None: """Delete by vector IDs @@ -892,14 +1003,17 @@ def delete( ids: List of ids to delete. delete_all: Delete all vectors in the index. batch_size: Batch size to use when deleting the embeddings. + namespace: Namespace to use from the index. Upstash supports at max 1000 deletions per request. """ + if namespace is None: + namespace = self._namespace if delete_all: - self._index.reset() + self._index.reset(namespace=namespace) elif ids is not None: for batch in batch_iterate(batch_size, ids): - self._index.delete(ids=batch) + self._index.delete(ids=batch, namespace=namespace) else: raise ValueError("Either ids or delete_all should be provided") @@ -910,6 +1024,8 @@ async def adelete( ids: Optional[List[str]] = None, delete_all: Optional[bool] = None, batch_size: Optional[int] = 1000, + *, + namespace: Optional[str] = None, **kwargs: Any, ) -> None: """Delete by vector IDs @@ -918,14 +1034,17 @@ async def adelete( ids: List of ids to delete. delete_all: Delete all vectors in the index. batch_size: Batch size to use when deleting the embeddings. + namespace: Namespace to use from the index. Upstash supports at max 1000 deletions per request. """ + if namespace is None: + namespace = self._namespace if delete_all: - await self._async_index.reset() + await self._async_index.reset(namespace=namespace) elif ids is not None: for batch in batch_iterate(batch_size, ids): - await self._async_index.delete(ids=batch) + await self._async_index.delete(ids=batch, namespace=namespace) else: raise ValueError("Either ids or delete_all should be provided") From 98b2e7b195235f8b31f91939edc8dcc22336f4e6 Mon Sep 17 00:00:00 2001 From: Michal Gregor Date: Tue, 4 Jun 2024 02:47:35 +0200 Subject: [PATCH 54/54] huggingface[patch]: Support for HuggingFacePipeline in ChatHuggingFace. (#22194) - **Description:** Added support for using HuggingFacePipeline in ChatHuggingFace (previously it was only usable with API endpoints, probably by oversight). 
- **Issue:** #19997 - **Dependencies:** none - **Twitter handle:** none --------- Co-authored-by: Bagatur Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com> --- docs/docs/integrations/chat/huggingface.ipynb | 56 +++++++++++++++++++ .../chat_models/huggingface.py | 16 +++++- 2 files changed, 69 insertions(+), 3 deletions(-) diff --git a/docs/docs/integrations/chat/huggingface.ipynb b/docs/docs/integrations/chat/huggingface.ipynb index 6bdb04870ef1f..f203d29f68513 100644 --- a/docs/docs/integrations/chat/huggingface.ipynb +++ b/docs/docs/integrations/chat/huggingface.ipynb @@ -58,6 +58,62 @@ ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### `HuggingFacePipeline`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_huggingface import HuggingFacePipeline\n", + "\n", + "llm = HuggingFacePipeline.from_model_id(\n", + " model_id=\"HuggingFaceH4/zephyr-7b-beta\",\n", + " task=\"text-generation\",\n", + " pipeline_kwargs=dict(\n", + " max_new_tokens=512,\n", + " do_sample=False,\n", + " repetition_penalty=1.03,\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To run a quantized version, you might specify a `bitsandbytes` quantization config as follows:\n", + "\n", + "```python\n", + "from transformers import BitsAndBytesConfig\n", + "\n", + "quantization_config = BitsAndBytesConfig(\n", + " load_in_4bit=True,\n", + " bnb_4bit_quant_type=\"nf4\",\n", + " bnb_4bit_compute_dtype=\"float16\",\n", + " bnb_4bit_use_double_quant=True\n", + ")\n", + "```\n", + "\n", + "and pass it to the `HuggingFacePipeline` as a part of its `model_kwargs`:\n", + "\n", + "```python\n", + "pipeline = HuggingFacePipeline(\n", + " ...\n", + "\n", + " model_kwargs={\"quantization_config\": quantization_config},\n", + " \n", + " ...\n", + ")\n", + "```" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py b/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py index 8cbb274477e7d..9b3564d53029c 100644 --- a/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py +++ b/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py @@ -35,6 +35,7 @@ from langchain_core.utils.function_calling import convert_to_openai_tool from langchain_huggingface.llms.huggingface_endpoint import HuggingFaceEndpoint +from langchain_huggingface.llms.huggingface_pipeline import HuggingFacePipeline DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful, and honest assistant.""" @@ -135,6 +136,10 @@ def _is_huggingface_endpoint(llm: Any) -> bool: return isinstance(llm, HuggingFaceEndpoint) +def _is_huggingface_pipeline(llm: Any) -> bool: + return isinstance(llm, HuggingFacePipeline) + + class ChatHuggingFace(BaseChatModel): """ Wrapper for using Hugging Face LLM's as ChatModels. 
@@ -150,8 +155,8 @@ class ChatHuggingFace(BaseChatModel):
     """
 
     llm: Any
-    """LLM, must be of type HuggingFaceTextGenInference, HuggingFaceEndpoint, or
-    HuggingFaceHub."""
+    """LLM, must be of type HuggingFaceTextGenInference, HuggingFaceEndpoint,
+    HuggingFaceHub, or HuggingFacePipeline."""
     system_message: SystemMessage = SystemMessage(content=DEFAULT_SYSTEM_PROMPT)
     tokenizer: Any = None
     model_id: Optional[str] = None
@@ -175,10 +180,12 @@ def validate_llm(cls, values: dict) -> dict:
             not _is_huggingface_hub(values["llm"])
             and not _is_huggingface_textgen_inference(values["llm"])
             and not _is_huggingface_endpoint(values["llm"])
+            and not _is_huggingface_pipeline(values["llm"])
         ):
             raise TypeError(
                 "Expected llm to be one of HuggingFaceTextGenInference, "
-                f"HuggingFaceEndpoint, HuggingFaceHub, received {type(values['llm'])}"
+                "HuggingFaceEndpoint, HuggingFaceHub, or HuggingFacePipeline, "
+                f"received {type(values['llm'])}"
             )
         return values
 
@@ -293,6 +300,9 @@ def _resolve_model_id(self) -> None:
             return
         elif _is_huggingface_textgen_inference(self.llm):
             endpoint_url: Optional[str] = self.llm.inference_server_url
+        elif _is_huggingface_pipeline(self.llm):
+            self.model_id = self.llm.model_id
+            return
         else:
             endpoint_url = self.llm.endpoint_url
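A minimal sketch of the usage this patch enables (assuming the `llm` built with `HuggingFacePipeline.from_model_id` in the notebook example above; this sketch is not part of the diff):

```python
from langchain_huggingface import ChatHuggingFace

# validate_llm now accepts a HuggingFacePipeline instance, and
# _resolve_model_id reads model_id straight from the local pipeline
chat_model = ChatHuggingFace(llm=llm)
chat_model.invoke("Hugging Face is")
```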