From 58e40b382cf80ca5f5390c2b5fd79bb023031b6b Mon Sep 17 00:00:00 2001
From: Zeeland <287017217@qq.com>
Date: Tue, 7 Nov 2023 09:43:17 +0800
Subject: [PATCH 1/3] test: optimize tests
---
Makefile | 14 +-
docs/_coverpage.md | 2 +-
docs/get_started/quick_start.md | 11 +-
docs/images/coverage.svg | 6 +-
example/llm/custom_conversation.py | 18 +
example/llm/llm_private_key.py | 12 +
poetry.lock | 371 +++++++++++++++++-
promptulate/tools/arxiv/api_wrapper.py | 5 +-
promptulate/tools/base.py | 2 +-
promptulate/tools/duckduckgo/api_wrapper.py | 2 +-
promptulate/tools/human_feedback/tools.py | 21 +-
promptulate/tools/iot_swith_mqtt/tools.py | 2 +-
promptulate/tools/paper/tools.py | 2 +-
.../tools/semantic_scholar/api_wrapper.py | 2 +-
promptulate/utils/logger.py | 4 +-
pyproject.toml | 1 +
tests/framework/test_conversation.py | 78 ++--
tests/hook/test_agent.py | 7 +-
tests/hook/test_tool.py | 2 +-
tests/tools/test_arxiv_tools.py | 144 +++----
tests/tools/test_ernie_bot_adapt.py | 16 -
tests/tools/test_human_feedback_toos.py | 23 +-
tests/tools/test_iotSwitch_tools.py | 84 ++--
tests/tools/test_langchain_tools.py | 23 +-
tests/tools/test_paper_tools.py | 48 +--
25 files changed, 620 insertions(+), 280 deletions(-)
create mode 100644 example/llm/custom_conversation.py
create mode 100644 example/llm/llm_private_key.py
delete mode 100644 tests/tools/test_ernie_bot_adapt.py
diff --git a/Makefile b/Makefile
index fdaa9853..8a73a6c6 100644
--- a/Makefile
+++ b/Makefile
@@ -4,15 +4,13 @@ OS := $(shell python -c "import sys; print(sys.platform)")
ifeq ($(OS),win32)
PYTHONPATH := $(shell python -c "import os; print(os.getcwd())")
TEST_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate ./tests/test_chat.py ./tests/output_formatter
+ TEST_PROD_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
else
PYTHONPATH := `pwd`
TEST_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate ./tests/test_chat.py ./tests/output_formatter
+ TEST_PROD_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
endif
-#* Poetry
-.PHONY: poetry-download
-poetry-download:
- pip install poetry
#* Installation
.PHONY: install
@@ -33,14 +31,18 @@ polish-codestyle:
.PHONY: formatting
formatting: polish-codestyle
-
-
#* Linting
.PHONY: test
test:
$(TEST_COMMAND)
poetry run coverage-badge -o docs/images/coverage.svg -f
+#* Linting
+.PHONY: test-prod
+test-prod:
+ $(TEST_PROD_COMMAND)
+ poetry run coverage-badge -o docs/images/coverage.svg -f
+
.PHONY: check-codestyle
check-codestyle:
poetry run isort --diff --check-only --settings-path pyproject.toml promptulate tests example
diff --git a/docs/_coverpage.md b/docs/_coverpage.md
index ce877640..1989f801 100644
--- a/docs/_coverpage.md
+++ b/docs/_coverpage.md
@@ -20,7 +20,7 @@
Promptulate
-> All you need is an elegant LLM development framework.
+> All you need is an elegant LLM Agent development framework.
diff --git a/docs/get_started/quick_start.md b/docs/get_started/quick_start.md
index 8fd358c1..11be546b 100644
--- a/docs/get_started/quick_start.md
+++ b/docs/get_started/quick_start.md
@@ -193,7 +193,10 @@ if __name__ == "__main__":
- Python >= 3.8
- make
-> make 不是必须的,但是利用 makefile 的能力轻松集成运行 test、lint 等模块。
+> 本项目使用 make 进行项目配套设施的构建,通过 makefile 的能力轻松集成运行 test、lint 等模块,请确保你的电脑已经安装了 make。
+>
+> [how to install and use make in windows?](https://stackoverflow.com/questions/32127524/how-to-install-and-use-make-in-windows)
+
运行以下命令:
@@ -208,12 +211,8 @@ pip install poetry
make install
```
-如果你没有安装 make,也可以使用如下方式安装:
-```shell
-pip install poetry
-poetry install
-```
+本项目使用配备代码语法检查工具,如果你想提交 pr,则需要在 commit 之前运行 `make polish-codestyle` 进行代码规范格式化,并且运行 `make lint` 通过语法与单元测试的检查。
## 更多
diff --git a/docs/images/coverage.svg b/docs/images/coverage.svg
index 9d027c7d..6d24dca8 100644
--- a/docs/images/coverage.svg
+++ b/docs/images/coverage.svg
@@ -9,13 +9,13 @@
-
+
coverage
coverage
- 49%
- 49%
+ 73%
+ 73%
diff --git a/example/llm/custom_conversation.py b/example/llm/custom_conversation.py
new file mode 100644
index 00000000..70d836d8
--- /dev/null
+++ b/example/llm/custom_conversation.py
@@ -0,0 +1,18 @@
+from promptulate.llms import ChatOpenAI
+from promptulate.schema import AssistantMessage, MessageSet, SystemMessage, UserMessage
+
+
+def main():
+ messages = MessageSet(
+ messages=[
+ SystemMessage(content="You are a helpful assistant"),
+ UserMessage(content="Hello?"),
+ ]
+ )
+
+ llm = ChatOpenAI()
+ answer: AssistantMessage = llm.predict(messages)
+ print(answer.content)
+
+
+main()
diff --git a/example/llm/llm_private_key.py b/example/llm/llm_private_key.py
new file mode 100644
index 00000000..be2ea549
--- /dev/null
+++ b/example/llm/llm_private_key.py
@@ -0,0 +1,12 @@
+"""This example will show how to use a specified key in OpenAI model."""
+from promptulate.llms import ChatOpenAI
+
+
+def main():
+ llm = ChatOpenAI()
+ llm.set_private_api_key("your key here")
+ print(llm("hello"))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/poetry.lock b/poetry.lock
index ada04e89..d9142cfc 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
[[package]]
name = "aiofiles"
@@ -150,24 +150,24 @@ reference = "tsinghua"
[[package]]
name = "anyio"
-version = "4.0.0"
+version = "3.7.1"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.7"
files = [
- {file = "anyio-4.0.0-py3-none-any.whl", hash = "sha256:cfdb2b588b9fc25ede96d8db56ed50848b0b649dca3dd1df0b11f683bb9e0b5f"},
- {file = "anyio-4.0.0.tar.gz", hash = "sha256:f7ed51751b2c2add651e5747c891b47e26d2a21be5d32d9311dfe9692f3e5d7a"},
+ {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"},
+ {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"},
]
[package.dependencies]
-exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
+exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
[package.extras]
-doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)"]
-test = ["anyio[trio]", "coverage[toml] (>=7)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
-trio = ["trio (>=0.22)"]
+doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"]
+test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (<0.22)"]
[package.source]
type = "legacy"
@@ -804,6 +804,26 @@ type = "legacy"
url = "https://pypi.tuna.tsinghua.edu.cn/simple"
reference = "tsinghua"
+[[package]]
+name = "dataclasses-json"
+version = "0.6.1"
+description = "Easily serialize dataclasses to and from JSON."
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "dataclasses_json-0.6.1-py3-none-any.whl", hash = "sha256:1bd8418a61fe3d588bb0079214d7fb71d44937da40742b787256fd53b26b6c80"},
+ {file = "dataclasses_json-0.6.1.tar.gz", hash = "sha256:a53c220c35134ce08211a1057fd0e5bf76dc5331627c6b241cacbc570a89faae"},
+]
+
+[package.dependencies]
+marshmallow = ">=3.18.0,<4.0.0"
+typing-inspect = ">=0.4.0,<1"
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
[[package]]
name = "distlib"
version = "0.3.7"
@@ -1019,6 +1039,81 @@ type = "legacy"
url = "https://pypi.tuna.tsinghua.edu.cn/simple"
reference = "tsinghua"
+[[package]]
+name = "greenlet"
+version = "3.0.1"
+description = "Lightweight in-process concurrent programming"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "greenlet-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f89e21afe925fcfa655965ca8ea10f24773a1791400989ff32f467badfe4a064"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28e89e232c7593d33cac35425b58950789962011cc274aa43ef8865f2e11f46d"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8ba29306c5de7717b5761b9ea74f9c72b9e2b834e24aa984da99cbfc70157fd"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19bbdf1cce0346ef7341705d71e2ecf6f41a35c311137f29b8a2dc2341374565"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599daf06ea59bfedbec564b1692b0166a0045f32b6f0933b0dd4df59a854caf2"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b641161c302efbb860ae6b081f406839a8b7d5573f20a455539823802c655f63"},
+ {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d57e20ba591727da0c230ab2c3f200ac9d6d333860d85348816e1dca4cc4792e"},
+ {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5805e71e5b570d490938d55552f5a9e10f477c19400c38bf1d5190d760691846"},
+ {file = "greenlet-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:52e93b28db27ae7d208748f45d2db8a7b6a380e0d703f099c949d0f0d80b70e9"},
+ {file = "greenlet-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f7bfb769f7efa0eefcd039dd19d843a4fbfbac52f1878b1da2ed5793ec9b1a65"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e6c7db42638dc45cf2e13c73be16bf83179f7859b07cfc139518941320be96"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1757936efea16e3f03db20efd0cd50a1c86b06734f9f7338a90c4ba85ec2ad5a"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19075157a10055759066854a973b3d1325d964d498a805bb68a1f9af4aaef8ec"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9d21aaa84557d64209af04ff48e0ad5e28c5cca67ce43444e939579d085da72"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2847e5d7beedb8d614186962c3d774d40d3374d580d2cbdab7f184580a39d234"},
+ {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:97e7ac860d64e2dcba5c5944cfc8fa9ea185cd84061c623536154d5a89237884"},
+ {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b2c02d2ad98116e914d4f3155ffc905fd0c025d901ead3f6ed07385e19122c94"},
+ {file = "greenlet-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:22f79120a24aeeae2b4471c711dcf4f8c736a2bb2fabad2a67ac9a55ea72523c"},
+ {file = "greenlet-3.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:100f78a29707ca1525ea47388cec8a049405147719f47ebf3895e7509c6446aa"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60d5772e8195f4e9ebf74046a9121bbb90090f6550f81d8956a05387ba139353"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:daa7197b43c707462f06d2c693ffdbb5991cbb8b80b5b984007de431493a319c"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea6b8aa9e08eea388c5f7a276fabb1d4b6b9d6e4ceb12cc477c3d352001768a9"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d11ebbd679e927593978aa44c10fc2092bc454b7d13fdc958d3e9d508aba7d0"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbd4c177afb8a8d9ba348d925b0b67246147af806f0b104af4d24f144d461cd5"},
+ {file = "greenlet-3.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20107edf7c2c3644c67c12205dc60b1bb11d26b2610b276f97d666110d1b511d"},
+ {file = "greenlet-3.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8bef097455dea90ffe855286926ae02d8faa335ed8e4067326257cb571fc1445"},
+ {file = "greenlet-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:b2d3337dcfaa99698aa2377c81c9ca72fcd89c07e7eb62ece3f23a3fe89b2ce4"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80ac992f25d10aaebe1ee15df45ca0d7571d0f70b645c08ec68733fb7a020206"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:337322096d92808f76ad26061a8f5fccb22b0809bea39212cd6c406f6a7060d2"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9934adbd0f6e476f0ecff3c94626529f344f57b38c9a541f87098710b18af0a"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc4d815b794fd8868c4d67602692c21bf5293a75e4b607bb92a11e821e2b859a"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41bdeeb552d814bcd7fb52172b304898a35818107cc8778b5101423c9017b3de"},
+ {file = "greenlet-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6e6061bf1e9565c29002e3c601cf68569c450be7fc3f7336671af7ddb4657166"},
+ {file = "greenlet-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fa24255ae3c0ab67e613556375a4341af04a084bd58764731972bcbc8baeba36"},
+ {file = "greenlet-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:b489c36d1327868d207002391f662a1d163bdc8daf10ab2e5f6e41b9b96de3b1"},
+ {file = "greenlet-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f33f3258aae89da191c6ebaa3bc517c6c4cbc9b9f689e5d8452f7aedbb913fa8"},
+ {file = "greenlet-3.0.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:d2905ce1df400360463c772b55d8e2518d0e488a87cdea13dd2c71dcb2a1fa16"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a02d259510b3630f330c86557331a3b0e0c79dac3d166e449a39363beaae174"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55d62807f1c5a1682075c62436702aaba941daa316e9161e4b6ccebbbf38bda3"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3fcc780ae8edbb1d050d920ab44790201f027d59fdbd21362340a85c79066a74"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eddd98afc726f8aee1948858aed9e6feeb1758889dfd869072d4465973f6bfd"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eabe7090db68c981fca689299c2d116400b553f4b713266b130cfc9e2aa9c5a9"},
+ {file = "greenlet-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f2f6d303f3dee132b322a14cd8765287b8f86cdc10d2cb6a6fae234ea488888e"},
+ {file = "greenlet-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d923ff276f1c1f9680d32832f8d6c040fe9306cbfb5d161b0911e9634be9ef0a"},
+ {file = "greenlet-3.0.1-cp38-cp38-win32.whl", hash = "sha256:0b6f9f8ca7093fd4433472fd99b5650f8a26dcd8ba410e14094c1e44cd3ceddd"},
+ {file = "greenlet-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:990066bff27c4fcf3b69382b86f4c99b3652bab2a7e685d968cd4d0cfc6f67c6"},
+ {file = "greenlet-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ce85c43ae54845272f6f9cd8320d034d7a946e9773c693b27d620edec825e376"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89ee2e967bd7ff85d84a2de09df10e021c9b38c7d91dead95b406ed6350c6997"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87c8ceb0cf8a5a51b8008b643844b7f4a8264a2c13fcbcd8a8316161725383fe"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6a8c9d4f8692917a3dc7eb25a6fb337bff86909febe2f793ec1928cd97bedfc"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fbc5b8f3dfe24784cee8ce0be3da2d8a79e46a276593db6868382d9c50d97b1"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85d2b77e7c9382f004b41d9c72c85537fac834fb141b0296942d52bf03fe4a3d"},
+ {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:696d8e7d82398e810f2b3622b24e87906763b6ebfd90e361e88eb85b0e554dc8"},
+ {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:329c5a2e5a0ee942f2992c5e3ff40be03e75f745f48847f118a3cfece7a28546"},
+ {file = "greenlet-3.0.1-cp39-cp39-win32.whl", hash = "sha256:cf868e08690cb89360eebc73ba4be7fb461cfbc6168dd88e2fbbe6f31812cd57"},
+ {file = "greenlet-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:ac4a39d1abae48184d420aa8e5e63efd1b75c8444dd95daa3e03f6c6310e9619"},
+ {file = "greenlet-3.0.1.tar.gz", hash = "sha256:816bd9488a94cba78d93e1abb58000e8266fa9cc2aa9ccdd6eb0696acb24005b"},
+]
+
+[package.extras]
+docs = ["Sphinx"]
+test = ["objgraph", "psutil"]
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
[[package]]
name = "h11"
version = "0.14.0"
@@ -1306,6 +1401,106 @@ type = "legacy"
url = "https://pypi.tuna.tsinghua.edu.cn/simple"
reference = "tsinghua"
+[[package]]
+name = "jsonpatch"
+version = "1.33"
+description = "Apply JSON-Patches (RFC 6902)"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
+files = [
+ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"},
+ {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"},
+]
+
+[package.dependencies]
+jsonpointer = ">=1.9"
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
+[[package]]
+name = "jsonpointer"
+version = "2.4"
+description = "Identify specific nodes in a JSON document (RFC 6901)"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
+files = [
+ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"},
+ {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"},
+]
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
+[[package]]
+name = "langchain"
+version = "0.0.324"
+description = "Building applications with LLMs through composability"
+optional = false
+python-versions = ">=3.8.1,<4.0"
+files = [
+ {file = "langchain-0.0.324-py3-none-any.whl", hash = "sha256:9be84d14e264567d52b93d0d2ba1e8cbf38c6e50a3914be02dbd9ea0fabaafd9"},
+ {file = "langchain-0.0.324.tar.gz", hash = "sha256:d8dc589aa57699d51eeef8ce0507cd3faac4465ad0ff08dfb0a19e5661c3af44"},
+]
+
+[package.dependencies]
+aiohttp = ">=3.8.3,<4.0.0"
+anyio = "<4.0"
+async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""}
+dataclasses-json = ">=0.5.7,<0.7"
+jsonpatch = ">=1.33,<2.0"
+langsmith = ">=0.0.52,<0.1.0"
+numpy = ">=1,<2"
+pydantic = ">=1,<3"
+PyYAML = ">=5.3"
+requests = ">=2,<3"
+SQLAlchemy = ">=1.4,<3"
+tenacity = ">=8.1.0,<9.0.0"
+
+[package.extras]
+all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.9,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=4,<5)", "deeplake (>=3.6.8,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "libdeeplake (>=0.0.60,<0.0.61)", "librosa (>=0.10.0.post2,<0.11.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=1.2.4,<2.0.0)", "momento (>=1.10.1,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<4)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "python-arango (>=7.5.9,<8.0.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "tensorflow-text 
(>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.6.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
+azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (>=0,<1)"]
+clarifai = ["clarifai (>=9.1.0)"]
+cli = ["typer (>=0.9.0,<0.10.0)"]
+cohere = ["cohere (>=4,<5)"]
+docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
+embeddings = ["sentence-transformers (>=2,<3)"]
+extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "amazon-textract-caller (<2)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (>=0,<1)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
+javascript = ["esprima (>=4.0.1,<5.0.0)"]
+llms = ["clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
+openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.6.0)"]
+qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"]
+text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
+[[package]]
+name = "langsmith"
+version = "0.0.57"
+description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
+optional = false
+python-versions = ">=3.8.1,<4.0"
+files = [
+ {file = "langsmith-0.0.57-py3-none-any.whl", hash = "sha256:d9d466cc45ce5224096ffb820d019b6f83678fc1f1021076ed75728aba60ec2b"},
+ {file = "langsmith-0.0.57.tar.gz", hash = "sha256:34929afd84cbfd46a8469229e3befc14c7e89186a0bee8ce9d084c7b8b271005"},
+]
+
+[package.dependencies]
+pydantic = ">=1,<3"
+requests = ">=2,<3"
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
[[package]]
name = "litellm"
version = "0.12.12"
@@ -1519,6 +1714,31 @@ type = "legacy"
url = "https://pypi.tuna.tsinghua.edu.cn/simple"
reference = "tsinghua"
+[[package]]
+name = "marshmallow"
+version = "3.20.1"
+description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "marshmallow-3.20.1-py3-none-any.whl", hash = "sha256:684939db93e80ad3561392f47be0230743131560a41c5110684c16e21ade0a5c"},
+ {file = "marshmallow-3.20.1.tar.gz", hash = "sha256:5d2371bbe42000f2b3fb5eaa065224df7d8f8597bc19a1bbfa5bfe7fba8da889"},
+]
+
+[package.dependencies]
+packaging = ">=17.0"
+
+[package.extras]
+dev = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)", "pytest", "pytz", "simplejson", "tox"]
+docs = ["alabaster (==0.7.13)", "autodocsumm (==0.2.11)", "sphinx (==7.0.1)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"]
+lint = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)"]
+tests = ["pytest", "pytz", "simplejson"]
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
[[package]]
name = "multidict"
version = "6.0.4"
@@ -2351,6 +2571,117 @@ type = "legacy"
url = "https://pypi.tuna.tsinghua.edu.cn/simple"
reference = "tsinghua"
+[[package]]
+name = "sqlalchemy"
+version = "2.0.23"
+description = "Database Abstraction Library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "SQLAlchemy-2.0.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:638c2c0b6b4661a4fd264f6fb804eccd392745c5887f9317feb64bb7cb03b3ea"},
+ {file = "SQLAlchemy-2.0.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3b5036aa326dc2df50cba3c958e29b291a80f604b1afa4c8ce73e78e1c9f01d"},
+ {file = "SQLAlchemy-2.0.23-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:787af80107fb691934a01889ca8f82a44adedbf5ef3d6ad7d0f0b9ac557e0c34"},
+ {file = "SQLAlchemy-2.0.23-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c14eba45983d2f48f7546bb32b47937ee2cafae353646295f0e99f35b14286ab"},
+ {file = "SQLAlchemy-2.0.23-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0666031df46b9badba9bed00092a1ffa3aa063a5e68fa244acd9f08070e936d3"},
+ {file = "SQLAlchemy-2.0.23-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89a01238fcb9a8af118eaad3ffcc5dedaacbd429dc6fdc43fe430d3a941ff965"},
+ {file = "SQLAlchemy-2.0.23-cp310-cp310-win32.whl", hash = "sha256:cabafc7837b6cec61c0e1e5c6d14ef250b675fa9c3060ed8a7e38653bd732ff8"},
+ {file = "SQLAlchemy-2.0.23-cp310-cp310-win_amd64.whl", hash = "sha256:87a3d6b53c39cd173990de2f5f4b83431d534a74f0e2f88bd16eabb5667e65c6"},
+ {file = "SQLAlchemy-2.0.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d5578e6863eeb998980c212a39106ea139bdc0b3f73291b96e27c929c90cd8e1"},
+ {file = "SQLAlchemy-2.0.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62d9e964870ea5ade4bc870ac4004c456efe75fb50404c03c5fd61f8bc669a72"},
+ {file = "SQLAlchemy-2.0.23-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c80c38bd2ea35b97cbf7c21aeb129dcbebbf344ee01a7141016ab7b851464f8e"},
+ {file = "SQLAlchemy-2.0.23-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75eefe09e98043cff2fb8af9796e20747ae870c903dc61d41b0c2e55128f958d"},
+ {file = "SQLAlchemy-2.0.23-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd45a5b6c68357578263d74daab6ff9439517f87da63442d244f9f23df56138d"},
+ {file = "SQLAlchemy-2.0.23-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a86cb7063e2c9fb8e774f77fbf8475516d270a3e989da55fa05d08089d77f8c4"},
+ {file = "SQLAlchemy-2.0.23-cp311-cp311-win32.whl", hash = "sha256:b41f5d65b54cdf4934ecede2f41b9c60c9f785620416e8e6c48349ab18643855"},
+ {file = "SQLAlchemy-2.0.23-cp311-cp311-win_amd64.whl", hash = "sha256:9ca922f305d67605668e93991aaf2c12239c78207bca3b891cd51a4515c72e22"},
+ {file = "SQLAlchemy-2.0.23-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0f7fb0c7527c41fa6fcae2be537ac137f636a41b4c5a4c58914541e2f436b45"},
+ {file = "SQLAlchemy-2.0.23-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c424983ab447dab126c39d3ce3be5bee95700783204a72549c3dceffe0fc8f4"},
+ {file = "SQLAlchemy-2.0.23-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f508ba8f89e0a5ecdfd3761f82dda2a3d7b678a626967608f4273e0dba8f07ac"},
+ {file = "SQLAlchemy-2.0.23-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6463aa765cf02b9247e38b35853923edbf2f6fd1963df88706bc1d02410a5577"},
+ {file = "SQLAlchemy-2.0.23-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e599a51acf3cc4d31d1a0cf248d8f8d863b6386d2b6782c5074427ebb7803bda"},
+ {file = "SQLAlchemy-2.0.23-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd54601ef9cc455a0c61e5245f690c8a3ad67ddb03d3b91c361d076def0b4c60"},
+ {file = "SQLAlchemy-2.0.23-cp312-cp312-win32.whl", hash = "sha256:42d0b0290a8fb0165ea2c2781ae66e95cca6e27a2fbe1016ff8db3112ac1e846"},
+ {file = "SQLAlchemy-2.0.23-cp312-cp312-win_amd64.whl", hash = "sha256:227135ef1e48165f37590b8bfc44ed7ff4c074bf04dc8d6f8e7f1c14a94aa6ca"},
+ {file = "SQLAlchemy-2.0.23-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:14aebfe28b99f24f8a4c1346c48bc3d63705b1f919a24c27471136d2f219f02d"},
+ {file = "SQLAlchemy-2.0.23-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e983fa42164577d073778d06d2cc5d020322425a509a08119bdcee70ad856bf"},
+ {file = "SQLAlchemy-2.0.23-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e0dc9031baa46ad0dd5a269cb7a92a73284d1309228be1d5935dac8fb3cae24"},
+ {file = "SQLAlchemy-2.0.23-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5f94aeb99f43729960638e7468d4688f6efccb837a858b34574e01143cf11f89"},
+ {file = "SQLAlchemy-2.0.23-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:63bfc3acc970776036f6d1d0e65faa7473be9f3135d37a463c5eba5efcdb24c8"},
+ {file = "SQLAlchemy-2.0.23-cp37-cp37m-win32.whl", hash = "sha256:f48ed89dd11c3c586f45e9eec1e437b355b3b6f6884ea4a4c3111a3358fd0c18"},
+ {file = "SQLAlchemy-2.0.23-cp37-cp37m-win_amd64.whl", hash = "sha256:1e018aba8363adb0599e745af245306cb8c46b9ad0a6fc0a86745b6ff7d940fc"},
+ {file = "SQLAlchemy-2.0.23-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:64ac935a90bc479fee77f9463f298943b0e60005fe5de2aa654d9cdef46c54df"},
+ {file = "SQLAlchemy-2.0.23-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c4722f3bc3c1c2fcc3702dbe0016ba31148dd6efcd2a2fd33c1b4897c6a19693"},
+ {file = "SQLAlchemy-2.0.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4af79c06825e2836de21439cb2a6ce22b2ca129bad74f359bddd173f39582bf5"},
+ {file = "SQLAlchemy-2.0.23-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:683ef58ca8eea4747737a1c35c11372ffeb84578d3aab8f3e10b1d13d66f2bc4"},
+ {file = "SQLAlchemy-2.0.23-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d4041ad05b35f1f4da481f6b811b4af2f29e83af253bf37c3c4582b2c68934ab"},
+ {file = "SQLAlchemy-2.0.23-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aeb397de65a0a62f14c257f36a726945a7f7bb60253462e8602d9b97b5cbe204"},
+ {file = "SQLAlchemy-2.0.23-cp38-cp38-win32.whl", hash = "sha256:42ede90148b73fe4ab4a089f3126b2cfae8cfefc955c8174d697bb46210c8306"},
+ {file = "SQLAlchemy-2.0.23-cp38-cp38-win_amd64.whl", hash = "sha256:964971b52daab357d2c0875825e36584d58f536e920f2968df8d581054eada4b"},
+ {file = "SQLAlchemy-2.0.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:616fe7bcff0a05098f64b4478b78ec2dfa03225c23734d83d6c169eb41a93e55"},
+ {file = "SQLAlchemy-2.0.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e680527245895aba86afbd5bef6c316831c02aa988d1aad83c47ffe92655e74"},
+ {file = "SQLAlchemy-2.0.23-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9585b646ffb048c0250acc7dad92536591ffe35dba624bb8fd9b471e25212a35"},
+ {file = "SQLAlchemy-2.0.23-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4895a63e2c271ffc7a81ea424b94060f7b3b03b4ea0cd58ab5bb676ed02f4221"},
+ {file = "SQLAlchemy-2.0.23-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:cc1d21576f958c42d9aec68eba5c1a7d715e5fc07825a629015fe8e3b0657fb0"},
+ {file = "SQLAlchemy-2.0.23-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:967c0b71156f793e6662dd839da54f884631755275ed71f1539c95bbada9aaab"},
+ {file = "SQLAlchemy-2.0.23-cp39-cp39-win32.whl", hash = "sha256:0a8c6aa506893e25a04233bc721c6b6cf844bafd7250535abb56cb6cc1368884"},
+ {file = "SQLAlchemy-2.0.23-cp39-cp39-win_amd64.whl", hash = "sha256:f3420d00d2cb42432c1d0e44540ae83185ccbbc67a6054dcc8ab5387add6620b"},
+ {file = "SQLAlchemy-2.0.23-py3-none-any.whl", hash = "sha256:31952bbc527d633b9479f5f81e8b9dfada00b91d6baba021a869095f1a97006d"},
+ {file = "SQLAlchemy-2.0.23.tar.gz", hash = "sha256:c1bda93cbbe4aa2aa0aa8655c5aeda505cd219ff3e8da91d1d329e143e4aff69"},
+]
+
+[package.dependencies]
+greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""}
+typing-extensions = ">=4.2.0"
+
+[package.extras]
+aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"]
+aioodbc = ["aioodbc", "greenlet (!=0.4.17)"]
+aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"]
+asyncio = ["greenlet (!=0.4.17)"]
+asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"]
+mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"]
+mssql = ["pyodbc"]
+mssql-pymssql = ["pymssql"]
+mssql-pyodbc = ["pyodbc"]
+mypy = ["mypy (>=0.910)"]
+mysql = ["mysqlclient (>=1.4.0)"]
+mysql-connector = ["mysql-connector-python"]
+oracle = ["cx-oracle (>=8)"]
+oracle-oracledb = ["oracledb (>=1.0.1)"]
+postgresql = ["psycopg2 (>=2.7)"]
+postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"]
+postgresql-pg8000 = ["pg8000 (>=1.29.1)"]
+postgresql-psycopg = ["psycopg (>=3.0.7)"]
+postgresql-psycopg2binary = ["psycopg2-binary"]
+postgresql-psycopg2cffi = ["psycopg2cffi"]
+postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
+pymysql = ["pymysql"]
+sqlcipher = ["sqlcipher3-binary"]
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
+[[package]]
+name = "tenacity"
+version = "8.2.3"
+description = "Retry code until it succeeds"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"},
+ {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"},
+]
+
+[package.extras]
+doc = ["reno", "sphinx", "tornado (>=4.5)"]
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
[[package]]
name = "tiktoken"
version = "0.5.1"
@@ -2594,6 +2925,26 @@ type = "legacy"
url = "https://pypi.tuna.tsinghua.edu.cn/simple"
reference = "tsinghua"
+[[package]]
+name = "typing-inspect"
+version = "0.9.0"
+description = "Runtime inspection utilities for typing module."
+optional = false
+python-versions = "*"
+files = [
+ {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
+ {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=0.3.0"
+typing-extensions = ">=3.7.4"
+
+[package.source]
+type = "legacy"
+url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+reference = "tsinghua"
+
[[package]]
name = "urllib3"
version = "2.0.7"
@@ -2772,4 +3123,4 @@ reference = "tsinghua"
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
-content-hash = "d8fb5b0a59e953422a65ada9492e5e89c4c9e1faeec28f35382ada81f1f59270"
+content-hash = "278bba46c961e2c7b0a5391403df827f6b2e9c8278f7f1cd3eed534c66a99f3e"
diff --git a/promptulate/tools/arxiv/api_wrapper.py b/promptulate/tools/arxiv/api_wrapper.py
index bc07ed3e..31c7f610 100644
--- a/promptulate/tools/arxiv/api_wrapper.py
+++ b/promptulate/tools/arxiv/api_wrapper.py
@@ -59,7 +59,7 @@ class Config:
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
- import arxiv
+ import arxiv # noqa
except ImportError:
raise ValueError(
"Could not import arxiv python package. "
@@ -83,10 +83,13 @@ def _query(
"""
if not keyword:
keyword = ""
+
if not id_list:
id_list = []
+
if not num_results:
num_results = self.max_num_of_result
+
if isinstance(id_list, str):
id_list = [id_list]
diff --git a/promptulate/tools/base.py b/promptulate/tools/base.py
index 651eacd1..81e9ae7d 100644
--- a/promptulate/tools/base.py
+++ b/promptulate/tools/base.py
@@ -69,7 +69,7 @@ class Tool(ABC):
description: str
"""Tool description"""
- def __init__(self, **kwargs):
+ def __init__(self, *args, **kwargs):
self.check_params()
if "hooks" in kwargs and kwargs["hooks"]:
for hook in kwargs["hooks"]:
diff --git a/promptulate/tools/duckduckgo/api_wrapper.py b/promptulate/tools/duckduckgo/api_wrapper.py
index b5267c7f..e2235561 100644
--- a/promptulate/tools/duckduckgo/api_wrapper.py
+++ b/promptulate/tools/duckduckgo/api_wrapper.py
@@ -21,7 +21,7 @@ class Config:
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
- from duckduckgo_search import DDGS
+ from duckduckgo_search import DDGS # noqa
except ImportError:
raise ValueError(
"Could not import duckduckgo-search python package. "
diff --git a/promptulate/tools/human_feedback/tools.py b/promptulate/tools/human_feedback/tools.py
index 1692bf0a..2a522ccf 100644
--- a/promptulate/tools/human_feedback/tools.py
+++ b/promptulate/tools/human_feedback/tools.py
@@ -4,8 +4,9 @@
from promptulate.utils.color_print import print_text
-def _print_func(content) -> None:
- print_text(f"[Agent ask] {content}", "blue")
+def _print_func(llm_question: str) -> None:
+ """Default way to show llm question when llm using HumanFeedBackTool."""
+ print_text(f"[Agent ask] {llm_question}", "blue")
class HumanFeedBackTool(Tool):
@@ -14,22 +15,22 @@ class HumanFeedBackTool(Tool):
name: str = "human_feedback"
description: str = (
"Human feedback tools are used to collect human feedback information."
- "Please only use this tool in situations where relevant contextual information is lacking or reasoning cannot "
- "continue."
- "Please enter the content you wish for human feedback and interaction, but do not ask for knowledge or let "
- "humans reason. "
+ "Please only use this tool in situations where relevant contextual information"
+ "is lacking or reasoning cannot continue. Please enter the content you wish for "
+ "human feedback and interaction, but do not ask for knowledge or let humans reason."
)
def __init__(
self,
- prompt_func: Callable[[str], None] = _print_func,
+ output_func: Callable[[str], None] = _print_func,
input_func: Callable = input,
+ *args,
**kwargs,
):
- super().__init__(**kwargs)
- self.prompt_func = prompt_func
+ super().__init__(*args, **kwargs)
+ self.output_func = output_func
self.input_func = input_func
def _run(self, content: str, *args, **kwargs) -> str:
- self.prompt_func(content)
+ self.output_func(content)
return self.input_func()
diff --git a/promptulate/tools/iot_swith_mqtt/tools.py b/promptulate/tools/iot_swith_mqtt/tools.py
index 087b49eb..0025b649 100644
--- a/promptulate/tools/iot_swith_mqtt/tools.py
+++ b/promptulate/tools/iot_swith_mqtt/tools.py
@@ -41,7 +41,7 @@ def __init__(
def _run(self, question: str, *args, **kwargs) -> str:
try:
- import paho.mqtt.client as mqtt
+ import paho.mqtt.client as mqtt # noqa
except ImportError:
raise ImportError(
"Could not import paho python package. "
diff --git a/promptulate/tools/paper/tools.py b/promptulate/tools/paper/tools.py
index de948663..d6fdee37 100644
--- a/promptulate/tools/paper/tools.py
+++ b/promptulate/tools/paper/tools.py
@@ -141,7 +141,7 @@ def get_advice():
if paper_info:
paper_info = paper_info[0]
- except NetWorkError as e:
+ except NetWorkError:
paper_info = self.arxiv_apiwrapper.query(
keyword=query, num_results=1, specified_fields=["title", "summary"]
)
diff --git a/promptulate/tools/semantic_scholar/api_wrapper.py b/promptulate/tools/semantic_scholar/api_wrapper.py
index 5383ad36..ca3c56d6 100644
--- a/promptulate/tools/semantic_scholar/api_wrapper.py
+++ b/promptulate/tools/semantic_scholar/api_wrapper.py
@@ -90,7 +90,7 @@ def get_detail():
self.current_result = response.json()["matches"]
if len(self.current_result) == 0:
- logger.debug(f"[pne] semantic scholar return none")
+ logger.debug("[pne] semantic scholar return none")
return []
for item in self.current_result:
diff --git a/promptulate/utils/logger.py b/promptulate/utils/logger.py
index 47ec17ee..03c35c8a 100644
--- a/promptulate/utils/logger.py
+++ b/promptulate/utils/logger.py
@@ -21,7 +21,7 @@
import logging
import os
-from promptulate import utils
+from promptulate.utils.core_utils import get_default_storage_path
logger = logging.getLogger(__name__)
@@ -31,7 +31,7 @@ def get_logger():
def get_default_log_path():
- return utils.get_default_storage_path("log")
+ return get_default_storage_path("log")
def get_log_name() -> str:
diff --git a/pyproject.toml b/pyproject.toml
index e8b792fa..7691ea67 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -33,6 +33,7 @@ pytest-cov = "^3.0.0"
coverage = "^6.1.2"
pre-commit = "^3.5.0"
coverage-badge = "^1.1.0"
+langchain = "^0.0.324"
[tool.poetry.scripts]
pne-chat = "promptulate.client.chat:main"
diff --git a/tests/framework/test_conversation.py b/tests/framework/test_conversation.py
index 91ba97b0..63f1564a 100644
--- a/tests/framework/test_conversation.py
+++ b/tests/framework/test_conversation.py
@@ -1,46 +1,46 @@
-from unittest import TestCase
+# from unittest import TestCase
-from promptulate.frameworks.conversation import Conversation
-from promptulate.memory import FileChatMemory
-from promptulate.utils.logger import enable_log, get_logger
+# from promptulate.frameworks.conversation import Conversation
+# from promptulate.memory import FileChatMemory
+# from promptulate.utils.logger import enable_log, get_logger
-enable_log()
-logger = get_logger()
+# enable_log()
+# logger = get_logger()
-class TestConversation(TestCase):
- def test_predict(self):
- conversation = Conversation()
- result = conversation.run("什么是大语言模型")
- self.assertIsNotNone(result)
- self.assertTrue("大语言模型" in result)
+# class TestConversation(TestCase):
+# def test_predict(self):
+# conversation = Conversation()
+# result = conversation.run("什么是大语言模型")
+# self.assertIsNotNone(result)
+# self.assertTrue("大语言模型" in result)
- def test_predict_with_stop(self):
- conversation = Conversation()
- prompt = """
- Please strictly output the following content.
- ```
- [start] This is a test [end]
- ```
- """
- result = conversation.run(prompt, stop=["test"])
- self.assertTrue("test [end]" not in result)
- self.assertIsNotNone(result)
+# def test_predict_with_stop(self):
+# conversation = Conversation()
+# prompt = """
+# Please strictly output the following content.
+# ```
+# [start] This is a test [end]
+# ```
+# """
+# result = conversation.run(prompt, stop=["test"])
+# self.assertTrue("test [end]" not in result)
+# self.assertIsNotNone(result)
- def test_memory_with_buffer(self):
- conversation = Conversation()
- prompt = """给我想5个公司的名字"""
- conversation.run(prompt)
- conversation_id = conversation.conversation_id
- new_conversation = Conversation(conversation_id=conversation_id)
- new_conversation.predict("再给我五个")
+# def test_memory_with_buffer(self):
+# conversation = Conversation()
+# prompt = """give me 5 company names"""
+# conversation.run(prompt)
+# conversation_id = conversation.conversation_id
+# new_conversation = Conversation(conversation_id=conversation_id)
+# new_conversation.predict("give me 5 more")
- def test_memory_with_file(self):
- conversation = Conversation(memory=FileChatMemory())
- prompt = """给我想5个公司的名字"""
- conversation.run(prompt)
- conversation_id = conversation.conversation_id
- new_conversation = Conversation(
- conversation_id=conversation_id, memory=FileChatMemory()
- )
- new_conversation.predict("再给我五个")
+# def test_memory_with_file(self):
+# conversation = Conversation(memory=FileChatMemory())
+# prompt = """give me 5 company names"""
+# conversation.run(prompt)
+# conversation_id = conversation.conversation_id
+# new_conversation = Conversation(
+# conversation_id=conversation_id, memory=FileChatMemory()
+# )
+# new_conversation.predict("give me 5 more")
diff --git a/tests/hook/test_agent.py b/tests/hook/test_agent.py
index db2f737f..99738350 100644
--- a/tests/hook/test_agent.py
+++ b/tests/hook/test_agent.py
@@ -34,7 +34,7 @@ def handle_result(*args, **kwargs):
self.assertIsNotNone(result)
print(f" result: {result}")
- hooks = [handle_result, handle_start, handle_result]
+ hooks = [handle_create, handle_start, handle_result]
tools = [DuckDuckGoTool(), Calculator()]
agent = ToolAgent(tools=tools, hooks=hooks)
agent.run("What is promptulate?")
@@ -71,8 +71,9 @@ def handle_result(*args, **kwargs):
self.assertIsNotNone(result)
print(f" result: {result}")
- tool = DuckDuckGoTool()
- tool.run("What is promptulate?")
+ tools = [DuckDuckGoTool(), Calculator()]
+ agent = ToolAgent(tools=tools)
+ agent.run("What is promptulate?")
self.assertTrue(create_flag)
self.assertTrue(start_flag)
diff --git a/tests/hook/test_tool.py b/tests/hook/test_tool.py
index d800b30d..936bf5cb 100644
--- a/tests/hook/test_tool.py
+++ b/tests/hook/test_tool.py
@@ -33,7 +33,7 @@ def handle_result(*args, **kwargs):
self.assertIsNotNone(result)
print(f" result: {result}")
- hooks = [handle_result, handle_start, handle_result]
+ hooks = [handle_create, handle_start, handle_result]
tool = DuckDuckGoTool(hooks=hooks)
tool.run("What is LLM?")
diff --git a/tests/tools/test_arxiv_tools.py b/tests/tools/test_arxiv_tools.py
index 61cb9404..76260cf6 100644
--- a/tests/tools/test_arxiv_tools.py
+++ b/tests/tools/test_arxiv_tools.py
@@ -1,90 +1,54 @@
-from unittest import TestCase
-
-from promptulate.tools.arxiv.api_wrapper import ArxivAPIWrapper
-from promptulate.tools.arxiv.tools import (
- ArxivQueryTool,
- ArxivReferenceTool,
- ArxivSummaryTool,
-)
-from promptulate.utils.logger import enable_log, get_logger
-
-enable_log()
-logger = get_logger()
-
-
-class TestArxivApiWrapper(TestCase):
- @classmethod
- def setUpClass(cls):
- cls.arxiv_api_wrapper = ArxivAPIWrapper()
-
- def test_query_by_keyword(self):
- results = self.arxiv_api_wrapper.query("Attention Is All You Need")
- for result in results:
- logger.info(result["title"])
- self.assertIsNotNone(results)
- self.assertEqual(len(results), self.arxiv_api_wrapper.max_num_of_result)
-
- def test_query_by_arxiv_id(self):
- results = self.arxiv_api_wrapper.query(id_list=["1605.08386v1"])
- self.assertIsNotNone(results)
- self.assertEqual(len(results), 1)
-
- def test_query_by_filter(self):
- keys = ["entry_id", "title", "authors", "summary"]
- result = self.arxiv_api_wrapper.query(
- "Tree of Thoughts: Deliberate Problem Solving with Large Language Models",
- specified_fields=keys,
- )
- logger.info(result)
- for item in result:
- print(item)
- self.assertIsNotNone(result[0]["title"])
- self.assertIsNotNone(result[0]["entry_id"])
- self.assertIsNotNone(result[0]["authors"])
- self.assertIsNotNone(result[0]["summary"])
-
- # def test_arxiv_api_wrapper_download_dpf(self):
- # result = self.arxiv_api_wrapper.download_pdf(["1605.08386v1"])
- # self.assertIsNotNone(result)
-
-
-class TestArxivTools(TestCase):
- def test_arxiv_query_tool(self):
- tool = ArxivQueryTool()
- ret = tool.run("LLM")
- logger.info(ret)
- self.assertIsNotNone(ret)
- self.assertIsInstance(ret, str)
-
- def test_arxiv_query_tool_by_specified_fields(self):
- tool = ArxivQueryTool()
- ret = tool.run("LLM", specified_fields=["entry_id", "title"])
- logger.info(ret)
- self.assertIsNotNone(ret)
- self.assertTrue("entry_id" in ret)
- self.assertTrue("title" in ret)
- self.assertTrue("summary" not in ret)
- self.assertTrue("authors" not in ret)
-
- def test_arxiv_reference_tool(self):
- tool = ArxivReferenceTool()
- prompt = """
- 该论文探讨了Codex语言模型的Text-to-SQL能力,并对其在Spider基准测试中的表现进行了实证评估。研究发现,即使没有任何fine-tuning,Codex也是Spider基准测试中的强基准。同时,该研究还分析了Codex在这种情况下的失效模式。此外,该论文还在GeoQuery和Scholar基准测试中进行了实验,表明一小部分in-domain样例可以在不进行fine-tuning的情况下提高Codex的性能,并使其胜过在少量样例上进行finetune的最先进模型。
- 该论文的一个关键见解是,语言模型的表现在很大程度上依赖于领域的样本数据。但是,该研究表明,在语言模型具备一定的常识知识和推理能力的情况下,提供一些领域内例子可以显著地提高它的性能。这意味着,我们并不总是需要进行大量的finetuning操作就能使模型在某些针对性任务上获得良好的表现。而且,在某些情况下,提供数据的方式也许比finetuning更为高效。
- 该论文的经验教训是,我们需要在不同的语言模型和关键任务上进行评估,以便更好地了解它们的优缺点。此外,在一些应用场景中,我们可以通过提供一些in-domain例子来改进模型。
- """
- ret = tool.run(prompt)
- logger.info(ret)
- self.assertTrue("[1]" in ret)
-
- def test_arxiv_summary_tool(self):
- tool = ArxivSummaryTool()
- ret = tool.run("Large Language Models are Zero-Shot Reasoners")
- logger.info(f"[result] {ret}")
- self.assertIsNotNone(ret)
-
- def test_arxiv_summary_tool_by_arxiv_id(self):
- tool = ArxivSummaryTool()
- ret = tool.run("2205.11916v4")
- logger.info(f"[result] {ret}")
- self.assertIsNotNone(ret)
+import pytest
+from unittest.mock import patch
+from pydantic import ValidationError
+from promptulate.tools.arxiv.api_wrapper import ArxivAPIWrapper, ArxivQuerySet # Assumes your code is in "your_module.py"
+
+@pytest.fixture
+def mock_search():
+ class MockResult:
+ def __init__(self):
+ self.entry_id = "1"
+ self.title = "Test Title"
+ self.summary = "Test Summary"
+
+ class MockSearch:
+ @staticmethod
+ def results():
+ return iter([MockResult()]) # Returns a single MockResult instance
+
+ return MockSearch()
+
+
+@pytest.fixture
+def arxiv_api_wrapper():
+ return ArxivAPIWrapper(max_num_of_result=5)
+
+
+def test_validate_environment_with_installed_package(arxiv_api_wrapper):
+ assert isinstance(arxiv_api_wrapper, ArxivAPIWrapper)
+
+
+def test_validate_environment_with_no_installed_package():
+ with pytest.raises(ValidationError):
+ ArxivAPIWrapper(max_num_of_result='invalid')
+
+
+@patch("promptulate.tools.arxiv.api_wrapper.ArxivAPIWrapper._query")
+def test_query_with_specified_fields(mock_query, arxiv_api_wrapper, mock_search):
+ mock_query.return_value = mock_search
+ result = arxiv_api_wrapper.query(
+ "test keyword", specified_fields=["entry_id", "title", "summary"]
+ )
+
+ assert len(result) == 1
+ assert result[0] == {"entry_id": "1", "title": "Test Title", "summary": "Test Summary"}
+
+
+@patch("promptulate.tools.arxiv.api_wrapper.ArxivAPIWrapper._query")
+def test_query_with_no_specified_fields(mock_query, arxiv_api_wrapper, mock_search):
+ mock_query.return_value = mock_search
+ result = arxiv_api_wrapper.query("test keyword")
+
+ assert len(result) == 1
+ assert result[0] == {"entry_id": "1", "title": "Test Title", "summary": "Test Summary"}
+
diff --git a/tests/tools/test_ernie_bot_adapt.py b/tests/tools/test_ernie_bot_adapt.py
deleted file mode 100644
index d5be8abd..00000000
--- a/tests/tools/test_ernie_bot_adapt.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from unittest import TestCase
-
-from promptulate import Conversation, enable_log
-from promptulate.llms import ErnieBot
-
-enable_log()
-
-
-class TestErnieBotAdapt(TestCase):
- def test_run(self):
- llm = ErnieBot(temperature=0.1)
- conversation = Conversation(llm=llm)
- while True:
- prompt = str(input("[User] "))
- ret = conversation.run(prompt)
- print(f"[output] {ret}")
diff --git a/tests/tools/test_human_feedback_toos.py b/tests/tools/test_human_feedback_toos.py
index bd358b2c..d4b36235 100644
--- a/tests/tools/test_human_feedback_toos.py
+++ b/tests/tools/test_human_feedback_toos.py
@@ -1,18 +1,15 @@
-from unittest import TestCase
-
from promptulate.tools.human_feedback import HumanFeedBackTool
-class TestHumanFeedBackTool(TestCase):
- def prompt_func(self, content: str) -> None:
- print(content)
+def output_func(llm_question: str):
+ print(llm_question)
+
+
+def input_func():
+ return "human_answer"
- def input_func(self):
- return input()
- def test_run(self):
- tool = HumanFeedBackTool(
- prompt_func=self.prompt_func, input_func=self.input_func
- )
- result = tool.run("我好冷")
- print(result)
+def test_human_fb_tool():
+ tool = HumanFeedBackTool(output_func=output_func, input_func=input_func)
+ answer = tool.run("Hello")
+ assert answer == "human_answer"
diff --git a/tests/tools/test_iotSwitch_tools.py b/tests/tools/test_iotSwitch_tools.py
index 199a7081..ec7469ea 100644
--- a/tests/tools/test_iotSwitch_tools.py
+++ b/tests/tools/test_iotSwitch_tools.py
@@ -1,49 +1,49 @@
-from unittest import TestCase
+# from unittest import TestCase
-import paho.mqtt.client as mqtt
+# import paho.mqtt.client as mqtt
-from promptulate import enable_log
-from promptulate.tools.iot_swith_mqtt import IotSwitchTool
-from promptulate.tools.iot_swith_mqtt.api_wrapper import IotSwitchAPIWrapper
-from promptulate.utils import get_logger
+# from promptulate import enable_log
+# from promptulate.tools.iot_swith_mqtt import IotSwitchTool
+# from promptulate.tools.iot_swith_mqtt.api_wrapper import IotSwitchAPIWrapper
+# from promptulate.utils import get_logger
-enable_log()
-logger = get_logger()
+# enable_log()
+# logger = get_logger()
-class TestIotSwitchAPIWrapper(TestCase):
- def test_run(self):
- api_wrapper = IotSwitchAPIWrapper()
- # MQTT broker address and port
- broker_address = "XXX"
- broker_port = 1883
- # username and password
- username = "cwl"
- password = "XXXX"
- client = mqtt.Client()
- client.username_pw_set(username, password)
- client.connect(broker_address, broker_port)
- api_wrapper.run(client, "/123", "hello")
+# class TestIotSwitchAPIWrapper(TestCase):
+# def test_run(self):
+# api_wrapper = IotSwitchAPIWrapper()
+# # MQTT broker address and port
+# broker_address = "XXX"
+# broker_port = 1883
+# # username and password
+# username = "cwl"
+# password = "XXXX"
+# client = mqtt.Client()
+# client.username_pw_set(username, password)
+# client.connect(broker_address, broker_port)
+# api_wrapper.run(client, "/123", "hello")
-class TestIotSwitchTool(TestCase):
- def test_run(self):
- # MQTT broker address and port
- broker_address = "XXX"
- broker_port = 1883
- # username and password
- username = "XXXX"
- password = "XXXXXX"
- client = mqtt.Client()
- client.username_pw_set(username, password)
- client.connect(broker_address, broker_port)
- tool = IotSwitchTool(
- client=client,
- rule_table=[
- {"content": "开风扇", "topic": "/123", "ask": "open fan"},
- {"content": "开加热器", "topic": "/123", "ask": "open heater"},
- {"content": "开灯", "topic": "/123", "ask": "open light"},
- ],
- )
- result = tool.run("我好冷")
- print(result)
+# class TestIotSwitchTool(TestCase):
+# def test_run(self):
+# # MQTT broker address and port
+# broker_address = "XXX"
+# broker_port = 1883
+# # username and password
+# username = "XXXX"
+# password = "XXXXXX"
+# client = mqtt.Client()
+# client.username_pw_set(username, password)
+# client.connect(broker_address, broker_port)
+# tool = IotSwitchTool(
+# client=client,
+# rule_table=[
+# {"content": "开风扇", "topic": "/123", "ask": "open fan"},
+# {"content": "开加热器", "topic": "/123", "ask": "open heater"},
+# {"content": "开灯", "topic": "/123", "ask": "open light"},
+# ],
+# )
+# result = tool.run("我好冷")
+# print(result)
diff --git a/tests/tools/test_langchain_tools.py b/tests/tools/test_langchain_tools.py
index e74d58a8..cb8a39a1 100644
--- a/tests/tools/test_langchain_tools.py
+++ b/tests/tools/test_langchain_tools.py
@@ -1,13 +1,20 @@
-from unittest import TestCase
+import os
+from tempfile import TemporaryDirectory
-from langchain.tools.shell.tool import ShellTool
+from langchain.agents.agent_toolkits import FileManagementToolkit
from promptulate.tools.langchain.tools import LangchainTool
-class TestLangchainTool(TestCase):
- def test_shell_run(self):
- tool = LangchainTool(ShellTool())
- result = tool._run("echo HelloWorld")
- print(result)
- self.assertIsNotNone(result)
+def test_read_file():
+ working_directory = TemporaryDirectory()
+
+ lc_write_tool = FileManagementToolkit(
+ root_dir=str(working_directory.name),
+ selected_tools=["write_file"],
+ ).get_tools()[0]
+
+ tool = LangchainTool(lc_write_tool)
+ tool.run({"file_path": "example.txt", "text": "Hello World!"})
+
+ assert os.path.exists(os.path.join(working_directory.name, "example.txt"))
diff --git a/tests/tools/test_paper_tools.py b/tests/tools/test_paper_tools.py
index b77fd336..7cece44c 100644
--- a/tests/tools/test_paper_tools.py
+++ b/tests/tools/test_paper_tools.py
@@ -1,29 +1,29 @@
-from unittest import TestCase
+# from unittest import TestCase
-from promptulate.tools.paper.tools import PaperSummaryTool
-from promptulate.utils.logger import enable_log, get_logger
+# from promptulate.tools.paper.tools import PaperSummaryTool
+# from promptulate.utils.logger import enable_log, get_logger
-enable_log()
-logger = get_logger()
+# enable_log()
+# logger = get_logger()
-class TestPaperSummaryTool(TestCase):
- def test_run(self):
- tool = PaperSummaryTool()
- result = tool.run("attention is all you need")
- logger.info(f"[result] {result}")
- self.assertIsNotNone(result)
- self.assertTrue("[1]" in result)
- self.assertTrue("关键见解" in result)
- self.assertTrue("经验教训" in result)
- self.assertTrue("相关建议" in result)
+# class TestPaperSummaryTool(TestCase):
+# def test_run(self):
+# tool = PaperSummaryTool()
+# result = tool.run("attention is all you need")
+# logger.info(f"[result] {result}")
+# self.assertIsNotNone(result)
+# self.assertTrue("[1]" in result)
+# self.assertTrue("关键见解" in result)
+# self.assertTrue("经验教训" in result)
+# self.assertTrue("相关建议" in result)
- def test_by_arxiv_id(self):
- tool = PaperSummaryTool()
- result = tool.run("2205.11916v4")
- logger.info(f"[result] {result}")
- self.assertIsNotNone(result)
- self.assertTrue("[1]" in result)
- self.assertTrue("关键见解" in result)
- self.assertTrue("经验教训" in result)
- self.assertTrue("相关建议" in result)
+# def test_by_arxiv_id(self):
+# tool = PaperSummaryTool()
+# result = tool.run("2205.11916v4")
+# logger.info(f"[result] {result}")
+# self.assertIsNotNone(result)
+# self.assertTrue("[1]" in result)
+# self.assertTrue("关键见解" in result)
+# self.assertTrue("经验教训" in result)
+# self.assertTrue("相关建议" in result)
From ca65353329c6359bd5b165def245f53d864fc9dc Mon Sep 17 00:00:00 2001
From: Zeeland <287017217@qq.com>
Date: Tue, 7 Nov 2023 09:52:36 +0800
Subject: [PATCH 2/3] perf: formatter
---
promptulate/client/chat.py | 2 +-
promptulate/llms/erniebot/erniebot.py | 8 +-------
promptulate/tools/arxiv/api_wrapper.py | 8 ++++----
promptulate/tools/duckduckgo/api_wrapper.py | 2 +-
promptulate/tools/iot_swith_mqtt/tools.py | 2 +-
5 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/promptulate/client/chat.py b/promptulate/client/chat.py
index 82794cd8..a7518c88 100644
--- a/promptulate/client/chat.py
+++ b/promptulate/client/chat.py
@@ -158,7 +158,7 @@ def chat():
if args.proxy_mode:
set_proxy_mode(args.proxy_mode)
- print_text(f"Hi there, here is promptulate chat terminal.", "pink")
+ print_text("Hi there, here is promptulate chat terminal.", "pink")
terminal_mode = questionary.select(
"Choose a chat terminal:",
diff --git a/promptulate/llms/erniebot/erniebot.py b/promptulate/llms/erniebot/erniebot.py
index 0a85d868..fead651e 100644
--- a/promptulate/llms/erniebot/erniebot.py
+++ b/promptulate/llms/erniebot/erniebot.py
@@ -7,13 +7,7 @@
from promptulate.config import Config
from promptulate.llms import BaseLLM
-from promptulate.schema import (
- AssistantMessage,
- BaseMessage,
- LLMType,
- MessageSet,
- UserMessage,
-)
+from promptulate.schema import AssistantMessage, LLMType, MessageSet, UserMessage
from promptulate.tips import LLMError
from promptulate.utils import get_logger
diff --git a/promptulate/tools/arxiv/api_wrapper.py b/promptulate/tools/arxiv/api_wrapper.py
index 31c7f610..2ba60ee0 100644
--- a/promptulate/tools/arxiv/api_wrapper.py
+++ b/promptulate/tools/arxiv/api_wrapper.py
@@ -59,7 +59,7 @@ class Config:
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
- import arxiv # noqa
+ import arxiv # noqa
except ImportError:
raise ValueError(
"Could not import arxiv python package. "
@@ -83,13 +83,13 @@ def _query(
"""
if not keyword:
keyword = ""
-
+
if not id_list:
id_list = []
-
+
if not num_results:
num_results = self.max_num_of_result
-
+
if isinstance(id_list, str):
id_list = [id_list]
diff --git a/promptulate/tools/duckduckgo/api_wrapper.py b/promptulate/tools/duckduckgo/api_wrapper.py
index e2235561..3ece6305 100644
--- a/promptulate/tools/duckduckgo/api_wrapper.py
+++ b/promptulate/tools/duckduckgo/api_wrapper.py
@@ -21,7 +21,7 @@ class Config:
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
- from duckduckgo_search import DDGS # noqa
+ from duckduckgo_search import DDGS # noqa
except ImportError:
raise ValueError(
"Could not import duckduckgo-search python package. "
diff --git a/promptulate/tools/iot_swith_mqtt/tools.py b/promptulate/tools/iot_swith_mqtt/tools.py
index 0025b649..13fd9a28 100644
--- a/promptulate/tools/iot_swith_mqtt/tools.py
+++ b/promptulate/tools/iot_swith_mqtt/tools.py
@@ -41,7 +41,7 @@ def __init__(
def _run(self, question: str, *args, **kwargs) -> str:
try:
- import paho.mqtt.client as mqtt # noqa
+ import paho.mqtt.client as mqtt # noqa
except ImportError:
raise ImportError(
"Could not import paho python package. "
From 8a80e9caa9508123c7d604e1ea5cab689931e954 Mon Sep 17 00:00:00 2001
From: Zeeland <287017217@qq.com>
Date: Tue, 7 Nov 2023 10:04:44 +0800
Subject: [PATCH 3/3] fix: code format
---
tests/tools/test_arxiv_tools.py | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/tests/tools/test_arxiv_tools.py b/tests/tools/test_arxiv_tools.py
index 76260cf6..41f27a16 100644
--- a/tests/tools/test_arxiv_tools.py
+++ b/tests/tools/test_arxiv_tools.py
@@ -1,7 +1,10 @@
-import pytest
from unittest.mock import patch
+
+import pytest
from pydantic import ValidationError
-from promptulate.tools.arxiv.api_wrapper import ArxivAPIWrapper, ArxivQuerySet # Assumes your code is in "your_module.py"
+
+from promptulate.tools.arxiv.api_wrapper import ArxivAPIWrapper
+
@pytest.fixture
def mock_search():
@@ -30,7 +33,7 @@ def test_validate_environment_with_installed_package(arxiv_api_wrapper):
def test_validate_environment_with_no_installed_package():
with pytest.raises(ValidationError):
- ArxivAPIWrapper(max_num_of_result='invalid')
+ ArxivAPIWrapper(max_num_of_result="invalid")
@patch("promptulate.tools.arxiv.api_wrapper.ArxivAPIWrapper._query")
@@ -41,7 +44,11 @@ def test_query_with_specified_fields(mock_query, arxiv_api_wrapper, mock_search)
)
assert len(result) == 1
- assert result[0] == {"entry_id": "1", "title": "Test Title", "summary": "Test Summary"}
+ assert result[0] == {
+ "entry_id": "1",
+ "title": "Test Title",
+ "summary": "Test Summary",
+ }
@patch("promptulate.tools.arxiv.api_wrapper.ArxivAPIWrapper._query")
@@ -50,5 +57,8 @@ def test_query_with_no_specified_fields(mock_query, arxiv_api_wrapper, mock_sear
result = arxiv_api_wrapper.query("test keyword")
assert len(result) == 1
- assert result[0] == {"entry_id": "1", "title": "Test Title", "summary": "Test Summary"}
-
+ assert result[0] == {
+ "entry_id": "1",
+ "title": "Test Title",
+ "summary": "Test Summary",
+ }