From 84ad3aa41f50224014807f879955bacf2f2245f4 Mon Sep 17 00:00:00 2001
From: Zeeland <zeeland@foxmail.com>
Date: Thu, 21 Sep 2023 23:22:48 +0800
Subject: [PATCH] style: black formatting

Run black across the codebase. Besides formatting, this also drops a
leftover debug print in BaseOpenAI, raises (instead of discarding) the
ValueError in get_preset_role_prompt, adds an import_string helper to
core_utils, and bumps the version to 1.7.2.
---
 example/preset_role/base_usage.py           |  2 +-
 promptulate/agents/tool_agent/agent.py      |  7 ++++-
 promptulate/client/chat.py                  |  3 +-
 promptulate/frameworks/__init__.py          |  4 +--
 .../frameworks/conversation/__init__.py     |  4 +--
 promptulate/frameworks/prompt.py            |  8 +++---
 promptulate/llms/base.py                    |  1 +
 promptulate/llms/openai/openai.py           |  1 -
 promptulate/memory/__init__.py              |  5 +---
 promptulate/memory/buffer.py                |  8 ++----
 promptulate/preset_roles/__init__.py        |  5 +---
 promptulate/preset_roles/prompt.py          |  1 -
 promptulate/preset_roles/roles.py           | 22 ++++++++-------
 promptulate/provider/__init__.py            | 10 +++----
 promptulate/provider/base.py                |  2 +-
 promptulate/schema.py                       |  4 ++-
 promptulate/tools/arxiv/__init__.py         | 14 ++++++----
 promptulate/tools/arxiv/toolkit.py          | 11 +++++---
 promptulate/tools/duckduckgo/__init__.py    |  4 +--
 promptulate/tools/duckduckgo/api_wrapper.py |  4 +--
 .../tools/iot_swith_mqtt/api_wrapper.py     |  1 -
 promptulate/tools/iot_swith_mqtt/prompt.py  |  4 +--
 promptulate/tools/iot_swith_mqtt/tools.py   |  5 ++--
 promptulate/tools/manager.py                |  1 +
 promptulate/tools/math/tools.py             |  9 ++----
 promptulate/tools/paper/tools.py            |  4 ++-
 promptulate/tools/python_repl/__init__.py   |  4 +--
 promptulate/utils/__init__.py               |  2 +-
 promptulate/utils/core_utils.py             | 28 +++++++++++++++++++
 promptulate/utils/proxy.py                  |  2 +-
 promptulate/utils/singleton.py              |  5 ++--
 setup.py                                    |  3 +-
 tests/framework/test_conversation.py        |  4 ++-
 tests/tools/test_sleep_tools.py             |  1 -
 34 files changed, 108 insertions(+), 85 deletions(-)

diff --git a/example/preset_role/base_usage.py b/example/preset_role/base_usage.py
index cef90279..0769f255 100644
--- a/example/preset_role/base_usage.py
+++ b/example/preset_role/base_usage.py
@@ -29,5 +29,5 @@ def main():
     print(ret)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/promptulate/agents/tool_agent/agent.py b/promptulate/agents/tool_agent/agent.py
index 74d7be9c..77ee67e4 100644
--- a/promptulate/agents/tool_agent/agent.py
+++ b/promptulate/agents/tool_agent/agent.py
@@ -65,7 +65,12 @@ def _build_preset_prompt(self, prompt) -> str:
         """Build the system prompt."""
         if self.enable_role:
             prefix = self.prefix_prompt_template.format(
-                [self.agent_identity, self.agent_name, self.agent_goal, self.agent_constraints]
+                [
+                    self.agent_identity,
+                    self.agent_name,
+                    self.agent_goal,
+                    self.agent_constraints,
+                ]
             )
             return prefix + self.system_prompt_template.format(
                 prompt=prompt,
diff --git a/promptulate/client/chat.py b/promptulate/client/chat.py
index 63d430f1..23111d16 100644
--- a/promptulate/client/chat.py
+++ b/promptulate/client/chat.py
@@ -34,10 +34,9 @@
     PythonREPLTool,
     ArxivQueryTool,
     SleepTool,
-    HumanFeedBackTool
+    HumanFeedBackTool,
 )
 from promptulate.tools.shell import ShellTool
-
 from promptulate.utils import set_proxy_mode, print_text
 
 MODEL_MAPPING = {"OpenAI": ChatOpenAI, "ErnieBot": ErnieBot}
diff --git a/promptulate/frameworks/__init__.py b/promptulate/frameworks/__init__.py
index 3bd23def..39e76a04 100644
--- a/promptulate/frameworks/__init__.py
+++ b/promptulate/frameworks/__init__.py
@@ -20,6 +20,4 @@
 
 from promptulate.frameworks.conversation import Conversation
 
-__all__ = [
-    'Conversation',
-]
+__all__ = ["Conversation"]
diff --git a/promptulate/frameworks/conversation/__init__.py b/promptulate/frameworks/conversation/__init__.py
index 03514f50..f0771441 100644
--- a/promptulate/frameworks/conversation/__init__.py
+++ b/promptulate/frameworks/conversation/__init__.py
@@ -20,6 +20,4 @@
 
 from promptulate.frameworks.conversation.conversation import Conversation
 
-__all__ = [
-    'Conversation'
-]
+__all__ = ["Conversation"]
diff --git a/promptulate/frameworks/prompt.py b/promptulate/frameworks/prompt.py
index e9b7fbd7..2c31bf0c 100644
--- a/promptulate/frameworks/prompt.py
+++ b/promptulate/frameworks/prompt.py
@@ -18,10 +18,10 @@
 # Contact Email: zeeland@foxmail.com
 
 __all__ = [
-    'SUMMARY_CONTENT_PROMPT_ZH',
-    'SUMMARY_TOPIC_PROMPT_ZH',
-    'SUMMARY_TOPIC_PROMPT_EN',
-    'SUMMARY_CONTENT_PROMPT_EN',
+    "SUMMARY_CONTENT_PROMPT_ZH",
+    "SUMMARY_TOPIC_PROMPT_ZH",
+    "SUMMARY_TOPIC_PROMPT_EN",
+    "SUMMARY_CONTENT_PROMPT_EN",
 ]
 
 SUMMARY_CONTENT_PROMPT_ZH = """
diff --git a/promptulate/llms/base.py b/promptulate/llms/base.py
index c16835a7..4a0dfbae 100644
--- a/promptulate/llms/base.py
+++ b/promptulate/llms/base.py
@@ -31,6 +31,7 @@ class BaseLLM(BaseModel, ABC):
 
     class Config:
         """Configuration for this pydantic object."""
+
         arbitrary_types_allowed = True
 
     def __init__(self, *args, **kwargs):
diff --git a/promptulate/llms/openai/openai.py b/promptulate/llms/openai/openai.py
index b392862f..eb1eb479 100644
--- a/promptulate/llms/openai/openai.py
+++ b/promptulate/llms/openai/openai.py
@@ -81,7 +81,6 @@ class BaseOpenAI(BaseLLM, ABC):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.retry_times = CFG.get_key_retry_times(self.model)
-        print("aaaa", self.retry_times)
 
     @property
     def api_key(self):
diff --git a/promptulate/memory/__init__.py b/promptulate/memory/__init__.py
index 9804e9e4..1a020004 100644
--- a/promptulate/memory/__init__.py
+++ b/promptulate/memory/__init__.py
@@ -20,7 +20,4 @@
 from promptulate.memory.buffer import BufferChatMemory
 from promptulate.memory.file import FileChatMemory
 
-__all__ = [
-    'BufferChatMemory',
-    'FileChatMemory'
-]
+__all__ = ["BufferChatMemory", "FileChatMemory"]
diff --git a/promptulate/memory/buffer.py b/promptulate/memory/buffer.py
index a38c22de..838cc49c 100644
--- a/promptulate/memory/buffer.py
+++ b/promptulate/memory/buffer.py
@@ -38,7 +38,7 @@ class BufferChatMemory(BaseChatMemory):
     """Chat message will be stored in the buffer cache."""
 
     def load_message_set_from_memory(
-            self, recently_n: Optional[int] = None
+        self, recently_n: Optional[int] = None
     ) -> MessageSet:
         """Load message from buffer memory
 
@@ -50,12 +50,10 @@ def load_message_set_from_memory(
         """
         if not buffer:
             raise EmptyMessageSetError
-        recently_n = (
-            recently_n if recently_n else len(buffer[self.conversation_id])
-        )
+        recently_n = recently_n if recently_n else len(buffer[self.conversation_id])
         num_messages = len(buffer[self.conversation_id])
         return MessageSet.from_listdict_data(
-            buffer[self.conversation_id][num_messages - recently_n:]
+            buffer[self.conversation_id][num_messages - recently_n :]
         )
 
     def save_message_set_to_memory(self, message_set: MessageSet) -> None:
diff --git a/promptulate/preset_roles/__init__.py b/promptulate/preset_roles/__init__.py
index 801f4e5f..b9134ca9 100644
--- a/promptulate/preset_roles/__init__.py
+++ b/promptulate/preset_roles/__init__.py
@@ -19,7 +19,4 @@
 
 from promptulate.preset_roles.roles import get_all_preset_roles, CustomPresetRole
 
-__all__ = [
-    'get_all_preset_roles',
-    'CustomPresetRole'
-]
+__all__ = ["get_all_preset_roles", "CustomPresetRole"]
diff --git a/promptulate/preset_roles/prompt.py b/promptulate/preset_roles/prompt.py
index 5a02484f..84f2d4a6 100644
--- a/promptulate/preset_roles/prompt.py
+++ b/promptulate/preset_roles/prompt.py
@@ -41,4 +41,3 @@
 
 用户的问题如下所示:
 """
-
diff --git a/promptulate/preset_roles/roles.py b/promptulate/preset_roles/roles.py
index b3f2b509..db3ab4c5 100644
--- a/promptulate/preset_roles/roles.py
+++ b/promptulate/preset_roles/roles.py
@@ -43,42 +43,42 @@ class CustomPresetRole(BaseModel):
 You are an assistant to a human, powered by a large language model trained by OpenAI.
 You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
 Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist.
-"""
+""",
     },
     "ernie-default-role": {
         "name": "AI助手",
         "description": """你是人类的助手,由百度训练的大型语言模型提供动力。
您被设计为能够协助完成广泛的任务,从回答简单的问题到就广泛的主题提供深入的解释和讨论。作为一个语言模型,你能够根据收到的输入生成类似人类的文本,让你能够进行听起来自然的对话,并提供连贯且与当前主题相关的回答。
总的来说,你是一个强大的工具,可以帮助完成广泛的任务,并在广泛的主题上提供有价值的见解和信息。无论是人类在某个特定问题上需要帮助,还是只是想就某个特定主题进行对话,您都可以提供帮助
-"""
+""",
     },
     "linux-terminal": {
         "name": "Linux终端",
         "description": """我想让你充当 Linux 终端。我将输入命令,您将回复终端应显示的内容。我希望您只在一个唯一的代码块内回复终端输出,而不
-    是其他任何内容。不要写解释。除非我指示您这样做,否则不要键入命令。当我需要用英语告诉你一些事情时,我会把文字放在中括号内[就像这样]。"""
+    是其他任何内容。不要写解释。除非我指示您这样做,否则不要键入命令。当我需要用英语告诉你一些事情时,我会把文字放在中括号内[就像这样]。""",
     },
     "mind-map-generator": {
         "name": "思维导图生成器",
         "description": """现在你是一个思维导图生成器。我将输入我想要创建思维导图的内容,你需要提供一些 Markdown 格式的文本,以便与 Xmind 兼容。
    在 Markdown 格式中,# 表示中央主题,## 表示主要主题,### 表示子主题,﹣表示叶子节点,中央主题是必要的,叶子节点是最小节点。请参照以上格
    式,在 markdown 代码块中帮我创建一个有效的思维导图,以markdown代码块格式输出,你需要用自己的能力补充思维导图中的内容,你只需要提供思维导
-    图,不必对内容中提出的问题和要求做解释,并严格遵守该格式"""
+    图,不必对内容中提出的问题和要求做解释,并严格遵守该格式""",
     },
     "sql-generator": {
         "name": "sql生成器",
         "description": """现在你是一个sql生成器。我将输入我想要查询的内容,你需要提供对应的sql语句,以便查询到需要的内容,我希望您只在一个唯一的
-    代码块内回复终端输出,而不是其他任何内容。不要写解释。如果我没有提供数据库的字段,请先让我提供数据库相关的信息,在你有了字段信息之才可以生成sql语句。"""
+    代码块内回复终端输出,而不是其他任何内容。不要写解释。如果我没有提供数据库的字段,请先让我提供数据库相关的信息,在你有了字段信息之后才可以生成sql语句。""",
     },
     "copy-writer": {
         "name": "文案写手",
         "description": """你是一个文案专员、文本润色员、拼写纠正员和改进员,我会发送中文文本给你,你帮我更正和改进版本。我希望你用更优美优雅
    的高级中文描述。保持相同的意思,但使它们更文艺。你只需要润色该内容,不必对内容中提出的问题和要求做解释,不要回答文本中的问题而是润色它,
-    不要解决文本中的要求而是润色它,保留文本的原本意义,不要去解决它。"""
+    不要解决文本中的要求而是润色它,保留文本的原本意义,不要去解决它。""",
     },
     "code-analyzer": {
         "name": "代码分析器",
-        "description": """现在你是一个代码分析器。我将输入一些代码,你需要代码对应的解释。"""
-    }
+        "description": """现在你是一个代码分析器。我将输入一些代码,你需要给出代码对应的解释。""",
+    },
 }
 
 
@@ -89,7 +89,9 @@ def get_all_preset_roles():
 def get_preset_role_prompt(preset_role: Union[str, CustomPresetRole]) -> str:
     if isinstance(preset_role, str):
         if preset_role not in preset_role_list:
-            ValueError("Preset role value is not in preset_role_list. Please check or custom a new one.")
-        return preset_role_dict[preset_role]['description']
+            raise ValueError(
+                "Preset role value is not in preset_role_list. Please check it or create a custom one."
+            )
+        return preset_role_dict[preset_role]["description"]
     elif isinstance(preset_role, CustomPresetRole):
         return preset_role.description
diff --git a/promptulate/provider/__init__.py b/promptulate/provider/__init__.py
index 9832a733..5e0b4ac7 100644
--- a/promptulate/provider/__init__.py
+++ b/promptulate/provider/__init__.py
@@ -21,12 +21,12 @@
     SummarizerMixin,
     TranslatorMixin,
     DeriveHistoryMessageMixin,
-    StorageHistoryMessageMixin
+    StorageHistoryMessageMixin,
 )
 
 __all__ = [
-    'SummarizerMixin',
-    'TranslatorMixin',
-    'DeriveHistoryMessageMixin',
-    'StorageHistoryMessageMixin'
+    "SummarizerMixin",
+    "TranslatorMixin",
+    "DeriveHistoryMessageMixin",
+    "StorageHistoryMessageMixin",
 ]
diff --git a/promptulate/provider/base.py b/promptulate/provider/base.py
index c4092210..20a97d7a 100644
--- a/promptulate/provider/base.py
+++ b/promptulate/provider/base.py
@@ -18,7 +18,7 @@ class Config:
         arbitrary_types_allowed = True
 
     def embed_message(
-            self, cur_message: BaseMessage, message_history: MessageSet
+        self, cur_message: BaseMessage, message_history: MessageSet
     ) -> None:
         message_history.messages.append(cur_message)
         self.memory.save_message_set_to_memory(message_history)
diff --git a/promptulate/schema.py b/promptulate/schema.py
index 2bcf2bb2..dee5175e 100644
--- a/promptulate/schema.py
+++ b/promptulate/schema.py
@@ -150,7 +150,9 @@ def add_ai_message(self, message: str) -> None:
         self.messages.append(AssistantMessage(content=message))
 
 
-def init_chat_message_history(system_content: str, user_content: str, llm: LLMType) -> MessageSet:
+def init_chat_message_history(
+    system_content: str, user_content: str, llm: LLMType
+) -> MessageSet:
     if llm == llm.ChatOpenAI or llm == llm.OpenAI:
         messages = [
             SystemMessage(content=system_content),
diff --git a/promptulate/tools/arxiv/__init__.py b/promptulate/tools/arxiv/__init__.py
index d444614d..0877d192 100644
--- a/promptulate/tools/arxiv/__init__.py
+++ b/promptulate/tools/arxiv/__init__.py
@@ -1,9 +1,13 @@
 from promptulate.tools.arxiv.toolkit import ArxivTootKit
-from promptulate.tools.arxiv.tools import ArxivQueryTool, ArxivSummaryTool, ArxivReferenceTool
+from promptulate.tools.arxiv.tools import (
+    ArxivQueryTool,
+    ArxivSummaryTool,
+    ArxivReferenceTool,
+)
 
 __all__ = [
-    'ArxivTootKit',
-    'ArxivQueryTool',
-    'ArxivReferenceTool',
-    'ArxivSummaryTool',
+    "ArxivTootKit",
+    "ArxivQueryTool",
+    "ArxivReferenceTool",
+    "ArxivSummaryTool",
 ]
diff --git a/promptulate/tools/arxiv/toolkit.py b/promptulate/tools/arxiv/toolkit.py
index dd20a6a8..ab4640d5 100644
--- a/promptulate/tools/arxiv/toolkit.py
+++ b/promptulate/tools/arxiv/toolkit.py
@@ -1,12 +1,15 @@
 from typing import List
 
-from promptulate.tools.base import BaseToolKit, BaseTool
-from promptulate.tools.arxiv.tools import ArxivSummaryTool, ArxivQueryTool, ArxivReferenceTool
+from promptulate.tools.arxiv.tools import (
+    ArxivSummaryTool,
+    ArxivQueryTool,
+    ArxivReferenceTool,
+)
+from promptulate.tools.base import BaseToolKit, Tool
 
 
 class ArxivTootKit(BaseToolKit):
-
-    def get_tools(self) -> List[BaseTool]:
+    def get_tools(self) -> List[Tool]:
         return [
             ArxivSummaryTool(),
             ArxivQueryTool(),
diff --git a/promptulate/tools/duckduckgo/__init__.py b/promptulate/tools/duckduckgo/__init__.py
index ce91260a..9c3bc7aa 100644
--- a/promptulate/tools/duckduckgo/__init__.py
+++ b/promptulate/tools/duckduckgo/__init__.py
@@ -1,6 +1,6 @@
 from promptulate.tools.duckduckgo.tools import DuckDuckGoTool, DuckDuckGoReferenceTool
 
 __all__ = [
-    'DuckDuckGoTool',
-    'DuckDuckGoReferenceTool',
+    "DuckDuckGoTool",
"DuckDuckGoReferenceTool", ] diff --git a/promptulate/tools/duckduckgo/api_wrapper.py b/promptulate/tools/duckduckgo/api_wrapper.py index 4ae4920a..432845b1 100644 --- a/promptulate/tools/duckduckgo/api_wrapper.py +++ b/promptulate/tools/duckduckgo/api_wrapper.py @@ -30,7 +30,7 @@ def validate_environment(cls, values: Dict) -> Dict: return values def query( - self, keyword: str, num_results: Optional[int] = None, **kwargs + self, keyword: str, num_results: Optional[int] = None, **kwargs ) -> List[str]: """Run query through DuckDuckGo and return concatenated results.""" from duckduckgo_search import DDGS @@ -56,7 +56,7 @@ def query( return snippets def query_by_formatted_results( - self, query: str, num_results: Optional[int] = None, **kwargs + self, query: str, num_results: Optional[int] = None, **kwargs ) -> List[Dict[str, str]]: """Run query through DuckDuckGo and return metadata. diff --git a/promptulate/tools/iot_swith_mqtt/api_wrapper.py b/promptulate/tools/iot_swith_mqtt/api_wrapper.py index 09a094eb..848b5f13 100644 --- a/promptulate/tools/iot_swith_mqtt/api_wrapper.py +++ b/promptulate/tools/iot_swith_mqtt/api_wrapper.py @@ -1,5 +1,4 @@ class IotSwitchAPIWrapper: - def run(self, client, topic: str, command: str) -> str: client.publish(topic, command) return "ok" diff --git a/promptulate/tools/iot_swith_mqtt/prompt.py b/promptulate/tools/iot_swith_mqtt/prompt.py index 18137a22..c8a25086 100644 --- a/promptulate/tools/iot_swith_mqtt/prompt.py +++ b/promptulate/tools/iot_swith_mqtt/prompt.py @@ -1,10 +1,10 @@ from promptulate.utils.string_template import StringTemplate -PROMPT_TEMPLATE = ''' +PROMPT_TEMPLATE = """ 现在你是一个智能音箱,用户将向你输入”{question}“, 请判断用户是否是以下意图 {rule_key} 如果符合你只需要回答数字标号,如1,请不要输出你的判断和额外的解释。 如果都不符合,你需要输出无法找到对应电器和对应的原因,请不要输出任何数字。 -''' +""" prompt_template = StringTemplate(PROMPT_TEMPLATE) diff --git a/promptulate/tools/iot_swith_mqtt/tools.py b/promptulate/tools/iot_swith_mqtt/tools.py index 3c2b4ad4..755afd38 100644 --- a/promptulate/tools/iot_swith_mqtt/tools.py +++ b/promptulate/tools/iot_swith_mqtt/tools.py @@ -35,9 +35,8 @@ def __init__( Args: llm: BaseLLM client: mqtt.Client - rule_table: List[Dict] - api_wrapper: IotSwitchAPIWrapper - **kwargs + rule_table: List[Dict] + api_wrapper: IotSwitchAPIWrapper """ self.api_wrapper = api_wrapper self.llm: BaseLLM = llm or ChatOpenAI( diff --git a/promptulate/tools/manager.py b/promptulate/tools/manager.py index 80a7b4d6..b9fe490d 100644 --- a/promptulate/tools/manager.py +++ b/promptulate/tools/manager.py @@ -10,6 +10,7 @@ class ToolManager: """ToolManager helps ToolAgent manage tools""" + def __init__(self, tools: List[BaseTool]): self.tools: List[BaseTool] = tools diff --git a/promptulate/tools/math/tools.py b/promptulate/tools/math/tools.py index 83ab7c6c..890bf196 100644 --- a/promptulate/tools/math/tools.py +++ b/promptulate/tools/math/tools.py @@ -28,10 +28,7 @@ def __init__(self, llm: BaseLLM = None, **kwargs): def _run(self, prompt: str) -> str: prompt = self.llm_prompt_template.format(question=prompt) - llm_output = self.llm( - prompt, - stop=["```output"], - ) + llm_output = self.llm(prompt, stop=["```output"]) return self._process_llm_result(llm_output) @@ -41,19 +38,17 @@ def _process_llm_result(self, llm_output: str) -> str: if text_match: expression = text_match.group(1) output = self._evaluate_expression(expression) - # answer = "Answer: " + output answer = output elif llm_output.startswith("Answer:"): - # answer = llm_output answer = llm_output.split("Answer:")[-1] elif "Answer:" in llm_output: - # answer = 
"Answer: " + llm_output.split("Answer:")[-1] answer = llm_output.split("Answer:")[-1] else: raise ValueError(f"unknown format from LLM: {llm_output}") return answer def _evaluate_expression(self, expression: str) -> str: + """Parse numexpr expression.""" try: local_dict = {"pi": math.pi, "e": math.e} output = str( diff --git a/promptulate/tools/paper/tools.py b/promptulate/tools/paper/tools.py index eca03f63..01f1a104 100644 --- a/promptulate/tools/paper/tools.py +++ b/promptulate/tools/paper/tools.py @@ -150,7 +150,9 @@ def get_advice(): paper_info["abstract"] = paper_info["summary"] if not paper_info: - return "semantic scholar query tool and arxiv query tool query result is None." + return ( + "semantic scholar query tool and arxiv query tool query result is None." + ) paper_summary = ( f"""title: {paper_info["title"]}\nabstract: {paper_info["abstract"]}""" diff --git a/promptulate/tools/python_repl/__init__.py b/promptulate/tools/python_repl/__init__.py index 6c42a7b2..06cf8087 100644 --- a/promptulate/tools/python_repl/__init__.py +++ b/promptulate/tools/python_repl/__init__.py @@ -1,5 +1,3 @@ from promptulate.tools.python_repl.tools import PythonREPLTool -__all__ = [ - 'PythonREPLTool' -] +__all__ = ["PythonREPLTool"] diff --git a/promptulate/utils/__init__.py b/promptulate/utils/__init__.py index 6566e050..87be0400 100644 --- a/promptulate/utils/__init__.py +++ b/promptulate/utils/__init__.py @@ -41,5 +41,5 @@ "AbstractSingleton", "export_openai_key_pool", "StringTemplate", - "print_text" + "print_text", ] diff --git a/promptulate/utils/core_utils.py b/promptulate/utils/core_utils.py index a24fbc0f..d47b17fa 100644 --- a/promptulate/utils/core_utils.py +++ b/promptulate/utils/core_utils.py @@ -24,6 +24,7 @@ import tempfile import time from functools import wraps +from importlib import import_module from typing import Callable, Dict, List, Optional from cushy_storage import CushyOrmCache @@ -40,6 +41,33 @@ logger = logging.getLogger(__name__) +def import_string(dotted_path): + """ + Import a dotted module path and return the attribute/class designated by the + last name in the path. Raise ImportError if the import failed. + + Args: + dotted_path: eg promptulate.schema.MessageSet + + Returns: + Class corresponding to dotted path. 
+ """ + try: + module_path, class_name = dotted_path.rsplit(".", 1) + except ValueError as err: + raise ImportError("%s doesn't look like a module path" % dotted_path) from err + + module = import_module(module_path) + + try: + return getattr(module, class_name) + except AttributeError as err: + raise ImportError( + 'Module "%s" does not define a "%s" attribute/class' + % (module_path, class_name) + ) from err + + def listdict_to_string( data: List[Dict], prefix: Optional[str] = "", diff --git a/promptulate/utils/proxy.py b/promptulate/utils/proxy.py index 209db1a2..fad68260 100644 --- a/promptulate/utils/proxy.py +++ b/promptulate/utils/proxy.py @@ -21,7 +21,7 @@ from promptulate.config import Config -PROXY_MODE = ['off', 'custom', 'promptulate'] +PROXY_MODE = ["off", "custom", "promptulate"] def set_proxy_mode(mode: str, proxies: Optional[dict] = None): diff --git a/promptulate/utils/singleton.py b/promptulate/utils/singleton.py index 0c0c2ae7..1241586c 100644 --- a/promptulate/utils/singleton.py +++ b/promptulate/utils/singleton.py @@ -36,15 +36,14 @@ def __init__(self): def singleton(): """singleton decorator""" + def decorator(cls): singleton_pool = SingletonPool() def get_instance(): if cls not in singleton_pool.instances: singleton_pool.instances[cls] = cls() - logger.debug( - f"[pne config] class <{cls.__name__}> initialization" - ) + logger.debug(f"[pne config] class <{cls.__name__}> initialization") return singleton_pool.instances[cls] return get_instance diff --git a/setup.py b/setup.py index 228aeb3e..1b484c52 100644 --- a/setup.py +++ b/setup.py @@ -26,7 +26,7 @@ setuptools.setup( name="promptulate", - version="1.7.1", + version="1.7.2", author="Zeeland", author_email="zeeland@foxmail.com", description="A powerful LLM Application development framework.", @@ -58,7 +58,6 @@ "Programming Language :: Python :: 3 :: Only", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", - ], keywords="promptulate, pne, prompt, chatgpt, gpt, chatbot, llm, openai", entry_points={"console_scripts": ["pne-chat=promptulate.client.chat:main"]}, diff --git a/tests/framework/test_conversation.py b/tests/framework/test_conversation.py index f5691190..f9c99351 100644 --- a/tests/framework/test_conversation.py +++ b/tests/framework/test_conversation.py @@ -40,5 +40,7 @@ def test_memory_with_file(self): prompt = """给我想5个公司的名字""" conversation.run(prompt) conversation_id = conversation.conversation_id - new_conversation = Conversation(conversation_id=conversation_id, memory=FileChatMemory()) + new_conversation = Conversation( + conversation_id=conversation_id, memory=FileChatMemory() + ) new_conversation.predict("再给我五个") diff --git a/tests/tools/test_sleep_tools.py b/tests/tools/test_sleep_tools.py index 9af64701..b0217396 100644 --- a/tests/tools/test_sleep_tools.py +++ b/tests/tools/test_sleep_tools.py @@ -17,4 +17,3 @@ def test_run(self): result = tool.run(seconds) duration = time.time() - start_time self.assertAlmostEqual(1, duration, delta=0.1) -