-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathPCBuddy.py
161 lines (131 loc) · 4.74 KB
/
PCBuddy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
import os
import subprocess
import sys
import uuid
from datetime import date, datetime
from enum import Enum
from typing import Annotated

from dotenv import load_dotenv
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.runnables import ensure_config
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.graph import StateGraph, END
from langgraph.graph.message import AnyMessage, add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from typing_extensions import TypedDict, Optional
# Load environment variables from a local .env file (e.g. OPENAI_API_KEY).
load_dotenv()
# Only re-export the key if it is actually set: assigning None into
# os.environ raises TypeError, and load_dotenv has already populated the
# environment when the key exists in .env.
_api_key = os.getenv('OPENAI_API_KEY')
if _api_key:
    os.environ['OPENAI_API_KEY'] = _api_key
# Chat model used by the assistant.  (The previous gpt-3.5-turbo binding
# was dead code — it was overwritten on the very next line.)
llm = ChatOpenAI(model = "gpt-4o")
class State(TypedDict):
    """Shared graph state passed between LangGraph nodes."""
    # add_messages is a reducer: messages returned by a node are appended
    # to / merged into the existing history rather than replacing it.
    messages : Annotated[list[AnyMessage], add_messages]
# System + conversation prompt for the PC-assistant agent.
# NOTE: the timestamp is bound once via .partial() when the module loads,
# not refreshed per request.
_system_text = (
    "You are a PC Assistant agent."
    "Your role is to provide users with requested information about their files, CPU, and memory usage by invoking the available tools."
    "Ensure to execute the code from the folder from where the script is being run"
    "\nCurrent Time: {time}"
)
agent_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", _system_text),
        ("placeholder", "{messages}"),
    ]
).partial(time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
class Assistant:
    """LangGraph node wrapping a runnable; retries when the model replies empty.

    If the model produces neither tool calls nor usable text content, the
    state is nudged with a "Respond to user" message and invoked again until
    a real response comes back.
    """

    def __init__(self, runnable: Runnable):
        self.runnable = runnable

    @staticmethod
    def _is_empty_reply(result) -> bool:
        # A reply is "empty" when it has no tool calls and either no content
        # at all, or list-shaped content whose first chunk carries no text.
        if result.tool_calls:
            return False
        if not result.content:
            return True
        return isinstance(result.content, list) and not result.content[0].get("text")

    def __call__(self, state: State, config: RunnableConfig):
        # NOTE(review): config is accepted for the node signature but is not
        # forwarded to invoke() — matching the original behavior.
        result = self.runnable.invoke(state)
        while self._is_empty_reply(result):
            nudged = state["messages"] + [("user", "Respond to user")]
            state = {**state, "messages": nudged}
            result = self.runnable.invoke(state)
        return {"messages": result}
@tool
def llm_tool_execute_code(llm_generated_code):
    """
    Executes the Python code generated by the LLM and returns the result.

    Parameters:
        llm_generated_code (str): The Python code generated by the LLM.

    Returns:
        dict: A dictionary containing 'status' (success or error) and
        'result' (output or error message).
    """
    import contextlib
    import io
    # SECURITY NOTE: exec() runs arbitrary model-generated code with full
    # process privileges — only use behind trusted input or a sandbox.
    print("Executing code:\n", llm_generated_code)
    # Capture the output and error
    output = io.StringIO()
    error = io.StringIO()
    # Execute in a single namespace (globals == locals).  Passing globals()
    # and locals() as separate dicts breaks generated code that defines and
    # then calls a function, because the function body resolves names in the
    # globals dict while the definition landed in exec's locals.
    namespace = dict(globals())
    try:
        with contextlib.redirect_stdout(output), contextlib.redirect_stderr(error):
            exec(llm_generated_code, namespace)
        result = output.getvalue().strip()
        print("Result :", result)
        if not result:
            result = "Code executed successfully with no output."
        return {'status': 'success', 'result': result}
    except Exception as e:
        error_message = f"Error: {str(e)}\n{error.getvalue().strip()}"
        return {'status': 'error', 'result': error_message}
    finally:
        output.close()
        error.close()
@tool
def execute_code(code):
    """
    This function executes the provided Python code snippet

    Args:
        code (str): The Python code snippet to be executed.

    Returns:
        str: The standard output of the executed code,
        or an error message (including the child's stderr) if execution fails.
    """
    print("Code :", code)
    # Run from the script's own directory so relative paths in the snippet
    # resolve consistently.  NOTE: os.chdir changes the cwd for the whole
    # process, not just this call.
    script_directory = os.path.dirname(os.path.abspath(__file__))
    print(f"Changing directory to: {script_directory}")
    os.chdir(script_directory)
    print(f"Current directory after change: {os.getcwd()}")
    try:
        # sys.executable guarantees the same interpreter that runs this
        # script; a bare "python" on PATH may be a different installation.
        result = subprocess.run(
            [sys.executable, "-c", code], capture_output=True, text=True, check=True
        )
        return result.stdout.strip()
    except subprocess.CalledProcessError as e:
        # Include the child's stderr so the agent sees the actual traceback
        # instead of just the exit status.
        stderr = (e.stderr or "").strip()
        return f"Error: {e}\n{stderr}" if stderr else f"Error: {e}"
# --- Graph wiring ---------------------------------------------------------
# agent -> (tools_condition) -> tools -> agent, looping until the model
# stops requesting tool calls, then END.
assistant_runnable = agent_prompt | llm.bind_tools([execute_code])
tools = [execute_code]
graph = StateGraph(State)
graph.add_node("agent", Assistant(assistant_runnable))
tool_node = ToolNode(tools)
graph.add_node("tools", tool_node)
graph.add_conditional_edges("agent", tools_condition)
graph.add_edge("tools", "agent")
graph.set_entry_point("agent")
# In-memory checkpointer: conversation state lives only for this process.
memory = SqliteSaver.from_conn_string(":memory:")
graph = graph.compile(
    checkpointer=memory
)
# Each thread_id identifies one independent conversation for the checkpointer.
thread_id = str(uuid.uuid4())
config = {"configurable": {
    "thread_id": thread_id,
}}
events = graph.stream(
    {"messages": ("user", "From which folder am I executing the code from?")},
    config=config,
    stream_mode="values"
)
for event in events:
    # pretty_print() writes to stdout itself and returns None; wrapping it
    # in print() produced a stray "None" line after every event.
    event["messages"][-1].pretty_print()
print(os.getcwd())