Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implemented the first step in deep focus initiative. Now, when the us… #1057

Merged
merged 17 commits into from
Jul 26, 2024
190 changes: 190 additions & 0 deletions core/agents/bug_hunter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,190 @@
from enum import Enum
from typing import Annotated, Literal, Optional, Union

from pydantic import BaseModel, Field

from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.response import AgentResponse
from core.config import magic_words
from core.db.models.project_state import IterationStatus
from core.llm.parser import JSONParser
from core.log import get_logger

log = get_logger(__name__)


class StepType(str, Enum):
    """Kinds of decisions the LLM can make during a bug-hunting logging cycle."""

    ADD_LOG = "add_log"
    EXPLAIN_PROBLEM = "explain_problem"
    GET_ADDITIONAL_FILES = "get_additional_files"


class Log(BaseModel):
    """A single log statement the LLM wants inserted into the project's code.

    Note: two stray GitHub review-UI lines that had leaked into this class body
    (and were syntax errors) have been removed.
    """

    # Path of the file the log should be added to.
    filePath: str
    # Anchor snippet: code immediately preceding the insertion point, copied
    # verbatim from the current file; must NOT contain the new log line itself.
    referenceCodeSnippet: str = Field(description="Five lines of code before the line where the log needs to be added. Make sure that this contains **ONLY** the code that is currently written in the file. It must not contain the log that you want to add.")
    # The log statement to insert after the reference snippet.
    log: str


class AddLog(BaseModel):
    """Decision: add one or more log statements to gather more information."""

    type: Literal[StepType.ADD_LOG] = StepType.ADD_LOG
    # Logs the LLM wants inserted into the codebase.
    logsToAdd: list[Log]


class ExplainProblem(BaseModel):
    """Decision: the problem is understood well enough to be explained."""

    type: Literal[StepType.EXPLAIN_PROBLEM] = StepType.EXPLAIN_PROBLEM
    # Human-readable explanation of the identified problem.
    problem_explanation: str


class GetAdditionalFiles(BaseModel):
    """Decision: request the contents of an additional file before deciding."""

    type: Literal[StepType.GET_ADDITIONAL_FILES] = StepType.GET_ADDITIONAL_FILES
    # Path of the file whose contents are requested.
    filePath: str


# TODO enable LLM to ask for more files
class LoggingOptions(BaseModel):
    """Schema for the LLM's next-step decision during a logging cycle."""

    # Discriminated union: the `type` field selects the concrete decision model.
    decision: Annotated[
        Union[AddLog, ExplainProblem, GetAdditionalFiles],
        Field(discriminator="type"),
    ]


class HuntConclusionType(str, Enum):
    """Possible conclusions of a bug-hunting cycle, backed by the project's magic words."""

    ADD_LOGS = magic_words.ADD_LOGS
    PROBLEM_IDENTIFIED = magic_words.PROBLEM_IDENTIFIED


class HuntConclusionOptions(BaseModel):
    """Schema the LLM fills to state whether the bug is identified or more logs are needed.

    Note: two stray GitHub review-UI lines that had leaked into this class body
    (and were syntax errors) have been removed.
    """

    conclusion: HuntConclusionType = Field(description=f"If more logs are needed to identify the problem, respond with '{magic_words.ADD_LOGS}'. If the problem is identified, respond with '{magic_words.PROBLEM_IDENTIFIED}'.")


class BugHunter(BaseAgent):
    """Agent that hunts a reported bug by iteratively adding logs and asking the user to test.

    Changes vs. the submitted version: stray GitHub review-UI lines embedded in
    the code (syntax errors) were removed, and the `check_logs` parameter
    annotation was corrected to `Optional[str]` per PEP 484. Behavior is
    otherwise unchanged.
    """

    agent_type = "bug-hunter"
    display_name = "Bug Hunter"

    async def run(self) -> AgentResponse:
        """Dispatch to the appropriate bug-hunting step based on the current iteration status."""
        current_iteration = self.current_state.current_iteration

        if "bug_reproduction_description" not in current_iteration:
            await self.get_bug_reproduction_instructions()
        if current_iteration["status"] == IterationStatus.HUNTING_FOR_BUG:
            # TODO determine how to find a bug (eg. check in db, ask user a question, etc.)
            return await self.check_logs()
        elif current_iteration["status"] == IterationStatus.AWAITING_USER_TEST:
            return await self.ask_user_to_test()
        elif current_iteration["status"] == IterationStatus.AWAITING_BUG_REPRODUCTION:
            return await self.ask_user_to_test()
        # NOTE(review): any other status falls through and implicitly returns None —
        # confirm the orchestrator tolerates a None response here.

    async def get_bug_reproduction_instructions(self):
        """Ask the LLM for step-by-step instructions the user can follow to reproduce the bug.

        Stores the result in the next state's current iteration under
        "bug_reproduction_description".
        """
        llm = self.get_llm()
        convo = (
            AgentConvo(self)
            .template(
                "get_bug_reproduction_instructions",
                current_task=self.current_state.current_task,
                user_feedback=self.current_state.current_iteration["user_feedback"],
                user_feedback_qa=self.current_state.current_iteration["user_feedback_qa"],
                docs=self.current_state.docs,
                next_solution_to_try=None,
            )
        )
        bug_reproduction_instructions = await llm(convo, temperature=0)
        self.next_state.current_iteration["bug_reproduction_description"] = bug_reproduction_instructions

    async def check_logs(self, logs_message: Optional[str] = None):
        """Analyze collected logs and decide whether the bug is identified or more logs are needed.

        :param logs_message: Currently unused — TODO confirm whether it should be
            fed into the conversation or removed.
        :return: AgentResponse.done(self) after updating the iteration state.
        """
        llm = self.get_llm()
        convo = (
            AgentConvo(self)
            .template(
                "iteration",
                current_task=self.current_state.current_task,
                user_feedback=self.current_state.current_iteration["user_feedback"],
                user_feedback_qa=self.current_state.current_iteration["user_feedback_qa"],
                docs=self.current_state.docs,
                magic_words=magic_words,
                next_solution_to_try=None
            )
        )

        # Replay all previous hunting cycles so the LLM has the full history of
        # instructions and collected logs.
        for hunting_cycle in self.current_state.current_iteration["bug_hunting_cycles"]:
            convo = (convo
                     .assistant(hunting_cycle["human_readable_instructions"])
                     .template(
                         "log_data",
                         backend_logs=hunting_cycle["backend_logs"],
                         frontend_logs=hunting_cycle["frontend_logs"],
                         fix_attempted=hunting_cycle["fix_attempted"]
                     ))

        human_readable_instructions = await llm(convo, temperature=0.5)

        # Separate, schema-constrained call to classify the conclusion.
        convo = (
            AgentConvo(self)
            .template(
                "bug_found_or_add_logs",
                hunt_conclusion=human_readable_instructions,
            )
            .require_schema(HuntConclusionOptions)
        )
        hunt_conclusion = await llm(convo, parser=JSONParser(HuntConclusionOptions), temperature=0)

        self.next_state.current_iteration["description"] = human_readable_instructions
        self.next_state.current_iteration["bug_hunting_cycles"] += [{
            "human_readable_instructions": human_readable_instructions,
            "fix_attempted": False
        }]

        # FIXME: the `False and` short-circuit permanently disables this branch,
        # so `hunt_conclusion` is computed but ignored and we always add logs.
        # Confirm whether this is a temporary stub before the fix path is ready.
        if False and hunt_conclusion.conclusion == magic_words.PROBLEM_IDENTIFIED:
            # if no need for logs, implement iteration same as before
            self.next_state.current_iteration["status"] = IterationStatus.AWAITING_BUG_FIX
            await self.send_message("The bug is found - I'm attempting to fix it.")
        else:
            # if logs are needed, add logging steps
            self.next_state.current_iteration["status"] = IterationStatus.AWAITING_LOGGING
            await self.send_message("Adding more logs to identify the bug.")

        self.next_state.flag_iterations_as_modified()
        return AgentResponse.done(self)

    async def ask_user_to_test(self):
        """Ask the user to test the fix and/or reproduce the bug and paste the resulting logs.

        If the user confirms the fix, the iteration is completed; otherwise the
        backend and frontend logs are stored on the latest hunting cycle and the
        status is reset to HUNTING_FOR_BUG.
        """
        reproduce_bug_and_get_logs = self.current_state.current_iteration["status"] == IterationStatus.AWAITING_BUG_REPRODUCTION

        await self.send_message("You can reproduce the bug like this:\n\n" + self.current_state.current_iteration["bug_reproduction_description"])
        if self.current_state.current_iteration["status"] == IterationStatus.AWAITING_USER_TEST:
            user_feedback = await self.ask_question(
                "Is the bug you reported fixed now?",
                buttons={"yes": "Yes, the issue is fixed", "no": "No"},
                default="continue",
                buttons_only=True,
                hint="Instructions for testing:\n\n" + self.current_state.current_iteration["bug_reproduction_description"]
            )
            self.next_state.current_iteration["bug_hunting_cycles"][-1]["fix_attempted"] = True

            if user_feedback.button == "yes":
                self.next_state.complete_iteration()
            else:
                reproduce_bug_and_get_logs = True

        if reproduce_bug_and_get_logs:
            # TODO how can we get FE and BE logs automatically?
            backend_logs = await self.ask_question(
                "Please do exactly what you did in the last iteration, paste **BACKEND** logs here and click CONTINUE.",
                buttons={"continue": "Continue"},
                default="continue",
                hint="Instructions for testing:\n\n" + self.current_state.current_iteration["bug_reproduction_description"]
            )

            frontend_logs = await self.ask_question(
                "Please paste **frontend** logs here and click CONTINUE.",
                buttons={"continue": "Continue"},
                default="continue",
                hint="Instructions for testing:\n\n" + self.current_state.current_iteration["bug_reproduction_description"]
            )

            # TODO select only the logs that are new (with PYTHAGORA_DEBUGGING_LOG)
            self.next_state.current_iteration["bug_hunting_cycles"][-1]["backend_logs"] = backend_logs.text
            self.next_state.current_iteration["bug_hunting_cycles"][-1]["frontend_logs"] = frontend_logs.text
            self.next_state.current_iteration["status"] = IterationStatus.HUNTING_FOR_BUG

        return AgentResponse.done(self)
72 changes: 25 additions & 47 deletions core/agents/developer.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
from enum import Enum
from typing import Annotated, Literal, Optional, Union
from typing import Optional
from uuid import uuid4

from pydantic import BaseModel, Field

from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.mixins import TaskSteps
from core.agents.response import AgentResponse, ResponseType
from core.db.models.project_state import TaskStatus
from core.db.models.project_state import IterationStatus, TaskStatus
from core.db.models.specification import Complexity
from core.llm.parser import JSONParser
from core.log import get_logger
Expand All @@ -16,47 +16,6 @@
log = get_logger(__name__)


class StepType(str, Enum):
    """Kinds of steps a task breakdown can contain (moved to core.agents.mixins in this PR)."""

    COMMAND = "command"
    SAVE_FILE = "save_file"
    HUMAN_INTERVENTION = "human_intervention"


class CommandOptions(BaseModel):
    """Options for a shell-command step."""

    command: str = Field(description="Command to run")
    timeout: int = Field(description="Timeout in seconds")
    # Message shown/checked on success; empty by default.
    success_message: str = ""


class SaveFileOptions(BaseModel):
    """Options for a save-file step."""

    # Path of the file to save.
    path: str


class SaveFileStep(BaseModel):
    """Step that writes a file to the project."""

    type: Literal[StepType.SAVE_FILE] = StepType.SAVE_FILE
    save_file: SaveFileOptions


class CommandStep(BaseModel):
    """Step that runs a shell command."""

    type: Literal[StepType.COMMAND] = StepType.COMMAND
    command: CommandOptions


class HumanInterventionStep(BaseModel):
    """Step that asks the human user to do something the agent cannot."""

    type: Literal[StepType.HUMAN_INTERVENTION] = StepType.HUMAN_INTERVENTION
    # Description of what the user needs to do.
    human_intervention_description: str


# Discriminated union of all step models; the `type` field selects the variant.
Step = Annotated[
    Union[SaveFileStep, CommandStep, HumanInterventionStep],
    Field(discriminator="type"),
]


class TaskSteps(BaseModel):
    """Schema for an LLM-produced list of task steps."""

    steps: list[Step]


class RelevantFiles(BaseModel):
    """Schema for the LLM's selection of files relevant to the current task."""

    relevant_files: list[str] = Field(description="List of relevant files for the current task.")

Expand Down Expand Up @@ -109,6 +68,17 @@ async def breakdown_current_iteration(self, task_review_feedback: Optional[str]
n_tasks = 1
log.debug(f"Breaking down the task review feedback {task_review_feedback}")
await self.send_message("Breaking down the task review feedback...")
elif (self.current_state.current_iteration["status"] == IterationStatus.AWAITING_BUG_FIX or
self.current_state.current_iteration["status"] == IterationStatus.AWAITING_LOGGING):
LeonOstrez marked this conversation as resolved.
Show resolved Hide resolved
iteration = self.current_state.current_iteration
current_task["task_review_feedback"] = None

description = iteration["bug_hunting_cycles"][-1]["human_readable_instructions"]
user_feedback = iteration["user_feedback"]
source = "bug_hunt"
n_tasks = len(self.next_state.iterations)
log.debug(f"Breaking down the logging cycle {description}")
await self.send_message("Breaking down the current iteration logging cycle ...")
else:
iteration = self.current_state.current_iteration
current_task["task_review_feedback"] = None
Expand Down Expand Up @@ -156,8 +126,14 @@ async def breakdown_current_iteration(self, task_review_feedback: Optional[str]
self.set_next_steps(response, source)

if iteration:
self.next_state.complete_iteration()
self.next_state.action = f"Troubleshooting #{len(self.current_state.iterations)}"
# fixme please :cry:
if ("status" in iteration) and (iteration["status"] == IterationStatus.AWAITING_BUG_FIX or
LeonOstrez marked this conversation as resolved.
Show resolved Hide resolved
iteration["status"] == IterationStatus.AWAITING_LOGGING):
self.next_state.current_iteration["status"] = IterationStatus.AWAITING_BUG_REPRODUCTION if (
iteration["status"] == IterationStatus.AWAITING_LOGGING) else IterationStatus.AWAITING_USER_TEST
else:
self.next_state.complete_iteration()
self.next_state.action = f"Troubleshooting #{len(self.current_state.iterations)}"
else:
self.next_state.action = "Task review feedback"

Expand Down Expand Up @@ -265,7 +241,9 @@ def set_next_steps(self, response: TaskSteps, source: str):
}
for step in response.steps
]
if len(self.next_state.unfinished_steps) > 0 and source != "review":
if (len(self.next_state.unfinished_steps) > 0 and
source != "review" and (self.next_state.current_iteration is None or
self.next_state.current_iteration["status"] != IterationStatus.AWAITING_LOGGING)):
self.next_state.steps += [
# TODO: add refactor step here once we have the refactor agent
{
Expand Down
3 changes: 2 additions & 1 deletion core/agents/error_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from core.agents.base import BaseAgent
from core.agents.convo import AgentConvo
from core.agents.response import AgentResponse
from core.db.models.project_state import IterationStatus
from core.log import get_logger

log = get_logger(__name__)
Expand Down Expand Up @@ -110,7 +111,7 @@ async def handle_command_error(self, message: str, details: dict) -> AgentRespon
"description": llm_response,
"alternative_solutions": [],
"attempts": 1,
"completed": False,
"status": IterationStatus.HUNTING_FOR_BUG,
}
]
# TODO: maybe have ProjectState.finished_steps as well? would make the debug/ran_command prompts nicer too
Expand Down
49 changes: 48 additions & 1 deletion core/agents/mixins.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,52 @@
from typing import Optional
from enum import Enum
from typing import Annotated, Literal, Optional, Union

from pydantic import BaseModel, Field

from core.agents.convo import AgentConvo


class StepType(str, Enum):
    """Kinds of steps a task breakdown can contain (moved here from core.agents.developer)."""

    COMMAND = "command"
    SAVE_FILE = "save_file"
    HUMAN_INTERVENTION = "human_intervention"


class CommandOptions(BaseModel):
    """Options for a shell-command step."""

    command: str = Field(description="Command to run")
    timeout: int = Field(description="Timeout in seconds")
    # Message shown/checked on success; empty by default.
    success_message: str = ""


class SaveFileOptions(BaseModel):
    """Options for a save-file step."""

    # Path of the file to save.
    path: str


class SaveFileStep(BaseModel):
    """Step that writes a file to the project."""

    type: Literal[StepType.SAVE_FILE] = StepType.SAVE_FILE
    save_file: SaveFileOptions


class CommandStep(BaseModel):
    """Step that runs a shell command."""

    type: Literal[StepType.COMMAND] = StepType.COMMAND
    command: CommandOptions


class HumanInterventionStep(BaseModel):
    """Step that asks the human user to do something the agent cannot."""

    type: Literal[StepType.HUMAN_INTERVENTION] = StepType.HUMAN_INTERVENTION
    # Description of what the user needs to do.
    human_intervention_description: str


# Discriminated union of all step models; the `type` field selects the variant.
Step = Annotated[
    Union[SaveFileStep, CommandStep, HumanInterventionStep],
    Field(discriminator="type"),
]


class TaskSteps(BaseModel):
    """Schema for an LLM-produced list of task steps."""

    steps: list[Step]


class IterationPromptMixin:
"""
Provides a method to find a solution to a problem based on user feedback.
Expand All @@ -16,13 +60,15 @@ async def find_solution(
*,
user_feedback_qa: Optional[list[str]] = None,
next_solution_to_try: Optional[str] = None,
bug_hunting_cycles: Optional[dict] = None,
) -> str:
"""
Generate a new solution for the problem the user reported.

:param user_feedback: User feedback about the problem.
:param user_feedback_qa: Additional q/a about the problem provided by the user (optional).
:param next_solution_to_try: Hint from ProblemSolver on which solution to try (optional).
:param bug_hunting_cycles: Data about logs that need to be added to the code (optional).
:return: The generated solution to the problem.
"""
llm = self.get_llm()
Expand All @@ -32,6 +78,7 @@ async def find_solution(
user_feedback=user_feedback,
user_feedback_qa=user_feedback_qa,
next_solution_to_try=next_solution_to_try,
bug_hunting_cycles=bug_hunting_cycles,
)
llm_solution: str = await llm(convo)
return llm_solution
Loading
Loading