This repository has been archived by the owner on Jan 7, 2025. It is now read-only.

Cleanup code context and code features (#401)
PCSwingle authored Dec 20, 2023
Parent: cae4930 · Commit: 4002e18
Showing 25 changed files with 517 additions and 723 deletions.
mentat/agent_handler.py (42 changes: 16 additions & 26 deletions)
@@ -9,8 +9,7 @@
     ChatCompletionSystemMessageParam,
 )
 
-from mentat.code_feature import CodeMessageLevel
-from mentat.llm_api_handler import count_tokens, get_max_tokens, prompt_tokens
+from mentat.llm_api_handler import prompt_tokens
 from mentat.prompts.prompts import read_prompt
 from mentat.session_context import SESSION_CONTEXT
 from mentat.session_input import ask_yes_no, collect_user_input
@@ -39,11 +38,10 @@ def disable_agent_mode(self):
     async def enable_agent_mode(self):
         ctx = SESSION_CONTEXT.get()
 
-        self._agent_enabled = True
         ctx.stream.send(
             "Finding files to determine how to test changes...", color="cyan"
         )
-        features = ctx.code_context.get_all_features(CodeMessageLevel.FILE_NAME)
+        features = ctx.code_context.get_all_features(split_intervals=False)
         messages: List[ChatCompletionMessageParam] = [
             ChatCompletionSystemMessageParam(
                 role="system", content=self.agent_file_selection_prompt
@@ -73,53 +71,45 @@ async def enable_agent_mode(self):
             color="cyan",
         )
         ctx.stream.send("\n".join(str(path) for path in paths))
-        ctx.cost_tracker.log_api_call_stats(
-            prompt_tokens(messages, model),
-            count_tokens(content, model, full_message=False),
-            model,
-        )
+        ctx.cost_tracker.display_last_api_call()
 
         messages.append(
             ChatCompletionAssistantMessageParam(role="assistant", content=content)
         )
         ctx.conversation.add_transcript_message(
             ModelMessage(message=content, prior_messages=messages, message_type="agent")
         )
+        self._agent_enabled = True
 
     async def _determine_commands(self) -> List[str]:
         ctx = SESSION_CONTEXT.get()
 
         model = ctx.config.model
-        messages = ctx.conversation.get_messages(include_system_prompt=False) + [
+        messages = [
+            ChatCompletionSystemMessageParam(
+                role="system", content=self.agent_command_prompt
+            ),
             ChatCompletionSystemMessageParam(
                 role="system", content=self.agent_file_message
             ),
-        ]
-        max_tokens = get_max_tokens()
-        if max_tokens is not None:
-            max_tokens -= prompt_tokens(messages, model) + ctx.config.token_buffer
-        code_message = await ctx.code_context.get_code_message(max_tokens=max_tokens)
+        ] + ctx.conversation.get_messages(include_system_prompt=False)
+        code_message = await ctx.code_context.get_code_message(
+            prompt_tokens=prompt_tokens(messages, model)
+        )
         code_message = ChatCompletionSystemMessageParam(
             role="system", content=code_message
         )
-        messages.append(code_message)
-        messages.append(
-            ChatCompletionSystemMessageParam(
-                role="system", content=self.agent_command_prompt
-            ),
-        )
+        messages.insert(1, code_message)
 
         try:
             # TODO: Should this even be a separate call or should we collect commands in the edit call?
             response = await ctx.llm_api_handler.call_llm_api(messages, model, False)
+            ctx.cost_tracker.display_last_api_call()
         except BadRequestError as e:
            ctx.stream.send(f"Error accessing OpenAI API: {e.message}", color="red")
            return []
 
         content = response.choices[0].message.content or ""
-        ctx.cost_tracker.log_api_call_stats(
-            prompt_tokens(messages, model),
-            count_tokens(content, model, full_message=False),
-            model,
-        )
 
         messages.append(
             ChatCompletionAssistantMessageParam(role="assistant", content=content)
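The biggest shift in _determine_commands is where the token budget lives. The old call site computed it by hand: take get_max_tokens(), subtract the prompt's token count and a configured token_buffer, and pass the result as get_code_message(max_tokens=...). The new code just passes prompt_tokens=prompt_tokens(messages, model) and lets the code context budget itself. A sketch of the arithmetic that moved, assuming the names from the old call site (not mentat's current internals):

from typing import Optional


def remaining_code_tokens(
    prompt_tokens: int, max_tokens: Optional[int], token_buffer: int
) -> Optional[int]:
    """Tokens left for the code message after reserving the prompt and a
    safety buffer; None means no known context limit."""
    if max_tokens is None:
        return None
    return max(0, max_tokens - prompt_tokens - token_buffer)


# e.g. a 16,000-token context window, 3,000 prompt tokens, 1,000-token buffer
assert remaining_code_tokens(3_000, 16_000, 1_000) == 12_000

The hunk also reorders the system messages (command prompt first, code message inserted at index 1, conversation history last) and drops both manual cost_tracker.log_api_call_stats(...) calls in favor of display_last_api_call(), which suggests per-call stats are now recorded inside the API handler itself.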