diff --git a/tests/envs/test_parallel.py b/tests/envs/test_parallel.py
index 5b10889ad..6285135e9 100644
--- a/tests/envs/test_parallel.py
+++ b/tests/envs/test_parallel.py
@@ -21,14 +21,14 @@ async def test_parallel_sotopia_env() -> None:
         {
             "agent1": LLMAgent(
                 "agent1",
-                model_name="gpt-3.5-turbo",
+                model_name="gpt-4o-mini",
                 agent_profile=AgentProfile(
                     **{"first_name": "John", "last_name": "Doe"}
                 ),
             ),
             "agent2": LLMAgent(
                 "agent2",
-                model_name="gpt-3.5-turbo",
+                model_name="gpt-4o-mini",
                 agent_profile=AgentProfile(
                     **{"first_name": "Jane", "last_name": "Doe"}
                 ),
@@ -66,7 +66,7 @@ async def test_parallel_sotopia_env_script_writing_single_step() -> None:
         {
             "agent1": LLMAgent(
                 "agent1",
-                model_name="gpt-3.5-turbo",
+                model_name="gpt-4o-mini",
                 agent_profile=AgentProfile(
                     **{"first_name": "John", "last_name": "Doe"}
                 ),
@@ -74,7 +74,7 @@ async def test_parallel_sotopia_env_script_writing_single_step() -> None:
             ),
             "agent2": LLMAgent(
                 "agent2",
-                model_name="gpt-3.5-turbo",
+                model_name="gpt-4o-mini",
                 agent_profile=AgentProfile(
                     **{"first_name": "Jane", "last_name": "Doe"}
                 ),
diff --git a/tests/generation_utils/test_generation.py b/tests/generation_utils/test_generation.py
index af8ac7256..e7b91c336 100644
--- a/tests/generation_utils/test_generation.py
+++ b/tests/generation_utils/test_generation.py
@@ -17,7 +17,7 @@ async def test_agenerate_list_integer() -> None:
     """
     length, lower, upper = 5, -10, 10
     list_of_int = await agenerate(
-        "gpt-3.5-turbo",
+        "gpt-4o-mini",
         "{format_instructions}",
         {},
         ListOfIntOutputParser(length, (lower, upper)),
@@ -52,7 +52,7 @@ async def test_logging_behavior(caplog: Any) -> None:
     # Call the function under test
     caplog.set_level(15)
     await agenerate(
-        "gpt-3.5-turbo",
+        "gpt-4o-mini",
         "{format_instructions}",
         {},
         ListOfIntOutputParser(5, (-10, 10)),
diff --git a/tests/sampler/test_sampler.py b/tests/sampler/test_sampler.py
index 64e6f6ebf..7e723df74 100644
--- a/tests/sampler/test_sampler.py
+++ b/tests/sampler/test_sampler.py
@@ -77,7 +77,7 @@ def test_uniform_sampler() -> None:
         ],
     )
     env_params = {
-        "model_name": "gpt-3.5-turbo",
+        "model_name": "gpt-4o-mini",
         "action_order": "random",
         "evaluators": [
             RuleBasedTerminatedEvaluator(),
@@ -88,7 +88,7 @@
             agent_classes=[LLMAgent] * n_agent,
             n_agent=n_agent,
             env_params=env_params,
-            agents_params=[{"model_name": "gpt-3.5-turbo"}] * n_agent,
+            agents_params=[{"model_name": "gpt-4o-mini"}] * n_agent,
         )
     )
     agents = Agents({agent.agent_name: agent for agent in agent_list})
@@ -105,7 +105,7 @@ def test_constrain_sampler() -> None:
         env_candidates=[str(borrow_money.pk)]
     )
     env_params = {
-        "model_name": "gpt-3.5-turbo",
+        "model_name": "gpt-4o-mini",
         "action_order": "random",
         "evaluators": [
             RuleBasedTerminatedEvaluator(),
@@ -118,7 +118,7 @@
             replacement=False,
             size=2,
             env_params=env_params,
-            agents_params=[{"model_name": "gpt-3.5-turbo"}] * n_agent,
+            agents_params=[{"model_name": "gpt-4o-mini"}] * n_agent,
         )
     )
     agents = Agents({agent.agent_name: agent for agent in agent_list})
@@ -130,7 +130,7 @@
             replacement=True,
             size=2,
             env_params=env_params,
-            agents_params=[{"model_name": "gpt-3.5-turbo"}] * n_agent,
+            agents_params=[{"model_name": "gpt-4o-mini"}] * n_agent,
         )
     )
     agents = Agents({agent.agent_name: agent for agent in agent_list})