From d35e50ee2558f5662085a1a1f9858795dc0b4ca0 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Fri, 26 Jul 2024 22:14:11 -0400 Subject: [PATCH] Use langchain and py39 version gating correctly --- tests/contrib/langchain/test_langchain.py | 115 +++++------------- .../langchain/test_langchain_community.py | 8 +- .../langchain/test_langchain_llmobs.py | 38 ++---- 3 files changed, 45 insertions(+), 116 deletions(-) diff --git a/tests/contrib/langchain/test_langchain.py b/tests/contrib/langchain/test_langchain.py index df7ecf2edfe..35a5cca1d4f 100644 --- a/tests/contrib/langchain/test_langchain.py +++ b/tests/contrib/langchain/test_langchain.py @@ -16,6 +16,8 @@ parse_version(_langchain.__version__) >= (0, 1, 0), reason="This module only tests langchain < 0.1" ) +PY39 = sys.version_info < (3, 10) + @pytest.fixture(scope="session") def request_vcr(): @@ -32,10 +34,7 @@ def test_global_tags(ddtrace_config_langchain, langchain, request_vcr, mock_metr """ llm = langchain.llms.OpenAI(model="text-davinci-003") with override_global_config(dict(service="test-svc", env="staging", version="1234")): - if sys.version_info >= (3, 10): - cassette_name = "openai_completion_sync.yaml" - else: - cassette_name = "openai_completion_sync_39.yaml" + cassette_name = "openai_completion_sync_39.yaml" if PY39 else "openai_completion_sync.yaml" with request_vcr.use_cassette(cassette_name): llm("What does Nietzsche mean by 'God is dead'?") @@ -75,7 +74,7 @@ def test_global_tags(ddtrace_config_langchain, langchain, request_vcr, mock_metr ) -@pytest.mark.skipif(sys.version_info < (3, 10), reason="Python 3.10+ specific test") +@pytest.mark.skipif(PY39, reason="Python 3.10+ specific test") @pytest.mark.snapshot(ignores=["metrics.langchain.tokens.total_cost", "resource"]) def test_openai_llm_sync(langchain, request_vcr): llm = langchain.llms.OpenAI(model="text-davinci-003") @@ -83,7 +82,7 @@ def test_openai_llm_sync(langchain, request_vcr): llm("Can you explain what Descartes meant by 'I think, therefore
I am'?") -@pytest.mark.skipif(sys.version_info >= (3, 10), reason="Python 3.9 specific test") +@pytest.mark.skipif(not PY39, reason="Python 3.9 specific test") @pytest.mark.snapshot(ignores=["metrics.langchain.tokens.total_cost"]) def test_openai_llm_sync_39(langchain, request_vcr): llm = langchain.llms.OpenAI(model="text-davinci-003") @@ -91,7 +90,7 @@ def test_openai_llm_sync_39(langchain, request_vcr): llm("Can you explain what Descartes meant by 'I think, therefore I am'?") -@pytest.mark.skipif(sys.version_info < (3, 10), reason="Python 3.10+ specific test") +@pytest.mark.skipif(PY39, reason="Python 3.10+ specific test") @pytest.mark.snapshot(ignores=["resource"]) def test_openai_llm_sync_multiple_prompts(langchain, request_vcr): llm = langchain.llms.OpenAI(model="text-davinci-003") @@ -104,7 +103,7 @@ def test_openai_llm_sync_multiple_prompts(langchain, request_vcr): ) -@pytest.mark.skipif(sys.version_info >= (3, 10), reason="Python 3.9 specific test") +@pytest.mark.skipif(not PY39, reason="Python 3.9 specific test") @pytest.mark.snapshot def test_openai_llm_sync_multiple_prompts_39(langchain, request_vcr): llm = langchain.llms.OpenAI(model="text-davinci-003") @@ -121,10 +120,7 @@ def test_openai_llm_sync_multiple_prompts_39(langchain, request_vcr): @pytest.mark.snapshot(ignores=["resource", "langchain.request.openai.parameters.request_timeout"]) async def test_openai_llm_async(langchain, request_vcr): llm = langchain.llms.OpenAI(model="text-davinci-003") - if sys.version_info >= (3, 10): - cassette_name = "openai_completion_async.yaml" - else: - cassette_name = "openai_completion_async_39.yaml" + cassette_name = "openai_completion_async_39.yaml" if PY39 else "openai_completion_async.yaml" with request_vcr.use_cassette(cassette_name): await llm.agenerate(["Which team won the 2019 NBA finals?"]) @@ -165,20 +161,14 @@ def test_huggingfacehub_llm_sync(langchain, request_vcr): @pytest.mark.snapshot(ignores=["meta.langchain.response.completions.0.text", 
"resource"]) def test_ai21_llm_sync(langchain, request_vcr): llm = langchain.llms.AI21(ai21_api_key=os.getenv("AI21_API_KEY", "")) - if sys.version_info >= (3, 10): - cassette_name = "ai21_completion_sync.yaml" - else: - cassette_name = "ai21_completion_sync_39.yaml" + cassette_name = "ai21_completion_sync_39.yaml" if PY39 else "ai21_completion_sync.yaml" with request_vcr.use_cassette(cassette_name): llm("Why does everyone in Bikini Bottom hate Plankton?") def test_openai_llm_metrics(langchain, request_vcr, mock_metrics, mock_logs, snapshot_tracer): llm = langchain.llms.OpenAI(model="text-davinci-003") - if sys.version_info >= (3, 10): - cassette_name = "openai_completion_sync.yaml" - else: - cassette_name = "openai_completion_sync_39.yaml" + cassette_name = "openai_completion_sync_39.yaml" if PY39 else "openai_completion_sync.yaml" with request_vcr.use_cassette(cassette_name): llm("Can you explain what Descartes meant by 'I think, therefore I am'?") expected_tags = [ @@ -210,10 +200,7 @@ def test_openai_llm_metrics(langchain, request_vcr, mock_metrics, mock_logs, sna ) def test_llm_logs(langchain, ddtrace_config_langchain, request_vcr, mock_logs, mock_metrics, mock_tracer): llm = langchain.llms.OpenAI(model="text-davinci-003") - if sys.version_info >= (3, 10): - cassette_name = "openai_completion_sync.yaml" - else: - cassette_name = "openai_completion_sync_39.yaml" + cassette_name = "openai_completion_sync_39.yaml" if PY39 else "openai_completion_sync.yaml" with request_vcr.use_cassette(cassette_name): llm("Can you explain what Descartes meant by 'I think, therefore I am'?") span = mock_tracer.pop_traces()[0][0] @@ -240,7 +227,7 @@ def test_llm_logs(langchain, ddtrace_config_langchain, request_vcr, mock_logs, m mock_metrics.count.assert_not_called() -@pytest.mark.skipif(sys.version_info < (3, 10), reason="Python 3.10+ specific test") +@pytest.mark.skipif(PY39, reason="Python 3.10+ specific test") @pytest.mark.snapshot( 
token="tests.contrib.langchain.test_langchain.test_openai_chat_model_call", ignores=["metrics.langchain.tokens.total_cost", "resource"], @@ -251,7 +238,7 @@ def test_openai_chat_model_sync_call(langchain, request_vcr): chat(messages=[langchain.schema.HumanMessage(content="When do you use 'whom' instead of 'who'?")]) -@pytest.mark.skipif(sys.version_info >= (3, 10), reason="Python 3.9 specific test") +@pytest.mark.skipif(not PY39, reason="Python 3.9 specific test") @pytest.mark.snapshot(ignores=["metrics.langchain.tokens.total_cost"]) def test_openai_chat_model_sync_call_39(langchain, request_vcr): chat = langchain.chat_models.ChatOpenAI(temperature=0, max_tokens=256) @@ -259,7 +246,7 @@ def test_openai_chat_model_sync_call_39(langchain, request_vcr): chat([langchain.schema.HumanMessage(content="When do you use 'whom' instead of 'who'?")]) -@pytest.mark.skipif(sys.version_info < (3, 10), reason="Python 3.10+ specific test") +@pytest.mark.skipif(PY39, reason="Python 3.10+ specific test") @pytest.mark.snapshot( token="tests.contrib.langchain.test_langchain.test_openai_chat_model_generate", ignores=["metrics.langchain.tokens.total_cost", "resource"], @@ -283,7 +270,7 @@ def test_openai_chat_model_sync_generate(langchain, request_vcr): ) -@pytest.mark.skipif(sys.version_info >= (3, 10), reason="Python 3.9 specific test") +@pytest.mark.skipif(not PY39, reason="Python 3.9 specific test") @pytest.mark.snapshot(ignores=["metrics.langchain.tokens.total_cost"]) def test_openai_chat_model_sync_generate_39(langchain, request_vcr): chat = langchain.chat_models.ChatOpenAI(temperature=0, max_tokens=256) @@ -341,10 +328,7 @@ async def test_openai_chat_model_async_generate(langchain, request_vcr): def test_chat_model_metrics(langchain, request_vcr, mock_metrics, mock_logs, snapshot_tracer): chat = langchain.chat_models.ChatOpenAI(temperature=0, max_tokens=256) - if sys.version_info >= (3, 10): - cassette_name = "openai_chat_completion_sync_call.yaml" - else: - cassette_name = 
"openai_chat_completion_sync_call_39.yaml" + cassette_name = "openai_chat_completion_sync_call_39.yaml" if PY39 else "openai_chat_completion_sync_call.yaml" with request_vcr.use_cassette(cassette_name): chat([langchain.schema.HumanMessage(content="When do you use 'whom' instead of 'who'?")]) expected_tags = [ @@ -376,10 +360,7 @@ def test_chat_model_metrics(langchain, request_vcr, mock_metrics, mock_logs, sna ) def test_chat_model_logs(langchain, ddtrace_config_langchain, request_vcr, mock_logs, mock_metrics, mock_tracer): chat = langchain.chat_models.ChatOpenAI(temperature=0, max_tokens=256) - if sys.version_info >= (3, 10): - cassette_name = "openai_chat_completion_sync_call.yaml" - else: - cassette_name = "openai_chat_completion_sync_call_39.yaml" + cassette_name = "openai_chat_completion_sync_call_39.yaml" if PY39 else "openai_chat_completion_sync_call.yaml" with request_vcr.use_cassette(cassette_name): chat([langchain.schema.HumanMessage(content="When do you use 'whom' instead of 'who'?")]) span = mock_tracer.pop_traces()[0][0] @@ -409,10 +390,7 @@ def test_chat_model_logs(langchain, ddtrace_config_langchain, request_vcr, mock_ @pytest.mark.snapshot def test_openai_embedding_query(langchain, request_vcr): embeddings = langchain.embeddings.OpenAIEmbeddings() - if sys.version_info >= (3, 10): - cassette_name = "openai_embedding_query.yaml" - else: - cassette_name = "openai_embedding_query_39.yaml" + cassette_name = "openai_embedding_query_39.yaml" if PY39 else "openai_embedding_query.yaml" with request_vcr.use_cassette(cassette_name): embeddings.embed_query("this is a test query.") @@ -421,10 +399,7 @@ def test_openai_embedding_query(langchain, request_vcr): @pytest.mark.snapshot def test_openai_embedding_document(langchain, request_vcr): embeddings = langchain.embeddings.OpenAIEmbeddings() - if sys.version_info >= (3, 10): - cassette_name = "openai_embedding_document.yaml" - else: - cassette_name = "openai_embedding_document_39.yaml" + cassette_name = 
"openai_embedding_document_39.yaml" if PY39 else "openai_embedding_document.yaml" with request_vcr.use_cassette(cassette_name): embeddings.embed_documents(["this is", "a test document."]) @@ -443,10 +418,7 @@ def test_fake_embedding_document(langchain): def test_openai_embedding_metrics(langchain, request_vcr, mock_metrics, mock_logs, snapshot_tracer): embeddings = langchain.embeddings.OpenAIEmbeddings() - if sys.version_info >= (3, 10): - cassette_name = "openai_embedding_query.yaml" - else: - cassette_name = "openai_embedding_query_39.yaml" + cassette_name = "openai_embedding_query_39.yaml" if PY39 else "openai_embedding_query.yaml" with request_vcr.use_cassette(cassette_name): embeddings.embed_query("this is a test query.") expected_tags = [ @@ -472,10 +444,7 @@ def test_openai_embedding_metrics(langchain, request_vcr, mock_metrics, mock_log ) def test_embedding_logs(langchain, ddtrace_config_langchain, request_vcr, mock_logs, mock_metrics, mock_tracer): embeddings = langchain.embeddings.OpenAIEmbeddings() - if sys.version_info >= (3, 10): - cassette_name = "openai_embedding_query.yaml" - else: - cassette_name = "openai_embedding_query_39.yaml" + cassette_name = "openai_embedding_query_39.yaml" if PY39 else "openai_embedding_query.yaml" with request_vcr.use_cassette(cassette_name): embeddings.embed_query("this is a test query.") span = mock_tracer.pop_traces()[0][0] @@ -511,10 +480,7 @@ def test_openai_math_chain_sync(langchain, request_vcr): the overall LLMMathChain, LLMChain, and underlying OpenAI interface. 
""" chain = langchain.chains.LLMMathChain(llm=langchain.llms.OpenAI(temperature=0)) - if sys.version_info >= (3, 10): - cassette_name = "openai_math_chain_sync.yaml" - else: - cassette_name = "openai_math_chain_sync_39.yaml" + cassette_name = "openai_math_chain_sync_39.yaml" if PY39 else "openai_math_chain_sync.yaml" with request_vcr.use_cassette(cassette_name): chain.run("what is two raised to the fifty-fourth power?") @@ -547,7 +513,7 @@ def test_cohere_math_chain_sync(langchain, request_vcr): chain.run("what is thirteen raised to the .3432 power?") -@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires unnecessary cassette file for Python 3.9") +@pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") @pytest.mark.snapshot( token="tests.contrib.langchain.test_langchain.test_openai_sequential_chain", ignores=["metrics.langchain.tokens.total_cost", "resource"], @@ -604,7 +570,7 @@ def _transform_func(inputs): sequential_chain.run({"text": input_text, "style": "a 90s rapper"}) -@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires unnecessary cassette file for Python 3.9") +@pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") @pytest.mark.snapshot(ignores=["langchain.tokens.total_cost", "resource"]) def test_openai_sequential_chain_with_multiple_llm_sync(langchain, request_vcr): template = """Paraphrase this text: @@ -663,10 +629,7 @@ async def test_openai_sequential_chain_with_multiple_llm_async(langchain, reques def test_openai_chain_metrics(langchain, request_vcr, mock_metrics, mock_logs, snapshot_tracer): chain = langchain.chains.LLMMathChain(llm=langchain.llms.OpenAI(temperature=0)) - if sys.version_info >= (3, 10): - cassette_name = "openai_math_chain_sync.yaml" - else: - cassette_name = "openai_math_chain_sync_39.yaml" + cassette_name = "openai_math_chain_sync_39.yaml" if PY39 else "openai_math_chain_sync.yaml" with request_vcr.use_cassette(cassette_name): chain.run("what is 
two raised to the fifty-fourth power?") expected_tags = [ @@ -698,10 +661,7 @@ def test_openai_chain_metrics(langchain, request_vcr, mock_metrics, mock_logs, s ) def test_chain_logs(langchain, ddtrace_config_langchain, request_vcr, mock_logs, mock_metrics, mock_tracer): chain = langchain.chains.LLMMathChain(llm=langchain.llms.OpenAI(temperature=0)) - if sys.version_info >= (3, 10): - cassette_name = "openai_math_chain_sync.yaml" - else: - cassette_name = "openai_math_chain_sync_39.yaml" + cassette_name = "openai_math_chain_sync_39.yaml" if PY39 else "openai_math_chain_sync.yaml" with request_vcr.use_cassette(cassette_name): chain.run("what is two raised to the fifty-fourth power?") traces = mock_tracer.pop_traces() @@ -811,10 +771,7 @@ def test_pinecone_vectorstore_similarity_search(langchain, request_vcr): """ import pinecone - if sys.version_info >= (3, 10): - cassette_name = "openai_pinecone_similarity_search.yaml" - else: - cassette_name = "openai_pinecone_similarity_search_39.yaml" + cassette_name = "openai_pinecone_similarity_search_39.yaml" if PY39 else "openai_pinecone_similarity_search.yaml" with request_vcr.use_cassette(cassette_name): pinecone.init( api_key=os.getenv("PINECONE_API_KEY", ""), @@ -826,7 +783,7 @@ def test_pinecone_vectorstore_similarity_search(langchain, request_vcr): vectorstore.similarity_search("Who was Alan Turing?", 1) -@pytest.mark.skipif(sys.version_info < (3, 10), reason="Cassette specific to Python 3.10+") +@pytest.mark.skipif(PY39, reason="Cassette specific to Python 3.10+") @pytest.mark.snapshot def test_pinecone_vectorstore_retrieval_chain(langchain, request_vcr): """ @@ -851,7 +808,7 @@ def test_pinecone_vectorstore_retrieval_chain(langchain, request_vcr): qa_with_sources("Who was Alan Turing?") -@pytest.mark.skipif(sys.version_info >= (3, 10), reason="Cassette specific to Python 3.9") +@pytest.mark.skipif(not PY39, reason="Cassette specific to Python 3.9") @pytest.mark.snapshot def
test_pinecone_vectorstore_retrieval_chain_39(langchain, request_vcr): """ @@ -879,10 +836,7 @@ def test_pinecone_vectorstore_retrieval_chain_39(langchain, request_vcr): def test_vectorstore_similarity_search_metrics(langchain, request_vcr, mock_metrics, mock_logs, snapshot_tracer): import pinecone - if sys.version_info >= (3, 10): - cassette_name = "openai_pinecone_similarity_search.yaml" - else: - cassette_name = "openai_pinecone_similarity_search_39.yaml" + cassette_name = "openai_pinecone_similarity_search_39.yaml" if PY39 else "openai_pinecone_similarity_search.yaml" with request_vcr.use_cassette(cassette_name): pinecone.init( api_key=os.getenv("PINECONE_API_KEY", ""), @@ -916,10 +870,7 @@ def test_vectorstore_similarity_search_metrics(langchain, request_vcr, mock_metr def test_vectorstore_logs(langchain, ddtrace_config_langchain, request_vcr, mock_logs, mock_metrics, mock_tracer): import pinecone - if sys.version_info >= (3, 10): - cassette_name = "openai_pinecone_similarity_search.yaml" - else: - cassette_name = "openai_pinecone_similarity_search_39.yaml" + cassette_name = "openai_pinecone_similarity_search_39.yaml" if PY39 else "openai_pinecone_similarity_search.yaml" with request_vcr.use_cassette(cassette_name): pinecone.init( api_key=os.getenv("PINECONE_API_KEY", ""), @@ -973,7 +924,7 @@ def test_vectorstore_logs(langchain, ddtrace_config_langchain, request_vcr, mock mock_metrics.count.assert_not_called() -@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires unnecessary cassette file for Python 3.9") +@pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") @pytest.mark.snapshot(ignores=["metrics.langchain.tokens.total_cost", "resource"]) def test_openai_integration(langchain, request_vcr, ddtrace_run_python_code_in_subprocess): env = os.environ.copy() @@ -1005,7 +956,7 @@ def test_openai_integration(langchain, request_vcr, ddtrace_run_python_code_in_s assert err == b"" -@pytest.mark.skipif(sys.version_info < (3, 
10), reason="Requires unnecessary cassette file for Python 3.9") +@pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") @pytest.mark.snapshot(ignores=["metrics.langchain.tokens.total_cost", "resource"]) @pytest.mark.parametrize("schema_version", [None, "v0", "v1"]) @pytest.mark.parametrize("service_name", [None, "mysvc"]) diff --git a/tests/contrib/langchain/test_langchain_community.py b/tests/contrib/langchain/test_langchain_community.py index fc0dce0d862..996eb14078f 100644 --- a/tests/contrib/langchain/test_langchain_community.py +++ b/tests/contrib/langchain/test_langchain_community.py @@ -15,8 +15,7 @@ pytestmark = pytest.mark.skipif( - # parse_version(langchain.__version__) < (0, 1, 0) or (sys.version_info[0] == 3 and sys.version_info[1] == 9), - parse_version(langchain.__version__) < (0, 1, 0), + parse_version(langchain.__version__) < (0, 1, 0) or sys.version_info < (3, 10), reason="This module only tests langchain >= 0.1 and Python 3.10+", ) @@ -44,7 +43,6 @@ def test_global_tags(ddtrace_config_langchain, langchain_openai, request_vcr, mo The env should be used for all data The version should be used for all data """ - assert 0, sys.version_info llm = langchain_openai.OpenAI() with override_global_config(dict(service="test-svc", env="staging", version="1234")): with request_vcr.use_cassette("openai_completion_sync.yaml"): @@ -1157,7 +1155,7 @@ async def test_lcel_chain_simple_async(langchain_core, langchain_openai, request @flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs") @pytest.mark.snapshot(ignores=IGNORE_FIELDS) -@pytest.mark.skipif(sys.version_info >= (3, 11, 0), reason="Python <3.11 test") +@pytest.mark.skipif(sys.version_info >= (3, 11), reason="Python <3.11 test") def test_lcel_chain_batch(langchain_core, langchain_openai, request_vcr): """ Test that invoking a chain with a batch of inputs will result in a 4-span trace, @@ -1174,7 +1172,7 @@ def 
test_lcel_chain_batch(langchain_core, langchain_openai, request_vcr): @@ -1174,7 +1172,7 @@ def @flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs") @pytest.mark.snapshot(ignores=IGNORE_FIELDS) -@pytest.mark.skipif(sys.version_info < (3, 11, 0), reason="Python 3.11+ required") +@pytest.mark.skipif(sys.version_info < (3, 11), reason="Python 3.11+ required") def test_lcel_chain_batch_311(langchain_core, langchain_openai, request_vcr): """ Test that invoking a chain with a batch of inputs will result in a 4-span trace, diff --git a/tests/contrib/langchain/test_langchain_llmobs.py b/tests/contrib/langchain/test_langchain_llmobs.py index 71afc50abd3..ff5296cd32d 100644 --- a/tests/contrib/langchain/test_langchain_llmobs.py +++ b/tests/contrib/langchain/test_langchain_llmobs.py @@ -19,6 +19,7 @@ LANGCHAIN_VERSION = parse_version(langchain_.__version__) +PY39 = sys.version_info < (3, 10) if LANGCHAIN_VERSION < (0, 1, 0): from langchain.schema import AIMessage @@ -130,10 +131,7 @@ def _invoke_chain(cls, chain, prompt, mock_tracer, cassette_name, batch=False): class TestLLMObsLangchain(BaseTestLLMObsLangchain): cassette_subdirectory_name = "langchain" - @pytest.mark.skipif( - sys.version_info[0] == 3 and sys.version_info[1] == 9, - reason="Requires unnecessary cassette file for Python 3.9", - ) + @pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") def test_llmobs_openai_llm(self, langchain, mock_llmobs_span_writer, mock_tracer): span = self._invoke_llm( llm=langchain.llms.OpenAI(model="gpt-3.5-turbo-instruct"), @@ -154,10 +152,7 @@ def test_llmobs_cohere_llm(self, langchain, mock_llmobs_span_writer, mock_tracer assert mock_llmobs_span_writer.enqueue.call_count == 1 _assert_expected_llmobs_llm_span(span, mock_llmobs_span_writer) - @pytest.mark.skipif( - sys.version_info[0] == 3 and sys.version_info[1] == 9, - reason="Requires unnecessary cassette file for Python 3.9", - ) + @pytest.mark.skipif(PY39, reason="Requires unnecessary
cassette file for Python 3.9") def test_llmobs_ai21_llm(self, langchain, mock_llmobs_span_writer, mock_tracer): llm = langchain.llms.AI21() span = self._invoke_llm( @@ -184,10 +179,7 @@ def test_llmobs_huggingfacehub_llm(self, langchain, mock_llmobs_span_writer, moc assert mock_llmobs_span_writer.enqueue.call_count == 1 _assert_expected_llmobs_llm_span(span, mock_llmobs_span_writer) - @pytest.mark.skipif( - sys.version_info[0] == 3 and sys.version_info[1] == 9, - reason="Requires unnecessary cassette file for Python 3.9", - ) + @pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") def test_llmobs_openai_chat_model(self, langchain, mock_llmobs_span_writer, mock_tracer): chat = langchain.chat_models.ChatOpenAI(temperature=0, max_tokens=256) span = self._invoke_chat( @@ -199,10 +191,7 @@ def test_llmobs_openai_chat_model(self, langchain, mock_llmobs_span_writer, mock assert mock_llmobs_span_writer.enqueue.call_count == 1 _assert_expected_llmobs_llm_span(span, mock_llmobs_span_writer, input_role="user") - @pytest.mark.skipif( - sys.version_info[0] == 3 and sys.version_info[1] == 9, - reason="Requires unnecessary cassette file for Python 3.9", - ) + @pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") def test_llmobs_chain(self, langchain, mock_llmobs_span_writer, mock_tracer): chain = langchain.chains.LLMMathChain(llm=langchain.llms.OpenAI(temperature=0, max_tokens=256)) @@ -237,10 +226,7 @@ def test_llmobs_chain(self, langchain, mock_llmobs_span_writer, mock_tracer): ) _assert_expected_llmobs_llm_span(trace[2], mock_llmobs_span_writer) - @pytest.mark.skipif( - sys.version_info[0] == 3 and sys.version_info[1] == 9, - reason="Requires unnecessary cassette file for Python 3.9", - ) + @pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") def test_llmobs_chain_nested(self, langchain, mock_llmobs_span_writer, mock_tracer): template = "Paraphrase this 
text:\n{input_text}\nParaphrase: " prompt = langchain.PromptTemplate(input_variables=["input_text"], template=template) @@ -279,10 +265,7 @@ def test_llmobs_chain_nested(self, langchain, mock_llmobs_span_writer, mock_trac _assert_expected_llmobs_chain_span(trace[3], mock_llmobs_span_writer) _assert_expected_llmobs_llm_span(trace[4], mock_llmobs_span_writer) - @pytest.mark.skipif( - sys.version_info[0] == 3 and sys.version_info[1] == 9, - reason="Requires unnecessary cassette file for Python 3.9", - ) + @pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") def test_llmobs_chain_schema_io(self, langchain, mock_llmobs_span_writer, mock_tracer): prompt = langchain.prompts.ChatPromptTemplate.from_messages( [ @@ -358,10 +341,7 @@ def test_llmobs_cohere_llm(self, langchain_community, mock_llmobs_span_writer, m assert mock_llmobs_span_writer.enqueue.call_count == 1 _assert_expected_llmobs_llm_span(span, mock_llmobs_span_writer) - @pytest.mark.skipif( - sys.version_info[0] == 3 and sys.version_info[1] == 9, - reason="Requires unnecessary cassette file for Python 3.9", - ) + @pytest.mark.skipif(PY39, reason="Requires unnecessary cassette file for Python 3.9") def test_llmobs_ai21_llm(self, langchain_community, mock_llmobs_span_writer, mock_tracer): if langchain_community is None: pytest.skip("langchain-community not installed which is required for this test.") @@ -446,7 +426,7 @@ def test_llmobs_chain_nested(self, langchain_core, langchain_openai, mock_llmobs _assert_expected_llmobs_llm_span(trace[2], mock_llmobs_span_writer, input_role="user") _assert_expected_llmobs_llm_span(trace[3], mock_llmobs_span_writer, input_role="user") - @pytest.mark.skipif(sys.version_info[0] == 3 and sys.version_info[1] >= 11, reason="Python <3.11 required") + @pytest.mark.skipif(sys.version_info >= (3, 11), reason="Python <3.11 required") def test_llmobs_chain_batch(self, langchain_core, langchain_openai, mock_llmobs_span_writer, mock_tracer): prompt = 
langchain_core.prompts.ChatPromptTemplate.from_template("Tell me a short joke about {topic}") output_parser = langchain_core.output_parsers.StrOutputParser()