diff --git a/.circleci/config.templ.yml b/.circleci/config.templ.yml index 05da52d643a..29cb1d886db 100644 --- a/.circleci/config.templ.yml +++ b/.circleci/config.templ.yml @@ -404,12 +404,12 @@ jobs: paths: - "." - appsec_integrations: + appsec_integrations_pygoat: <<: *machine_executor parallelism: 13 steps: - run_test: - pattern: 'appsec_integrations' + pattern: 'appsec_integrations_pygoat' snapshot: true run_agent_checks: false docker_services: "pygoat" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 23a48b6f344..b8a8a48d8f8 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -119,8 +119,6 @@ tests/appsec/ @DataDog/asm-python tests/contrib/dbapi/test_dbapi_appsec.py @DataDog/asm-python tests/contrib/subprocess @DataDog/asm-python tests/contrib/flask/test_flask_appsec.py @DataDog/asm-python -tests/contrib/django/django_app/appsec_urls.py @DataDog/asm-python -tests/contrib/django/test_django_appsec.py @DataDog/asm-python tests/snapshots/tests*appsec*.json @DataDog/asm-python tests/contrib/*/test*appsec*.py @DataDog/asm-python scripts/iast/* @DataDog/asm-python @@ -147,6 +145,8 @@ ddtrace/contrib/internal/google_generativeai @DataDog/ml-observabilit ddtrace/contrib/google_generativeai @DataDog/ml-observability ddtrace/contrib/internal/vertexai @DataDog/ml-observability ddtrace/contrib/vertexai @DataDog/ml-observability +ddtrace/contrib/langgraph @DataDog/ml-observability +ddtrace/contrib/internal/langgraph @DataDog/ml-observability tests/llmobs @DataDog/ml-observability tests/contrib/openai @DataDog/ml-observability tests/contrib/langchain @DataDog/ml-observability @@ -156,6 +156,7 @@ tests/contrib/botocore/bedrock_cassettes @DataDog/ml-observabilit tests/contrib/anthropic @DataDog/ml-observability tests/contrib/google_generativeai @DataDog/ml-observability tests/contrib/vertexai @DataDog/ml-observability +tests/contrib/langgraph @DataDog/ml-observability .gitlab/tests/llmobs.yml @DataDog/ml-observability # Remote Config diff --git 
a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml index ccf6c6501d9..3f9d9308c83 100644 --- a/.github/workflows/system-tests.yml +++ b/.github/workflows/system-tests.yml @@ -47,6 +47,7 @@ jobs: - weblog-variant: fastapi # runs django-poc for 3.12 - weblog-variant: python3.12 + - weblog-variant: django-py3.13 fail-fast: false env: TEST_LIBRARY: python @@ -96,7 +97,7 @@ jobs: needs: [system-tests-build-agent, system-tests-build-weblog] strategy: matrix: - weblog-variant: [flask-poc, uwsgi-poc , django-poc, fastapi, python3.12] + weblog-variant: [flask-poc, uwsgi-poc , django-poc, fastapi, python3.12, django-py3.13] scenario: [remote-config, appsec, appsec-1, other, debugger-1, debugger-2] fail-fast: false diff --git a/.gitlab/prepare-oci-package.sh b/.gitlab/prepare-oci-package.sh index 27d5354219e..5958c31e731 100755 --- a/.gitlab/prepare-oci-package.sh +++ b/.gitlab/prepare-oci-package.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -eo pipefail if [ -n "$CI_COMMIT_TAG" ] && [ -z "$PYTHON_PACKAGE_VERSION" ]; then PYTHON_PACKAGE_VERSION=${CI_COMMIT_TAG##v} @@ -38,3 +39,18 @@ fi cp -r ../pywheels-dep/site-packages* sources/ddtrace_pkgs cp ../lib-injection/sources/* sources/ + +if ! type rdfind &> /dev/null; then + clean-apt install rdfind +fi +echo "Deduplicating package files" +cd ./sources +rdfind -makesymlinks true -makeresultsfile true -checksum sha256 -deterministic true -outputname deduped.txt . +echo "Converting symlinks to relative symlinks" +find . 
-type l | while read -r l; do + target="$(realpath "$l")" + rel_target="$(realpath --relative-to="$(dirname "$(realpath -s "$l")")" "$target")" + dest_base="$(basename "$l")" + dest_dir="$(dirname "$l")" + (cd "${dest_dir}" && ln -sf "${rel_target}" "${dest_base}") +done diff --git a/.riot/requirements/1147cef.txt b/.riot/requirements/1147cef.txt deleted file mode 100644 index a760b2a10c4..00000000000 --- a/.riot/requirements/1147cef.txt +++ /dev/null @@ -1,62 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.10 -# by the following command: -# -# pip-compile --no-annotate --resolver=backtracking .riot/requirements/1147cef.in -# -aiohttp==3.9.1 -aiosignal==1.3.1 -annotated-types==0.6.0 -anyio==4.2.0 -async-timeout==4.0.3 -attrs==23.2.0 -blinker==1.7.0 -certifi==2023.11.17 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.4.0 -dataclasses-json==0.6.3 -exceptiongroup==1.2.0 -flask==3.0.0 -frozenlist==1.4.1 -greenlet==3.0.3 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.6 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -jsonpatch==1.33 -jsonpointer==2.4 -langchain==0.0.354 -langchain-community==0.0.8 -langchain-core==0.1.5 -langchain-experimental==0.0.47 -langsmith==0.0.77 -markupsafe==2.1.3 -marshmallow==3.20.1 -mock==5.1.0 -multidict==6.0.4 -mypy-extensions==1.0.0 -numpy==1.26.3 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pydantic==2.5.3 -pydantic-core==2.14.6 -pytest==7.4.4 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pyyaml==6.0.1 -requests==2.31.0 -sniffio==1.3.0 -sortedcontainers==2.4.0 -sqlalchemy==2.0.25 -tenacity==8.2.3 -tomli==2.0.1 -typing-extensions==4.9.0 -typing-inspect==0.9.0 -urllib3==2.1.0 -werkzeug==3.0.1 -yarl==1.9.4 diff --git a/.riot/requirements/115283d.txt b/.riot/requirements/115283d.txt new file mode 100644 index 00000000000..758f6375b90 --- /dev/null +++ b/.riot/requirements/115283d.txt @@ -0,0 +1,42 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the 
following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/115283d.in +# +anyio==4.7.0 +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.1 +coverage[toml]==7.6.10 +h11==0.14.0 +httpcore==1.0.7 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jsonpatch==1.33 +jsonpointer==3.0.0 +langchain-core==0.3.28 +langgraph==0.2.60 +langgraph-checkpoint==2.0.9 +langgraph-sdk==0.1.48 +langsmith==0.2.6 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +orjson==3.10.12 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-asyncio==0.25.0 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pyyaml==6.0.2 +requests==2.32.3 +requests-toolbelt==1.0.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +tenacity==9.0.0 +typing-extensions==4.12.2 +urllib3==2.3.0 diff --git a/.riot/requirements/1221a04.txt b/.riot/requirements/1221a04.txt deleted file mode 100644 index b07a317c8ce..00000000000 --- a/.riot/requirements/1221a04.txt +++ /dev/null @@ -1,36 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.7 -# by the following command: -# -# pip-compile --no-annotate --resolver=backtracking .riot/requirements/1221a04.in -# -attrs==23.1.0 -certifi==2023.7.22 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.2.7 -exceptiongroup==1.1.3 -flask==2.2.5 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.4 -importlib-metadata==6.7.0 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -markupsafe==2.1.3 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.2.0 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.11.1 -requests==2.31.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -typing-extensions==4.7.1 -urllib3==2.0.7 -werkzeug==2.2.3 -zipp==3.15.0 diff --git a/.riot/requirements/131666a.txt b/.riot/requirements/131666a.txt deleted file mode 100644 index e2346f500df..00000000000 --- a/.riot/requirements/131666a.txt +++ /dev/null @@ -1,36 +0,0 @@ -# -# This file is autogenerated by pip-compile with python 3.9 -# To 
update, run: -# -# pip-compile --no-annotate --resolver=backtracking .riot/requirements/131666a.in -# -attrs==23.1.0 -blinker==1.7.0 -certifi==2023.7.22 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.3.2 -exceptiongroup==1.1.3 -flask==2.3.3 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.4 -importlib-metadata==6.8.0 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -markupsafe==2.1.3 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -requests==2.31.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -urllib3==2.1.0 -werkzeug==3.0.1 -zipp==3.17.0 diff --git a/.riot/requirements/16ed652.txt b/.riot/requirements/16ed652.txt deleted file mode 100644 index e4914dd4ab4..00000000000 --- a/.riot/requirements/16ed652.txt +++ /dev/null @@ -1,33 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.8 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/16ed652.in -# -attrs==23.1.0 -certifi==2023.7.22 -charset-normalizer==3.3.2 -click==7.1.2 -coverage[toml]==7.3.2 -exceptiongroup==1.1.3 -flask==1.1.4 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.4 -iniconfig==2.0.0 -itsdangerous==1.1.0 -jinja2==2.11.3 -markupsafe==1.1.1 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -requests==2.31.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -urllib3==2.1.0 -werkzeug==1.0.1 diff --git a/.riot/requirements/173e759.txt b/.riot/requirements/173e759.txt deleted file mode 100644 index 305b7740f25..00000000000 --- a/.riot/requirements/173e759.txt +++ /dev/null @@ -1,64 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.8 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/173e759.in -# -aiohttp==3.9.1 -aiosignal==1.3.1 -annotated-types==0.6.0 -anyio==4.2.0 -async-timeout==4.0.3 -attrs==23.2.0 -blinker==1.7.0 
-certifi==2023.11.17 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.4.0 -dataclasses-json==0.6.3 -exceptiongroup==1.2.0 -flask==3.0.0 -frozenlist==1.4.1 -greenlet==3.0.3 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.6 -importlib-metadata==7.0.1 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -jsonpatch==1.33 -jsonpointer==2.4 -langchain==0.0.354 -langchain-community==0.0.8 -langchain-core==0.1.5 -langchain-experimental==0.0.47 -langsmith==0.0.77 -markupsafe==2.1.3 -marshmallow==3.20.1 -mock==5.1.0 -multidict==6.0.4 -mypy-extensions==1.0.0 -numpy==1.24.4 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pydantic==2.5.3 -pydantic-core==2.14.6 -pytest==7.4.4 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pyyaml==6.0.1 -requests==2.31.0 -sniffio==1.3.0 -sortedcontainers==2.4.0 -sqlalchemy==2.0.25 -tenacity==8.2.3 -tomli==2.0.1 -typing-extensions==4.9.0 -typing-inspect==0.9.0 -urllib3==2.1.0 -werkzeug==3.0.1 -yarl==1.9.4 -zipp==3.17.0 diff --git a/.riot/requirements/17b5eda.txt b/.riot/requirements/17b5eda.txt deleted file mode 100644 index 6ae81a00705..00000000000 --- a/.riot/requirements/17b5eda.txt +++ /dev/null @@ -1,32 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.11 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/17b5eda.in -# -attrs==23.1.0 -blinker==1.7.0 -certifi==2023.7.22 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.3.2 -flask==2.3.3 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.4 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -markupsafe==2.1.3 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -requests==2.31.0 -sortedcontainers==2.4.0 -urllib3==2.1.0 -werkzeug==3.0.1 diff --git a/.riot/requirements/18551a1.txt b/.riot/requirements/18551a1.txt new file mode 100644 index 00000000000..a858d4972ca --- /dev/null +++ b/.riot/requirements/18551a1.txt @@ 
-0,0 +1,45 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/18551a1.in +# +annotated-types==0.7.0 +anyio==4.8.0 +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.1 +coverage[toml]==7.6.10 +h11==0.14.0 +httpcore==1.0.7 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jsonpatch==1.33 +jsonpointer==3.0.0 +langchain-core==0.3.29 +langgraph==0.2.61 +langgraph-checkpoint==2.0.9 +langgraph-sdk==0.1.48 +langsmith==0.2.10 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +orjson==3.10.14 +packaging==24.2 +pluggy==1.5.0 +pydantic==2.10.5 +pydantic-core==2.27.2 +pytest==8.3.4 +pytest-asyncio==0.25.2 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pyyaml==6.0.2 +requests==2.32.3 +requests-toolbelt==1.0.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +tenacity==9.0.0 +typing-extensions==4.12.2 +urllib3==2.3.0 diff --git a/.riot/requirements/1a1ddb4.txt b/.riot/requirements/1a1ddb4.txt deleted file mode 100644 index 3e64e630c33..00000000000 --- a/.riot/requirements/1a1ddb4.txt +++ /dev/null @@ -1,36 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.7 -# by the following command: -# -# pip-compile --no-annotate --resolver=backtracking .riot/requirements/1a1ddb4.in -# -attrs==23.1.0 -certifi==2023.7.22 -charset-normalizer==3.3.2 -click==7.1.2 -coverage[toml]==7.2.7 -exceptiongroup==1.1.3 -flask==1.1.4 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.4 -importlib-metadata==6.7.0 -iniconfig==2.0.0 -itsdangerous==1.1.0 -jinja2==2.11.3 -markupsafe==1.1.1 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.2.0 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.11.1 -requests==2.31.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -typing-extensions==4.7.1 -urllib3==2.0.7 -werkzeug==1.0.1 -zipp==3.15.0 diff --git a/.riot/requirements/1b0d603.txt b/.riot/requirements/1b0d603.txt deleted file mode 100644 index 
c6dfa7f6fae..00000000000 --- a/.riot/requirements/1b0d603.txt +++ /dev/null @@ -1,64 +0,0 @@ -# -# This file is autogenerated by pip-compile with python 3.9 -# To update, run: -# -# pip-compile --no-annotate --resolver=backtracking .riot/requirements/1b0d603.in -# -aiohttp==3.9.1 -aiosignal==1.3.1 -annotated-types==0.6.0 -anyio==4.2.0 -async-timeout==4.0.3 -attrs==23.2.0 -blinker==1.7.0 -certifi==2023.11.17 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.4.0 -dataclasses-json==0.6.3 -exceptiongroup==1.2.0 -flask==3.0.0 -frozenlist==1.4.1 -greenlet==3.0.3 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.6 -importlib-metadata==7.0.1 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -jsonpatch==1.33 -jsonpointer==2.4 -langchain==0.0.354 -langchain-community==0.0.8 -langchain-core==0.1.5 -langchain-experimental==0.0.47 -langsmith==0.0.77 -markupsafe==2.1.3 -marshmallow==3.20.1 -mock==5.1.0 -multidict==6.0.4 -mypy-extensions==1.0.0 -numpy==1.26.3 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pydantic==2.5.3 -pydantic-core==2.14.6 -pytest==7.4.4 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pyyaml==6.0.1 -requests==2.31.0 -sniffio==1.3.0 -sortedcontainers==2.4.0 -sqlalchemy==2.0.25 -tenacity==8.2.3 -tomli==2.0.1 -typing-extensions==4.9.0 -typing-inspect==0.9.0 -urllib3==2.1.0 -werkzeug==3.0.1 -yarl==1.9.4 -zipp==3.17.0 diff --git a/.riot/requirements/1d4e95e.txt b/.riot/requirements/1d4e95e.txt new file mode 100644 index 00000000000..9d2871696ae --- /dev/null +++ b/.riot/requirements/1d4e95e.txt @@ -0,0 +1,27 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1d4e95e.in +# +attrs==24.3.0 +coverage[toml]==7.6.10 +gevent==24.11.1 +greenlet==3.1.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 
+pytest-randomly==3.16.0 +sortedcontainers==2.4.0 +zope-event==5.0 +zope-interface==7.2 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.8.0 diff --git a/.riot/requirements/1e2d655.txt b/.riot/requirements/1e2d655.txt deleted file mode 100644 index 7f6b56e2776..00000000000 --- a/.riot/requirements/1e2d655.txt +++ /dev/null @@ -1,60 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.12 -# by the following command: -# -# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e2d655.in -# -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 -aiosignal==1.3.1 -annotated-types==0.7.0 -anyio==4.4.0 -attrs==24.2.0 -blinker==1.8.2 -certifi==2024.7.4 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.6.1 -dataclasses-json==0.6.7 -flask==3.0.3 -frozenlist==1.4.1 -greenlet==3.0.3 -gunicorn==23.0.0 -hypothesis==6.45.0 -idna==3.8 -iniconfig==2.0.0 -itsdangerous==2.2.0 -jinja2==3.1.4 -jsonpatch==1.33 -jsonpointer==3.0.0 -langchain==0.0.354 -langchain-community==0.0.20 -langchain-core==0.1.23 -langchain-experimental==0.0.47 -langsmith==0.0.87 -markupsafe==2.1.5 -marshmallow==3.22.0 -mock==5.1.0 -multidict==6.0.5 -mypy-extensions==1.0.0 -numpy==1.26.4 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.5.0 -psycopg2-binary==2.9.9 -pydantic==2.8.2 -pydantic-core==2.20.1 -pytest==8.3.2 -pytest-cov==5.0.0 -pytest-mock==3.14.0 -pyyaml==6.0.2 -requests==2.32.3 -sniffio==1.3.1 -sortedcontainers==2.4.0 -sqlalchemy==2.0.32 -tenacity==8.5.0 -typing-extensions==4.12.2 -typing-inspect==0.9.0 -urllib3==2.2.2 -werkzeug==3.0.4 -yarl==1.9.4 diff --git a/.riot/requirements/1e81527.txt b/.riot/requirements/1e81527.txt deleted file mode 100644 index 3eae5fd518c..00000000000 --- a/.riot/requirements/1e81527.txt +++ /dev/null @@ -1,36 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.8 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/1e81527.in -# -attrs==23.1.0 -blinker==1.7.0 
-certifi==2023.7.22 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.3.2 -exceptiongroup==1.1.3 -flask==2.3.3 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.4 -importlib-metadata==6.8.0 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -markupsafe==2.1.3 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -requests==2.31.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -urllib3==2.1.0 -werkzeug==3.0.1 -zipp==3.17.0 diff --git a/.riot/requirements/1fb9968.txt b/.riot/requirements/1fb9968.txt deleted file mode 100644 index 57524a248f5..00000000000 --- a/.riot/requirements/1fb9968.txt +++ /dev/null @@ -1,59 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.11 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/1fb9968.in -# -aiohttp==3.9.1 -aiosignal==1.3.1 -annotated-types==0.6.0 -anyio==4.2.0 -attrs==23.2.0 -blinker==1.7.0 -certifi==2023.11.17 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.4.0 -dataclasses-json==0.6.3 -flask==3.0.0 -frozenlist==1.4.1 -greenlet==3.0.3 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.6 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -jsonpatch==1.33 -jsonpointer==2.4 -langchain==0.0.354 -langchain-community==0.0.8 -langchain-core==0.1.5 -langchain-experimental==0.0.47 -langsmith==0.0.77 -markupsafe==2.1.3 -marshmallow==3.20.1 -mock==5.1.0 -multidict==6.0.4 -mypy-extensions==1.0.0 -numpy==1.26.3 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pydantic==2.5.3 -pydantic-core==2.14.6 -pytest==7.4.4 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pyyaml==6.0.1 -requests==2.31.0 -sniffio==1.3.0 -sortedcontainers==2.4.0 -sqlalchemy==2.0.25 -tenacity==8.2.3 -typing-extensions==4.9.0 -typing-inspect==0.9.0 -urllib3==2.1.0 -werkzeug==3.0.1 -yarl==1.9.4 diff --git a/.riot/requirements/2b94418.txt b/.riot/requirements/2b94418.txt deleted file mode 100644 index 
a64003e98e9..00000000000 --- a/.riot/requirements/2b94418.txt +++ /dev/null @@ -1,34 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.10 -# by the following command: -# -# pip-compile --no-annotate --resolver=backtracking .riot/requirements/2b94418.in -# -attrs==23.1.0 -blinker==1.7.0 -certifi==2023.7.22 -charset-normalizer==3.3.2 -click==8.1.7 -coverage[toml]==7.3.2 -exceptiongroup==1.1.3 -flask==2.3.3 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.4 -iniconfig==2.0.0 -itsdangerous==2.1.2 -jinja2==3.1.2 -markupsafe==2.1.3 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -requests==2.31.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -urllib3==2.1.0 -werkzeug==3.0.1 diff --git a/.riot/requirements/2bd0151.txt b/.riot/requirements/2bd0151.txt new file mode 100644 index 00000000000..7f978514b48 --- /dev/null +++ b/.riot/requirements/2bd0151.txt @@ -0,0 +1,45 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/2bd0151.in +# +annotated-types==0.7.0 +anyio==4.7.0 +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.0 +coverage[toml]==7.6.9 +h11==0.14.0 +httpcore==1.0.7 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jsonpatch==1.33 +jsonpointer==3.0.0 +langchain-core==0.3.28 +langgraph==0.2.60 +langgraph-checkpoint==2.0.9 +langgraph-sdk==0.1.48 +langsmith==0.2.4 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +orjson==3.10.12 +packaging==24.2 +pluggy==1.5.0 +pydantic==2.10.4 +pydantic-core==2.27.2 +pytest==8.3.4 +pytest-asyncio==0.25.0 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pyyaml==6.0.2 +requests==2.32.3 +requests-toolbelt==1.0.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +tenacity==9.0.0 +typing-extensions==4.12.2 +urllib3==2.3.0 diff --git a/.riot/requirements/2d19e52.txt 
b/.riot/requirements/2d19e52.txt deleted file mode 100644 index 8de360e7316..00000000000 --- a/.riot/requirements/2d19e52.txt +++ /dev/null @@ -1,32 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.7 -# by the following command: -# -# pip-compile --allow-unsafe --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/2d19e52.in -# -attrs==24.2.0 -coverage[toml]==7.2.7 -exceptiongroup==1.2.2 -gevent==22.10.2 -greenlet==3.1.1 -hypothesis==6.45.0 -importlib-metadata==6.7.0 -iniconfig==2.0.0 -mock==5.1.0 -msgpack==1.0.5 -opentracing==2.4.0 -packaging==24.0 -pluggy==1.2.0 -pytest==7.4.4 -pytest-cov==4.1.0 -pytest-mock==3.11.1 -pytest-randomly==3.12.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -typing-extensions==4.7.1 -zipp==3.15.0 -zope-event==5.0 -zope-interface==6.4.post2 - -# The following packages are considered to be unsafe in a requirements file: -setuptools==68.0.0 diff --git a/.riot/requirements/2e4fbd3.txt b/.riot/requirements/2e4fbd3.txt new file mode 100644 index 00000000000..3204235701a --- /dev/null +++ b/.riot/requirements/2e4fbd3.txt @@ -0,0 +1,47 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/2e4fbd3.in +# +annotated-types==0.7.0 +anyio==4.7.0 +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.1 +coverage[toml]==7.6.10 +exceptiongroup==1.2.2 +h11==0.14.0 +httpcore==1.0.7 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jsonpatch==1.33 +jsonpointer==3.0.0 +langchain-core==0.3.28 +langgraph==0.2.60 +langgraph-checkpoint==2.0.9 +langgraph-sdk==0.1.48 +langsmith==0.2.6 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +orjson==3.10.12 +packaging==24.2 +pluggy==1.5.0 +pydantic==2.10.4 +pydantic-core==2.27.2 +pytest==8.3.4 +pytest-asyncio==0.25.0 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pyyaml==6.0.2 +requests==2.32.3 +requests-toolbelt==1.0.0 
+sniffio==1.3.1 +sortedcontainers==2.4.0 +tenacity==9.0.0 +tomli==2.2.1 +typing-extensions==4.12.2 +urllib3==2.3.0 diff --git a/.riot/requirements/5e31227.txt b/.riot/requirements/5e31227.txt new file mode 100644 index 00000000000..a3815ab0d74 --- /dev/null +++ b/.riot/requirements/5e31227.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/5e31227.in +# +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.1 +coverage[toml]==7.6.10 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==2.3.0 diff --git a/.riot/requirements/628e8fe.txt b/.riot/requirements/628e8fe.txt new file mode 100644 index 00000000000..163d0416c31 --- /dev/null +++ b/.riot/requirements/628e8fe.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/628e8fe.in +# +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.1 +coverage[toml]==7.6.10 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==2.3.0 diff --git a/.riot/requirements/27d0ff3.txt b/.riot/requirements/6bec1ec.txt similarity index 95% rename from .riot/requirements/27d0ff3.txt rename to .riot/requirements/6bec1ec.txt index c03419edbdb..3e128a77c79 100644 --- a/.riot/requirements/27d0ff3.txt +++ b/.riot/requirements/6bec1ec.txt @@ -2,13 +2,13 @@ # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/27d0ff3.in +# pip-compile --allow-unsafe 
--no-annotate .riot/requirements/6bec1ec.in # attrs==24.3.0 coverage[toml]==7.6.1 exceptiongroup==1.2.2 gevent==24.2.1 -greenlet==3.1.1 +greenlet==3.1.0 hypothesis==6.45.0 importlib-metadata==8.5.0 iniconfig==2.0.0 diff --git a/.riot/requirements/7dd4bcd.txt b/.riot/requirements/7dd4bcd.txt new file mode 100644 index 00000000000..e1f2b4d3a03 --- /dev/null +++ b/.riot/requirements/7dd4bcd.txt @@ -0,0 +1,47 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/7dd4bcd.in +# +annotated-types==0.7.0 +anyio==4.7.0 +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.1 +coverage[toml]==7.6.10 +exceptiongroup==1.2.2 +h11==0.14.0 +httpcore==1.0.7 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jsonpatch==1.33 +jsonpointer==3.0.0 +langchain-core==0.3.28 +langgraph==0.2.60 +langgraph-checkpoint==2.0.9 +langgraph-sdk==0.1.48 +langsmith==0.2.6 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +orjson==3.10.12 +packaging==24.2 +pluggy==1.5.0 +pydantic==2.10.4 +pydantic-core==2.27.2 +pytest==8.3.4 +pytest-asyncio==0.25.0 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pyyaml==6.0.2 +requests==2.32.3 +requests-toolbelt==1.0.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +tenacity==9.0.0 +tomli==2.2.1 +typing-extensions==4.12.2 +urllib3==2.3.0 diff --git a/.riot/requirements/8dd53b1.txt b/.riot/requirements/8dd53b1.txt new file mode 100644 index 00000000000..1dbf9b66b89 --- /dev/null +++ b/.riot/requirements/8dd53b1.txt @@ -0,0 +1,25 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/8dd53b1.in +# +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.1 +coverage[toml]==7.6.10 +exceptiongroup==1.2.2 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 
+pytest-cov==6.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +tomli==2.2.1 +urllib3==2.3.0 diff --git a/.riot/requirements/968fdc9.txt b/.riot/requirements/968fdc9.txt new file mode 100644 index 00000000000..6633b871d53 --- /dev/null +++ b/.riot/requirements/968fdc9.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/968fdc9.in +# +attrs==24.3.0 +certifi==2024.12.14 +charset-normalizer==3.4.1 +coverage[toml]==7.6.10 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==2.3.0 diff --git a/.riot/requirements/e53ccba.txt b/.riot/requirements/e53ccba.txt deleted file mode 100644 index 0da0b56ed64..00000000000 --- a/.riot/requirements/e53ccba.txt +++ /dev/null @@ -1,33 +0,0 @@ -# -# This file is autogenerated by pip-compile with python 3.9 -# To update, run: -# -# pip-compile --no-annotate --resolver=backtracking .riot/requirements/e53ccba.in -# -attrs==23.1.0 -certifi==2023.7.22 -charset-normalizer==3.3.2 -click==7.1.2 -coverage[toml]==7.3.2 -exceptiongroup==1.1.3 -flask==1.1.4 -gunicorn==21.2.0 -hypothesis==6.45.0 -idna==3.4 -iniconfig==2.0.0 -itsdangerous==1.1.0 -jinja2==2.11.3 -markupsafe==1.1.1 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -requests==2.31.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -urllib3==2.1.0 -werkzeug==1.0.1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2039d597f9a..a29299f8987 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,81 @@ Changelogs for versions not listed here can be found at https://github.com/DataDog/dd-trace-py/releases +--- + +## 2.19.1 +### Bug Fixes + +- Profiling + - Fixes an issue where the memory allocation 
profiler can cause a segmentation fault due to data races when accessing its own global data structures from multiple threads. + - Fixes a bug where profiling mutexes were not cleared on fork in the child process. This could cause deadlocks in certain configurations. + - Removes a system call from the memory allocation profiler, used to detect forks, which ran on every allocation and resulted in a significant slowdown. + +- Tracing + - `django`: Fixes issue where django cache is represented as a django service rather than the third party service. + - `botocore`: Resolves formatting errors in the bedrock integration when parsing request model IDs, which can now accept AWS ARNs. + + +--- + +## 2.19.0 +### New Features +- ASM + - Introduces "Standalone SCA billing", opting out for APM billing and applying to only SCA. Enable this by setting these two environment variables: `DD_APPSEC_SCA_ENABLED` and `DD_EXPERIMENTAL_APPSEC_STANDALONE_ENABLED` + +- Code Security + - Introduces stack trace reports for Code Security. + +- Profiling + - Adds an experimental integration with the PyTorch profiler which can be enabled by setting `DD_PROFILING_PYTORCH_ENABLED=true`. This feature instruments the PyTorch profiler API () so that GPU profiling data can be sent to Datadog for visualization. This feature supports torch version \>= 1.8.1. + +- Tracing + - `azure_functions`: Introduces support for Azure Functions. + +### Upgrade Notes +- Makes the library compatible with Python 3.13 + +### Bug Fixes +- ASM + - Resolves an issue where AppSec was using a patched request and builtins functions, creating telemetry errors. + +- Code Security + - Adds more modules to the IAST patching denylist to improve startup time + +- Lib-Injection + - Fixes missing lib-injection telemetry for common abort scenarios. 
+ +- LLM Observability + - Resolves an issue where `LLMObs.enable()` ignored global patch configurations, specifically + the `DD_TRACE__ENABLED` and `DD_PATCH_MODULES` environment variables. + +- Telemetry + - library: Resolves deadlocks that could occur when sending instrumentation telemetry data after an unhandled exception is raised. + +- Tracing + - `ASGI`: This fix resolves an issue parsing response cookies in FastAPI and awsgi + - `asyncio`: Resolves an issue where asyncio event loops fail to register when `ddtrace-run`/`import ddtrace.auto` is used and gevent is installed. + - `datastreams`: Logs at warning level for Kinesis errors that break the Data Streams Monitoring map. + + +--- + +## 2.18.2 + + +### Bug Fixes + +- Code Security + - Adds more modules to the IAST patching denylist to improve startup time + +- Profiling + - Removes a system call from the memory allocation profiler, used to detect forks, which ran on every allocation and resulted in a significant slowdown. + +- Tracing + - `ASGI`: Resolves an issue parsing response cookies in FastAPI and awsgi + - Integrations: Improves error handling for exceptions raised during the startup of ddtrace integrations. This reduces the likelihood of the ddtrace library raising unhandled exceptions. + + --- ## 2.18.1 @@ -117,6 +192,39 @@ Tracing: - Tracing - Removed x-forwarded from headers used for client IP resolution (but not from collected headers). We lack evidence of actual usage, and whether this should follow RFC 7239 or regular XFF list format. +--- +## 2.17.5 + +### Bug Fixes + +- Tracing + - `celery`: Fixes an issue where `celery.apply` spans from Celery pre-run got closed too soon leading to span tags being missing. + + +--- + +## 2.17.4 + +### Bug Fixes + +- Code Security + - Adds more modules to the IAST patching denylist to improve startup time + +- ASM + - Resolves an issue where AppSec was using a patched JSON loads, creating telemetry errors. 
+ - Resolves an issue where AppSec was using a patched request and builtins functions, creating telemetry errors. + +- LLM Observability + - Resolves an issue where `LLMObs.enable()` ignored global patch configurations, specifically the `DD_TRACE__ENABLED` and `DD_PATCH_MODULES` environment variables. + - `langchain`: Resolves a JSON decoding issue resulting from tagging streamed outputs from chains ending with a PydanticOutputParser. + +- Profiling + - Updates setup.py to ignore int-ptr conversion warnings for the profiler stack.pyx file. This is important because gcc 14 makes these conversions an error, alpine 3.21.0 ships with gcc 14, and any patch version of a Python alpine image cut after December 5th, 2024, will have this issue. + +- Tracing + - `ASGI`: Resolves an issue parsing response cookies in FastAPI and awsgi + + --- ## 2.17.3 diff --git a/benchmarks/bm/utils.py b/benchmarks/bm/utils.py index ba6461336b5..dd7b4991c57 100644 --- a/benchmarks/bm/utils.py +++ b/benchmarks/bm/utils.py @@ -8,8 +8,8 @@ from ddtrace import __version__ as ddtrace_version from ddtrace._trace.span import Span -from ddtrace.filters import TraceFilter from ddtrace.internal import telemetry +from ddtrace.trace import TraceFilter _Span = Span diff --git a/ddtrace/__init__.py b/ddtrace/__init__.py index 1f2049cd0a5..b555d1117ca 100644 --- a/ddtrace/__init__.py +++ b/ddtrace/__init__.py @@ -1,4 +1,5 @@ import sys +import os import warnings @@ -26,7 +27,7 @@ from ._monkey import patch_all # noqa: E402 from .internal.compat import PYTHON_VERSION_INFO # noqa: E402 from .internal.utils.deprecations import DDTraceDeprecationWarning # noqa: E402 -from .pin import Pin # noqa: E402 +from ddtrace._trace.pin import Pin # noqa: E402 from ddtrace._trace.span import Span # noqa: E402 from ddtrace._trace.tracer import Tracer # noqa: E402 from ddtrace.vendor import debtcollector @@ -42,39 +43,42 @@ # initialization, which added this module to sys.modules. 
We catch deprecation # warnings as this is only to retain a side effect of the package # initialization. +# TODO: Remove this in v3.0 when the ddtrace/tracer.py module is removed with warnings.catch_warnings(): warnings.simplefilter("ignore") from .tracer import Tracer as _ - __version__ = get_version() -# a global tracer instance with integration settings -tracer = Tracer() +# TODO: Deprecate accessing tracer from ddtrace.__init__ module in v4.0 +if os.environ.get("_DD_GLOBAL_TRACER_INIT", "true").lower() in ("1", "true"): + from ddtrace.trace import tracer # noqa: F401 __all__ = [ "patch", "patch_all", "Pin", "Span", - "tracer", "Tracer", "config", "DDTraceDeprecationWarning", ] -_DEPRECATED_MODULE_ATTRIBUTES = [ +_DEPRECATED_TRACE_ATTRIBUTES = [ "Span", "Tracer", + "Pin", ] def __getattr__(name): - if name in _DEPRECATED_MODULE_ATTRIBUTES: + if name in _DEPRECATED_TRACE_ATTRIBUTES: debtcollector.deprecate( ("%s.%s is deprecated" % (__name__, name)), + message="Import from ddtrace.trace instead.", category=DDTraceDeprecationWarning, + removal_version="3.0.0", ) if name in globals(): diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py index 788eb03bfa8..75c70114ef2 100644 --- a/ddtrace/_monkey.py +++ b/ddtrace/_monkey.py @@ -6,6 +6,7 @@ from wrapt.importer import when_imported from ddtrace.appsec import load_common_appsec_modules +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from .appsec._iast._utils import _is_iast_enabled from .internal import telemetry @@ -48,6 +49,7 @@ "grpc": True, "httpx": True, "kafka": True, + "langgraph": False, "mongoengine": True, "mysql": True, "mysqldb": True, @@ -148,6 +150,10 @@ "httplib": ("http.client",), "kafka": ("confluent_kafka",), "google_generativeai": ("google.generativeai",), + "langgraph": ( + "langgraph", + "langgraph.graph", + ), } @@ -190,7 +196,10 @@ def on_import(hook): ) telemetry.telemetry_writer.add_integration(module, False, PATCH_MODULES.get(module) is True, str(e)) 
telemetry.telemetry_writer.add_count_metric( - "tracers", "integration_errors", 1, (("integration_name", module), ("error_type", type(e).__name__)) + TELEMETRY_NAMESPACE.TRACERS, + "integration_errors", + 1, + (("integration_name", module), ("error_type", type(e).__name__)), ) else: if hasattr(imported_module, "get_versions"): diff --git a/ddtrace/_trace/filters.py b/ddtrace/_trace/filters.py new file mode 100644 index 00000000000..a2e6884f05c --- /dev/null +++ b/ddtrace/_trace/filters.py @@ -0,0 +1,72 @@ +import abc +import re +from typing import TYPE_CHECKING # noqa:F401 +from typing import List # noqa:F401 +from typing import Optional # noqa:F401 +from typing import Union # noqa:F401 + +from ddtrace._trace.processor import TraceProcessor +from ddtrace.ext import http + + +if TYPE_CHECKING: # pragma: no cover + from ddtrace._trace.span import Span # noqa:F401 + + +class TraceFilter(TraceProcessor): + @abc.abstractmethod + def process_trace(self, trace): + # type: (List[Span]) -> Optional[List[Span]] + """Processes a trace. + + None can be returned to prevent the trace from being exported. + """ + pass + + +class FilterRequestsOnUrl(TraceFilter): + r"""Filter out traces from incoming http requests based on the request's url. + + This class takes as argument a list of regular expression patterns + representing the urls to be excluded from tracing. A trace will be excluded + if its root span contains a ``http.url`` tag and if this tag matches any of + the provided regular expression using the standard python regexp match + semantic (https://docs.python.org/3/library/re.html#re.match). + + :param list regexps: a list of regular expressions (or a single string) defining + the urls that should be filtered out. 
+ + Examples: + To filter out http calls to domain api.example.com:: + + FilterRequestsOnUrl(r'http://api\\.example\\.com') + + To filter out http calls to all first level subdomains from example.com:: + + FilterRequestOnUrl(r'http://.*+\\.example\\.com') + + To filter out calls to both http://test.example.com and http://example.com/healthcheck:: + + FilterRequestOnUrl([r'http://test\\.example\\.com', r'http://example\\.com/healthcheck']) + """ + + def __init__(self, regexps: Union[str, List[str]]): + if isinstance(regexps, str): + regexps = [regexps] + self._regexps = [re.compile(regexp) for regexp in regexps] + + def process_trace(self, trace): + # type: (List[Span]) -> Optional[List[Span]] + """ + When the filter is registered in the tracer, process_trace is called by + on each trace before it is sent to the agent, the returned value will + be fed to the next filter in the list. If process_trace returns None, + the whole trace is discarded. + """ + for span in trace: + url = span.get_tag(http.URL) + if span.parent_id is None and url is not None: + for regexp in self._regexps: + if regexp.match(url): + return None + return trace diff --git a/ddtrace/_trace/pin.py b/ddtrace/_trace/pin.py new file mode 100644 index 00000000000..7dd83474749 --- /dev/null +++ b/ddtrace/_trace/pin.py @@ -0,0 +1,229 @@ +from typing import TYPE_CHECKING # noqa:F401 +from typing import Any # noqa:F401 +from typing import Dict # noqa:F401 +from typing import Optional # noqa:F401 + +import wrapt + +import ddtrace +from ddtrace.vendor.debtcollector import deprecate + +from ..internal.logger import get_logger + + +log = get_logger(__name__) + + +# To set attributes on wrapt proxy objects use this prefix: +# http://wrapt.readthedocs.io/en/latest/wrappers.html +_DD_PIN_NAME = "_datadog_pin" +_DD_PIN_PROXY_NAME = "_self_" + _DD_PIN_NAME + + +class Pin(object): + """Pin (a.k.a Patch INfo) is a small class which is used to + set tracing metadata on a particular traced connection. 
+ This is useful if you wanted to, say, trace two different + database clusters. + + >>> conn = sqlite.connect('/tmp/user.db') + >>> # Override a pin for a specific connection + >>> pin = Pin.override(conn, service='user-db') + >>> conn = sqlite.connect('/tmp/image.db') + """ + + __slots__ = ["tags", "tracer", "_target", "_config", "_initialized"] + + def __init__( + self, + service=None, # type: Optional[str] + tags=None, # type: Optional[Dict[str, str]] + tracer=None, + _config=None, # type: Optional[Dict[str, Any]] + ): + # type: (...) -> None + if tracer is not None and tracer is not ddtrace.tracer: + deprecate( + "Initializing ddtrace.Pin with `tracer` argument is deprecated", + message="All Pin instances should use the global tracer instance", + removal_version="3.0.0", + ) + tracer = tracer or ddtrace.tracer + self.tags = tags + self.tracer = tracer + self._target = None # type: Optional[int] + # keep the configuration attribute internal because the + # public API to access it is not the Pin class + self._config = _config or {} # type: Dict[str, Any] + # [Backward compatibility]: service argument updates the `Pin` config + self._config["service_name"] = service + self._initialized = True + + @property + def service(self): + # type: () -> str + """Backward compatibility: accessing to `pin.service` returns the underlying + configuration value. 
+ """ + return self._config["service_name"] + + def __setattr__(self, name, value): + if getattr(self, "_initialized", False) and name != "_target": + raise AttributeError("can't mutate a pin, use override() or clone() instead") + super(Pin, self).__setattr__(name, value) + + def __repr__(self): + return "Pin(service=%s, tags=%s, tracer=%s)" % (self.service, self.tags, self.tracer) + + @staticmethod + def _find(*objs): + # type: (Any) -> Optional[Pin] + """ + Return the first :class:`ddtrace.pin.Pin` found on any of the provided objects or `None` if none were found + + + >>> pin = Pin._find(wrapper, instance, conn) + + :param objs: The objects to search for a :class:`ddtrace.pin.Pin` on + :type objs: List of objects + :rtype: :class:`ddtrace.pin.Pin`, None + :returns: The first found :class:`ddtrace.pin.Pin` or `None` is none was found + """ + for obj in objs: + pin = Pin.get_from(obj) + if pin: + return pin + return None + + @staticmethod + def get_from(obj): + # type: (Any) -> Optional[Pin] + """Return the pin associated with the given object. If a pin is attached to + `obj` but the instance is not the owner of the pin, a new pin is cloned and + attached. This ensures that a pin inherited from a class is a copy for the new + instance, avoiding that a specific instance overrides other pins values. 
+ + >>> pin = Pin.get_from(conn) + + :param obj: The object to look for a :class:`ddtrace.pin.Pin` on + :type obj: object + :rtype: :class:`ddtrace.pin.Pin`, None + :returns: :class:`ddtrace.pin.Pin` associated with the object, or None if none was found + """ + if hasattr(obj, "__getddpin__"): + return obj.__getddpin__() + + pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME + pin = getattr(obj, pin_name, None) + # detect if the PIN has been inherited from a class + if pin is not None and pin._target != id(obj): + pin = pin.clone() + pin.onto(obj) + return pin + + @classmethod + def override( + cls, + obj, # type: Any + service=None, # type: Optional[str] + tags=None, # type: Optional[Dict[str, str]] + tracer=None, + ): + # type: (...) -> None + """Override an object with the given attributes. + + That's the recommended way to customize an already instrumented client, without + losing existing attributes. + + >>> conn = sqlite.connect('/tmp/user.db') + >>> # Override a pin for a specific connection + >>> Pin.override(conn, service='user-db') + """ + if tracer is not None: + deprecate( + "Calling ddtrace.Pin.override(...) with the `tracer` argument is deprecated", + message="All Pin instances should use the global tracer instance", + removal_version="3.0.0", + ) + if not obj: + return + + pin = cls.get_from(obj) + if pin is None: + Pin(service=service, tags=tags, tracer=tracer).onto(obj) + else: + pin.clone(service=service, tags=tags, tracer=tracer).onto(obj) + + def enabled(self): + # type: () -> bool + """Return true if this pin's tracer is enabled.""" + # inline to avoid circular imports + from ddtrace.settings.asm import config as asm_config + + return bool(self.tracer) and (self.tracer.enabled or asm_config._apm_opt_out) + + def onto(self, obj, send=True): + # type: (Any, bool) -> None + """Patch this pin onto the given object. If send is true, it will also + queue the metadata to be sent to the server. 
+ """ + # Actually patch it on the object. + try: + if hasattr(obj, "__setddpin__"): + return obj.__setddpin__(self) + + pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME + + # set the target reference; any get_from, clones and retarget the new PIN + self._target = id(obj) + if self.service: + ddtrace.config._add_extra_service(self.service) + return setattr(obj, pin_name, self) + except AttributeError: + log.debug("can't pin onto object. skipping", exc_info=True) + + def remove_from(self, obj): + # type: (Any) -> None + # Remove pin from the object. + try: + pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME + + pin = Pin.get_from(obj) + if pin is not None: + delattr(obj, pin_name) + except AttributeError: + log.debug("can't remove pin from object. skipping", exc_info=True) + + def clone( + self, + service=None, # type: Optional[str] + tags=None, # type: Optional[Dict[str, str]] + tracer=None, + ): + # type: (...) -> Pin + """Return a clone of the pin with the given attributes replaced.""" + # do a shallow copy of Pin dicts + if not tags and self.tags: + tags = self.tags.copy() + + if tracer is not None: + deprecate( + "Initializing ddtrace.Pin with `tracer` argument is deprecated", + message="All Pin instances should use the global tracer instance", + removal_version="3.0.0", + ) + + # we use a copy instead of a deepcopy because we expect configurations + # to have only a root level dictionary without nested objects. 
Using + # deepcopy introduces a big overhead: + # + # copy: 0.00654911994934082 + # deepcopy: 0.2787208557128906 + config = self._config.copy() + + return Pin( + service=service or self.service, + tags=tags, + tracer=tracer or self.tracer, # do not clone the Tracer + _config=config, + ) diff --git a/ddtrace/_trace/processor/__init__.py b/ddtrace/_trace/processor/__init__.py index 03d815b86d2..fc59a64828b 100644 --- a/ddtrace/_trace/processor/__init__.py +++ b/ddtrace/_trace/processor/__init__.py @@ -26,7 +26,7 @@ from ddtrace.internal.sampling import is_single_span_sampled from ddtrace.internal.service import ServiceStatusError from ddtrace.internal.telemetry.constants import TELEMETRY_LOG_LEVEL -from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_TRACER +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.internal.writer import TraceWriter @@ -392,6 +392,6 @@ def _queue_span_count_metrics(self, metric_name: str, tag_name: str, min_count: if config._telemetry_enabled and sum(self._span_metrics[metric_name].values()) >= min_count: for tag_value, count in self._span_metrics[metric_name].items(): telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_TRACER, metric_name, count, tags=((tag_name, tag_value),) + TELEMETRY_NAMESPACE.TRACERS, metric_name, count, tags=((tag_name, tag_value),) ) self._span_metrics[metric_name] = defaultdict(int) diff --git a/ddtrace/_trace/telemetry.py b/ddtrace/_trace/telemetry.py index f9cd9ef79b9..929acd101ec 100644 --- a/ddtrace/_trace/telemetry.py +++ b/ddtrace/_trace/telemetry.py @@ -2,11 +2,12 @@ from typing import Tuple from ddtrace.internal.telemetry import telemetry_writer +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE def record_span_pointer_calculation(context: str, span_pointer_count: int) -> None: telemetry_writer.add_count_metric( - namespace="tracers", + namespace=TELEMETRY_NAMESPACE.TRACERS, name="span_pointer_calculation", value=1, 
tags=(("context", context), ("count", _span_pointer_count_to_tag(span_pointer_count))), @@ -45,7 +46,7 @@ def record_span_pointer_calculation_issue( tags += additional_tags telemetry_writer.add_count_metric( - namespace="tracers", + namespace=TELEMETRY_NAMESPACE.TRACERS, name="span_pointer_calculation.issue", value=1, tags=tags, diff --git a/ddtrace/_trace/tracer.py b/ddtrace/_trace/tracer.py index 502e12ece07..84b9252930a 100644 --- a/ddtrace/_trace/tracer.py +++ b/ddtrace/_trace/tracer.py @@ -34,7 +34,6 @@ from ddtrace.constants import HOSTNAME_KEY from ddtrace.constants import PID from ddtrace.constants import VERSION_KEY -from ddtrace.filters import TraceFilter from ddtrace.internal import agent from ddtrace.internal import atexit from ddtrace.internal import compat @@ -103,7 +102,7 @@ def _start_appsec_processor() -> Optional[Any]: def _default_span_processors_factory( - trace_filters: List[TraceFilter], + trace_filters: List[TraceProcessor], trace_writer: TraceWriter, partial_flush_enabled: bool, partial_flush_min_spans: int, @@ -195,6 +194,7 @@ class Tracer(object): """ SHUTDOWN_TIMEOUT = 5 + _instance = None def __init__( self, @@ -210,7 +210,25 @@ def __init__( :param dogstatsd_url: The DogStatsD URL. """ - self._filters: List[TraceFilter] = [] + # Do not set self._instance if this is a subclass of Tracer. Here we only want + # to reference the global instance. + if type(self) is Tracer: + if Tracer._instance is None: + Tracer._instance = self + else: + # ddtrace library does not support context propagation for multiple tracers. + # All instances of ddtrace ContextProviders share the same ContextVars. This means that + # if you create multiple instances of Tracer, spans will be shared between them creating a + # broken experience. + # TODO(mabdinur): Convert this warning to an ValueError in 3.0.0 + deprecate( + "Support for multiple Tracer instances is deprecated", + ". 
Use ddtrace.tracer instead.", + category=DDTraceDeprecationWarning, + removal_version="3.0.0", + ) + + self._user_trace_processors: List[TraceProcessor] = [] # globally set tags self._tags = config.tags.copy() @@ -264,7 +282,7 @@ def __init__( # Direct link to the appsec processor self._endpoint_call_counter_span_processor = EndpointCallCounterProcessor() self._span_processors, self._appsec_processor, self._deferred_processors = _default_span_processors_factory( - self._filters, + self._user_trace_processors, self._writer, self._partial_flush_enabled, self._partial_flush_min_spans, @@ -421,7 +439,6 @@ def get_log_correlation_context(self, active: Optional[Union[Context, Span]] = N "env": config.env or "", } - # TODO: deprecate this method and make sure users create a new tracer if they need different parameters def configure( self, enabled: Optional[bool] = None, @@ -443,42 +460,135 @@ def configure( appsec_enabled: Optional[bool] = None, iast_enabled: Optional[bool] = None, appsec_standalone_enabled: Optional[bool] = None, + trace_processors: Optional[List[TraceProcessor]] = None, ) -> None: """Configure a Tracer. - :param bool enabled: If True, finished traces will be submitted to the API, else they'll be dropped. - :param str hostname: Hostname running the Trace Agent - :param int port: Port of the Trace Agent - :param str uds_path: The Unix Domain Socket path of the agent. - :param bool https: Whether to use HTTPS or HTTP. - :param object sampler: A custom Sampler instance, locally deciding to totally drop the trace or not. :param object context_provider: The ``ContextProvider`` that will be used to retrieve automatically the current call context. This is an advanced option that usually - doesn't need to be changed from the default value + doesn't need to be changed from the default value. + :param bool appsec_enabled: Enables Application Security Monitoring (ASM) for the tracer. 
+ :param bool iast_enabled: Enables IAST support for the tracer + :param bool appsec_standalone_enabled: When tracing is disabled ensures ASM support is still enabled. + :param List[TraceProcessor] trace_processors: This parameter sets TraceProcessor (ex: TraceFilters). + Trace processors are used to modify and filter traces based on certain criteria. + + :param bool enabled: If True, finished traces will be submitted to the API, else they'll be dropped. + This parameter is deprecated and will be removed. + :param str hostname: Hostname running the Trace Agent. This parameter is deprecated and will be removed. + :param int port: Port of the Trace Agent. This parameter is deprecated and will be removed. + :param str uds_path: The Unix Domain Socket path of the agent. This parameter is deprecated and will be removed. + :param bool https: Whether to use HTTPS or HTTP. This parameter is deprecated and will be removed. + :param object sampler: A custom Sampler instance, locally deciding to totally drop the trace or not. + This parameter is deprecated and will be removed. :param object wrap_executor: callable that is used when a function is decorated with ``Tracer.wrap()``. This is an advanced option that usually doesn't need to be changed - from the default value - :param priority_sampling: This argument is deprecated and will be removed in a future version. + from the default value. This parameter is deprecated and will be removed. + :param priority_sampling: This parameter is deprecated and will be removed in a future version. + :param bool settings: This parameter is deprecated and will be removed. :param str dogstatsd_url: URL for UDP or Unix socket connection to DogStatsD + This parameter is deprecated and will be removed. + :param TraceWriter writer: This parameter is deprecated and will be removed. + :param bool partial_flush_enabled: This parameter is deprecated and will be removed. 
+ :param bool partial_flush_min_spans: This parameter is deprecated and will be removed. + :param str api_version: This parameter is deprecated and will be removed. + :param bool compute_stats_enabled: This parameter is deprecated and will be removed. """ + if settings is not None: + deprecate( + "Support for ``tracer.configure(...)`` with the settings parameter is deprecated", + message="Please use the trace_processors parameter instead of settings['FILTERS'].", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) + trace_processors = (trace_processors or []) + (settings.get("FILTERS") or []) + + return self._configure( + enabled, + hostname, + port, + uds_path, + https, + sampler, + context_provider, + wrap_executor, + priority_sampling, + trace_processors, + dogstatsd_url, + writer, + partial_flush_enabled, + partial_flush_min_spans, + api_version, + compute_stats_enabled, + appsec_enabled, + iast_enabled, + appsec_standalone_enabled, + True, + ) + + def _configure( + self, + enabled: Optional[bool] = None, + hostname: Optional[str] = None, + port: Optional[int] = None, + uds_path: Optional[str] = None, + https: Optional[bool] = None, + sampler: Optional[BaseSampler] = None, + context_provider: Optional[DefaultContextProvider] = None, + wrap_executor: Optional[Callable] = None, + priority_sampling: Optional[bool] = None, + trace_processors: Optional[List[TraceProcessor]] = None, + dogstatsd_url: Optional[str] = None, + writer: Optional[TraceWriter] = None, + partial_flush_enabled: Optional[bool] = None, + partial_flush_min_spans: Optional[int] = None, + api_version: Optional[str] = None, + compute_stats_enabled: Optional[bool] = None, + appsec_enabled: Optional[bool] = None, + iast_enabled: Optional[bool] = None, + appsec_standalone_enabled: Optional[bool] = None, + log_deprecations: bool = False, + ) -> None: if enabled is not None: self.enabled = enabled + if log_deprecations: + deprecate( + "Enabling/Disabling tracing after application start is 
deprecated", + message="Please use DD_TRACE_ENABLED instead.", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) - if priority_sampling is not None: + if priority_sampling is not None and log_deprecations: deprecate( - "Configuring priority sampling on tracing clients is deprecated", + "Disabling priority sampling is deprecated", + message="Calling `tracer.configure(priority_sampling=....) has no effect", version="3.0.0", category=DDTraceDeprecationWarning, ) - if settings is not None: - self._filters = settings.get("FILTERS") or self._filters + if trace_processors is not None: + self._user_trace_processors = trace_processors if partial_flush_enabled is not None: self._partial_flush_enabled = partial_flush_enabled + if log_deprecations: + deprecate( + "Configuring partial flushing after application start is deprecated", + message="Please use DD_TRACE_PARTIAL_FLUSH_ENABLED to enable/disable the partial flushing instead.", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) if partial_flush_min_spans is not None: self._partial_flush_min_spans = partial_flush_min_spans + if log_deprecations: + deprecate( + "Configuring partial flushing after application start is deprecated", + message="Please use DD_TRACE_PARTIAL_FLUSH_MIN_SPANS to set the flushing threshold instead.", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) if appsec_enabled is not None: asm_config._asm_enabled = appsec_enabled @@ -510,10 +620,33 @@ def configure( if sampler is not None: self._sampler = sampler self._user_sampler = self._sampler + if log_deprecations: + deprecate( + "Configuring custom samplers is deprecated", + message="Please use DD_TRACE_SAMPLING_RULES to configure the sample rates instead", + category=DDTraceDeprecationWarning, + removal_version="3.0.0", + ) - self._dogstatsd_url = dogstatsd_url or self._dogstatsd_url + if dogstatsd_url is not None: + if log_deprecations: + deprecate( + "Configuring dogstatsd_url after application start is deprecated", + 
message="Please use DD_DOGSTATSD_URL instead.", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) + self._dogstatsd_url = dogstatsd_url if any(x is not None for x in [hostname, port, uds_path, https]): + if log_deprecations: + deprecate( + "Configuring tracer agent connection after application start is deprecated", + message="Please use DD_TRACE_AGENT_URL instead.", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) + # If any of the parts of the URL have updated, merge them with # the previous writer values. prev_url_parsed = compat.parse.urlparse(self._agent_url) @@ -537,6 +670,13 @@ def configure( new_url = None if compute_stats_enabled is not None: + if log_deprecations: + deprecate( + "Configuring tracer stats computation after application start is deprecated", + message="Please use DD_TRACE_STATS_COMPUTATION_ENABLED instead.", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) self._compute_stats = compute_stats_enabled try: @@ -545,6 +685,14 @@ def configure( # It's possible the writer never got started pass + if api_version is not None and log_deprecations: + deprecate( + "Configuring Tracer API version after application start is deprecated", + message="Please use DD_TRACE_API_VERSION instead.", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) + if writer is not None: self._writer = writer elif any(x is not None for x in [new_url, api_version, sampler, dogstatsd_url, appsec_enabled]): @@ -583,14 +731,14 @@ def configure( uds_path, api_version, sampler, - settings.get("FILTERS") if settings is not None else None, + trace_processors, compute_stats_enabled, appsec_enabled, iast_enabled, ] ): self._span_processors, self._appsec_processor, self._deferred_processors = _default_span_processors_factory( - self._filters, + self._user_trace_processors, self._writer, self._partial_flush_enabled, self._partial_flush_min_spans, @@ -606,6 +754,12 @@ def configure( if wrap_executor is not None: self._wrap_executor = wrap_executor 
+ if log_deprecations: + deprecate( + "Support for tracer.configure(...) with the wrap_executor parameter is deprecated", + version="3.0.0", + category=DDTraceDeprecationWarning, + ) self._generate_diagnostic_logs() @@ -653,7 +807,7 @@ def _child_after_fork(self): # Re-create the background writer thread self._writer = self._writer.recreate() self._span_processors, self._appsec_processor, self._deferred_processors = _default_span_processors_factory( - self._filters, + self._user_trace_processors, self._writer, self._partial_flush_enabled, self._partial_flush_min_spans, @@ -775,8 +929,7 @@ def _start_span( service = config.service_mapping.get(service, service) links = context._span_links if not parent else [] - - if trace_id: + if trace_id or links or context._baggage: # child_of a non-empty context, so either a local child span or from a remote context span = Span( name=name, diff --git a/ddtrace/appsec/_api_security/api_manager.py b/ddtrace/appsec/_api_security/api_manager.py index b5d935626b4..57f50a11fae 100644 --- a/ddtrace/appsec/_api_security/api_manager.py +++ b/ddtrace/appsec/_api_security/api_manager.py @@ -7,10 +7,6 @@ from ddtrace import constants from ddtrace._trace._limits import MAX_SPAN_META_VALUE_LEN -from ddtrace.appsec import _processor as appsec_processor -from ddtrace.appsec._asm_request_context import add_context_callback -from ddtrace.appsec._asm_request_context import call_waf_callback -from ddtrace.appsec._asm_request_context import remove_context_callback from ddtrace.appsec._constants import API_SECURITY from ddtrace.appsec._constants import SPAN_DATA_NAMES from ddtrace.internal.logger import get_logger @@ -55,6 +51,7 @@ def enable(cls) -> None: log.debug("Enabling %s", cls.__name__) cls._instance = cls() cls._instance.start() + log.debug("%s enabled", cls.__name__) @classmethod @@ -75,12 +72,18 @@ def __init__(self) -> None: log.debug("%s initialized", self.__class__.__name__) self._hashtable: collections.OrderedDict[int, float] = 
collections.OrderedDict() + from ddtrace.appsec import _processor as appsec_processor + import ddtrace.appsec._asm_request_context as _asm_request_context + + self._asm_context = _asm_request_context + self._appsec_processor = appsec_processor + def _stop_service(self) -> None: - remove_context_callback(self._schema_callback, global_callback=True) + self._asm_context.remove_context_callback(self._schema_callback, global_callback=True) self._hashtable.clear() def _start_service(self) -> None: - add_context_callback(self._schema_callback, global_callback=True) + self._asm_context.add_context_callback(self._schema_callback, global_callback=True) def _should_collect_schema(self, env, priority: int) -> bool: # Rate limit per route @@ -143,7 +146,7 @@ def _schema_callback(self, env): try: headers = env.waf_addresses.get(SPAN_DATA_NAMES.REQUEST_HEADERS_NO_COOKIES, _sentinel) if headers is not _sentinel: - appsec_processor._set_headers(root, headers, kind="request") + self._appsec_processor._set_headers(root, headers, kind="request") except Exception: log.debug("Failed to enrich request span with headers", exc_info=True) @@ -159,7 +162,7 @@ def _schema_callback(self, env): value = transform(value) waf_payload[address] = value - result = call_waf_callback(waf_payload) + result = self._asm_context.call_waf_callback(waf_payload) if result is None: return for meta, schema in result.derivatives.items(): diff --git a/ddtrace/appsec/_constants.py b/ddtrace/appsec/_constants.py index 45a96834cc1..92b9e239900 100644 --- a/ddtrace/appsec/_constants.py +++ b/ddtrace/appsec/_constants.py @@ -75,6 +75,8 @@ class APPSEC(metaclass=Constant_Class): CUSTOM_EVENT_PREFIX: Literal["appsec.events"] = "appsec.events" USER_LOGIN_EVENT_PREFIX: Literal["_dd.appsec.events.users.login"] = "_dd.appsec.events.users.login" USER_LOGIN_EVENT_PREFIX_PUBLIC: Literal["appsec.events.users.login"] = "appsec.events.users.login" + USER_LOGIN_USERID: Literal["_dd.appsec.usr.id"] = "_dd.appsec.usr.id" + 
USER_LOGIN_USERNAME: Literal["_dd.appsec.usr.login"] = "_dd.appsec.usr.login" USER_LOGIN_EVENT_SUCCESS_TRACK: Literal[ "appsec.events.users.login.success.track" ] = "appsec.events.users.login.success.track" @@ -180,6 +182,7 @@ class WAF_DATA_NAMES(metaclass=Constant_Class): REQUEST_COOKIES: Literal["server.request.cookies"] = "server.request.cookies" REQUEST_HTTP_IP: Literal["http.client_ip"] = "http.client_ip" REQUEST_USER_ID: Literal["usr.id"] = "usr.id" + REQUEST_USERNAME: Literal["usr.login"] = "usr.login" RESPONSE_STATUS: Literal["server.response.status"] = "server.response.status" RESPONSE_HEADERS_NO_COOKIES: Literal["server.response.headers.no_cookies"] = "server.response.headers.no_cookies" RESPONSE_BODY: Literal["server.response.body"] = "server.response.body" @@ -194,6 +197,7 @@ class WAF_DATA_NAMES(metaclass=Constant_Class): REQUEST_COOKIES, REQUEST_HTTP_IP, REQUEST_USER_ID, + REQUEST_USERNAME, RESPONSE_STATUS, RESPONSE_HEADERS_NO_COOKIES, RESPONSE_BODY, diff --git a/ddtrace/appsec/_exploit_prevention/stack_traces.py b/ddtrace/appsec/_exploit_prevention/stack_traces.py index 8d32a028ab9..6c8db41f578 100644 --- a/ddtrace/appsec/_exploit_prevention/stack_traces.py +++ b/ddtrace/appsec/_exploit_prevention/stack_traces.py @@ -8,7 +8,6 @@ from ddtrace._trace.span import Span from ddtrace.appsec._constants import STACK_TRACE from ddtrace.settings.asm import config as asm_config -import ddtrace.tracer def report_stack( @@ -34,7 +33,10 @@ def report_stack( return False if span is None: - span = ddtrace.tracer.current_span() + from ddtrace import tracer + + span = tracer.current_span() + if span is None or stack_id is None: return False root_span = span._local_root or span diff --git a/ddtrace/appsec/_iast/_ast/ast_patching.py b/ddtrace/appsec/_iast/_ast/ast_patching.py index c4a224c2a02..bb3a9c74d44 100644 --- a/ddtrace/appsec/_iast/_ast/ast_patching.py +++ b/ddtrace/appsec/_iast/_ast/ast_patching.py @@ -9,6 +9,7 @@ from types import ModuleType from typing 
import Iterable from typing import Optional +from typing import Set from typing import Text from typing import Tuple @@ -26,57 +27,43 @@ _PREFIX = IAST.PATCH_ADDED_SYMBOL_PREFIX # Prefixes for modules where IAST patching is allowed -IAST_ALLOWLIST: Tuple[Text, ...] = ("tests.appsec.iast.",) +# Only packages that have the test_propagation=True in test_packages and are not in the denylist must be here +IAST_ALLOWLIST: Tuple[Text, ...] = ( + "attrs.", + "beautifulsoup4.", + "cachetools.", + "cryptography.", + "docutils.", + "idna.", + "iniconfig.", + "jinja2.", + "lxml.", + "multidict.", + "platformdirs", + "pygments.", + "pynacl.", + "pyparsing.", + "multipart", + "sqlalchemy.", + "tomli", + "yarl.", +) + +# NOTE: For testing reasons, don't add astunparse here, see test_ast_patching.py IAST_DENYLIST: Tuple[Text, ...] = ( - "altgraph.", - "dipy.", - "black.", - "mypy.", - "mypy_extensions.", - "autopep8.", - "pycodestyle.", - "pydicom.", - "pyinstaller.", - "pystray.", - "contourpy.", - "cx_logging.", - "dateutil.", - "pytz.", - "wcwidth.", - "win32ctypes.", - "xlib.", - "cycler.", - "cython.", - "dnspython.", - "elasticdeform.", - "numpy.", - "matplotlib.", - "skbase.", - "scipy.", - "networkx.", - "imageio.", - "fonttools.", - "nibabel.", - "nilearn.", - "gprof2dot.", - "h5py.", - "kiwisolver.", - "pandas.", - "pdf2image.", - "pefile.", - "pil.", - "threadpoolctl.", - "tifffile.", - "tqdm.", - "trx.", - "flask.", - "werkzeug.", + "_psycopg.", # PostgreSQL adapter for Python (v3) + "_pytest.", "aiohttp._helpers.", "aiohttp._http_parser.", "aiohttp._http_writer.", "aiohttp._websocket.", "aiohttp.log.", "aiohttp.tcp_helpers.", + "aioquic.", + "altgraph.", + "anyio.", + "api_pb2.", # Patching crashes with these auto-generated modules, propagation is not needed + "api_pb2_grpc.", # Patching crashes with these auto-generated modules, propagation is not needed "asyncio.base_events.", "asyncio.base_futures.", "asyncio.base_subprocess.", @@ -99,11 +86,15 @@ 
"asyncio.transports.", "asyncio.trsock.", "asyncio.unix_events.", + "asyncpg.pgproto.", "attr._config.", "attr._next_gen.", "attr.filters.", "attr.setters.", + "autopep8.", "backports.", + "black.", + "blinker.", "boto3.docs.docstring.", "boto3.s3.", "botocore.docs.bcdoc.", @@ -111,6 +102,8 @@ "botocore.vendored.requests.", "brotli.", "brotlicffi.", + "bytecode.", + "cattrs.", "cchardet.", "certifi.", "cffi.", @@ -145,14 +138,23 @@ "colorama.", "concurrent.futures.", "configparser.", + "contourpy.", "coreschema.", "crispy_forms.", + "crypto.", # This module is patched by the IAST patch methods, propagation is not needed + "cx_logging.", + "cycler.", + "cython.", + "dateutil.", "dateutil.", + "ddsketch.", + "ddtrace.", "defusedxml.", + "deprecated.", "difflib.", "dill.info.", "dill.settings.", - "silk.", # django-silk package + "dipy.", "django.apps.config.", "django.apps.registry.", "django.conf.", @@ -298,72 +300,87 @@ "django_filters.rest_framework.filterset.", "django_filters.utils.", "django_filters.widgets.", - "crypto.", # This module is patched by the IAST patch methods, propagation is not needed - "deprecated.", - "api_pb2.", # Patching crashes with these auto-generated modules, propagation is not needed - "api_pb2_grpc.", # Patching crashes with these auto-generated modules, propagation is not needed - "asyncpg.pgproto.", - "blinker.", - "bytecode.", - "cattrs.", - "ddsketch.", - "ddtrace.", + "dnspython.", + "elasticdeform.", "envier.", "exceptiongroup.", + "flask.", + "fonttools.", "freezegun.", # Testing utilities for time manipulation + "google.auth.", + "googlecloudsdk.", + "gprof2dot.", + "h11.", + "h5py.", + "httpcore.", + "httptools.", + "httpx.", "hypothesis.", # Testing utilities + "imageio.", "importlib_metadata.", "inspect.", # this package is used to get the stack frames, propagation is not needed "itsdangerous.", + "kiwisolver.", + "matplotlib.", "moto.", # used for mocking AWS, propagation is not needed + "mypy.", + "mypy_extensions.", + 
"networkx.", + "nibabel.", + "nilearn.", + "numba.", + "numpy.", "opentelemetry-api.", "packaging.", + "pandas.", + "pdf2image.", + "pefile.", + "pil.", "pip.", "pkg_resources.", "pluggy.", "protobuf.", "psycopg.", # PostgreSQL adapter for Python (v3) - "_psycopg.", # PostgreSQL adapter for Python (v3) "psycopg2.", # PostgreSQL adapter for Python (v2) + "pycodestyle.", "pycparser.", # this package is called when a module is imported, propagation is not needed + "pydicom.", + "pyinstaller.", + "pynndescent.", + "pystray.", "pytest.", # Testing framework - "_pytest.", + "pytz.", + "rich.", + "sanic.", + "scipy.", "setuptools.", + "silk.", # django-silk package + "skbase.", "sklearn.", # Machine learning library + "sniffio.", "sqlalchemy.orm.interfaces.", # Performance optimization + "threadpoolctl.", + "tifffile.", + "tqdm.", + "trx.", "typing_extensions.", + "umap.", "unittest.mock.", - "uvloop.", "urlpatterns_reverse.tests.", # assertRaises eat exceptions in native code, so we don't call the original function - "wrapt.", - "zipp.", - # This is a workaround for Sanic failures: + "uvicorn.", + "uvloop.", + "wcwidth.", "websocket.", - "h11.", - "aioquic.", - "httptools.", - "sniffio.", - "sanic.", - "rich.", - "httpx.", "websockets.", - "uvicorn.", - "anyio.", - "httpcore.", - "google.auth.", - "googlecloudsdk.", - "umap.", - "pynndescent.", - "numba.", + "werkzeug.", + "win32ctypes.", + "wrapt.", + "xlib.", + "zipp.", ) - -if IAST.PATCH_MODULES in os.environ: - IAST_ALLOWLIST += tuple(os.environ[IAST.PATCH_MODULES].split(IAST.SEP_MODULES)) - -if IAST.DENY_MODULES in os.environ: - IAST_DENYLIST += tuple(os.environ[IAST.DENY_MODULES].split(IAST.SEP_MODULES)) - +USER_ALLOWLIST = tuple(os.environ.get(IAST.PATCH_MODULES, "").split(IAST.SEP_MODULES)) +USER_DENYLIST = tuple(os.environ.get(IAST.DENY_MODULES, "").split(IAST.SEP_MODULES)) ENCODING = "" @@ -399,6 +416,8 @@ def build_trie(words: Iterable[str]) -> _TrieNode: _TRIE_ALLOWLIST = build_trie(IAST_ALLOWLIST) 
_TRIE_DENYLIST = build_trie(IAST_DENYLIST) +_TRIE_USER_ALLOWLIST = build_trie(USER_ALLOWLIST) +_TRIE_USER_DENYLIST = build_trie(USER_DENYLIST) def _trie_has_prefix_for(trie: _TrieNode, string: str) -> bool: @@ -429,11 +448,26 @@ def get_encoding(module_path: Text) -> Text: _NOT_PATCH_MODULE_NAMES = {i.lower() for i in _stdlib_for_python_version() | set(builtin_module_names)} +_IMPORTLIB_PACKAGES: Set[str] = set() + def _in_python_stdlib(module_name: str) -> bool: return module_name.split(".")[0].lower() in _NOT_PATCH_MODULE_NAMES +def _is_first_party(module_name: str): + global _IMPORTLIB_PACKAGES + if "vendor." in module_name or "vendored." in module_name: + return False + + if not _IMPORTLIB_PACKAGES: + from ddtrace.internal.packages import get_package_distributions + + _IMPORTLIB_PACKAGES = set(get_package_distributions()) + + return module_name.split(".")[0] not in _IMPORTLIB_PACKAGES + + def _should_iast_patch(module_name: Text) -> bool: """ select if module_name should be patch from the longest prefix that match in allow or deny list. @@ -444,17 +478,30 @@ def _should_iast_patch(module_name: Text) -> bool: # max_deny = max((len(prefix) for prefix in IAST_DENYLIST if module_name.startswith(prefix)), default=-1) # diff = max_allow - max_deny # return diff > 0 or (diff == 0 and not _in_python_stdlib_or_third_party(module_name)) + if _in_python_stdlib(module_name): + log.debug("IAST: denying %s. it's in the _in_python_stdlib", module_name) + return False + + if _is_first_party(module_name): + return True + + # else: third party. Check that is in the allow list and not in the deny list dotted_module_name = module_name.lower() + "." 
+ + # User allow or deny list set by env var have priority + if _trie_has_prefix_for(_TRIE_USER_ALLOWLIST, dotted_module_name): + return True + + if _trie_has_prefix_for(_TRIE_USER_DENYLIST, dotted_module_name): + return False + if _trie_has_prefix_for(_TRIE_ALLOWLIST, dotted_module_name): + if _trie_has_prefix_for(_TRIE_DENYLIST, dotted_module_name): + return False log.debug("IAST: allowing %s. it's in the IAST_ALLOWLIST", module_name) return True - if _trie_has_prefix_for(_TRIE_DENYLIST, dotted_module_name): - log.debug("IAST: denying %s. it's in the IAST_DENYLIST", module_name) - return False - if _in_python_stdlib(module_name): - log.debug("IAST: denying %s. it's in the _in_python_stdlib", module_name) - return False - return True + log.debug("IAST: denying %s. it's in the IAST_DENYLIST", module_name) + return False def visit_ast( diff --git a/ddtrace/appsec/_iast/_handlers.py b/ddtrace/appsec/_iast/_handlers.py index 33dd7aa348b..e873d8a6a5a 100644 --- a/ddtrace/appsec/_iast/_handlers.py +++ b/ddtrace/appsec/_iast/_handlers.py @@ -171,8 +171,6 @@ def _on_django_func_wrapped(fn_args, fn_kwargs, first_arg_expected_type, *_): http_req = fn_args[0] http_req.COOKIES = taint_structure(http_req.COOKIES, OriginType.COOKIE_NAME, OriginType.COOKIE) - http_req.GET = taint_structure(http_req.GET, OriginType.PARAMETER_NAME, OriginType.PARAMETER) - http_req.POST = taint_structure(http_req.POST, OriginType.BODY, OriginType.BODY) if ( getattr(http_req, "_body", None) is not None and len(getattr(http_req, "_body", None)) > 0 @@ -202,6 +200,8 @@ def _on_django_func_wrapped(fn_args, fn_kwargs, first_arg_expected_type, *_): except AttributeError: log.debug("IAST can't set attribute http_req.body", exc_info=True) + http_req.GET = taint_structure(http_req.GET, OriginType.PARAMETER_NAME, OriginType.PARAMETER) + http_req.POST = taint_structure(http_req.POST, OriginType.PARAMETER_NAME, OriginType.BODY) http_req.headers = taint_structure(http_req.headers, OriginType.HEADER_NAME, 
OriginType.HEADER) http_req.path = taint_pyobject( http_req.path, source_name="path", source_value=http_req.path, source_origin=OriginType.PATH diff --git a/ddtrace/appsec/_iast/_metrics.py b/ddtrace/appsec/_iast/_metrics.py index e9e0f604e69..35e2729565e 100644 --- a/ddtrace/appsec/_iast/_metrics.py +++ b/ddtrace/appsec/_iast/_metrics.py @@ -12,7 +12,7 @@ from ddtrace.internal import telemetry from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry.constants import TELEMETRY_LOG_LEVEL -from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_IAST +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.settings.asm import config as asm_config @@ -73,19 +73,19 @@ def _set_metric_iast_instrumented_source(source_type): from ._taint_tracking import origin_to_str telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_IAST, "instrumented.source", 1, (("source_type", origin_to_str(source_type)),) + TELEMETRY_NAMESPACE.IAST, "instrumented.source", 1, (("source_type", origin_to_str(source_type)),) ) @metric_verbosity(TELEMETRY_MANDATORY_VERBOSITY) def _set_metric_iast_instrumented_propagation(): - telemetry.telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_IAST, "instrumented.propagation", 1) + telemetry.telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.IAST, "instrumented.propagation", 1) @metric_verbosity(TELEMETRY_MANDATORY_VERBOSITY) def _set_metric_iast_instrumented_sink(vulnerability_type, counter=1): telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_IAST, "instrumented.sink", counter, (("vulnerability_type", vulnerability_type),) + TELEMETRY_NAMESPACE.IAST, "instrumented.sink", counter, (("vulnerability_type", vulnerability_type),) ) @@ -94,14 +94,14 @@ def _set_metric_iast_executed_source(source_type): from ._taint_tracking import origin_to_str telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_IAST, "executed.source", 1, 
(("source_type", origin_to_str(source_type)),) + TELEMETRY_NAMESPACE.IAST, "executed.source", 1, (("source_type", origin_to_str(source_type)),) ) @metric_verbosity(TELEMETRY_INFORMATION_VERBOSITY) def _set_metric_iast_executed_sink(vulnerability_type): telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_IAST, "executed.sink", 1, (("vulnerability_type", vulnerability_type),) + TELEMETRY_NAMESPACE.IAST, "executed.sink", 1, (("vulnerability_type", vulnerability_type),) ) @@ -115,9 +115,7 @@ def _request_tainted(): def _set_metric_iast_request_tainted(): total_objects_tainted = _request_tainted() if total_objects_tainted > 0: - telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_IAST, "request.tainted", total_objects_tainted - ) + telemetry.telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.IAST, "request.tainted", total_objects_tainted) def _set_span_tag_iast_request_tainted(span): diff --git a/ddtrace/appsec/_iast/taint_sinks/code_injection.py b/ddtrace/appsec/_iast/taint_sinks/code_injection.py index 402952a504b..3c1673de004 100644 --- a/ddtrace/appsec/_iast/taint_sinks/code_injection.py +++ b/ddtrace/appsec/_iast/taint_sinks/code_injection.py @@ -13,11 +13,10 @@ from ddtrace.appsec._iast._patch import try_wrap_function_wrapper from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast.constants import VULN_CODE_INJECTION +from ddtrace.appsec._iast.taint_sinks._base import VulnerabilityBase from ddtrace.internal.logger import get_logger from ddtrace.settings.asm import config as asm_config -from ._base import VulnerabilityBase - log = get_logger(__name__) @@ -52,29 +51,27 @@ def _iast_coi(wrapped, instance, args, kwargs): if asm_config._iast_enabled and len(args) >= 1: _iast_report_code_injection(args[0]) - return wrapped(*args, **kwargs) - - -def _iast_coi_exec(wrapped, instance, args, kwargs): - if asm_config._iast_enabled and len(args) >= 1: - 
_iast_report_code_injection(args[0]) - - caller_frame = inspect.currentframe().f_back.f_back - if caller_frame is None: - return wrapped(*args, **kwargs) - - caller_globals = caller_frame.f_globals - caller_locals = caller_frame.f_locals - - original_globals = {} + caller_frame = None if len(args) > 1: - original_globals = args[1] + func_globals = args[1] + elif kwargs.get("globals"): + func_globals = kwargs.get("globals") + else: + frames = inspect.currentframe() + caller_frame = frames.f_back + func_globals = caller_frame.f_globals - original_locals = {} if len(args) > 2: - original_locals = args[2] - - return wrapped(args[0], {**caller_globals, **original_globals}, {**caller_locals, **original_locals}) + func_locals = args[2] + elif kwargs.get("locals"): + func_locals = kwargs.get("locals") + else: + if caller_frame is None: + frames = inspect.currentframe() + caller_frame = frames.f_back + func_locals = caller_frame.f_locals + + return wrapped(args[0], func_globals, func_locals) @oce.register diff --git a/ddtrace/appsec/_metrics.py b/ddtrace/appsec/_metrics.py index cbe8490d717..75c329b50e1 100644 --- a/ddtrace/appsec/_metrics.py +++ b/ddtrace/appsec/_metrics.py @@ -1,16 +1,16 @@ from ddtrace.appsec import _asm_request_context from ddtrace.appsec import _constants -from ddtrace.appsec._ddwaf import version as _version from ddtrace.appsec._deduplications import deduplication +from ddtrace.appsec._processor import ddwaf from ddtrace.internal import telemetry from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry.constants import TELEMETRY_LOG_LEVEL -from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_APPSEC +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) -DDWAF_VERSION = _version() +DDWAF_VERSION = ddwaf.version() @deduplication @@ -36,7 +36,7 @@ def _set_waf_updates_metric(info): tags = (("waf_version", DDWAF_VERSION),) telemetry.telemetry_writer.add_count_metric( - 
TELEMETRY_NAMESPACE_TAG_APPSEC, + TELEMETRY_NAMESPACE.APPSEC, "waf.updates", 1.0, tags=tags, @@ -56,7 +56,7 @@ def _set_waf_init_metric(info): tags = (("waf_version", DDWAF_VERSION),) telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_APPSEC, + TELEMETRY_NAMESPACE.APPSEC, "waf.init", 1.0, tags=tags, @@ -90,7 +90,7 @@ def _set_waf_request_metrics(*args): ) telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_APPSEC, + TELEMETRY_NAMESPACE.APPSEC, "waf.requests", 1.0, tags=tags_request, @@ -101,7 +101,7 @@ def _set_waf_request_metrics(*args): for rule_type, value in rasp[t].items(): if value: telemetry.telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_APPSEC, + TELEMETRY_NAMESPACE.APPSEC, n, float(value), tags=_TYPES_AND_TAGS.get(rule_type, ()) + (("waf_version", DDWAF_VERSION),), diff --git a/ddtrace/appsec/_processor.py b/ddtrace/appsec/_processor.py index 54a9f624afe..fb18c268fdb 100644 --- a/ddtrace/appsec/_processor.py +++ b/ddtrace/appsec/_processor.py @@ -24,12 +24,7 @@ from ddtrace.appsec._constants import STACK_TRACE from ddtrace.appsec._constants import WAF_ACTIONS from ddtrace.appsec._constants import WAF_DATA_NAMES -from ddtrace.appsec._ddwaf import DDWaf_result -from ddtrace.appsec._ddwaf.ddwaf_types import ddwaf_context_capsule from ddtrace.appsec._exploit_prevention.stack_traces import report_stack -from ddtrace.appsec._metrics import _set_waf_init_metric -from ddtrace.appsec._metrics import _set_waf_request_metrics -from ddtrace.appsec._metrics import _set_waf_updates_metric from ddtrace.appsec._trace_utils import _asm_manual_keep from ddtrace.appsec._utils import has_triggers from ddtrace.constants import ORIGIN_KEY @@ -141,12 +136,11 @@ def enabled(self): def __post_init__(self) -> None: from ddtrace.appsec import load_appsec - from ddtrace.appsec._ddwaf import DDWaf load_appsec() self.obfuscation_parameter_key_regexp = asm_config._asm_obfuscation_parameter_key_regexp.encode() 
self.obfuscation_parameter_value_regexp = asm_config._asm_obfuscation_parameter_value_regexp.encode() - self._rules = None + self._rules: Optional[Dict[str, Any]] = None try: with open(self.rule_filename, "r") as f: self._rules = json.load(f) @@ -169,12 +163,15 @@ def __post_init__(self) -> None: # TODO: try to log reasons log.error("[DDAS-0001-03] ASM could not read the rule file %s.", self.rule_filename) raise + + def delayed_init(self) -> None: try: - self._ddwaf = DDWaf( - self._rules, self.obfuscation_parameter_key_regexp, self.obfuscation_parameter_value_regexp - ) + if self._rules is not None and not hasattr(self, "_ddwaf"): + self._ddwaf = ddwaf.DDWaf( + self._rules, self.obfuscation_parameter_key_regexp, self.obfuscation_parameter_value_regexp + ) _set_waf_init_metric(self._ddwaf.info) - except ValueError: + except Exception: # Partial of DDAS-0005-00 log.warning("[DDAS-0005-00] WAF initialization failed") raise @@ -190,6 +187,8 @@ def _update_required(self): self._addresses_to_keep.add(WAF_DATA_NAMES.RESPONSE_HEADERS_NO_COOKIES) def _update_rules(self, new_rules: Dict[str, Any]) -> bool: + if not hasattr(self, "_ddwaf"): + self.delayed_init() result = False if asm_config._asm_static_rule_file is not None: return result @@ -221,6 +220,9 @@ def rasp_sqli_enabled(self) -> bool: def on_span_start(self, span: Span) -> None: from ddtrace.contrib import trace_utils + if not hasattr(self, "_ddwaf"): + self.delayed_init() + if span.span_type not in {SpanTypes.WEB, SpanTypes.GRPC}: return @@ -258,11 +260,12 @@ def waf_callable(custom_data=None, **kwargs): def _waf_action( self, span: Span, - ctx: ddwaf_context_capsule, + ctx: "ddwaf.ddwaf_types.ddwaf_context_capsule", custom_data: Optional[Dict[str, Any]] = None, crop_trace: Optional[str] = None, rule_type: Optional[str] = None, - ) -> Optional[DDWaf_result]: + force_sent: bool = False, + ) -> Optional["ddwaf.DDWaf_result"]: """ Call the `WAF` with the given parameters. 
If `custom_data_names` is specified as a list of `(WAF_NAME, WAF_STR)` tuples specifying what values of the `WAF_DATA_NAMES` @@ -293,7 +296,7 @@ def _waf_action( force_keys = custom_data.get("PROCESSOR_SETTINGS", {}).get("extract-schema", False) if custom_data else False for key, waf_name in iter_data: # type: ignore[attr-defined] - if key in data_already_sent: + if key in data_already_sent and not force_sent: continue # ensure ephemeral addresses are sent, event when value is None if waf_name not in WAF_DATA_NAMES.PERSISTENT_ADDRESSES and custom_data: @@ -433,3 +436,10 @@ def on_span_finish(self, span: Span) -> None: del self._span_to_waf_ctx[s] except Exception: # nosec B110 pass + + +# load waf at the end only to avoid possible circular imports with gevent +import ddtrace.appsec._ddwaf as ddwaf # noqa: E402 +from ddtrace.appsec._metrics import _set_waf_init_metric # noqa: E402 +from ddtrace.appsec._metrics import _set_waf_request_metrics # noqa: E402 +from ddtrace.appsec._metrics import _set_waf_updates_metric # noqa: E402 diff --git a/ddtrace/appsec/_remoteconfiguration.py b/ddtrace/appsec/_remoteconfiguration.py index 1a6fd2e4b6c..e4785cd5f40 100644 --- a/ddtrace/appsec/_remoteconfiguration.py +++ b/ddtrace/appsec/_remoteconfiguration.py @@ -221,12 +221,12 @@ def _appsec_1click_activation(features: Mapping[str, Any], test_tracer: Optional if rc_asm_enabled: if not asm_config._asm_enabled: - tracer.configure(appsec_enabled=True) + tracer._configure(appsec_enabled=True) else: asm_config._asm_enabled = True else: if asm_config._asm_enabled: - tracer.configure(appsec_enabled=False) + tracer._configure(appsec_enabled=False) else: asm_config._asm_enabled = False diff --git a/ddtrace/appsec/_trace_utils.py b/ddtrace/appsec/_trace_utils.py index 3603c18533c..77cb1aaca3a 100644 --- a/ddtrace/appsec/_trace_utils.py +++ b/ddtrace/appsec/_trace_utils.py @@ -9,11 +9,13 @@ from ddtrace.appsec._asm_request_context import in_asm_context from ddtrace.appsec._constants import 
APPSEC from ddtrace.appsec._constants import LOGIN_EVENTS_MODE +from ddtrace.appsec._constants import WAF_ACTIONS from ddtrace.appsec._utils import _hash_user_id from ddtrace.contrib.trace_utils import set_user from ddtrace.ext import SpanTypes from ddtrace.ext import user from ddtrace.internal import core +from ddtrace.internal._exceptions import BlockingException from ddtrace.internal.logger import get_logger from ddtrace.settings.asm import config as asm_config @@ -71,6 +73,9 @@ def _track_user_login_common( span.set_tag_str("%s.%s" % (tag_metadata_prefix, k), str(v)) if login: + span.set_tag_str(f"{APPSEC.USER_LOGIN_EVENT_PREFIX_PUBLIC}.{success_str}.usr.login", login) + if login_events_mode != LOGIN_EVENTS_MODE.SDK: + span.set_tag_str(APPSEC.USER_LOGIN_USERNAME, login) span.set_tag_str("%s.login" % tag_prefix, login) if email: @@ -118,19 +123,31 @@ def track_user_login_success_event( real_mode = login_events_mode if login_events_mode != LOGIN_EVENTS_MODE.AUTO else asm_config._user_event_mode if real_mode == LOGIN_EVENTS_MODE.DISABLED: return + initial_login = login + initial_user_id = user_id if real_mode == LOGIN_EVENTS_MODE.ANON: - login = name = email = None + name = email = None + login = None if login is None else _hash_user_id(str(login)) span = _track_user_login_common(tracer, True, metadata, login_events_mode, login, name, email, span) if not span: return - if real_mode == LOGIN_EVENTS_MODE.ANON and isinstance(user_id, str): user_id = _hash_user_id(user_id) - if in_asm_context(): - call_waf_callback(custom_data={"REQUEST_USER_ID": str(user_id), "LOGIN_SUCCESS": real_mode}) - + if login_events_mode != LOGIN_EVENTS_MODE.SDK: + span.set_tag_str(APPSEC.USER_LOGIN_USERID, str(user_id)) set_user(tracer, user_id, name, email, scope, role, session_id, propagate, span) + if in_asm_context(): + res = call_waf_callback( + custom_data={ + "REQUEST_USER_ID": str(initial_user_id) if initial_user_id else None, + "REQUEST_USERNAME": initial_login, + "LOGIN_SUCCESS": 
real_mode, + }, + force_sent=True, + ) + if res and any(action in [WAF_ACTIONS.BLOCK_ACTION, WAF_ACTIONS.REDIRECT_ACTION] for action in res.actions): + raise BlockingException(get_blocked()) def track_user_login_failure_event( @@ -154,7 +171,9 @@ def track_user_login_failure_event( real_mode = login_events_mode if login_events_mode != LOGIN_EVENTS_MODE.AUTO else asm_config._user_event_mode if real_mode == LOGIN_EVENTS_MODE.DISABLED: return - span = _track_user_login_common(tracer, False, metadata, login_events_mode) + if real_mode == LOGIN_EVENTS_MODE.ANON and isinstance(login, str): + login = _hash_user_id(login) + span = _track_user_login_common(tracer, False, metadata, login_events_mode, login) if not span: return if exists is not None: @@ -163,6 +182,8 @@ def track_user_login_failure_event( if user_id: if real_mode == LOGIN_EVENTS_MODE.ANON and isinstance(user_id, str): user_id = _hash_user_id(user_id) + if login_events_mode != LOGIN_EVENTS_MODE.SDK: + span.set_tag_str(APPSEC.USER_LOGIN_USERID, str(user_id)) span.set_tag_str("%s.failure.%s" % (APPSEC.USER_LOGIN_EVENT_PREFIX_PUBLIC, user.ID), str(user_id)) # if called from the SDK, set the login, email and name if login_events_mode in (LOGIN_EVENTS_MODE.SDK, LOGIN_EVENTS_MODE.AUTO): @@ -183,7 +204,7 @@ def track_user_signup_event( if span: success_str = "true" if success else "false" span.set_tag_str(APPSEC.USER_SIGNUP_EVENT, success_str) - span.set_tag_str(user.ID, user_id) + span.set_tag_str(user.ID, str(user_id)) _asm_manual_keep(span) # This is used to mark if the call was done from the SDK of the automatic login events @@ -258,7 +279,7 @@ def should_block_user(tracer: Tracer, userid: str) -> bool: if get_blocked(): return True - _asm_request_context.call_waf_callback(custom_data={"REQUEST_USER_ID": str(userid)}) + _asm_request_context.call_waf_callback(custom_data={"REQUEST_USER_ID": str(userid)}, force_sent=True) return bool(get_blocked()) @@ -295,23 +316,16 @@ def block_request_if_user_blocked(tracer: 
Tracer, userid: str) -> None: _asm_request_context.block_request() -def _on_django_login( - pin, - request, - user, - mode, - info_retriever, - django_config, -): +def _on_django_login(pin, request, user, mode, info_retriever, django_config): if user: from ddtrace.contrib.internal.django.compat import user_is_authenticated + user_id, user_extra = info_retriever.get_user_info( + login=django_config.include_user_login, + email=django_config.include_user_email, + name=django_config.include_user_realname, + ) if user_is_authenticated(user): - user_id, user_extra = info_retriever.get_user_info( - login=django_config.include_user_login, - email=django_config.include_user_email, - name=django_config.include_user_realname, - ) with pin.tracer.trace("django.contrib.auth.login", span_type=SpanTypes.AUTH): session_key = getattr(request, "session_key", None) track_user_login_success_event( @@ -324,8 +338,10 @@ def _on_django_login( ) else: # Login failed and the user is unknown (may exist or not) - user_id = info_retriever.get_userid() - track_user_login_failure_event(pin.tracer, user_id=user_id, login_events_mode=mode) + # DEV: DEAD CODE? 
+ track_user_login_failure_event( + pin.tracer, user_id=user_id, login_events_mode=mode, login=user_extra.get("login", None) + ) def _on_django_auth(result_user, mode, kwargs, pin, info_retriever, django_config): @@ -344,17 +360,18 @@ def _on_django_auth(result_user, mode, kwargs, pin, info_retriever, django_confi if not result_user: with pin.tracer.trace("django.contrib.auth.login", span_type=SpanTypes.AUTH): exists = info_retriever.user_exists() - if exists: - user_id, user_extra = info_retriever.get_user_info( - login=django_config.include_user_login, - email=django_config.include_user_email, - name=django_config.include_user_realname, - ) - track_user_login_failure_event( - pin.tracer, user_id=user_id, login_events_mode=mode, exists=True, **user_extra - ) - else: - track_user_login_failure_event(pin.tracer, user_id=user_id, login_events_mode=mode, exists=False) + user_id_found, user_extra = info_retriever.get_user_info( + login=django_config.include_user_login, + email=django_config.include_user_email, + name=django_config.include_user_realname, + ) + if user_extra.get("login") is None: + user_extra["login"] = user_id + user_id = user_id_found or user_id + + track_user_login_failure_event( + pin.tracer, user_id=user_id, login_events_mode=mode, exists=exists, **user_extra + ) return False, None diff --git a/ddtrace/contrib/aiomysql/__init__.py b/ddtrace/contrib/aiomysql/__init__.py index d1ca2acc7ea..5b7b0c15ab0 100644 --- a/ddtrace/contrib/aiomysql/__init__.py +++ b/ddtrace/contrib/aiomysql/__init__.py @@ -19,7 +19,7 @@ To configure the integration on an per-connection basis use the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin import asyncio import aiomysql diff --git a/ddtrace/contrib/aiopg/__init__.py b/ddtrace/contrib/aiopg/__init__.py index 704011ec584..ba99150eedb 100644 --- a/ddtrace/contrib/aiopg/__init__.py +++ b/ddtrace/contrib/aiopg/__init__.py @@ -1,7 +1,8 @@ """ Instrument aiopg to report a span for each executed Postgres 
queries:: - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin import aiopg # If not patched yet, you can patch aiopg specifically diff --git a/ddtrace/contrib/aioredis/__init__.py b/ddtrace/contrib/aioredis/__init__.py index 16c4970839b..f2373f6820b 100644 --- a/ddtrace/contrib/aioredis/__init__.py +++ b/ddtrace/contrib/aioredis/__init__.py @@ -55,7 +55,7 @@ ``Pin`` API:: import aioredis - from ddtrace import Pin + from ddtrace.trace import Pin myaioredis = aioredis.Aioredis() Pin.override(myaioredis, service="myaioredis") diff --git a/ddtrace/contrib/anthropic/__init__.py b/ddtrace/contrib/anthropic/__init__.py index 96d2e504acf..2faaf16b87c 100644 --- a/ddtrace/contrib/anthropic/__init__.py +++ b/ddtrace/contrib/anthropic/__init__.py @@ -76,7 +76,8 @@ ``Pin`` API:: import anthropic - from ddtrace import Pin, config + from ddtrace import config + from ddtrace.trace import Pin Pin.override(anthropic, service="my-anthropic-service") """ # noqa: E501 diff --git a/ddtrace/contrib/aredis/__init__.py b/ddtrace/contrib/aredis/__init__.py index a3cf176cfb1..5118753334a 100644 --- a/ddtrace/contrib/aredis/__init__.py +++ b/ddtrace/contrib/aredis/__init__.py @@ -50,10 +50,10 @@ Instance Configuration ~~~~~~~~~~~~~~~~~~~~~~ -To configure particular aredis instances use the :class:`Pin ` API:: +To configure particular aredis instances use the :class:`Pin ` API:: import aredis - from ddtrace import Pin + from ddtrace.trace import Pin client = aredis.StrictRedis(host="localhost", port=6379) diff --git a/ddtrace/contrib/asyncpg/__init__.py b/ddtrace/contrib/asyncpg/__init__.py index 9a38962dc02..e3f25604a64 100644 --- a/ddtrace/contrib/asyncpg/__init__.py +++ b/ddtrace/contrib/asyncpg/__init__.py @@ -38,7 +38,7 @@ basis use the ``Pin`` API:: import asyncpg - from ddtrace import Pin + from ddtrace.trace import Pin conn = asyncpg.connect("postgres://localhost:5432") Pin.override(conn, service="custom-service") diff --git 
a/ddtrace/contrib/cassandra/__init__.py b/ddtrace/contrib/cassandra/__init__.py index c7e8b90fa10..b13335cadef 100644 --- a/ddtrace/contrib/cassandra/__init__.py +++ b/ddtrace/contrib/cassandra/__init__.py @@ -3,7 +3,8 @@ ``import ddtrace.auto`` will automatically patch your Cluster instance to make it work. :: - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin from cassandra.cluster import Cluster # If not patched yet, you can patch cassandra specifically diff --git a/ddtrace/contrib/consul/__init__.py b/ddtrace/contrib/consul/__init__.py index bf6cb8f7463..956a15a7e6d 100644 --- a/ddtrace/contrib/consul/__init__.py +++ b/ddtrace/contrib/consul/__init__.py @@ -5,7 +5,8 @@ ``import ddtrace.auto`` will automatically patch your Consul client to make it work. :: - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin import consul # If not patched yet, you can patch consul specifically diff --git a/ddtrace/contrib/dbapi/__init__.py b/ddtrace/contrib/dbapi/__init__.py index 358b928eadd..0b772ac04ec 100644 --- a/ddtrace/contrib/dbapi/__init__.py +++ b/ddtrace/contrib/dbapi/__init__.py @@ -20,7 +20,7 @@ from ...ext import SpanTypes from ...ext import db from ...ext import sql -from ...pin import Pin +from ...trace import Pin from ..trace_utils import ext_service from ..trace_utils import iswrapped diff --git a/ddtrace/contrib/dbapi_async/__init__.py b/ddtrace/contrib/dbapi_async/__init__.py index 6528d2b348a..d0c43fc1c2b 100644 --- a/ddtrace/contrib/dbapi_async/__init__.py +++ b/ddtrace/contrib/dbapi_async/__init__.py @@ -13,7 +13,7 @@ from ...constants import SPAN_MEASURED_KEY from ...ext import SpanKind from ...ext import SpanTypes -from ...pin import Pin +from ...trace import Pin from ..dbapi import TracedConnection from ..dbapi import TracedCursor from ..trace_utils import ext_service diff --git a/ddtrace/contrib/google_generativeai/__init__.py 
b/ddtrace/contrib/google_generativeai/__init__.py index cfe071d099d..7d342ad58b6 100644 --- a/ddtrace/contrib/google_generativeai/__init__.py +++ b/ddtrace/contrib/google_generativeai/__init__.py @@ -73,7 +73,8 @@ ``Pin`` API:: import google.generativeai as genai - from ddtrace import Pin, config + from ddtrace import config + from ddtrace.trace import Pin Pin.override(genai, service="my-gemini-service") """ # noqa: E501 diff --git a/ddtrace/contrib/graphql/__init__.py b/ddtrace/contrib/graphql/__init__.py index 1bbf41f06ba..7fc6dec3304 100644 --- a/ddtrace/contrib/graphql/__init__.py +++ b/ddtrace/contrib/graphql/__init__.py @@ -39,7 +39,7 @@ To configure the graphql integration using the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin import graphql Pin.override(graphql, service="mygraphql") diff --git a/ddtrace/contrib/grpc/__init__.py b/ddtrace/contrib/grpc/__init__.py index 39ec689b52a..1a9322854e1 100644 --- a/ddtrace/contrib/grpc/__init__.py +++ b/ddtrace/contrib/grpc/__init__.py @@ -45,13 +45,14 @@ ``Pin`` API:: import grpc - from ddtrace import Pin, patch, Tracer + from ddtrace import patch + from ddtrace.trace import Pin + patch(grpc=True) - custom_tracer = Tracer() # override the pin on the client - Pin.override(grpc.Channel, service='mygrpc', tracer=custom_tracer) + Pin.override(grpc.Channel, service='mygrpc') with grpc.insecure_channel('localhost:50051') as channel: # create stubs and send requests pass @@ -61,13 +62,13 @@ import grpc from grpc.framework.foundation import logging_pool - from ddtrace import Pin, patch, Tracer + from ddtrace import patch + from ddtrace.trace import Pin patch(grpc=True) - custom_tracer = Tracer() # override the pin on the server - Pin.override(grpc.Server, service='mygrpc', tracer=custom_tracer) + Pin.override(grpc.Server, service='mygrpc') server = grpc.server(logging_pool.pool(2)) server.add_insecure_port('localhost:50051') add_MyServicer_to_server(MyServicer(), server) diff --git 
a/ddtrace/contrib/httpx/__init__.py b/ddtrace/contrib/httpx/__init__.py index 5743d052e79..78de204078c 100644 --- a/ddtrace/contrib/httpx/__init__.py +++ b/ddtrace/contrib/httpx/__init__.py @@ -57,10 +57,10 @@ Instance Configuration ~~~~~~~~~~~~~~~~~~~~~~ -To configure particular ``httpx`` client instances use the :class:`Pin ` API:: +To configure particular ``httpx`` client instances use the :class:`Pin ` API:: import httpx - from ddtrace import Pin + from ddtrace.trace import Pin client = httpx.Client() # Override service name for this instance diff --git a/ddtrace/contrib/internal/aiobotocore/patch.py b/ddtrace/contrib/internal/aiobotocore/patch.py index c5dafcaaa41..7431bd5c592 100644 --- a/ddtrace/contrib/internal/aiobotocore/patch.py +++ b/ddtrace/contrib/internal/aiobotocore/patch.py @@ -21,7 +21,7 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import deep_getattr from ddtrace.internal.utils.version import parse_version -from ddtrace.pin import Pin +from ddtrace.trace import Pin aiobotocore_version_str = getattr(aiobotocore, "__version__", "") diff --git a/ddtrace/contrib/internal/aiohttp/middlewares.py b/ddtrace/contrib/internal/aiohttp/middlewares.py index 07c8afbb07d..59045d7bcc9 100644 --- a/ddtrace/contrib/internal/aiohttp/middlewares.py +++ b/ddtrace/contrib/internal/aiohttp/middlewares.py @@ -173,7 +173,7 @@ def trace_app(app, tracer, service="aiohttp-web"): } # the tracer must work with asynchronous Context propagation - tracer.configure(context_provider=context_provider) + tracer._configure(context_provider=context_provider) # add the async tracer middleware as a first middleware # and be sure that the on_prepare signal is the last one diff --git a/ddtrace/contrib/internal/aiohttp/patch.py b/ddtrace/contrib/internal/aiohttp/patch.py index 13b55ecb4fb..e0f0bc869e9 100644 --- a/ddtrace/contrib/internal/aiohttp/patch.py +++ b/ddtrace/contrib/internal/aiohttp/patch.py @@ -21,8 +21,8 @@ from 
ddtrace.internal.schema.span_attribute_schema import SpanDirection from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.formats import asbool -from ddtrace.pin import Pin from ddtrace.propagation.http import HTTPPropagator +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/aiohttp_jinja2/patch.py b/ddtrace/contrib/internal/aiohttp_jinja2/patch.py index 284352b54f0..84553899c39 100644 --- a/ddtrace/contrib/internal/aiohttp_jinja2/patch.py +++ b/ddtrace/contrib/internal/aiohttp_jinja2/patch.py @@ -1,6 +1,5 @@ import aiohttp_jinja2 -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.trace_utils import unwrap from ddtrace.contrib.trace_utils import with_traced_module @@ -8,6 +7,7 @@ from ddtrace.ext import SpanTypes from ddtrace.internal.constants import COMPONENT from ddtrace.internal.utils import get_argument_value +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/aiomysql/patch.py b/ddtrace/contrib/internal/aiomysql/patch.py index 7f090b4c71d..0053e4f8a5b 100644 --- a/ddtrace/contrib/internal/aiomysql/patch.py +++ b/ddtrace/contrib/internal/aiomysql/patch.py @@ -1,7 +1,6 @@ import aiomysql import wrapt -from ddtrace import Pin from ddtrace import config from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY from ddtrace.constants import SPAN_KIND @@ -18,6 +17,7 @@ from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.wrappers import unwrap from ddtrace.propagation._database_monitoring import _DBM_Propagator +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/aiopg/connection.py b/ddtrace/contrib/internal/aiopg/connection.py index b2522ae3888..1daf84b2987 100644 --- a/ddtrace/contrib/internal/aiopg/connection.py +++ b/ddtrace/contrib/internal/aiopg/connection.py @@ -15,7 +15,7 @@ from ddtrace.internal.schema import schematize_database_operation from ddtrace.internal.schema 
import schematize_service_name from ddtrace.internal.utils.version import parse_version -from ddtrace.pin import Pin +from ddtrace.trace import Pin AIOPG_VERSION = parse_version(__version__) diff --git a/ddtrace/contrib/internal/aioredis/patch.py b/ddtrace/contrib/internal/aioredis/patch.py index 7915f652641..dc6004b9caa 100644 --- a/ddtrace/contrib/internal/aioredis/patch.py +++ b/ddtrace/contrib/internal/aioredis/patch.py @@ -27,7 +27,7 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import stringify_cache_args from ddtrace.internal.utils.wrappers import unwrap as _u -from ddtrace.pin import Pin +from ddtrace.trace import Pin from ddtrace.vendor.packaging.version import parse as parse_version diff --git a/ddtrace/contrib/internal/algoliasearch/patch.py b/ddtrace/contrib/internal/algoliasearch/patch.py index 5217861409e..e3074225570 100644 --- a/ddtrace/contrib/internal/algoliasearch/patch.py +++ b/ddtrace/contrib/internal/algoliasearch/patch.py @@ -10,7 +10,7 @@ from ddtrace.internal.schema import schematize_cloud_api_operation from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.wrappers import unwrap as _u -from ddtrace.pin import Pin +from ddtrace.trace import Pin from ddtrace.vendor.packaging.version import parse as parse_version diff --git a/ddtrace/contrib/internal/anthropic/patch.py b/ddtrace/contrib/internal/anthropic/patch.py index e82c4421e78..24f72f2b511 100644 --- a/ddtrace/contrib/internal/anthropic/patch.py +++ b/ddtrace/contrib/internal/anthropic/patch.py @@ -18,7 +18,7 @@ from ddtrace.internal.utils import get_argument_value from ddtrace.llmobs._integrations import AnthropicIntegration from ddtrace.llmobs._utils import _get_attr -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/aredis/patch.py b/ddtrace/contrib/internal/aredis/patch.py index c9ba000ea36..bd8c5b4c750 100644 --- 
a/ddtrace/contrib/internal/aredis/patch.py +++ b/ddtrace/contrib/internal/aredis/patch.py @@ -12,7 +12,7 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import stringify_cache_args from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.pin import Pin +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/asgi/middleware.py b/ddtrace/contrib/internal/asgi/middleware.py index 98d352cf75f..2b3e23eb78b 100644 --- a/ddtrace/contrib/internal/asgi/middleware.py +++ b/ddtrace/contrib/internal/asgi/middleware.py @@ -150,7 +150,13 @@ async def __call__(self, scope, receive, send): if scope["type"] == "http": operation_name = schematize_url_operation(operation_name, direction=SpanDirection.INBOUND, protocol="http") - pin = ddtrace.pin.Pin(service="asgi", tracer=self.tracer) + # Calling ddtrace.trace.Pin(...) with the `tracer` argument is deprecated + # Remove this if statement when the `tracer` argument is removed + if self.tracer is ddtrace.tracer: + pin = ddtrace.trace.Pin(service="asgi") + else: + pin = ddtrace.trace.Pin(service="asgi", tracer=self.tracer) + with core.context_with_data( "asgi.__call__", remote_addr=scope.get("REMOTE_ADDR"), diff --git a/ddtrace/contrib/internal/asyncio/patch.py b/ddtrace/contrib/internal/asyncio/patch.py index 83f1918e9eb..ed64ca1bf5d 100644 --- a/ddtrace/contrib/internal/asyncio/patch.py +++ b/ddtrace/contrib/internal/asyncio/patch.py @@ -1,10 +1,10 @@ import asyncio -from ddtrace import Pin from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils import set_argument_value from ddtrace.internal.wrapping import unwrap from ddtrace.internal.wrapping import wrap +from ddtrace.trace import Pin def get_version(): diff --git a/ddtrace/contrib/internal/asyncpg/patch.py b/ddtrace/contrib/internal/asyncpg/patch.py index 7b2b269d5f2..ac1347a7de6 100644 --- a/ddtrace/contrib/internal/asyncpg/patch.py +++ b/ddtrace/contrib/internal/asyncpg/patch.py 
@@ -2,7 +2,7 @@ from types import ModuleType import asyncpg -from ddtrace import Pin +from ddtrace.trace import Pin from ddtrace import config from ddtrace.internal import core from ddtrace.internal.constants import COMPONENT diff --git a/ddtrace/contrib/internal/avro/patch.py b/ddtrace/contrib/internal/avro/patch.py index 6e66fbe20b0..3ef2adbcb0c 100644 --- a/ddtrace/contrib/internal/avro/patch.py +++ b/ddtrace/contrib/internal/avro/patch.py @@ -3,7 +3,7 @@ from ddtrace import config from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.pin import Pin +from ddtrace.trace import Pin from .schema_iterator import SchemaExtractor diff --git a/ddtrace/contrib/internal/azure_functions/patch.py b/ddtrace/contrib/internal/azure_functions/patch.py index 15089a2e733..1c0c658a9eb 100644 --- a/ddtrace/contrib/internal/azure_functions/patch.py +++ b/ddtrace/contrib/internal/azure_functions/patch.py @@ -8,7 +8,7 @@ from ddtrace.internal import core from ddtrace.internal.schema import schematize_cloud_faas_operation from ddtrace.internal.schema import schematize_service_name -from ddtrace.pin import Pin +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/boto/patch.py b/ddtrace/contrib/internal/boto/patch.py index 8551056dfb3..e7418aba878 100644 --- a/ddtrace/contrib/internal/boto/patch.py +++ b/ddtrace/contrib/internal/boto/patch.py @@ -19,7 +19,7 @@ from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.pin import Pin +from ddtrace.trace import Pin # Original boto client class diff --git a/ddtrace/contrib/internal/botocore/patch.py b/ddtrace/contrib/internal/botocore/patch.py index febad29f982..07c0bd403e4 100644 --- a/ddtrace/contrib/internal/botocore/patch.py +++ b/ddtrace/contrib/internal/botocore/patch.py @@ -33,8 +33,8 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import 
deep_getattr from ddtrace.llmobs._integrations import BedrockIntegration -from ddtrace.pin import Pin from ddtrace.settings.config import Config +from ddtrace.trace import Pin from .services.bedrock import patched_bedrock_api_call from .services.kinesis import patched_kinesis_api_call diff --git a/ddtrace/contrib/internal/botocore/services/bedrock.py b/ddtrace/contrib/internal/botocore/services/bedrock.py index 7c5f26b07a5..00e9aa5756f 100644 --- a/ddtrace/contrib/internal/botocore/services/bedrock.py +++ b/ddtrace/contrib/internal/botocore/services/bedrock.py @@ -24,6 +24,17 @@ _META = "meta" _STABILITY = "stability" +_MODEL_TYPE_IDENTIFIERS = ( + "foundation-model/", + "custom-model/", + "provisioned-model/", + "imported-model/", + "prompt/", + "endpoint/", + "inference-profile/", + "default-prompt-router/", +) + + class TracedBotocoreStreamingBody(wrapt.ObjectProxy): """ @@ -320,14 +331,45 @@ def handle_bedrock_response( return result + +def _parse_model_id(model_id: str): + """Best effort to extract the model provider and model name from the bedrock model ID. + model_id can be a 1/2 period-separated string or a full AWS ARN, based on the following formats: + 1. Base model: "{model_provider}.{model_name}" + 2. Cross-region model: "{region}.{model_provider}.{model_name}" + 3. Other: Prefixed by AWS ARN "arn:aws{+region?}:bedrock:{region}:{account-id}:" + a. Foundation model: ARN prefix + "foundation-model/{region?}.{model_provider}.{model_name}" + b. Custom model: ARN prefix + "custom-model/{model_provider}.{model_name}" + c. Provisioned model: ARN prefix + "provisioned-model/{model-id}" + d. Imported model: ARN prefix + "imported-model/{model-id}" + e. Prompt management: ARN prefix + "prompt/{prompt-id}" + f. Sagemaker: ARN prefix + "endpoint/{model-id}" + g. Inference profile: ARN prefix + "{application-?}inference-profile/{model-id}" + h. 
Default prompt router: ARN prefix + "default-prompt-router/{prompt-id}" + If model provider cannot be inferred from the model_id formatting, then default to "custom" + """ + if not model_id.startswith("arn:aws"): + model_meta = model_id.split(".") + if len(model_meta) < 2: + return "custom", model_meta[0] + return model_meta[-2], model_meta[-1] + for identifier in _MODEL_TYPE_IDENTIFIERS: + if identifier not in model_id: + continue + model_id = model_id.rsplit(identifier, 1)[-1] + if identifier in ("foundation-model/", "custom-model/"): + model_meta = model_id.split(".") + if len(model_meta) < 2: + return "custom", model_id + return model_meta[-2], model_meta[-1] + return "custom", model_id + return "custom", "custom" + + def patched_bedrock_api_call(original_func, instance, args, kwargs, function_vars): params = function_vars.get("params") pin = function_vars.get("pin") - model_meta = params.get("modelId").split(".") - if len(model_meta) == 2: - model_provider, model_name = model_meta - else: - _, model_provider, model_name = model_meta # cross-region inference + model_id = params.get("modelId") + model_provider, model_name = _parse_model_id(model_id) integration = function_vars.get("integration") submit_to_llmobs = integration.llmobs_enabled and "embed" not in model_name with core.context_with_data( diff --git a/ddtrace/contrib/internal/cassandra/session.py b/ddtrace/contrib/internal/cassandra/session.py index 33d307c13c7..7f02d8c0af6 100644 --- a/ddtrace/contrib/internal/cassandra/session.py +++ b/ddtrace/contrib/internal/cassandra/session.py @@ -39,7 +39,7 @@ from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.formats import deep_getattr -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/celery/app.py b/ddtrace/contrib/internal/celery/app.py index 42eed2cb468..54ad5834769 100644 --- 
a/ddtrace/contrib/internal/celery/app.py +++ b/ddtrace/contrib/internal/celery/app.py @@ -3,8 +3,8 @@ import celery from celery import signals -from ddtrace import Pin from ddtrace import config +from ddtrace._trace.pin import _DD_PIN_NAME from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY from ddtrace.constants import SPAN_KIND from ddtrace.constants import SPAN_MEASURED_KEY @@ -19,7 +19,7 @@ from ddtrace.ext import SpanTypes from ddtrace.internal import core from ddtrace.internal.logger import get_logger -from ddtrace.pin import _DD_PIN_NAME +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/celery/signals.py b/ddtrace/contrib/internal/celery/signals.py index 8f27fcc53b0..ea9d8c15863 100644 --- a/ddtrace/contrib/internal/celery/signals.py +++ b/ddtrace/contrib/internal/celery/signals.py @@ -3,7 +3,6 @@ from celery import current_app from celery import registry -from ddtrace import Pin from ddtrace import config from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY from ddtrace.constants import SPAN_KIND @@ -24,6 +23,7 @@ from ddtrace.internal.constants import COMPONENT from ddtrace.internal.logger import get_logger from ddtrace.propagation.http import HTTPPropagator +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/consul/patch.py b/ddtrace/contrib/internal/consul/patch.py index b4725e807ba..b24b138b632 100644 --- a/ddtrace/contrib/internal/consul/patch.py +++ b/ddtrace/contrib/internal/consul/patch.py @@ -15,7 +15,7 @@ from ddtrace.internal.schema.span_attribute_schema import SpanDirection from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.wrappers import unwrap as _u -from ddtrace.pin import Pin +from ddtrace.trace import Pin _KV_FUNCS = ["put", "get", "delete"] diff --git a/ddtrace/contrib/internal/django/patch.py b/ddtrace/contrib/internal/django/patch.py index d4b14487e39..8bc523dd1c1 100644 --- 
a/ddtrace/contrib/internal/django/patch.py +++ b/ddtrace/contrib/internal/django/patch.py @@ -17,7 +17,7 @@ import wrapt from wrapt.importer import when_imported -from ddtrace import Pin +import ddtrace from ddtrace import config from ddtrace.appsec._utils import _UserInfoRetriever from ddtrace.constants import SPAN_KIND @@ -49,6 +49,7 @@ from ddtrace.propagation._database_monitoring import _DBM_Propagator from ddtrace.settings.asm import config as asm_config from ddtrace.settings.integration import IntegrationConfig +from ddtrace.trace import Pin from ddtrace.vendor.packaging.version import parse as parse_version @@ -147,7 +148,12 @@ def cursor(django, pin, func, instance, args, kwargs): tags = {"django.db.vendor": vendor, "django.db.alias": alias} tags.update(getattr(conn, "_datadog_tags", {})) - pin = Pin(service, tags=tags, tracer=pin.tracer) + # Calling ddtrace.pin.Pin(...) with the `tracer` argument generates a deprecation warning. + # Remove this if statement when the `tracer` argument is removed + if pin.tracer is ddtrace.tracer: + pin = Pin(service, tags=tags) + else: + pin = Pin(service, tags=tags, tracer=pin.tracer) cursor = func(*args, **kwargs) diff --git a/ddtrace/contrib/internal/dogpile_cache/lock.py b/ddtrace/contrib/internal/dogpile_cache/lock.py index c592562f94f..76cdc2eb839 100644 --- a/ddtrace/contrib/internal/dogpile_cache/lock.py +++ b/ddtrace/contrib/internal/dogpile_cache/lock.py @@ -1,7 +1,7 @@ import dogpile from ddtrace.internal.utils.formats import asbool -from ddtrace.pin import Pin +from ddtrace.trace import Pin def _wrap_lock_ctor(func, instance, args, kwargs): diff --git a/ddtrace/contrib/internal/dogpile_cache/patch.py b/ddtrace/contrib/internal/dogpile_cache/patch.py index f4f41284a29..f78ea5cb23f 100644 --- a/ddtrace/contrib/internal/dogpile_cache/patch.py +++ b/ddtrace/contrib/internal/dogpile_cache/patch.py @@ -7,10 +7,10 @@ from wrapt import wrap_function_wrapper as _w +from ddtrace._trace.pin import _DD_PIN_NAME +from 
ddtrace._trace.pin import _DD_PIN_PROXY_NAME from ddtrace.internal.schema import schematize_service_name -from ddtrace.pin import _DD_PIN_NAME -from ddtrace.pin import _DD_PIN_PROXY_NAME -from ddtrace.pin import Pin +from ddtrace.trace import Pin from .lock import _wrap_lock_ctor from .region import _wrap_get_create diff --git a/ddtrace/contrib/internal/dogpile_cache/region.py b/ddtrace/contrib/internal/dogpile_cache/region.py index 04b70402e3d..0c89d2d84d9 100644 --- a/ddtrace/contrib/internal/dogpile_cache/region.py +++ b/ddtrace/contrib/internal/dogpile_cache/region.py @@ -7,7 +7,7 @@ from ddtrace.internal.schema import schematize_cache_operation from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils import get_argument_value -from ddtrace.pin import Pin +from ddtrace.trace import Pin def _wrap_get_create(func, instance, args, kwargs): diff --git a/ddtrace/contrib/internal/elasticsearch/patch.py b/ddtrace/contrib/internal/elasticsearch/patch.py index 455d0678d02..7c408db55a5 100644 --- a/ddtrace/contrib/internal/elasticsearch/patch.py +++ b/ddtrace/contrib/internal/elasticsearch/patch.py @@ -21,7 +21,7 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.wrappers import unwrap as _u -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/fastapi/patch.py b/ddtrace/contrib/internal/fastapi/patch.py index b431f3c83f8..485c0424a5f 100644 --- a/ddtrace/contrib/internal/fastapi/patch.py +++ b/ddtrace/contrib/internal/fastapi/patch.py @@ -5,7 +5,6 @@ from wrapt import ObjectProxy from wrapt import wrap_function_wrapper as _w -from ddtrace import Pin from ddtrace import config from ddtrace.appsec._iast._utils import _is_iast_enabled from ddtrace.contrib.internal.asgi.middleware import TraceMiddleware @@ -15,6 +14,7 @@ from ddtrace.internal.logger import get_logger from 
ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.wrappers import unwrap as _u +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/flask/patch.py b/ddtrace/contrib/internal/flask/patch.py index 429a9d05667..010df5218c5 100644 --- a/ddtrace/contrib/internal/flask/patch.py +++ b/ddtrace/contrib/internal/flask/patch.py @@ -29,7 +29,6 @@ from wrapt import wrap_function_wrapper as _w -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.internal.wsgi.wsgi import _DDWSGIMiddlewareBase from ddtrace.contrib.trace_utils import unwrap as _u @@ -37,6 +36,7 @@ from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.importlib import func_name from ddtrace.internal.utils.version import parse_version +from ddtrace.trace import Pin from .wrappers import _wrap_call_with_pin_check from .wrappers import get_current_app diff --git a/ddtrace/contrib/internal/flask/wrappers.py b/ddtrace/contrib/internal/flask/wrappers.py index 3aca2a1466a..d65697224ba 100644 --- a/ddtrace/contrib/internal/flask/wrappers.py +++ b/ddtrace/contrib/internal/flask/wrappers.py @@ -7,7 +7,7 @@ from ddtrace.internal.constants import COMPONENT from ddtrace.internal.logger import get_logger from ddtrace.internal.utils.importlib import func_name -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py index 29bc18dc756..3564f9ec1ec 100644 --- a/ddtrace/contrib/internal/google_generativeai/patch.py +++ b/ddtrace/contrib/internal/google_generativeai/patch.py @@ -14,7 +14,7 @@ from ddtrace.contrib.trace_utils import wrap from ddtrace.llmobs._integrations import GeminiIntegration from ddtrace.llmobs._integrations.utils import extract_model_name_google -from ddtrace.pin import Pin +from ddtrace.trace import Pin config._add( diff --git 
a/ddtrace/contrib/internal/graphql/patch.py b/ddtrace/contrib/internal/graphql/patch.py index b54df97e520..18916f4222a 100644 --- a/ddtrace/contrib/internal/graphql/patch.py +++ b/ddtrace/contrib/internal/graphql/patch.py @@ -39,7 +39,7 @@ from ddtrace.internal.utils.version import parse_version from ddtrace.internal.wrapping import unwrap from ddtrace.internal.wrapping import wrap -from ddtrace.pin import Pin +from ddtrace.trace import Pin _graphql_version_str = graphql.__version__ diff --git a/ddtrace/contrib/internal/grpc/aio_client_interceptor.py b/ddtrace/contrib/internal/grpc/aio_client_interceptor.py index bf6f156de7e..5c03d1b8527 100644 --- a/ddtrace/contrib/internal/grpc/aio_client_interceptor.py +++ b/ddtrace/contrib/internal/grpc/aio_client_interceptor.py @@ -11,7 +11,6 @@ from grpc.aio._typing import ResponseIterableType from grpc.aio._typing import ResponseType -from ddtrace import Pin from ddtrace import Span from ddtrace import config from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY @@ -30,6 +29,7 @@ from ddtrace.internal.schema import schematize_url_operation from ddtrace.internal.schema.span_attribute_schema import SpanDirection from ddtrace.propagation.http import HTTPPropagator +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/grpc/aio_server_interceptor.py b/ddtrace/contrib/internal/grpc/aio_server_interceptor.py index 2361e3c3be9..d5ec9ed32ab 100644 --- a/ddtrace/contrib/internal/grpc/aio_server_interceptor.py +++ b/ddtrace/contrib/internal/grpc/aio_server_interceptor.py @@ -13,7 +13,6 @@ from grpc.aio._typing import ResponseType import wrapt -from ddtrace import Pin # noqa:F401 from ddtrace import Span # noqa:F401 from ddtrace import config from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY @@ -30,6 +29,7 @@ from ddtrace.internal.constants import COMPONENT from ddtrace.internal.schema import schematize_url_operation from ddtrace.internal.schema.span_attribute_schema import 
SpanDirection +from ddtrace.trace import Pin # noqa:F401 Continuation = Callable[[grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]] diff --git a/ddtrace/contrib/internal/grpc/patch.py b/ddtrace/contrib/internal/grpc/patch.py index 9c41c5cc342..122893b030f 100644 --- a/ddtrace/contrib/internal/grpc/patch.py +++ b/ddtrace/contrib/internal/grpc/patch.py @@ -1,7 +1,6 @@ import grpc from wrapt import wrap_function_wrapper as _w -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.internal.grpc import constants from ddtrace.contrib.internal.grpc import utils @@ -13,6 +12,7 @@ from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils import set_argument_value +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/httplib/patch.py b/ddtrace/contrib/internal/httplib/patch.py index e42241a2ca2..3e354aeedea 100644 --- a/ddtrace/contrib/internal/httplib/patch.py +++ b/ddtrace/contrib/internal/httplib/patch.py @@ -20,9 +20,9 @@ from ddtrace.internal.schema import schematize_url_operation from ddtrace.internal.schema.span_attribute_schema import SpanDirection from ddtrace.internal.utils.formats import asbool -from ddtrace.pin import Pin from ddtrace.propagation.http import HTTPPropagator from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin span_name = "http.client.request" diff --git a/ddtrace/contrib/internal/httpx/patch.py b/ddtrace/contrib/internal/httpx/patch.py index e6d1893880f..8a9e4eebc3a 100644 --- a/ddtrace/contrib/internal/httpx/patch.py +++ b/ddtrace/contrib/internal/httpx/patch.py @@ -22,8 +22,8 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.version import parse_version from ddtrace.internal.utils.wrappers import unwrap as _u -from ddtrace.pin import Pin from ddtrace.propagation.http import HTTPPropagator +from ddtrace.trace import Pin HTTPX_VERSION 
= parse_version(httpx.__version__) diff --git a/ddtrace/contrib/internal/jinja2/patch.py b/ddtrace/contrib/internal/jinja2/patch.py index 83aad083747..cdf1254527d 100644 --- a/ddtrace/contrib/internal/jinja2/patch.py +++ b/ddtrace/contrib/internal/jinja2/patch.py @@ -10,7 +10,7 @@ from ddtrace.internal.constants import COMPONENT from ddtrace.internal.utils import ArgumentError from ddtrace.internal.utils import get_argument_value -from ddtrace.pin import Pin +from ddtrace.trace import Pin from .constants import DEFAULT_TEMPLATE_NAME diff --git a/ddtrace/contrib/internal/kafka/patch.py b/ddtrace/contrib/internal/kafka/patch.py index 339e2469914..6f69cda3239 100644 --- a/ddtrace/contrib/internal/kafka/patch.py +++ b/ddtrace/contrib/internal/kafka/patch.py @@ -24,8 +24,8 @@ from ddtrace.internal.utils import set_argument_value from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.version import parse_version -from ddtrace.pin import Pin from ddtrace.propagation.http import HTTPPropagator as Propagator +from ddtrace.trace import Pin _Producer = confluent_kafka.Producer diff --git a/ddtrace/contrib/internal/kombu/patch.py b/ddtrace/contrib/internal/kombu/patch.py index fd571fd445c..fa63e5c4f86 100644 --- a/ddtrace/contrib/internal/kombu/patch.py +++ b/ddtrace/contrib/internal/kombu/patch.py @@ -22,8 +22,8 @@ from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.pin import Pin from ddtrace.propagation.http import HTTPPropagator +from ddtrace.trace import Pin from .constants import DEFAULT_SERVICE from .utils import HEADER_POS diff --git a/ddtrace/contrib/internal/langchain/patch.py b/ddtrace/contrib/internal/langchain/patch.py index b7513539da7..f9c58249cb2 100644 --- a/ddtrace/contrib/internal/langchain/patch.py +++ b/ddtrace/contrib/internal/langchain/patch.py @@ -62,7 +62,7 @@ from ddtrace.internal.utils.version import 
parse_version from ddtrace.llmobs._integrations import LangChainIntegration from ddtrace.llmobs._utils import safe_json -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/langgraph/patch.py b/ddtrace/contrib/internal/langgraph/patch.py new file mode 100644 index 00000000000..1da216be9a3 --- /dev/null +++ b/ddtrace/contrib/internal/langgraph/patch.py @@ -0,0 +1,240 @@ +import os +import sys + +import langgraph + +from ddtrace import config +from ddtrace.contrib.trace_utils import unwrap +from ddtrace.contrib.trace_utils import with_traced_module +from ddtrace.contrib.trace_utils import wrap +from ddtrace.internal.utils import get_argument_value +from ddtrace.llmobs._integrations.langgraph import LangGraphIntegration +from ddtrace.trace import Pin + + +def get_version(): + from langgraph import version + + return getattr(version, "__version__", "") + + +config._add("langgraph", {}) + + +def _get_node_name(instance): + """Gets the name of the first step in a RunnableSeq instance as the node name.""" + steps = getattr(instance, "steps", []) + first_step = steps[0] if steps else None + return getattr(first_step, "name", None) + + +@with_traced_module +def traced_runnable_seq_invoke(langgraph, pin, func, instance, args, kwargs): + """ + Traces an invocation of a RunnableSeq, which represents a node in a graph. + It represents the sequence containing node invocation (function, graph, callable), the channel write, + and then any routing logic. + + We utilize `instance.steps` to grab the first step as the node. + + One caveat is that if the node represents a subgraph (LangGraph), we should skip tracing at this step, as + we will trace the graph invocation separately with `traced_pregel_stream`. 
+ """ + integration: LangGraphIntegration = langgraph._datadog_integration + + node_name = _get_node_name(instance) + + if node_name in ("_write", "_route"): + return func(*args, **kwargs) + if node_name == "LangGraph": + config = get_argument_value(args, kwargs, 1, "config", optional=True) or {} + config.get("metadata", {})["_dd.subgraph"] = True + return func(*args, **kwargs) + + span = integration.trace( + pin, + "%s.%s.%s" % (instance.__module__, instance.__class__.__name__, node_name), + submit_to_llmobs=True, + ) + result = None + try: + result = func(*args, **kwargs) + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=result, operation="node") + span.finish() + return result + + +@with_traced_module +async def traced_runnable_seq_ainvoke(langgraph, pin, func, instance, args, kwargs): + """Async version of traced_runnable_seq_invoke.""" + integration: LangGraphIntegration = langgraph._datadog_integration + + node_name = _get_node_name(instance) + + if node_name in ("_write", "_route"): + return await func(*args, **kwargs) + if node_name == "LangGraph": + config = get_argument_value(args, kwargs, 1, "config", optional=True) or {} + config.get("metadata", {})["_dd.subgraph"] = True + return await func(*args, **kwargs) + + span = integration.trace( + pin, + "%s.%s.%s" % (instance.__module__, instance.__class__.__name__, node_name), + submit_to_llmobs=True, + ) + result = None + try: + result = await func(*args, **kwargs) + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=result, operation="node") + span.finish() + return result + + +@with_traced_module +def traced_pregel_stream(langgraph, pin, func, instance, args, kwargs): + """ + Trace the streaming of a Pregel (CompiledGraph) instance. + This operation represents the parent execution of an individual graph. 
+ This graph could be standalone, or embedded as a subgraph in a node of a larger graph. + Under the hood, this graph will `tick` through until all computed tasks are completed. + + Calling `invoke` on a graph calls `stream` under the hood. + """ + integration: LangGraphIntegration = langgraph._datadog_integration + name = getattr(instance, "name", "LangGraph") + span = integration.trace( + pin, + "%s.%s.%s" % (instance.__module__, instance.__class__.__name__, name), + submit_to_llmobs=True, + ) + + try: + result = func(*args, **kwargs) + except Exception: + span.set_exc_info(*sys.exc_info()) + span.finish() + raise + + def _stream(): + item = None + while True: + try: + item = next(result) + yield item + except StopIteration: + response = item[-1] if isinstance(item, tuple) else item + integration.llmobs_set_tags( + span, args=args, kwargs={**kwargs, "name": name}, response=response, operation="graph" + ) + span.finish() + break + except Exception: + span.set_exc_info(*sys.exc_info()) + span.finish() + raise + + return _stream() + + +@with_traced_module +def traced_pregel_astream(langgraph, pin, func, instance, args, kwargs): + """Async version of traced_pregel_stream.""" + integration: LangGraphIntegration = langgraph._datadog_integration + name = getattr(instance, "name", "LangGraph") + span = integration.trace( + pin, + "%s.%s.%s" % (instance.__module__, instance.__class__.__name__, name), + submit_to_llmobs=True, + ) + + try: + result = func(*args, **kwargs) + except Exception: + span.set_exc_info(*sys.exc_info()) + span.finish() + raise + + async def _astream(): + item = None + while True: + try: + item = await result.__anext__() + yield item + except StopAsyncIteration: + response = item[-1] if isinstance(item, tuple) else item + integration.llmobs_set_tags( + span, args=args, kwargs={**kwargs, "name": name}, response=response, operation="graph" + ) + span.finish() + break + except Exception: + span.set_exc_info(*sys.exc_info()) + span.finish() + raise + + 
return _astream() + + +@with_traced_module +def patched_pregel_loop_tick(langgraph, pin, func, instance, args, kwargs): + """No tracing is done, and processing only happens if LLM Observability is enabled.""" + integration: LangGraphIntegration = langgraph._datadog_integration + if not integration.llmobs_enabled: + return func(*args, **kwargs) + + finished_tasks = getattr(instance, "tasks", {}) + result = func(*args, **kwargs) + next_tasks = getattr(instance, "tasks", {}) # instance.tasks gets updated by loop.tick() + is_subgraph_node = getattr(instance, "config", {}).get("metadata", {}).get("_dd.subgraph", False) + integration.llmobs_handle_pregel_loop_tick(finished_tasks, next_tasks, result, is_subgraph_node) + return result + + +def patch(): + should_patch = os.getenv("_DD_TRACE_LANGGRAPH_ENABLED", "false").lower() in ("true", "1") + if not should_patch or getattr(langgraph, "_datadog_patch", False): + return + + langgraph._datadog_patch = True + + Pin().onto(langgraph) + integration = LangGraphIntegration(integration_config=config.langgraph) + langgraph._datadog_integration = integration + + from langgraph.pregel import Pregel + from langgraph.pregel.loop import PregelLoop + from langgraph.utils.runnable import RunnableSeq + + wrap(RunnableSeq, "invoke", traced_runnable_seq_invoke(langgraph)) + wrap(RunnableSeq, "ainvoke", traced_runnable_seq_ainvoke(langgraph)) + wrap(Pregel, "stream", traced_pregel_stream(langgraph)) + wrap(Pregel, "astream", traced_pregel_astream(langgraph)) + wrap(PregelLoop, "tick", patched_pregel_loop_tick(langgraph)) + + +def unpatch(): + if not getattr(langgraph, "_datadog_patch", False): + return + + langgraph._datadog_patch = False + + from langgraph.pregel import Pregel + from langgraph.pregel.loop import PregelLoop + from langgraph.utils.runnable import RunnableSeq + + unwrap(RunnableSeq, "invoke") + unwrap(RunnableSeq, "ainvoke") + unwrap(Pregel, "stream") + unwrap(Pregel, "astream") + unwrap(PregelLoop, "tick") + + 
delattr(langgraph, "_datadog_integration") diff --git a/ddtrace/contrib/internal/mako/patch.py b/ddtrace/contrib/internal/mako/patch.py index 7db3b2e47df..d39a51238a2 100644 --- a/ddtrace/contrib/internal/mako/patch.py +++ b/ddtrace/contrib/internal/mako/patch.py @@ -11,7 +11,7 @@ from ddtrace.internal.constants import COMPONENT from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.importlib import func_name -from ddtrace.pin import Pin +from ddtrace.trace import Pin from .constants import DEFAULT_TEMPLATE_NAME diff --git a/ddtrace/contrib/internal/mariadb/patch.py b/ddtrace/contrib/internal/mariadb/patch.py index b4ab267c5e3..1307403f6d4 100644 --- a/ddtrace/contrib/internal/mariadb/patch.py +++ b/ddtrace/contrib/internal/mariadb/patch.py @@ -3,7 +3,6 @@ import mariadb import wrapt -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.dbapi import TracedConnection from ddtrace.ext import db @@ -11,6 +10,7 @@ from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.wrappers import unwrap +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/molten/patch.py b/ddtrace/contrib/internal/molten/patch.py index fd6fa53b195..7c60d37d0d6 100644 --- a/ddtrace/contrib/internal/molten/patch.py +++ b/ddtrace/contrib/internal/molten/patch.py @@ -4,7 +4,6 @@ import wrapt from wrapt import wrap_function_wrapper as _w -from ddtrace import Pin from ddtrace import config from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY from ddtrace.constants import SPAN_KIND @@ -21,6 +20,7 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.importlib import func_name from ddtrace.internal.utils.version import parse_version +from ddtrace.trace import Pin from .wrappers import MOLTEN_ROUTE from .wrappers import WrapperComponent diff --git a/ddtrace/contrib/internal/molten/wrappers.py 
b/ddtrace/contrib/internal/molten/wrappers.py index 7446224fe45..0a3e325ca0b 100644 --- a/ddtrace/contrib/internal/molten/wrappers.py +++ b/ddtrace/contrib/internal/molten/wrappers.py @@ -1,7 +1,6 @@ import molten import wrapt -from ddtrace import Pin from ddtrace import config from ddtrace.constants import SPAN_KIND from ddtrace.contrib import trace_utils @@ -9,6 +8,7 @@ from ddtrace.ext import http from ddtrace.internal.constants import COMPONENT from ddtrace.internal.utils.importlib import func_name +from ddtrace.trace import Pin MOLTEN_ROUTE = "molten.route" diff --git a/ddtrace/contrib/internal/mongoengine/trace.py b/ddtrace/contrib/internal/mongoengine/trace.py index c5f3e834aed..93868e096ce 100644 --- a/ddtrace/contrib/internal/mongoengine/trace.py +++ b/ddtrace/contrib/internal/mongoengine/trace.py @@ -23,12 +23,17 @@ class WrappedConnect(wrapt.ObjectProxy): def __init__(self, connect): super(WrappedConnect, self).__init__(connect) - ddtrace.Pin(_SERVICE, tracer=ddtrace.tracer).onto(self) + ddtrace.trace.Pin(_SERVICE).onto(self) def __call__(self, *args, **kwargs): client = self.__wrapped__(*args, **kwargs) - pin = ddtrace.Pin.get_from(self) + pin = ddtrace.trace.Pin.get_from(self) if pin: - ddtrace.Pin(service=pin.service, tracer=pin.tracer).onto(client) + # Calling ddtrace.pin.Pin(...) with the `tracer` argument generates a deprecation warning. 
+ # Remove this if statement when the `tracer` argument is removed + if pin.tracer is ddtrace.tracer: + ddtrace.trace.Pin(service=pin.service).onto(client) + else: + ddtrace.trace.Pin(service=pin.service, tracer=pin.tracer).onto(client) return client diff --git a/ddtrace/contrib/internal/mysql/patch.py b/ddtrace/contrib/internal/mysql/patch.py index 2d5a8500cb3..d18d357d107 100644 --- a/ddtrace/contrib/internal/mysql/patch.py +++ b/ddtrace/contrib/internal/mysql/patch.py @@ -3,7 +3,6 @@ import mysql.connector import wrapt -from ddtrace import Pin from ddtrace import config from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION @@ -16,6 +15,7 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.propagation._database_monitoring import _DBM_Propagator from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/mysqldb/patch.py b/ddtrace/contrib/internal/mysqldb/patch.py index 291d6cb865e..8b6aa7bb7f2 100644 --- a/ddtrace/contrib/internal/mysqldb/patch.py +++ b/ddtrace/contrib/internal/mysqldb/patch.py @@ -3,7 +3,6 @@ import MySQLdb from wrapt import wrap_function_wrapper as _w -from ddtrace import Pin from ddtrace import config from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION @@ -23,6 +22,7 @@ from ddtrace.internal.utils.wrappers import unwrap as _u from ddtrace.propagation._database_monitoring import _DBM_Propagator from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/openai/patch.py b/ddtrace/contrib/internal/openai/patch.py index 4ad76a17084..d87b06b3aba 100644 --- a/ddtrace/contrib/internal/openai/patch.py +++ b/ddtrace/contrib/internal/openai/patch.py @@ -13,7 +13,7 @@ from ddtrace.internal.utils.version import 
parse_version from ddtrace.internal.wrapping import wrap from ddtrace.llmobs._integrations import OpenAIIntegration -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/protobuf/patch.py b/ddtrace/contrib/internal/protobuf/patch.py index 607c29eb1c0..8ecdd7aefa5 100644 --- a/ddtrace/contrib/internal/protobuf/patch.py +++ b/ddtrace/contrib/internal/protobuf/patch.py @@ -4,7 +4,7 @@ from ddtrace import config from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.pin import Pin +from ddtrace.trace import Pin from .schema_iterator import SchemaExtractor diff --git a/ddtrace/contrib/internal/psycopg/async_connection.py b/ddtrace/contrib/internal/psycopg/async_connection.py index 14ec854ffd1..72c8d70e7ec 100644 --- a/ddtrace/contrib/internal/psycopg/async_connection.py +++ b/ddtrace/contrib/internal/psycopg/async_connection.py @@ -1,4 +1,3 @@ -from ddtrace import Pin from ddtrace import config from ddtrace.constants import SPAN_KIND from ddtrace.constants import SPAN_MEASURED_KEY @@ -11,6 +10,7 @@ from ddtrace.ext import SpanTypes from ddtrace.ext import db from ddtrace.internal.constants import COMPONENT +from ddtrace.trace import Pin class Psycopg3TracedAsyncConnection(dbapi_async.TracedAsyncConnection): diff --git a/ddtrace/contrib/internal/psycopg/connection.py b/ddtrace/contrib/internal/psycopg/connection.py index c823e17dc61..a5e5353ad13 100644 --- a/ddtrace/contrib/internal/psycopg/connection.py +++ b/ddtrace/contrib/internal/psycopg/connection.py @@ -1,4 +1,3 @@ -from ddtrace import Pin from ddtrace import config from ddtrace.constants import SPAN_KIND from ddtrace.constants import SPAN_MEASURED_KEY @@ -15,6 +14,7 @@ from ddtrace.ext import net from ddtrace.ext import sql from ddtrace.internal.constants import COMPONENT +from ddtrace.trace import Pin class Psycopg3TracedConnection(dbapi.TracedConnection): diff --git a/ddtrace/contrib/internal/psycopg/patch.py 
b/ddtrace/contrib/internal/psycopg/patch.py index 7da5c1c73c7..9e24cee6696 100644 --- a/ddtrace/contrib/internal/psycopg/patch.py +++ b/ddtrace/contrib/internal/psycopg/patch.py @@ -3,9 +3,9 @@ import os from typing import List # noqa:F401 -from ddtrace import Pin from ddtrace import config from ddtrace.contrib import dbapi +from ddtrace.trace import Pin try: diff --git a/ddtrace/contrib/internal/pylibmc/client.py b/ddtrace/contrib/internal/pylibmc/client.py index 917a42b293e..3ea6f09c62c 100644 --- a/ddtrace/contrib/internal/pylibmc/client.py +++ b/ddtrace/contrib/internal/pylibmc/client.py @@ -51,7 +51,12 @@ def __init__(self, client=None, service=memcached.SERVICE, tracer=None, *args, * super(TracedClient, self).__init__(client) schematized_service = schematize_service_name(service) - pin = ddtrace.Pin(service=schematized_service, tracer=tracer) + # Calling ddtrace.pin.Pin(...) with the `tracer` argument generates a deprecation warning. + # Remove this if statement when the `tracer` argument is removed + if tracer is ddtrace.tracer: + pin = ddtrace.trace.Pin(service=schematized_service) + else: + pin = ddtrace.trace.Pin(service=schematized_service, tracer=tracer) pin.onto(self) # attempt to collect the pool of urls this client talks to @@ -64,7 +69,7 @@ def clone(self, *args, **kwargs): # rewrap new connections. 
cloned = self.__wrapped__.clone(*args, **kwargs) traced_client = TracedClient(cloned) - pin = ddtrace.Pin.get_from(self) + pin = ddtrace.trace.Pin.get_from(self) if pin: pin.clone().onto(traced_client) return traced_client @@ -155,7 +160,7 @@ def _no_span(self): def _span(self, cmd_name): """Return a span timing the given command.""" - pin = ddtrace.Pin.get_from(self) + pin = ddtrace.trace.Pin.get_from(self) if not pin or not pin.enabled(): return self._no_span() diff --git a/ddtrace/contrib/internal/pymemcache/client.py b/ddtrace/contrib/internal/pymemcache/client.py index 18a46a41053..37e14842a94 100644 --- a/ddtrace/contrib/internal/pymemcache/client.py +++ b/ddtrace/contrib/internal/pymemcache/client.py @@ -29,7 +29,7 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.schema import schematize_cache_operation from ddtrace.internal.utils.formats import asbool -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/pymemcache/patch.py b/ddtrace/contrib/internal/pymemcache/patch.py index 07402680e9e..dd3687cd248 100644 --- a/ddtrace/contrib/internal/pymemcache/patch.py +++ b/ddtrace/contrib/internal/pymemcache/patch.py @@ -1,11 +1,11 @@ import pymemcache import pymemcache.client.hash +from ddtrace._trace.pin import _DD_PIN_NAME +from ddtrace._trace.pin import _DD_PIN_PROXY_NAME +from ddtrace._trace.pin import Pin from ddtrace.ext import memcached as memcachedx from ddtrace.internal.schema import schematize_service_name -from ddtrace.pin import _DD_PIN_NAME -from ddtrace.pin import _DD_PIN_PROXY_NAME -from ddtrace.pin import Pin from .client import WrappedClient from .client import WrappedHashClient diff --git a/ddtrace/contrib/internal/pymongo/client.py b/ddtrace/contrib/internal/pymongo/client.py index d5b2530d1f7..2cdf2185586 100644 --- a/ddtrace/contrib/internal/pymongo/client.py +++ b/ddtrace/contrib/internal/pymongo/client.py @@ -10,7 +10,6 @@ # project import ddtrace 
-from ddtrace import Pin from ddtrace import config from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY from ddtrace.constants import SPAN_KIND @@ -26,6 +25,7 @@ from ddtrace.internal.schema import schematize_database_operation from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils import get_argument_value +from ddtrace.trace import Pin from .parse import parse_msg from .parse import parse_query @@ -61,7 +61,7 @@ def __setddpin__(client, pin): pin.onto(client._topology) def __getddpin__(client): - return ddtrace.Pin.get_from(client._topology) + return ddtrace.trace.Pin.get_from(client._topology) # Set a pin on the mongoclient pin on the topology object # This allows us to pass the same pin to the server objects @@ -103,7 +103,7 @@ def _trace_topology_select_server(func, args, kwargs): # Ensure the pin used on the traced mongo client is passed down to the topology instance # This allows us to pass the same pin in traced server objects. topology_instance = get_argument_value(args, kwargs, 0, "self") - pin = ddtrace.Pin.get_from(topology_instance) + pin = ddtrace.trace.Pin.get_from(topology_instance) if pin is not None: pin.onto(server) @@ -125,7 +125,7 @@ def _datadog_trace_operation(operation, wrapped): log.exception("error parsing query") # Gets the pin from the mogno client (through the topology object) - pin = ddtrace.Pin.get_from(wrapped) + pin = ddtrace.trace.Pin.get_from(wrapped) # if we couldn't parse or shouldn't trace the message, just go. if not cmd or not pin or not pin.enabled(): return None @@ -220,7 +220,7 @@ def _trace_socket_command(func, args, kwargs): except Exception: log.exception("error parsing spec. 
skipping trace") - pin = ddtrace.Pin.get_from(socket_instance) + pin = ddtrace.trace.Pin.get_from(socket_instance) # skip tracing if we don't have a piece of data we need if not dbname or not cmd or not pin or not pin.enabled(): return func(*args, **kwargs) @@ -239,7 +239,7 @@ def _trace_socket_write_command(func, args, kwargs): except Exception: log.exception("error parsing msg") - pin = ddtrace.Pin.get_from(socket_instance) + pin = ddtrace.trace.Pin.get_from(socket_instance) # if we couldn't parse it, don't try to trace it. if not cmd or not pin or not pin.enabled(): return func(*args, **kwargs) @@ -252,7 +252,7 @@ def _trace_socket_write_command(func, args, kwargs): def _trace_cmd(cmd, socket_instance, address): - pin = ddtrace.Pin.get_from(socket_instance) + pin = ddtrace.trace.Pin.get_from(socket_instance) s = pin.tracer.trace( schematize_database_operation("pymongo.cmd", database_provider="mongodb"), span_type=SpanTypes.MONGODB, diff --git a/ddtrace/contrib/internal/pymongo/patch.py b/ddtrace/contrib/internal/pymongo/patch.py index 0c0927ffea1..200a4a902b8 100644 --- a/ddtrace/contrib/internal/pymongo/patch.py +++ b/ddtrace/contrib/internal/pymongo/patch.py @@ -2,7 +2,6 @@ import pymongo -from ddtrace import Pin from ddtrace import config from ddtrace.constants import SPAN_KIND from ddtrace.constants import SPAN_MEASURED_KEY @@ -15,6 +14,7 @@ from ddtrace.internal.utils import get_argument_value from ddtrace.internal.wrapping import unwrap as _u from ddtrace.internal.wrapping import wrap as _w +from ddtrace.trace import Pin from ....internal.schema import schematize_service_name diff --git a/ddtrace/contrib/internal/pymysql/patch.py b/ddtrace/contrib/internal/pymysql/patch.py index 00fee4f5ad7..a9a16d50608 100644 --- a/ddtrace/contrib/internal/pymysql/patch.py +++ b/ddtrace/contrib/internal/pymysql/patch.py @@ -3,7 +3,6 @@ import pymysql import wrapt -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.dbapi import TracedConnection from 
ddtrace.contrib.trace_utils import _convert_to_string @@ -13,6 +12,7 @@ from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.formats import asbool from ddtrace.propagation._database_monitoring import _DBM_Propagator +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/pynamodb/patch.py b/ddtrace/contrib/internal/pynamodb/patch.py index 15e1874ee77..be4ba00c893 100644 --- a/ddtrace/contrib/internal/pynamodb/patch.py +++ b/ddtrace/contrib/internal/pynamodb/patch.py @@ -20,7 +20,7 @@ from ddtrace.internal.utils import ArgumentError from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.formats import deep_getattr -from ddtrace.pin import Pin +from ddtrace.trace import Pin # Pynamodb connection class diff --git a/ddtrace/contrib/internal/pyodbc/patch.py b/ddtrace/contrib/internal/pyodbc/patch.py index 40b561d2f53..180895a202e 100644 --- a/ddtrace/contrib/internal/pyodbc/patch.py +++ b/ddtrace/contrib/internal/pyodbc/patch.py @@ -2,7 +2,6 @@ import pyodbc -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.dbapi import TracedConnection from ddtrace.contrib.dbapi import TracedCursor @@ -11,6 +10,7 @@ from ddtrace.ext import db from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.formats import asbool +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/pytest/__init__.py b/ddtrace/contrib/internal/pytest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ddtrace/contrib/pytest/_atr_utils.py b/ddtrace/contrib/internal/pytest/_atr_utils.py similarity index 94% rename from ddtrace/contrib/pytest/_atr_utils.py rename to ddtrace/contrib/internal/pytest/_atr_utils.py index 0d684486602..82d65c284b0 100644 --- a/ddtrace/contrib/pytest/_atr_utils.py +++ b/ddtrace/contrib/internal/pytest/_atr_utils.py @@ -3,15 +3,15 @@ import _pytest import pytest -from 
ddtrace.contrib.pytest._retry_utils import RetryOutcomes -from ddtrace.contrib.pytest._retry_utils import _get_outcome_from_retry -from ddtrace.contrib.pytest._retry_utils import _get_retry_attempt_string -from ddtrace.contrib.pytest._retry_utils import set_retry_num -from ddtrace.contrib.pytest._types import _pytest_report_teststatus_return_type -from ddtrace.contrib.pytest._types import pytest_TestReport -from ddtrace.contrib.pytest._utils import PYTEST_STATUS -from ddtrace.contrib.pytest._utils import _get_test_id_from_item -from ddtrace.contrib.pytest._utils import _TestOutcome +from ddtrace.contrib.internal.pytest._retry_utils import RetryOutcomes +from ddtrace.contrib.internal.pytest._retry_utils import _get_outcome_from_retry +from ddtrace.contrib.internal.pytest._retry_utils import _get_retry_attempt_string +from ddtrace.contrib.internal.pytest._retry_utils import set_retry_num +from ddtrace.contrib.internal.pytest._types import _pytest_report_teststatus_return_type +from ddtrace.contrib.internal.pytest._types import pytest_TestReport +from ddtrace.contrib.internal.pytest._utils import PYTEST_STATUS +from ddtrace.contrib.internal.pytest._utils import _get_test_id_from_item +from ddtrace.contrib.internal.pytest._utils import _TestOutcome from ddtrace.ext.test_visibility.api import TestStatus from ddtrace.internal.logger import get_logger from ddtrace.internal.test_visibility._internal_item_ids import InternalTestId diff --git a/ddtrace/contrib/pytest/_benchmark_utils.py b/ddtrace/contrib/internal/pytest/_benchmark_utils.py similarity index 87% rename from ddtrace/contrib/pytest/_benchmark_utils.py rename to ddtrace/contrib/internal/pytest/_benchmark_utils.py index 77dd6061b13..70a79c60700 100644 --- a/ddtrace/contrib/pytest/_benchmark_utils.py +++ b/ddtrace/contrib/internal/pytest/_benchmark_utils.py @@ -1,7 +1,7 @@ import pytest -from ddtrace.contrib.pytest._utils import _get_test_id_from_item -from ddtrace.contrib.pytest_benchmark.constants import 
PLUGIN_METRICS_V2 +from ddtrace.contrib.internal.pytest._utils import _get_test_id_from_item +from ddtrace.contrib.internal.pytest_benchmark.constants import PLUGIN_METRICS_V2 from ddtrace.internal.logger import get_logger from ddtrace.internal.test_visibility._benchmark_mixin import BenchmarkDurationData from ddtrace.internal.test_visibility.api import InternalTest diff --git a/ddtrace/contrib/pytest/_efd_utils.py b/ddtrace/contrib/internal/pytest/_efd_utils.py similarity index 94% rename from ddtrace/contrib/pytest/_efd_utils.py rename to ddtrace/contrib/internal/pytest/_efd_utils.py index 1e16934bb11..a64148cd574 100644 --- a/ddtrace/contrib/pytest/_efd_utils.py +++ b/ddtrace/contrib/internal/pytest/_efd_utils.py @@ -3,15 +3,15 @@ import _pytest import pytest -from ddtrace.contrib.pytest._retry_utils import RetryOutcomes -from ddtrace.contrib.pytest._retry_utils import _get_outcome_from_retry -from ddtrace.contrib.pytest._retry_utils import _get_retry_attempt_string -from ddtrace.contrib.pytest._retry_utils import set_retry_num -from ddtrace.contrib.pytest._types import _pytest_report_teststatus_return_type -from ddtrace.contrib.pytest._types import pytest_TestReport -from ddtrace.contrib.pytest._utils import PYTEST_STATUS -from ddtrace.contrib.pytest._utils import _get_test_id_from_item -from ddtrace.contrib.pytest._utils import _TestOutcome +from ddtrace.contrib.internal.pytest._retry_utils import RetryOutcomes +from ddtrace.contrib.internal.pytest._retry_utils import _get_outcome_from_retry +from ddtrace.contrib.internal.pytest._retry_utils import _get_retry_attempt_string +from ddtrace.contrib.internal.pytest._retry_utils import set_retry_num +from ddtrace.contrib.internal.pytest._types import _pytest_report_teststatus_return_type +from ddtrace.contrib.internal.pytest._types import pytest_TestReport +from ddtrace.contrib.internal.pytest._utils import PYTEST_STATUS +from ddtrace.contrib.internal.pytest._utils import _get_test_id_from_item +from 
ddtrace.contrib.internal.pytest._utils import _TestOutcome from ddtrace.ext.test_visibility.api import TestStatus from ddtrace.internal.logger import get_logger from ddtrace.internal.test_visibility._efd_mixins import EFDTestStatus diff --git a/ddtrace/contrib/pytest/_plugin_v1.py b/ddtrace/contrib/internal/pytest/_plugin_v1.py similarity index 98% rename from ddtrace/contrib/pytest/_plugin_v1.py rename to ddtrace/contrib/internal/pytest/_plugin_v1.py index e7b7b2caac9..fc4982bdc67 100644 --- a/ddtrace/contrib/pytest/_plugin_v1.py +++ b/ddtrace/contrib/internal/pytest/_plugin_v1.py @@ -30,15 +30,15 @@ from ddtrace.contrib.internal.coverage.patch import unpatch as unpatch_coverage from ddtrace.contrib.internal.coverage.utils import _is_coverage_invoked_by_coverage_run from ddtrace.contrib.internal.coverage.utils import _is_coverage_patched -from ddtrace.contrib.pytest._utils import _extract_span -from ddtrace.contrib.pytest._utils import _is_enabled_early -from ddtrace.contrib.pytest._utils import _is_pytest_8_or_later -from ddtrace.contrib.pytest._utils import _is_test_unskippable -from ddtrace.contrib.pytest.constants import FRAMEWORK -from ddtrace.contrib.pytest.constants import KIND -from ddtrace.contrib.pytest.constants import XFAIL_REASON -from ddtrace.contrib.pytest.plugin import is_enabled -from ddtrace.contrib.unittest import unpatch as unpatch_unittest +from ddtrace.contrib.internal.pytest._utils import _extract_span +from ddtrace.contrib.internal.pytest._utils import _is_enabled_early +from ddtrace.contrib.internal.pytest._utils import _is_pytest_8_or_later +from ddtrace.contrib.internal.pytest._utils import _is_test_unskippable +from ddtrace.contrib.internal.pytest.constants import FRAMEWORK +from ddtrace.contrib.internal.pytest.constants import KIND +from ddtrace.contrib.internal.pytest.constants import XFAIL_REASON +from ddtrace.contrib.internal.pytest.plugin import is_enabled +from ddtrace.contrib.internal.unittest.patch import unpatch as 
unpatch_unittest from ddtrace.ext import SpanTypes from ddtrace.ext import test from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility diff --git a/ddtrace/contrib/pytest/_plugin_v2.py b/ddtrace/contrib/internal/pytest/_plugin_v2.py similarity index 90% rename from ddtrace/contrib/pytest/_plugin_v2.py rename to ddtrace/contrib/internal/pytest/_plugin_v2.py index f15373a776a..ece2098e05e 100644 --- a/ddtrace/contrib/pytest/_plugin_v2.py +++ b/ddtrace/contrib/internal/pytest/_plugin_v2.py @@ -6,38 +6,38 @@ from ddtrace import DDTraceDeprecationWarning from ddtrace import config as dd_config -from ddtrace import patch +from ddtrace._monkey import patch from ddtrace.contrib.coverage import patch as patch_coverage from ddtrace.contrib.internal.coverage.constants import PCT_COVERED_KEY from ddtrace.contrib.internal.coverage.data import _coverage_data from ddtrace.contrib.internal.coverage.patch import run_coverage_report from ddtrace.contrib.internal.coverage.utils import _is_coverage_invoked_by_coverage_run from ddtrace.contrib.internal.coverage.utils import _is_coverage_patched -from ddtrace.contrib.pytest._benchmark_utils import _set_benchmark_data_from_item -from ddtrace.contrib.pytest._plugin_v1 import _extract_reason -from ddtrace.contrib.pytest._plugin_v1 import _is_pytest_cov_enabled -from ddtrace.contrib.pytest._types import _pytest_report_teststatus_return_type -from ddtrace.contrib.pytest._types import pytest_CallInfo -from ddtrace.contrib.pytest._types import pytest_Config -from ddtrace.contrib.pytest._types import pytest_TestReport -from ddtrace.contrib.pytest._utils import PYTEST_STATUS -from ddtrace.contrib.pytest._utils import _get_module_path_from_item -from ddtrace.contrib.pytest._utils import _get_names_from_item -from ddtrace.contrib.pytest._utils import _get_session_command -from ddtrace.contrib.pytest._utils import _get_source_file_info -from ddtrace.contrib.pytest._utils import _get_test_id_from_item -from 
ddtrace.contrib.pytest._utils import _get_test_parameters_json -from ddtrace.contrib.pytest._utils import _is_enabled_early -from ddtrace.contrib.pytest._utils import _is_test_unskippable -from ddtrace.contrib.pytest._utils import _pytest_marked_to_skip -from ddtrace.contrib.pytest._utils import _pytest_version_supports_atr -from ddtrace.contrib.pytest._utils import _pytest_version_supports_efd -from ddtrace.contrib.pytest._utils import _pytest_version_supports_retries -from ddtrace.contrib.pytest._utils import _TestOutcome -from ddtrace.contrib.pytest.constants import FRAMEWORK -from ddtrace.contrib.pytest.constants import XFAIL_REASON -from ddtrace.contrib.pytest.plugin import is_enabled -from ddtrace.contrib.unittest import unpatch as unpatch_unittest +from ddtrace.contrib.internal.pytest._benchmark_utils import _set_benchmark_data_from_item +from ddtrace.contrib.internal.pytest._plugin_v1 import _extract_reason +from ddtrace.contrib.internal.pytest._plugin_v1 import _is_pytest_cov_enabled +from ddtrace.contrib.internal.pytest._types import _pytest_report_teststatus_return_type +from ddtrace.contrib.internal.pytest._types import pytest_CallInfo +from ddtrace.contrib.internal.pytest._types import pytest_Config +from ddtrace.contrib.internal.pytest._types import pytest_TestReport +from ddtrace.contrib.internal.pytest._utils import PYTEST_STATUS +from ddtrace.contrib.internal.pytest._utils import _get_module_path_from_item +from ddtrace.contrib.internal.pytest._utils import _get_names_from_item +from ddtrace.contrib.internal.pytest._utils import _get_session_command +from ddtrace.contrib.internal.pytest._utils import _get_source_file_info +from ddtrace.contrib.internal.pytest._utils import _get_test_id_from_item +from ddtrace.contrib.internal.pytest._utils import _get_test_parameters_json +from ddtrace.contrib.internal.pytest._utils import _is_enabled_early +from ddtrace.contrib.internal.pytest._utils import _is_test_unskippable +from 
ddtrace.contrib.internal.pytest._utils import _pytest_marked_to_skip +from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_atr +from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_efd +from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_retries +from ddtrace.contrib.internal.pytest._utils import _TestOutcome +from ddtrace.contrib.internal.pytest.constants import FRAMEWORK +from ddtrace.contrib.internal.pytest.constants import XFAIL_REASON +from ddtrace.contrib.internal.pytest.plugin import is_enabled +from ddtrace.contrib.internal.unittest.patch import unpatch as unpatch_unittest from ddtrace.ext import test from ddtrace.ext.test_visibility import ITR_SKIPPING_LEVEL from ddtrace.ext.test_visibility.api import TestExcInfo @@ -63,21 +63,21 @@ if _pytest_version_supports_retries(): - from ddtrace.contrib.pytest._retry_utils import get_retry_num + from ddtrace.contrib.internal.pytest._retry_utils import get_retry_num if _pytest_version_supports_efd(): - from ddtrace.contrib.pytest._efd_utils import efd_get_failed_reports - from ddtrace.contrib.pytest._efd_utils import efd_get_teststatus - from ddtrace.contrib.pytest._efd_utils import efd_handle_retries - from ddtrace.contrib.pytest._efd_utils import efd_pytest_terminal_summary_post_yield + from ddtrace.contrib.internal.pytest._efd_utils import efd_get_failed_reports + from ddtrace.contrib.internal.pytest._efd_utils import efd_get_teststatus + from ddtrace.contrib.internal.pytest._efd_utils import efd_handle_retries + from ddtrace.contrib.internal.pytest._efd_utils import efd_pytest_terminal_summary_post_yield if _pytest_version_supports_atr(): - from ddtrace.contrib.pytest._atr_utils import atr_get_failed_reports - from ddtrace.contrib.pytest._atr_utils import atr_get_teststatus - from ddtrace.contrib.pytest._atr_utils import atr_handle_retries - from ddtrace.contrib.pytest._atr_utils import atr_pytest_terminal_summary_post_yield - from 
ddtrace.contrib.pytest._atr_utils import quarantine_atr_get_teststatus - from ddtrace.contrib.pytest._atr_utils import quarantine_pytest_terminal_summary_post_yield + from ddtrace.contrib.internal.pytest._atr_utils import atr_get_failed_reports + from ddtrace.contrib.internal.pytest._atr_utils import atr_get_teststatus + from ddtrace.contrib.internal.pytest._atr_utils import atr_handle_retries + from ddtrace.contrib.internal.pytest._atr_utils import atr_pytest_terminal_summary_post_yield + from ddtrace.contrib.internal.pytest._atr_utils import quarantine_atr_get_teststatus + from ddtrace.contrib.internal.pytest._atr_utils import quarantine_pytest_terminal_summary_post_yield log = get_logger(__name__) @@ -217,7 +217,7 @@ def pytest_configure(config: pytest_Config) -> None: # pytest-bdd plugin support if config.pluginmanager.hasplugin("pytest-bdd"): - from ddtrace.contrib.pytest._pytest_bdd_subplugin import _PytestBddSubPlugin + from ddtrace.contrib.internal.pytest._pytest_bdd_subplugin import _PytestBddSubPlugin config.pluginmanager.register(_PytestBddSubPlugin(), "_datadog-pytest-bdd") else: diff --git a/ddtrace/contrib/pytest/_pytest_bdd_subplugin.py b/ddtrace/contrib/internal/pytest/_pytest_bdd_subplugin.py similarity index 88% rename from ddtrace/contrib/pytest/_pytest_bdd_subplugin.py rename to ddtrace/contrib/internal/pytest/_pytest_bdd_subplugin.py index 7c964af3d59..4349f10654e 100644 --- a/ddtrace/contrib/pytest/_pytest_bdd_subplugin.py +++ b/ddtrace/contrib/internal/pytest/_pytest_bdd_subplugin.py @@ -13,13 +13,13 @@ import pytest -from ddtrace.contrib.pytest._utils import _get_test_id_from_item -from ddtrace.contrib.pytest_bdd import get_version -from ddtrace.contrib.pytest_bdd._plugin import _extract_span -from ddtrace.contrib.pytest_bdd._plugin import _get_step_func_args_json -from ddtrace.contrib.pytest_bdd._plugin import _store_span -from ddtrace.contrib.pytest_bdd.constants import FRAMEWORK -from ddtrace.contrib.pytest_bdd.constants import STEP_KIND 
+from ddtrace.contrib.internal.pytest._utils import _get_test_id_from_item +from ddtrace.contrib.internal.pytest_bdd._plugin import _extract_span +from ddtrace.contrib.internal.pytest_bdd._plugin import _get_step_func_args_json +from ddtrace.contrib.internal.pytest_bdd._plugin import _store_span +from ddtrace.contrib.internal.pytest_bdd.constants import FRAMEWORK +from ddtrace.contrib.internal.pytest_bdd.constants import STEP_KIND +from ddtrace.contrib.internal.pytest_bdd.patch import get_version from ddtrace.ext import test from ddtrace.internal.logger import get_logger from ddtrace.internal.test_visibility.api import InternalTest diff --git a/ddtrace/contrib/pytest/_retry_utils.py b/ddtrace/contrib/internal/pytest/_retry_utils.py similarity index 97% rename from ddtrace/contrib/pytest/_retry_utils.py rename to ddtrace/contrib/internal/pytest/_retry_utils.py index 6e38a2974c8..eab45f049be 100644 --- a/ddtrace/contrib/pytest/_retry_utils.py +++ b/ddtrace/contrib/internal/pytest/_retry_utils.py @@ -8,8 +8,8 @@ from _pytest.runner import CallInfo import pytest -from ddtrace.contrib.pytest._types import tmppath_result_key -from ddtrace.contrib.pytest._utils import _TestOutcome +from ddtrace.contrib.internal.pytest._types import tmppath_result_key +from ddtrace.contrib.internal.pytest._utils import _TestOutcome from ddtrace.ext.test_visibility.api import TestExcInfo from ddtrace.ext.test_visibility.api import TestStatus from ddtrace.internal import core diff --git a/ddtrace/contrib/pytest/_types.py b/ddtrace/contrib/internal/pytest/_types.py similarity index 90% rename from ddtrace/contrib/pytest/_types.py rename to ddtrace/contrib/internal/pytest/_types.py index ff1d07feb4d..8222bc7bc54 100644 --- a/ddtrace/contrib/pytest/_types.py +++ b/ddtrace/contrib/internal/pytest/_types.py @@ -1,6 +1,6 @@ import typing as t -from ddtrace.contrib.pytest._utils import _get_pytest_version_tuple +from ddtrace.contrib.internal.pytest._utils import _get_pytest_version_tuple if 
_get_pytest_version_tuple() >= (7, 0, 0): diff --git a/ddtrace/contrib/pytest/_utils.py b/ddtrace/contrib/internal/pytest/_utils.py similarity index 95% rename from ddtrace/contrib/pytest/_utils.py rename to ddtrace/contrib/internal/pytest/_utils.py index 8dc53ab0228..7e8b2bc2714 100644 --- a/ddtrace/contrib/pytest/_utils.py +++ b/ddtrace/contrib/internal/pytest/_utils.py @@ -7,10 +7,10 @@ import pytest -from ddtrace.contrib.pytest.constants import ATR_MIN_SUPPORTED_VERSION -from ddtrace.contrib.pytest.constants import EFD_MIN_SUPPORTED_VERSION -from ddtrace.contrib.pytest.constants import ITR_MIN_SUPPORTED_VERSION -from ddtrace.contrib.pytest.constants import RETRIES_MIN_SUPPORTED_VERSION +from ddtrace.contrib.internal.pytest.constants import ATR_MIN_SUPPORTED_VERSION +from ddtrace.contrib.internal.pytest.constants import EFD_MIN_SUPPORTED_VERSION +from ddtrace.contrib.internal.pytest.constants import ITR_MIN_SUPPORTED_VERSION +from ddtrace.contrib.internal.pytest.constants import RETRIES_MIN_SUPPORTED_VERSION from ddtrace.ext.test_visibility.api import TestExcInfo from ddtrace.ext.test_visibility.api import TestModuleId from ddtrace.ext.test_visibility.api import TestSourceFileInfo diff --git a/ddtrace/contrib/internal/pytest/constants.py b/ddtrace/contrib/internal/pytest/constants.py new file mode 100644 index 00000000000..cc5d768fc38 --- /dev/null +++ b/ddtrace/contrib/internal/pytest/constants.py @@ -0,0 +1,11 @@ +FRAMEWORK = "pytest" +KIND = "test" + + +# XFail Reason +XFAIL_REASON = "pytest.xfail.reason" + +ITR_MIN_SUPPORTED_VERSION = (7, 2, 0) +RETRIES_MIN_SUPPORTED_VERSION = (7, 0, 0) +EFD_MIN_SUPPORTED_VERSION = RETRIES_MIN_SUPPORTED_VERSION +ATR_MIN_SUPPORTED_VERSION = RETRIES_MIN_SUPPORTED_VERSION diff --git a/ddtrace/contrib/internal/pytest/newhooks.py b/ddtrace/contrib/internal/pytest/newhooks.py new file mode 100644 index 00000000000..c44fd0a1535 --- /dev/null +++ b/ddtrace/contrib/internal/pytest/newhooks.py @@ -0,0 +1,26 @@ +"""pytest-ddtrace 
hooks. + +These hooks are used to provide extra data used by the Datadog CI Visibility plugin. + +For example: module, suite, and test names for a given item. + +Note that these names will affect the display and reporting of tests in the Datadog UI, as well as information stored in +the Intelligent Test Runner. Differing hook implementations may impact the behavior of Datadog CI Visibility products. +""" + +import pytest + + +@pytest.hookspec(firstresult=True) +def pytest_ddtrace_get_item_module_name(item: pytest.Item) -> str: +    """Returns the module name to use when reporting CI Visibility results, should be unique""" + + +@pytest.hookspec(firstresult=True) +def pytest_ddtrace_get_item_suite_name(item: pytest.Item) -> str: +    """Returns the suite name to use when reporting CI Visibility result, should be unique""" + + +@pytest.hookspec(firstresult=True) +def pytest_ddtrace_get_item_test_name(item: pytest.Item) -> str: +    """Returns the test name to use when reporting CI Visibility result, should be unique""" diff --git a/ddtrace/contrib/internal/pytest/patch.py b/ddtrace/contrib/internal/pytest/patch.py new file mode 100644 index 00000000000..0299b665268 --- /dev/null +++ b/ddtrace/contrib/internal/pytest/patch.py @@ -0,0 +1,6 @@ +# Get version is imported from patch.py in _monkey.py +def get_version(): + # type: () -> str + import pytest + + return pytest.__version__ diff --git a/ddtrace/contrib/internal/pytest/plugin.py b/ddtrace/contrib/internal/pytest/plugin.py new file mode 100644 index 00000000000..52cf54a6f9c --- /dev/null +++ b/ddtrace/contrib/internal/pytest/plugin.py @@ -0,0 +1,178 @@ +""" +This custom pytest plugin implements tracing for pytest by using pytest hooks. The plugin registers tracing code +to be run at specific points during pytest execution.
The most important hooks used are: + + * pytest_sessionstart: during pytest session startup, a custom trace filter is configured to the global tracer to + only send test spans, which are generated by the plugin. + * pytest_runtest_protocol: this wraps around the execution of a pytest test function, which we trace. Most span + tags are generated and added in this function. We also store the span on the underlying pytest test item to + retrieve later when we need to report test status/result. + * pytest_runtest_makereport: this hook is used to set the test status/result tag, including skipped tests and + expected failures. + +""" +import os +from typing import Dict # noqa:F401 + +import pytest + +from ddtrace import config +from ddtrace.appsec._iast._pytest_plugin import ddtrace_iast # noqa:F401 +from ddtrace.appsec._iast._utils import _is_iast_enabled +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest._utils import _extract_span +from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_itr + + +# pytest default settings +config._add( + "pytest", + dict( + _default_service="pytest", + operation_name=os.getenv("DD_PYTEST_OPERATION_NAME", default="pytest.test"), + ), +) + + +DDTRACE_HELP_MSG = "Enable tracing of pytest functions." +NO_DDTRACE_HELP_MSG = "Disable tracing of pytest functions." +DDTRACE_INCLUDE_CLASS_HELP_MSG = "Prepend 'ClassName.' to names of class-based tests." +PATCH_ALL_HELP_MSG = "Call ddtrace.patch_all before running tests." 
+ + +def is_enabled(config): + """Check if the ddtrace plugin is enabled.""" + return (config.getoption("ddtrace") or config.getini("ddtrace")) and not config.getoption("no-ddtrace") + + +def pytest_addoption(parser): + """Add ddtrace options.""" + group = parser.getgroup("ddtrace") + + group._addoption( + "--ddtrace", + action="store_true", + dest="ddtrace", + default=False, + help=DDTRACE_HELP_MSG, + ) + + group._addoption( + "--no-ddtrace", + action="store_true", + dest="no-ddtrace", + default=False, + help=NO_DDTRACE_HELP_MSG, + ) + + group._addoption( + "--ddtrace-patch-all", + action="store_true", + dest="ddtrace-patch-all", + default=False, + help=PATCH_ALL_HELP_MSG, + ) + + group._addoption( + "--ddtrace-include-class-name", + action="store_true", + dest="ddtrace-include-class-name", + default=False, + help=DDTRACE_INCLUDE_CLASS_HELP_MSG, + ) + + group._addoption( + "--ddtrace-iast-fail-tests", + action="store_true", + dest="ddtrace-iast-fail-tests", + default=False, + help=DDTRACE_INCLUDE_CLASS_HELP_MSG, + ) + + parser.addini("ddtrace", DDTRACE_HELP_MSG, type="bool") + parser.addini("no-ddtrace", DDTRACE_HELP_MSG, type="bool") + parser.addini("ddtrace-patch-all", PATCH_ALL_HELP_MSG, type="bool") + parser.addini("ddtrace-include-class-name", DDTRACE_INCLUDE_CLASS_HELP_MSG, type="bool") + if _is_iast_enabled(): + from ddtrace.appsec._iast import _iast_pytest_activation + + _iast_pytest_activation() + + +# Version-specific pytest hooks +if _USE_PLUGIN_V2: + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_collection_finish # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_configure as _versioned_pytest_configure + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_ddtrace_get_item_module_name # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_ddtrace_get_item_suite_name # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_ddtrace_get_item_test_name # noqa: 
F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_load_initial_conftests # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_report_teststatus # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_runtest_makereport # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_runtest_protocol # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_sessionfinish # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_sessionstart # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v2 import pytest_terminal_summary # noqa: F401 +else: + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_collection_modifyitems # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_configure as _versioned_pytest_configure + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_ddtrace_get_item_module_name # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_ddtrace_get_item_suite_name # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_ddtrace_get_item_test_name # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_load_initial_conftests # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_runtest_makereport # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_runtest_protocol # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_sessionfinish # noqa: F401 + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_sessionstart # noqa: F401 + + # Internal coverage is only used for ITR at the moment, so the hook is only added if the pytest version supports it + if _pytest_version_supports_itr(): + from ddtrace.contrib.internal.pytest._plugin_v1 import pytest_terminal_summary # noqa: F401 + + +def pytest_configure(config): + config.addinivalue_line("markers", 
"dd_tags(**kwargs): add tags to current span") + if is_enabled(config): + _versioned_pytest_configure(config) + + +@pytest.hookimpl +def pytest_addhooks(pluginmanager): + from ddtrace.contrib.internal.pytest import newhooks + + pluginmanager.add_hookspecs(newhooks) + + +@pytest.fixture(scope="function") +def ddspan(request): + """Return the :class:`ddtrace._trace.span.Span` instance associated with the + current test when Datadog CI Visibility is enabled. + """ + from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility + + if _CIVisibility.enabled: + return _extract_span(request.node) + + +@pytest.fixture(scope="session") +def ddtracer(): + """Return the :class:`ddtrace.tracer.Tracer` instance for Datadog CI + visibility if it is enabled, otherwise return the default Datadog tracer. + """ + import ddtrace + from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility + + if _CIVisibility.enabled: + return _CIVisibility._instance.tracer + return ddtrace.tracer + + +@pytest.fixture(scope="session", autouse=True) +def patch_all(request): + """Patch all available modules for Datadog tracing when ddtrace-patch-all + is specified in command or .ini. 
+ """ + import ddtrace + + if request.config.getoption("ddtrace-patch-all") or request.config.getini("ddtrace-patch-all"): + ddtrace.patch_all() diff --git a/ddtrace/contrib/internal/pytest_bdd/__init__.py b/ddtrace/contrib/internal/pytest_bdd/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ddtrace/contrib/pytest_bdd/_plugin.py b/ddtrace/contrib/internal/pytest_bdd/_plugin.py similarity index 94% rename from ddtrace/contrib/pytest_bdd/_plugin.py rename to ddtrace/contrib/internal/pytest_bdd/_plugin.py index 1ed8b8099e5..eb7bcc1028a 100644 --- a/ddtrace/contrib/pytest_bdd/_plugin.py +++ b/ddtrace/contrib/internal/pytest_bdd/_plugin.py @@ -4,10 +4,10 @@ import pytest -from ddtrace.contrib.pytest._utils import _extract_span as _extract_feature_span -from ddtrace.contrib.pytest_bdd import get_version -from ddtrace.contrib.pytest_bdd.constants import FRAMEWORK -from ddtrace.contrib.pytest_bdd.constants import STEP_KIND +from ddtrace.contrib.internal.pytest._utils import _extract_span as _extract_feature_span +from ddtrace.contrib.internal.pytest_bdd.constants import FRAMEWORK +from ddtrace.contrib.internal.pytest_bdd.constants import STEP_KIND +from ddtrace.contrib.internal.pytest_bdd.patch import get_version from ddtrace.ext import test from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility from ddtrace.internal.logger import get_logger diff --git a/ddtrace/contrib/internal/pytest_bdd/constants.py b/ddtrace/contrib/internal/pytest_bdd/constants.py new file mode 100644 index 00000000000..2dd377f7619 --- /dev/null +++ b/ddtrace/contrib/internal/pytest_bdd/constants.py @@ -0,0 +1,2 @@ +FRAMEWORK = "pytest_bdd" +STEP_KIND = "pytest_bdd.step" diff --git a/ddtrace/contrib/internal/pytest_bdd/patch.py b/ddtrace/contrib/internal/pytest_bdd/patch.py new file mode 100644 index 00000000000..efab83aee4b --- /dev/null +++ b/ddtrace/contrib/internal/pytest_bdd/patch.py @@ -0,0 +1,9 @@ +# ddtrace/_monkey.py expects all integrations to 
define get_version in /patch.py file +def get_version(): + # type: () -> str + try: + import importlib.metadata as importlib_metadata + except ImportError: + import importlib_metadata # type: ignore[no-redef] + + return str(importlib_metadata.version("pytest-bdd")) diff --git a/ddtrace/contrib/internal/pytest_bdd/plugin.py b/ddtrace/contrib/internal/pytest_bdd/plugin.py new file mode 100644 index 00000000000..22856056162 --- /dev/null +++ b/ddtrace/contrib/internal/pytest_bdd/plugin.py @@ -0,0 +1,30 @@ +from ddtrace import DDTraceDeprecationWarning +from ddtrace import config +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest.plugin import is_enabled as is_ddtrace_enabled +from ddtrace.vendor.debtcollector import deprecate + + +# pytest-bdd default settings +config._add( + "pytest_bdd", + dict( + _default_service="pytest_bdd", + ), +) + + +def pytest_configure(config): + if config.pluginmanager.hasplugin("pytest-bdd") and config.pluginmanager.hasplugin("ddtrace"): + if not _USE_PLUGIN_V2: + if is_ddtrace_enabled(config): + from ._plugin import _PytestBddPlugin + + deprecate( + "the ddtrace.pytest_bdd plugin is deprecated", + message="it will be integrated with the main pytest ddtrace plugin", + removal_version="3.0.0", + category=DDTraceDeprecationWarning, + ) + + config.pluginmanager.register(_PytestBddPlugin(), "_datadog-pytest-bdd") diff --git a/ddtrace/contrib/internal/pytest_benchmark/__init__.py b/ddtrace/contrib/internal/pytest_benchmark/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ddtrace/contrib/pytest_benchmark/_plugin.py b/ddtrace/contrib/internal/pytest_benchmark/_plugin.py similarity index 73% rename from ddtrace/contrib/pytest_benchmark/_plugin.py rename to ddtrace/contrib/internal/pytest_benchmark/_plugin.py index ac6afa350d6..ee54660afc8 100644 --- a/ddtrace/contrib/pytest_benchmark/_plugin.py +++ b/ddtrace/contrib/internal/pytest_benchmark/_plugin.py @@ -1,9 +1,9 
@@ import pytest -from ddtrace.contrib.pytest._utils import _extract_span -from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_INFO -from ddtrace.contrib.pytest_benchmark.constants import PLUGIN_METRICS -from ddtrace.contrib.pytest_benchmark.constants import PLUGIN_OUTLIERS +from ddtrace.contrib.internal.pytest._utils import _extract_span +from ddtrace.contrib.internal.pytest_benchmark.constants import BENCHMARK_INFO +from ddtrace.contrib.internal.pytest_benchmark.constants import PLUGIN_METRICS +from ddtrace.contrib.internal.pytest_benchmark.constants import PLUGIN_OUTLIERS from ddtrace.ext.test import TEST_TYPE diff --git a/ddtrace/contrib/internal/pytest_benchmark/constants.py b/ddtrace/contrib/internal/pytest_benchmark/constants.py new file mode 100644 index 00000000000..b4c4f7f5b27 --- /dev/null +++ b/ddtrace/contrib/internal/pytest_benchmark/constants.py @@ -0,0 +1,79 @@ +BENCHMARK_INFO = "benchmark.duration.info" +BENCHMARK_MEAN = "benchmark.duration.mean" +BENCHMARK_RUN = "benchmark.duration.runs" + +STATISTICS_HD15IQR = "benchmark.duration.statistics.hd15iqr" +STATISTICS_IQR = "benchmark.duration.statistics.iqr" +STATISTICS_IQR_OUTLIERS = "benchmark.duration.statistics.iqr_outliers" +STATISTICS_LD15IQR = "benchmark.duration.statistics.ld15iqr" +STATISTICS_MAX = "benchmark.duration.statistics.max" +STATISTICS_MEAN = "benchmark.duration.statistics.mean" +STATISTICS_MEDIAN = "benchmark.duration.statistics.median" +STATISTICS_MIN = "benchmark.duration.statistics.min" +STATISTICS_N = "benchmark.duration.statistics.n" +STATISTICS_OPS = "benchmark.duration.statistics.ops" +STATISTICS_OUTLIERS = "benchmark.duration.statistics.outliers" +STATISTICS_Q1 = "benchmark.duration.statistics.q1" +STATISTICS_Q3 = "benchmark.duration.statistics.q3" +STATISTICS_STDDEV = "benchmark.duration.statistics.std_dev" +STATISTICS_STDDEV_OUTLIERS = "benchmark.duration.statistics.std_dev_outliers" +STATISTICS_TOTAL = "benchmark.duration.statistics.total" + +PLUGIN_HD15IQR = 
"hd15iqr" +PLUGIN_IQR = "iqr" +PLUGIN_IQR_OUTLIERS = "iqr_outliers" +PLUGIN_LD15IQR = "ld15iqr" +PLUGIN_MAX = "max" +PLUGIN_MEAN = "mean" +PLUGIN_MEDIAN = "median" +PLUGIN_MIN = "min" +PLUGIN_OPS = "ops" +PLUGIN_OUTLIERS = "outliers" +PLUGIN_Q1 = "q1" +PLUGIN_Q3 = "q3" +PLUGIN_ROUNDS = "rounds" +PLUGIN_STDDEV = "stddev" +PLUGIN_STDDEV_OUTLIERS = "stddev_outliers" +PLUGIN_TOTAL = "total" + +PLUGIN_METRICS = { + BENCHMARK_MEAN: PLUGIN_MEAN, + BENCHMARK_RUN: PLUGIN_ROUNDS, + STATISTICS_HD15IQR: PLUGIN_HD15IQR, + STATISTICS_IQR: PLUGIN_IQR, + STATISTICS_IQR_OUTLIERS: PLUGIN_IQR_OUTLIERS, + STATISTICS_LD15IQR: PLUGIN_LD15IQR, + STATISTICS_MAX: PLUGIN_MAX, + STATISTICS_MEAN: PLUGIN_MEAN, + STATISTICS_MEDIAN: PLUGIN_MEDIAN, + STATISTICS_MIN: PLUGIN_MIN, + STATISTICS_OPS: PLUGIN_OPS, + STATISTICS_OUTLIERS: PLUGIN_OUTLIERS, + STATISTICS_Q1: PLUGIN_Q1, + STATISTICS_Q3: PLUGIN_Q3, + STATISTICS_N: PLUGIN_ROUNDS, + STATISTICS_STDDEV: PLUGIN_STDDEV, + STATISTICS_STDDEV_OUTLIERS: PLUGIN_STDDEV_OUTLIERS, + STATISTICS_TOTAL: PLUGIN_TOTAL, +} + +PLUGIN_METRICS_V2 = { + "duration_mean": PLUGIN_MEAN, + "duration_runs": PLUGIN_ROUNDS, + "statistics_hd15iqr": PLUGIN_HD15IQR, + "statistics_iqr": PLUGIN_IQR, + "statistics_iqr_outliers": PLUGIN_IQR_OUTLIERS, + "statistics_ld15iqr": PLUGIN_LD15IQR, + "statistics_max": PLUGIN_MAX, + "statistics_mean": PLUGIN_MEAN, + "statistics_median": PLUGIN_MEDIAN, + "statistics_min": PLUGIN_MIN, + "statistics_n": PLUGIN_ROUNDS, + "statistics_ops": PLUGIN_OPS, + "statistics_outliers": PLUGIN_OUTLIERS, + "statistics_q1": PLUGIN_Q1, + "statistics_q3": PLUGIN_Q3, + "statistics_std_dev": PLUGIN_STDDEV, + "statistics_std_dev_outliers": PLUGIN_STDDEV_OUTLIERS, + "statistics_total": PLUGIN_TOTAL, +} diff --git a/ddtrace/contrib/internal/pytest_benchmark/plugin.py b/ddtrace/contrib/internal/pytest_benchmark/plugin.py new file mode 100644 index 00000000000..04728f764a3 --- /dev/null +++ b/ddtrace/contrib/internal/pytest_benchmark/plugin.py @@ -0,0 +1,19 @@ +from 
ddtrace import DDTraceDeprecationWarning +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest.plugin import is_enabled as is_ddtrace_enabled +from ddtrace.vendor.debtcollector import deprecate + + +def pytest_configure(config): + if config.pluginmanager.hasplugin("benchmark") and config.pluginmanager.hasplugin("ddtrace"): + if is_ddtrace_enabled(config): + deprecate( + "this version of the ddtrace.pytest_benchmark plugin is deprecated", + message="it will be integrated with the main pytest ddtrace plugin", + removal_version="3.0.0", + category=DDTraceDeprecationWarning, + ) + if not _USE_PLUGIN_V2: + from ._plugin import _PytestBenchmarkPlugin + + config.pluginmanager.register(_PytestBenchmarkPlugin(), "_datadog-pytest-benchmark") diff --git a/ddtrace/contrib/internal/redis/asyncio_patch.py b/ddtrace/contrib/internal/redis/asyncio_patch.py index 0115096ba0f..7c5bad354ab 100644 --- a/ddtrace/contrib/internal/redis/asyncio_patch.py +++ b/ddtrace/contrib/internal/redis/asyncio_patch.py @@ -4,7 +4,7 @@ from ddtrace._trace.utils_redis import _instrument_redis_execute_pipeline from ddtrace.contrib.redis_utils import _run_redis_command_async from ddtrace.internal.utils.formats import stringify_cache_args -from ddtrace.pin import Pin +from ddtrace.trace import Pin async def instrumented_async_execute_command(func, instance, args, kwargs): diff --git a/ddtrace/contrib/internal/redis/patch.py b/ddtrace/contrib/internal/redis/patch.py index 18b23fd68fa..33520e5894d 100644 --- a/ddtrace/contrib/internal/redis/patch.py +++ b/ddtrace/contrib/internal/redis/patch.py @@ -14,7 +14,7 @@ from ddtrace.internal.utils.formats import CMD_MAX_LEN from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import stringify_cache_args -from ddtrace.pin import Pin +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/rediscluster/patch.py b/ddtrace/contrib/internal/rediscluster/patch.py 
index a415096ef10..c550df7e9ea 100644 --- a/ddtrace/contrib/internal/rediscluster/patch.py +++ b/ddtrace/contrib/internal/rediscluster/patch.py @@ -23,7 +23,7 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import stringify_cache_args from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.pin import Pin +from ddtrace.trace import Pin # DEV: In `2.0.0` `__version__` is a string and `VERSION` is a tuple, diff --git a/ddtrace/contrib/internal/requests/patch.py b/ddtrace/contrib/internal/requests/patch.py index a5867662d78..d4ec1f5182d 100644 --- a/ddtrace/contrib/internal/requests/patch.py +++ b/ddtrace/contrib/internal/requests/patch.py @@ -10,8 +10,8 @@ from ddtrace.contrib.trace_utils import unwrap as _u from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.formats import asbool -from ddtrace.pin import Pin from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin from .connection import _wrap_send diff --git a/ddtrace/contrib/internal/requests/session.py b/ddtrace/contrib/internal/requests/session.py index 9551c70226c..783dda4ff7a 100644 --- a/ddtrace/contrib/internal/requests/session.py +++ b/ddtrace/contrib/internal/requests/session.py @@ -1,8 +1,8 @@ import requests from wrapt import wrap_function_wrapper as _w -from ddtrace import Pin from ddtrace import config +from ddtrace.trace import Pin from .connection import _wrap_send diff --git a/ddtrace/contrib/internal/rq/patch.py b/ddtrace/contrib/internal/rq/patch.py index a6b54b28f27..c1f39431f57 100644 --- a/ddtrace/contrib/internal/rq/patch.py +++ b/ddtrace/contrib/internal/rq/patch.py @@ -1,6 +1,5 @@ import os -from ddtrace import Pin from ddtrace import config from ddtrace.constants import SPAN_KIND from ddtrace.internal import core @@ -10,6 +9,7 @@ from ddtrace.internal.schema.span_attribute_schema import SpanDirection from ddtrace.internal.utils import get_argument_value from 
ddtrace.internal.utils.formats import asbool +from ddtrace.trace import Pin from ....ext import SpanKind from ....ext import SpanTypes diff --git a/ddtrace/contrib/internal/sanic/patch.py b/ddtrace/contrib/internal/sanic/patch.py index 826267cd341..5d105cf2f32 100644 --- a/ddtrace/contrib/internal/sanic/patch.py +++ b/ddtrace/contrib/internal/sanic/patch.py @@ -17,7 +17,7 @@ from ddtrace.internal.schema import schematize_url_operation from ddtrace.internal.schema.span_attribute_schema import SpanDirection from ddtrace.internal.utils.wrappers import unwrap as _u -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/snowflake/patch.py b/ddtrace/contrib/internal/snowflake/patch.py index 87896aab109..d28844ea992 100644 --- a/ddtrace/contrib/internal/snowflake/patch.py +++ b/ddtrace/contrib/internal/snowflake/patch.py @@ -2,7 +2,6 @@ import wrapt -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.dbapi import TracedConnection from ddtrace.contrib.dbapi import TracedCursor @@ -11,6 +10,7 @@ from ddtrace.ext import net from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.formats import asbool +from ddtrace.trace import Pin config._add( diff --git a/ddtrace/contrib/internal/sqlalchemy/engine.py b/ddtrace/contrib/internal/sqlalchemy/engine.py index 05bdb7ca0f1..3b5f96be9e7 100644 --- a/ddtrace/contrib/internal/sqlalchemy/engine.py +++ b/ddtrace/contrib/internal/sqlalchemy/engine.py @@ -29,7 +29,7 @@ from ddtrace.internal.constants import COMPONENT from ddtrace.internal.schema import schematize_database_operation from ddtrace.internal.schema import schematize_service_name -from ddtrace.pin import Pin +from ddtrace.trace import Pin def trace_engine(engine, tracer=None, service=None): @@ -67,7 +67,12 @@ def __init__(self, tracer, service, engine): self.name = schematize_database_operation("%s.query" % self.vendor, database_provider=self.vendor) # 
attach the PIN - Pin(tracer=tracer, service=self.service).onto(engine) + # Calling ddtrace.pin.Pin(...) with the `tracer` argument generates a deprecation warning. + # Remove this if statement when the `tracer` argument is removed + if self.tracer is ddtrace.tracer: + Pin(service=self.service).onto(engine) + else: + Pin(tracer=tracer, service=self.service).onto(engine) listen(engine, "before_cursor_execute", self._before_cur_exec) listen(engine, "after_cursor_execute", self._after_cur_exec) diff --git a/ddtrace/contrib/internal/sqlite3/patch.py b/ddtrace/contrib/internal/sqlite3/patch.py index dedf92c6297..f47906146bc 100644 --- a/ddtrace/contrib/internal/sqlite3/patch.py +++ b/ddtrace/contrib/internal/sqlite3/patch.py @@ -15,8 +15,8 @@ from ddtrace.internal.schema import schematize_database_operation from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.formats import asbool -from ddtrace.pin import Pin from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin # Original connect method diff --git a/ddtrace/contrib/internal/starlette/patch.py b/ddtrace/contrib/internal/starlette/patch.py index b872a77ecd7..064722b67f1 100644 --- a/ddtrace/contrib/internal/starlette/patch.py +++ b/ddtrace/contrib/internal/starlette/patch.py @@ -12,7 +12,6 @@ from wrapt import ObjectProxy from wrapt import wrap_function_wrapper as _w -from ddtrace import Pin from ddtrace import config from ddtrace._trace.span import Span # noqa:F401 from ddtrace.appsec._iast import _is_iast_enabled @@ -28,6 +27,7 @@ from ddtrace.internal.utils import get_blocked from ddtrace.internal.utils import set_argument_value from ddtrace.internal.utils.wrappers import unwrap as _u +from ddtrace.trace import Pin from ddtrace.vendor.packaging.version import parse as parse_version diff --git a/ddtrace/contrib/internal/subprocess/patch.py b/ddtrace/contrib/internal/subprocess/patch.py index 76530c195df..80d05b107bb 100644 --- 
a/ddtrace/contrib/internal/subprocess/patch.py +++ b/ddtrace/contrib/internal/subprocess/patch.py @@ -14,7 +14,6 @@ from typing import Union # noqa:F401 from typing import cast # noqa:F401 -from ddtrace import Pin from ddtrace import config from ddtrace.contrib import trace_utils from ddtrace.contrib.internal.subprocess.constants import COMMANDS @@ -23,6 +22,7 @@ from ddtrace.internal.compat import shjoin from ddtrace.internal.logger import get_logger from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/tornado/application.py b/ddtrace/contrib/internal/tornado/application.py index 3a7dc832b5e..993a6d27c46 100644 --- a/ddtrace/contrib/internal/tornado/application.py +++ b/ddtrace/contrib/internal/tornado/application.py @@ -34,19 +34,19 @@ def tracer_config(__init__, app, args, kwargs): service = settings["default_service"] # extract extra settings - extra_settings = settings.get("settings", {}) + trace_processors = settings.get("settings", {}).get("FILTERS") # the tracer must use the right Context propagation and wrap executor; # this action is done twice because the patch() method uses the # global tracer while here we can have a different instance (even if # this is not usual). - tracer.configure( + tracer._configure( context_provider=context_provider, wrap_executor=decorators.wrap_executor, enabled=settings.get("enabled", None), hostname=settings.get("agent_hostname", None), port=settings.get("agent_port", None), - settings=extra_settings, + trace_processors=trace_processors, ) # set global tags if any @@ -55,4 +55,9 @@ def tracer_config(__init__, app, args, kwargs): tracer.set_tags(tags) # configure the PIN object for template rendering - ddtrace.Pin(service=service, tracer=tracer).onto(template) + # Required for backwards compatibility. Remove the else clause when + # the `ddtrace.Pin` object no longer accepts the Pin argument. 
+ if tracer is ddtrace.tracer: + ddtrace.trace.Pin(service=service).onto(template) + else: + ddtrace.trace.Pin(service=service, tracer=tracer).onto(template) diff --git a/ddtrace/contrib/internal/tornado/patch.py b/ddtrace/contrib/internal/tornado/patch.py index c8a0a1305e6..7645b97e790 100644 --- a/ddtrace/contrib/internal/tornado/patch.py +++ b/ddtrace/contrib/internal/tornado/patch.py @@ -50,7 +50,7 @@ def patch(): _w("tornado.template", "Template.generate", template.generate) # configure the global tracer - ddtrace.tracer.configure( + ddtrace.tracer._configure( context_provider=context_provider, wrap_executor=decorators.wrap_executor, ) diff --git a/ddtrace/contrib/internal/tornado/template.py b/ddtrace/contrib/internal/tornado/template.py index 5d94d2a358c..a47ee53b9e4 100644 --- a/ddtrace/contrib/internal/tornado/template.py +++ b/ddtrace/contrib/internal/tornado/template.py @@ -1,9 +1,9 @@ from tornado import template -from ddtrace import Pin from ddtrace import config from ddtrace.ext import SpanTypes from ddtrace.internal.constants import COMPONENT +from ddtrace.trace import Pin def generate(func, renderer, args, kwargs): diff --git a/ddtrace/contrib/internal/unittest/__init__.py b/ddtrace/contrib/internal/unittest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ddtrace/contrib/internal/unittest/constants.py b/ddtrace/contrib/internal/unittest/constants.py new file mode 100644 index 00000000000..dc58863a2a5 --- /dev/null +++ b/ddtrace/contrib/internal/unittest/constants.py @@ -0,0 +1,8 @@ +COMPONENT_VALUE = "unittest" +FRAMEWORK = "unittest" +KIND = "test" + +TEST_OPERATION_NAME = "unittest.test" +SUITE_OPERATION_NAME = "unittest.test_suite" +SESSION_OPERATION_NAME = "unittest.test_session" +MODULE_OPERATION_NAME = "unittest.test_module" diff --git a/ddtrace/contrib/internal/unittest/patch.py b/ddtrace/contrib/internal/unittest/patch.py new file mode 100644 index 00000000000..74ce8d1d6a0 --- /dev/null +++ 
b/ddtrace/contrib/internal/unittest/patch.py @@ -0,0 +1,868 @@ +import inspect +import os +from typing import Union +import unittest + +import wrapt + +import ddtrace +from ddtrace import config +from ddtrace.constants import SPAN_KIND +from ddtrace.contrib.internal.coverage.data import _coverage_data +from ddtrace.contrib.internal.coverage.patch import patch as patch_coverage +from ddtrace.contrib.internal.coverage.patch import run_coverage_report +from ddtrace.contrib.internal.coverage.patch import unpatch as unpatch_coverage +from ddtrace.contrib.internal.coverage.utils import _is_coverage_invoked_by_coverage_run +from ddtrace.contrib.internal.coverage.utils import _is_coverage_patched +from ddtrace.contrib.internal.unittest.constants import COMPONENT_VALUE +from ddtrace.contrib.internal.unittest.constants import FRAMEWORK +from ddtrace.contrib.internal.unittest.constants import KIND +from ddtrace.contrib.internal.unittest.constants import MODULE_OPERATION_NAME +from ddtrace.contrib.internal.unittest.constants import SESSION_OPERATION_NAME +from ddtrace.contrib.internal.unittest.constants import SUITE_OPERATION_NAME +from ddtrace.ext import SpanTypes +from ddtrace.ext import test +from ddtrace.ext.ci import RUNTIME_VERSION +from ddtrace.ext.ci import _get_runtime_and_os_metadata +from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility +from ddtrace.internal.ci_visibility.constants import EVENT_TYPE as _EVENT_TYPE +from ddtrace.internal.ci_visibility.constants import ITR_CORRELATION_ID_TAG_NAME +from ddtrace.internal.ci_visibility.constants import ITR_UNSKIPPABLE_REASON +from ddtrace.internal.ci_visibility.constants import MODULE_ID as _MODULE_ID +from ddtrace.internal.ci_visibility.constants import MODULE_TYPE as _MODULE_TYPE +from ddtrace.internal.ci_visibility.constants import SESSION_ID as _SESSION_ID +from ddtrace.internal.ci_visibility.constants import SESSION_TYPE as _SESSION_TYPE +from ddtrace.internal.ci_visibility.constants import 
SKIPPED_BY_ITR_REASON +from ddtrace.internal.ci_visibility.constants import SUITE_ID as _SUITE_ID +from ddtrace.internal.ci_visibility.constants import SUITE_TYPE as _SUITE_TYPE +from ddtrace.internal.ci_visibility.constants import TEST +from ddtrace.internal.ci_visibility.coverage import _module_has_dd_coverage_enabled +from ddtrace.internal.ci_visibility.coverage import _report_coverage_to_span +from ddtrace.internal.ci_visibility.coverage import _start_coverage +from ddtrace.internal.ci_visibility.coverage import _stop_coverage +from ddtrace.internal.ci_visibility.coverage import _switch_coverage_context +from ddtrace.internal.ci_visibility.utils import _add_pct_covered_to_span +from ddtrace.internal.ci_visibility.utils import _add_start_end_source_file_path_data_to_span +from ddtrace.internal.ci_visibility.utils import _generate_fully_qualified_test_name +from ddtrace.internal.ci_visibility.utils import get_relative_or_absolute_path_for_path +from ddtrace.internal.constants import COMPONENT +from ddtrace.internal.logger import get_logger +from ddtrace.internal.utils.formats import asbool +from ddtrace.internal.utils.wrappers import unwrap as _u + + +log = get_logger(__name__) +_global_skipped_elements = 0 + +# unittest default settings +config._add( + "unittest", + dict( + _default_service="unittest", + operation_name=os.getenv("DD_UNITTEST_OPERATION_NAME", default="unittest.test"), + strict_naming=asbool(os.getenv("DD_CIVISIBILITY_UNITTEST_STRICT_NAMING", default=True)), + ), +) + + +def get_version(): + # type: () -> str + return "" + + +def _enable_unittest_if_not_started(): + _initialize_unittest_data() + if _CIVisibility.enabled: + return + _CIVisibility.enable(config=ddtrace.config.unittest) + + +def _initialize_unittest_data(): + if not hasattr(_CIVisibility, "_unittest_data"): + _CIVisibility._unittest_data = {} + if "suites" not in _CIVisibility._unittest_data: + _CIVisibility._unittest_data["suites"] = {} + if "modules" not in 
_CIVisibility._unittest_data: + _CIVisibility._unittest_data["modules"] = {} + if "unskippable_tests" not in _CIVisibility._unittest_data: + _CIVisibility._unittest_data["unskippable_tests"] = set() + + +def _set_tracer(tracer: ddtrace.tracer): + """Manually sets the tracer instance to `unittest.`""" + unittest._datadog_tracer = tracer + + +def _is_test_coverage_enabled(test_object) -> bool: + return _CIVisibility._instance._collect_coverage_enabled and not _is_skipped_test(test_object) + + +def _is_skipped_test(test_object) -> bool: + testMethod = getattr(test_object, test_object._testMethodName, "") + return ( + (hasattr(test_object.__class__, "__unittest_skip__") and test_object.__class__.__unittest_skip__) + or (hasattr(testMethod, "__unittest_skip__") and testMethod.__unittest_skip__) + or _is_skipped_by_itr(test_object) + ) + + +def _is_skipped_by_itr(test_object) -> bool: + return hasattr(test_object, "_dd_itr_skip") and test_object._dd_itr_skip + + +def _should_be_skipped_by_itr(args: tuple, test_module_suite_path: str, test_name: str, test_object) -> bool: + return ( + len(args) + and _CIVisibility._instance._should_skip_path(test_module_suite_path, test_name) + and not _is_skipped_test(test_object) + ) + + +def _is_marked_as_unskippable(test_object) -> bool: + test_suite_name = _extract_suite_name_from_test_method(test_object) + test_name = _extract_test_method_name(test_object) + test_module_path = _extract_module_file_path(test_object) + test_module_suite_name = _generate_fully_qualified_test_name(test_module_path, test_suite_name, test_name) + return ( + hasattr(_CIVisibility, "_unittest_data") + and test_module_suite_name in _CIVisibility._unittest_data["unskippable_tests"] + ) + + +def _update_skipped_elements_and_set_tags(test_module_span: ddtrace.Span, test_session_span: ddtrace.Span): + global _global_skipped_elements + _global_skipped_elements += 1 + + test_module_span._metrics[test.ITR_TEST_SKIPPING_COUNT] += 1 + 
test_module_span.set_tag_str(test.ITR_TEST_SKIPPING_TESTS_SKIPPED, "true") + test_module_span.set_tag_str(test.ITR_DD_CI_ITR_TESTS_SKIPPED, "true") + + test_session_span.set_tag_str(test.ITR_TEST_SKIPPING_TESTS_SKIPPED, "true") + test_session_span.set_tag_str(test.ITR_DD_CI_ITR_TESTS_SKIPPED, "true") + + +def _store_test_span(item, span: ddtrace.Span): + """Store datadog span at `unittest` test instance.""" + item._datadog_span = span + + +def _store_module_identifier(test_object: unittest.TextTestRunner): + """Store module identifier at `unittest` module instance, this is useful to classify event types.""" + if hasattr(test_object, "test") and hasattr(test_object.test, "_tests"): + for module in test_object.test._tests: + if len(module._tests) and _extract_module_name_from_module(module): + _set_identifier(module, "module") + + +def _store_suite_identifier(module): + """Store suite identifier at `unittest` suite instance, this is useful to classify event types.""" + if hasattr(module, "_tests"): + for suite in module._tests: + if len(suite._tests) and _extract_module_name_from_module(suite): + _set_identifier(suite, "suite") + + +def _is_test(item) -> bool: + if ( + type(item) == unittest.TestSuite + or not hasattr(item, "_testMethodName") + or (ddtrace.config.unittest.strict_naming and not item._testMethodName.startswith("test")) + ): + return False + return True + + +def _extract_span(item) -> Union[ddtrace.Span, None]: + return getattr(item, "_datadog_span", None) + + +def _extract_command_name_from_session(session: unittest.TextTestRunner) -> str: + if not hasattr(session, "progName"): + return "python -m unittest" + return getattr(session, "progName", "") + + +def _extract_test_method_name(test_object) -> str: + """Extract test method name from `unittest` instance.""" + return getattr(test_object, "_testMethodName", "") + + +def _extract_session_span() -> Union[ddtrace.Span, None]: + return getattr(_CIVisibility, "_datadog_session_span", None) + + +def 
_extract_module_span(module_identifier: str) -> Union[ddtrace.Span, None]: + if hasattr(_CIVisibility, "_unittest_data") and module_identifier in _CIVisibility._unittest_data["modules"]: + return _CIVisibility._unittest_data["modules"][module_identifier].get("module_span") + return None + + +def _extract_suite_span(suite_identifier: str) -> Union[ddtrace.Span, None]: + if hasattr(_CIVisibility, "_unittest_data") and suite_identifier in _CIVisibility._unittest_data["suites"]: + return _CIVisibility._unittest_data["suites"][suite_identifier].get("suite_span") + return None + + +def _update_status_item(item: ddtrace.Span, status: str): + """ + Sets the status for each Span implementing the test FAIL logic override. + """ + existing_status = item.get_tag(test.STATUS) + if existing_status and (status == test.Status.SKIP.value or existing_status == test.Status.FAIL.value): + return None + item.set_tag_str(test.STATUS, status) + return None + + +def _extract_suite_name_from_test_method(item) -> str: + item_type = type(item) + return getattr(item_type, "__name__", "") + + +def _extract_module_name_from_module(item) -> str: + if _is_test(item): + return type(item).__module__ + return "" + + +def _extract_test_reason(item: tuple) -> str: + """ + Given a tuple of type [test_class, str], it returns the test failure/skip reason + """ + return item[1] + + +def _extract_test_file_name(item) -> str: + return os.path.basename(inspect.getfile(item.__class__)) + + +def _extract_module_file_path(item) -> str: + if _is_test(item): + try: + test_module_object = inspect.getfile(item.__class__) + except TypeError: + log.debug( + "Tried to collect module file path but it is a built-in Python function", + ) + return "" + return get_relative_or_absolute_path_for_path(test_module_object, os.getcwd()) + + return "" + + +def _generate_test_resource(suite_name: str, test_name: str) -> str: + return "{}.{}".format(suite_name, test_name) + + +def _generate_suite_resource(test_suite: str) -> str: + 
return "{}".format(test_suite) + + +def _generate_module_resource(test_module: str) -> str: + return "{}".format(test_module) + + +def _generate_session_resource(test_command: str) -> str: + return "{}".format(test_command) + + +def _set_test_skipping_tags_to_span(span: ddtrace.Span): + span.set_tag_str(test.ITR_TEST_SKIPPING_ENABLED, "true") + span.set_tag_str(test.ITR_TEST_SKIPPING_TYPE, TEST) + span.set_tag_str(test.ITR_TEST_SKIPPING_TESTS_SKIPPED, "false") + span.set_tag_str(test.ITR_DD_CI_ITR_TESTS_SKIPPED, "false") + span.set_tag_str(test.ITR_FORCED_RUN, "false") + span.set_tag_str(test.ITR_UNSKIPPABLE, "false") + + +def _set_identifier(item, name: str): + """ + Adds an event type classification to a `unittest` test. + """ + item._datadog_object = name + + +def _is_valid_result(instance: unittest.TextTestRunner, args: tuple) -> bool: + return instance and isinstance(instance, unittest.runner.TextTestResult) and args + + +def _is_valid_test_call(kwargs: dict) -> bool: + """ + Validates that kwargs is empty to ensure that `unittest` is running a test + """ + return not len(kwargs) + + +def _is_valid_module_suite_call(func) -> bool: + """ + Validates that the mocked function is an actual function from `unittest` + """ + return type(func).__name__ == "method" or type(func).__name__ == "instancemethod" + + +def _is_invoked_by_cli(instance: unittest.TextTestRunner) -> bool: + return ( + hasattr(instance, "progName") + or hasattr(_CIVisibility, "_datadog_entry") + and _CIVisibility._datadog_entry == "cli" + ) + + +def _extract_test_method_object(test_object): + if hasattr(test_object, "_testMethodName"): + return getattr(test_object, test_object._testMethodName, None) + return None + + +def _is_invoked_by_text_test_runner() -> bool: + return hasattr(_CIVisibility, "_datadog_entry") and _CIVisibility._datadog_entry == "TextTestRunner" + + +def _generate_module_suite_path(test_module_path: str, test_suite_name: str) -> str: + return "{}.{}".format(test_module_path, 
test_suite_name) + + +def _populate_suites_and_modules(test_objects: list, seen_suites: dict, seen_modules: dict): + """ + Discovers suites and modules and initializes the seen_suites and seen_modules dictionaries. + """ + if not hasattr(test_objects, "__iter__"): + return + for test_object in test_objects: + if not _is_test(test_object): + _populate_suites_and_modules(test_object, seen_suites, seen_modules) + continue + test_module_path = _extract_module_file_path(test_object) + test_suite_name = _extract_suite_name_from_test_method(test_object) + test_module_suite_path = _generate_module_suite_path(test_module_path, test_suite_name) + if test_module_path not in seen_modules: + seen_modules[test_module_path] = { + "module_span": None, + "remaining_suites": 0, + } + if test_module_suite_path not in seen_suites: + seen_suites[test_module_suite_path] = { + "suite_span": None, + "remaining_tests": 0, + } + + seen_modules[test_module_path]["remaining_suites"] += 1 + + seen_suites[test_module_suite_path]["remaining_tests"] += 1 + + +def _finish_remaining_suites_and_modules(seen_suites: dict, seen_modules: dict): + """ + Forces all suite and module spans to finish and updates their statuses. + """ + for suite in seen_suites.values(): + test_suite_span = suite["suite_span"] + if test_suite_span and not test_suite_span.finished: + _finish_span(test_suite_span) + + for module in seen_modules.values(): + test_module_span = module["module_span"] + if test_module_span and not test_module_span.finished: + _finish_span(test_module_span) + del _CIVisibility._unittest_data + + +def _update_remaining_suites_and_modules( + test_module_suite_path: str, test_module_path: str, test_module_span: ddtrace.Span, test_suite_span: ddtrace.Span +): + """ + Updates the remaining test suite and test counter and finishes spans when these have finished their execution. 
+ """ + suite_dict = _CIVisibility._unittest_data["suites"][test_module_suite_path] + modules_dict = _CIVisibility._unittest_data["modules"][test_module_path] + + suite_dict["remaining_tests"] -= 1 + if suite_dict["remaining_tests"] == 0: + modules_dict["remaining_suites"] -= 1 + _finish_span(test_suite_span) + if modules_dict["remaining_suites"] == 0: + _finish_span(test_module_span) + + +def _update_test_skipping_count_span(span: ddtrace.Span): + if _CIVisibility.test_skipping_enabled(): + span.set_metric(test.ITR_TEST_SKIPPING_COUNT, _global_skipped_elements) + + +def _extract_skip_if_reason(args, kwargs): + if len(args) >= 2: + return _extract_test_reason(args) + elif kwargs and "reason" in kwargs: + return kwargs["reason"] + return "" + + +def patch(): + """ + Patch the instrumented methods from unittest + """ + if getattr(unittest, "_datadog_patch", False) or _CIVisibility.enabled: + return + _initialize_unittest_data() + + unittest._datadog_patch = True + + _w = wrapt.wrap_function_wrapper + + _w(unittest, "TextTestResult.addSuccess", add_success_test_wrapper) + _w(unittest, "TextTestResult.addFailure", add_failure_test_wrapper) + _w(unittest, "TextTestResult.addError", add_failure_test_wrapper) + _w(unittest, "TextTestResult.addSkip", add_skip_test_wrapper) + _w(unittest, "TextTestResult.addExpectedFailure", add_xfail_test_wrapper) + _w(unittest, "TextTestResult.addUnexpectedSuccess", add_xpass_test_wrapper) + _w(unittest, "skipIf", skip_if_decorator) + _w(unittest, "TestCase.run", handle_test_wrapper) + _w(unittest, "TestSuite.run", collect_text_test_runner_session) + _w(unittest, "TextTestRunner.run", handle_text_test_runner_wrapper) + _w(unittest, "TestProgram.runTests", handle_cli_run) + + +def unpatch(): + """ + Undo patched instrumented methods from unittest + """ + if not getattr(unittest, "_datadog_patch", False): + return + + _u(unittest.TextTestResult, "addSuccess") + _u(unittest.TextTestResult, "addFailure") + _u(unittest.TextTestResult, 
"addError") + _u(unittest.TextTestResult, "addSkip") + _u(unittest.TextTestResult, "addExpectedFailure") + _u(unittest.TextTestResult, "addUnexpectedSuccess") + _u(unittest, "skipIf") + _u(unittest.TestSuite, "run") + _u(unittest.TestCase, "run") + _u(unittest.TextTestRunner, "run") + _u(unittest.TestProgram, "runTests") + + unittest._datadog_patch = False + _CIVisibility.disable() + + +def _set_test_span_status(test_item, status: str, exc_info: str = None, skip_reason: str = None): + span = _extract_span(test_item) + if not span: + log.debug("Tried setting test result for test but could not find span for %s", test_item) + return None + span.set_tag_str(test.STATUS, status) + if exc_info: + span.set_exc_info(exc_info[0], exc_info[1], exc_info[2]) + if status == test.Status.SKIP.value: + span.set_tag_str(test.SKIP_REASON, skip_reason) + + +def _set_test_xpass_xfail_result(test_item, result: str): + """ + Sets `test.result` and `test.status` to a XFAIL or XPASS test. + """ + span = _extract_span(test_item) + if not span: + log.debug("Tried setting test result for an xpass or xfail test but could not find span for %s", test_item) + return None + span.set_tag_str(test.RESULT, result) + status = span.get_tag(test.STATUS) + if result == test.Status.XFAIL.value: + if status == test.Status.PASS.value: + span.set_tag_str(test.STATUS, test.Status.FAIL.value) + elif status == test.Status.FAIL.value: + span.set_tag_str(test.STATUS, test.Status.PASS.value) + + +def add_success_test_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): + if _is_valid_result(instance, args): + _set_test_span_status(test_item=args[0], status=test.Status.PASS.value) + + return func(*args, **kwargs) + + +def add_failure_test_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): + if _is_valid_result(instance, args): + _set_test_span_status(test_item=args[0], exc_info=_extract_test_reason(args), status=test.Status.FAIL.value) + + return func(*args, 
**kwargs) + + +def add_xfail_test_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): + if _is_valid_result(instance, args): + _set_test_xpass_xfail_result(test_item=args[0], result=test.Status.XFAIL.value) + + return func(*args, **kwargs) + + +def add_skip_test_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): + if _is_valid_result(instance, args): + _set_test_span_status(test_item=args[0], skip_reason=_extract_test_reason(args), status=test.Status.SKIP.value) + + return func(*args, **kwargs) + + +def add_xpass_test_wrapper(func, instance, args: tuple, kwargs: dict): + if _is_valid_result(instance, args): + _set_test_xpass_xfail_result(test_item=args[0], result=test.Status.XPASS.value) + + return func(*args, **kwargs) + + +def _mark_test_as_unskippable(obj): + test_name = obj.__name__ + test_suite_name = str(obj).split(".")[0].split()[1] + test_module_path = get_relative_or_absolute_path_for_path(obj.__code__.co_filename, os.getcwd()) + test_module_suite_name = _generate_fully_qualified_test_name(test_module_path, test_suite_name, test_name) + _CIVisibility._unittest_data["unskippable_tests"].add(test_module_suite_name) + return obj + + +def _using_unskippable_decorator(args, kwargs): + return args[0] is False and _extract_skip_if_reason(args, kwargs) == ITR_UNSKIPPABLE_REASON + + +def skip_if_decorator(func, instance, args: tuple, kwargs: dict): + if _using_unskippable_decorator(args, kwargs): + return _mark_test_as_unskippable + return func(*args, **kwargs) + + +def handle_test_wrapper(func, instance, args: tuple, kwargs: dict): + """ + Creates module and suite spans for `unittest` test executions. 
+ """ + if _is_valid_test_call(kwargs) and _is_test(instance) and hasattr(_CIVisibility, "_unittest_data"): + test_name = _extract_test_method_name(instance) + test_suite_name = _extract_suite_name_from_test_method(instance) + test_module_path = _extract_module_file_path(instance) + test_module_suite_path = _generate_module_suite_path(test_module_path, test_suite_name) + test_suite_span = _extract_suite_span(test_module_suite_path) + test_module_span = _extract_module_span(test_module_path) + if test_module_span is None and test_module_path in _CIVisibility._unittest_data["modules"]: + test_module_span = _start_test_module_span(instance) + _CIVisibility._unittest_data["modules"][test_module_path]["module_span"] = test_module_span + if test_suite_span is None and test_module_suite_path in _CIVisibility._unittest_data["suites"]: + test_suite_span = _start_test_suite_span(instance) + suite_dict = _CIVisibility._unittest_data["suites"][test_module_suite_path] + suite_dict["suite_span"] = test_suite_span + if not test_module_span or not test_suite_span: + log.debug("Suite and/or module span not found for test: %s", test_name) + return func(*args, **kwargs) + with _start_test_span(instance, test_suite_span) as span: + test_session_span = _CIVisibility._datadog_session_span + root_directory = os.getcwd() + fqn_test = _generate_fully_qualified_test_name(test_module_path, test_suite_name, test_name) + + if _CIVisibility.test_skipping_enabled(): + if ITR_CORRELATION_ID_TAG_NAME in _CIVisibility._instance._itr_meta: + span.set_tag_str( + ITR_CORRELATION_ID_TAG_NAME, _CIVisibility._instance._itr_meta[ITR_CORRELATION_ID_TAG_NAME] + ) + + if _is_marked_as_unskippable(instance): + span.set_tag_str(test.ITR_UNSKIPPABLE, "true") + test_module_span.set_tag_str(test.ITR_UNSKIPPABLE, "true") + test_session_span.set_tag_str(test.ITR_UNSKIPPABLE, "true") + test_module_suite_path_without_extension = "{}/{}".format( + os.path.splitext(test_module_path)[0], test_suite_name + ) + if 
_should_be_skipped_by_itr(args, test_module_suite_path_without_extension, test_name, instance): + if _is_marked_as_unskippable(instance): + span.set_tag_str(test.ITR_FORCED_RUN, "true") + test_module_span.set_tag_str(test.ITR_FORCED_RUN, "true") + test_session_span.set_tag_str(test.ITR_FORCED_RUN, "true") + else: + _update_skipped_elements_and_set_tags(test_module_span, test_session_span) + instance._dd_itr_skip = True + span.set_tag_str(test.ITR_SKIPPED, "true") + span.set_tag_str(test.SKIP_REASON, SKIPPED_BY_ITR_REASON) + + if _is_skipped_by_itr(instance): + result = args[0] + result.startTest(test=instance) + result.addSkip(test=instance, reason=SKIPPED_BY_ITR_REASON) + _set_test_span_status( + test_item=instance, skip_reason=SKIPPED_BY_ITR_REASON, status=test.Status.SKIP.value + ) + result.stopTest(test=instance) + else: + if _is_test_coverage_enabled(instance): + if not _module_has_dd_coverage_enabled(unittest, silent_mode=True): + unittest._dd_coverage = _start_coverage(root_directory) + _switch_coverage_context(unittest._dd_coverage, fqn_test) + result = func(*args, **kwargs) + _update_status_item(test_suite_span, span.get_tag(test.STATUS)) + if _is_test_coverage_enabled(instance): + _report_coverage_to_span(unittest._dd_coverage, span, root_directory) + + _update_remaining_suites_and_modules( + test_module_suite_path, test_module_path, test_module_span, test_suite_span + ) + return result + return func(*args, **kwargs) + + +def collect_text_test_runner_session(func, instance: unittest.TestSuite, args: tuple, kwargs: dict): + """ + Discovers test suites and tests for the current `unittest` `TextTestRunner` execution + """ + if not _is_valid_module_suite_call(func): + return func(*args, **kwargs) + _initialize_unittest_data() + if _is_invoked_by_text_test_runner(): + seen_suites = _CIVisibility._unittest_data["suites"] + seen_modules = _CIVisibility._unittest_data["modules"] + _populate_suites_and_modules(instance._tests, seen_suites, seen_modules) + + result 
= func(*args, **kwargs) + + return result + result = func(*args, **kwargs) + return result + + +def _start_test_session_span(instance) -> ddtrace.Span: + """ + Starts a test session span and sets the required tags for a `unittest` session instance. + """ + tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer) + test_command = _extract_command_name_from_session(instance) + resource_name = _generate_session_resource(test_command) + test_session_span = tracer.trace( + SESSION_OPERATION_NAME, + service=_CIVisibility._instance._service, + span_type=SpanTypes.TEST, + resource=resource_name, + ) + test_session_span.set_tag_str(_EVENT_TYPE, _SESSION_TYPE) + test_session_span.set_tag_str(_SESSION_ID, str(test_session_span.span_id)) + + test_session_span.set_tag_str(COMPONENT, COMPONENT_VALUE) + test_session_span.set_tag_str(SPAN_KIND, KIND) + + test_session_span.set_tag_str(test.COMMAND, test_command) + test_session_span.set_tag_str(test.FRAMEWORK, FRAMEWORK) + test_session_span.set_tag_str(test.FRAMEWORK_VERSION, _get_runtime_and_os_metadata()[RUNTIME_VERSION]) + + test_session_span.set_tag_str(test.TEST_TYPE, SpanTypes.TEST) + test_session_span.set_tag_str( + test.ITR_TEST_CODE_COVERAGE_ENABLED, + "true" if _CIVisibility._instance._collect_coverage_enabled else "false", + ) + + _CIVisibility.set_test_session_name(test_command=test_command) + + if _CIVisibility.test_skipping_enabled(): + _set_test_skipping_tags_to_span(test_session_span) + else: + test_session_span.set_tag_str(test.ITR_TEST_SKIPPING_ENABLED, "false") + _store_module_identifier(instance) + if _is_coverage_invoked_by_coverage_run(): + patch_coverage() + return test_session_span + + +def _start_test_module_span(instance) -> ddtrace.Span: + """ + Starts a test module span and sets the required tags for a `unittest` module instance. 
+ """ + tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer) + test_session_span = _extract_session_span() + test_module_name = _extract_module_name_from_module(instance) + resource_name = _generate_module_resource(test_module_name) + test_module_span = tracer._start_span( + MODULE_OPERATION_NAME, + service=_CIVisibility._instance._service, + span_type=SpanTypes.TEST, + activate=True, + child_of=test_session_span, + resource=resource_name, + ) + test_module_span.set_tag_str(_EVENT_TYPE, _MODULE_TYPE) + test_module_span.set_tag_str(_SESSION_ID, str(test_session_span.span_id)) + test_module_span.set_tag_str(_MODULE_ID, str(test_module_span.span_id)) + + test_module_span.set_tag_str(COMPONENT, COMPONENT_VALUE) + test_module_span.set_tag_str(SPAN_KIND, KIND) + + test_module_span.set_tag_str(test.COMMAND, test_session_span.get_tag(test.COMMAND)) + test_module_span.set_tag_str(test.FRAMEWORK, FRAMEWORK) + test_module_span.set_tag_str(test.FRAMEWORK_VERSION, _get_runtime_and_os_metadata()[RUNTIME_VERSION]) + + test_module_span.set_tag_str(test.TEST_TYPE, SpanTypes.TEST) + test_module_span.set_tag_str(test.MODULE, test_module_name) + test_module_span.set_tag_str(test.MODULE_PATH, _extract_module_file_path(instance)) + test_module_span.set_tag_str( + test.ITR_TEST_CODE_COVERAGE_ENABLED, + "true" if _CIVisibility._instance._collect_coverage_enabled else "false", + ) + if _CIVisibility.test_skipping_enabled(): + _set_test_skipping_tags_to_span(test_module_span) + test_module_span.set_metric(test.ITR_TEST_SKIPPING_COUNT, 0) + else: + test_module_span.set_tag_str(test.ITR_TEST_SKIPPING_ENABLED, "false") + _store_suite_identifier(instance) + return test_module_span + + +def _start_test_suite_span(instance) -> ddtrace.Span: + """ + Starts a test suite span and sets the required tags for a `unittest` suite instance. 
+ """ + tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer) + test_module_path = _extract_module_file_path(instance) + test_module_span = _extract_module_span(test_module_path) + test_suite_name = _extract_suite_name_from_test_method(instance) + resource_name = _generate_suite_resource(test_suite_name) + test_suite_span = tracer._start_span( + SUITE_OPERATION_NAME, + service=_CIVisibility._instance._service, + span_type=SpanTypes.TEST, + child_of=test_module_span, + activate=True, + resource=resource_name, + ) + test_suite_span.set_tag_str(_EVENT_TYPE, _SUITE_TYPE) + test_suite_span.set_tag_str(_SESSION_ID, test_module_span.get_tag(_SESSION_ID)) + test_suite_span.set_tag_str(_SUITE_ID, str(test_suite_span.span_id)) + test_suite_span.set_tag_str(_MODULE_ID, str(test_module_span.span_id)) + + test_suite_span.set_tag_str(COMPONENT, COMPONENT_VALUE) + test_suite_span.set_tag_str(SPAN_KIND, KIND) + + test_suite_span.set_tag_str(test.COMMAND, test_module_span.get_tag(test.COMMAND)) + test_suite_span.set_tag_str(test.FRAMEWORK, FRAMEWORK) + test_suite_span.set_tag_str(test.FRAMEWORK_VERSION, _get_runtime_and_os_metadata()[RUNTIME_VERSION]) + + test_suite_span.set_tag_str(test.TEST_TYPE, SpanTypes.TEST) + test_suite_span.set_tag_str(test.SUITE, test_suite_name) + test_suite_span.set_tag_str(test.MODULE, test_module_span.get_tag(test.MODULE)) + test_suite_span.set_tag_str(test.MODULE_PATH, test_module_path) + return test_suite_span + + +def _start_test_span(instance, test_suite_span: ddtrace.Span) -> ddtrace.Span: + """ + Starts a test span and sets the required tags for a `unittest` test instance. 
+ """ + tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer) + test_name = _extract_test_method_name(instance) + test_method_object = _extract_test_method_object(instance) + test_suite_name = _extract_suite_name_from_test_method(instance) + resource_name = _generate_test_resource(test_suite_name, test_name) + span = tracer._start_span( + ddtrace.config.unittest.operation_name, + service=_CIVisibility._instance._service, + resource=resource_name, + span_type=SpanTypes.TEST, + child_of=test_suite_span, + activate=True, + ) + span.set_tag_str(_EVENT_TYPE, SpanTypes.TEST) + span.set_tag_str(_SESSION_ID, test_suite_span.get_tag(_SESSION_ID)) + span.set_tag_str(_MODULE_ID, test_suite_span.get_tag(_MODULE_ID)) + span.set_tag_str(_SUITE_ID, test_suite_span.get_tag(_SUITE_ID)) + + span.set_tag_str(COMPONENT, COMPONENT_VALUE) + span.set_tag_str(SPAN_KIND, KIND) + + span.set_tag_str(test.COMMAND, test_suite_span.get_tag(test.COMMAND)) + span.set_tag_str(test.FRAMEWORK, FRAMEWORK) + span.set_tag_str(test.FRAMEWORK_VERSION, _get_runtime_and_os_metadata()[RUNTIME_VERSION]) + + span.set_tag_str(test.TYPE, SpanTypes.TEST) + span.set_tag_str(test.NAME, test_name) + span.set_tag_str(test.SUITE, test_suite_name) + span.set_tag_str(test.MODULE, test_suite_span.get_tag(test.MODULE)) + span.set_tag_str(test.MODULE_PATH, test_suite_span.get_tag(test.MODULE_PATH)) + span.set_tag_str(test.STATUS, test.Status.FAIL.value) + span.set_tag_str(test.CLASS_HIERARCHY, test_suite_name) + + _CIVisibility.set_codeowners_of(_extract_test_file_name(instance), span=span) + + _add_start_end_source_file_path_data_to_span(span, test_method_object, test_name, os.getcwd()) + + _store_test_span(instance, span) + return span + + +def _finish_span(current_span: ddtrace.Span): + """ + Finishes active span and populates span status upwards + """ + current_status = current_span.get_tag(test.STATUS) + parent_span = current_span._parent + if current_status and parent_span: + 
_update_status_item(parent_span, current_status) + elif not current_status: + current_span.set_tag_str(test.SUITE, test.Status.FAIL.value) + current_span.finish() + + +def _finish_test_session_span(): + _finish_remaining_suites_and_modules( + _CIVisibility._unittest_data["suites"], _CIVisibility._unittest_data["modules"] + ) + _update_test_skipping_count_span(_CIVisibility._datadog_session_span) + if _CIVisibility._instance._collect_coverage_enabled and _module_has_dd_coverage_enabled(unittest): + _stop_coverage(unittest) + if _is_coverage_patched() and _is_coverage_invoked_by_coverage_run(): + run_coverage_report() + _add_pct_covered_to_span(_coverage_data, _CIVisibility._datadog_session_span) + unpatch_coverage() + _finish_span(_CIVisibility._datadog_session_span) + + +def handle_cli_run(func, instance: unittest.TestProgram, args: tuple, kwargs: dict): + """ + Creates session span and discovers test suites and tests for the current `unittest` CLI execution + """ + if _is_invoked_by_cli(instance): + _enable_unittest_if_not_started() + for parent_module in instance.test._tests: + for module in parent_module._tests: + _populate_suites_and_modules( + module, _CIVisibility._unittest_data["suites"], _CIVisibility._unittest_data["modules"] + ) + + test_session_span = _start_test_session_span(instance) + _CIVisibility._datadog_entry = "cli" + _CIVisibility._datadog_session_span = test_session_span + + try: + result = func(*args, **kwargs) + except SystemExit as e: + if _CIVisibility.enabled and _CIVisibility._datadog_session_span and hasattr(_CIVisibility, "_unittest_data"): + _finish_test_session_span() + + raise e + return result + + +def handle_text_test_runner_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): + """ + Creates session span if unittest is called through the `TextTestRunner` method + """ + if _is_invoked_by_cli(instance): + return func(*args, **kwargs) + _enable_unittest_if_not_started() + _CIVisibility._datadog_entry = 
"TextTestRunner" + if not hasattr(_CIVisibility, "_datadog_session_span"): + _CIVisibility._datadog_session_span = _start_test_session_span(instance) + _CIVisibility._datadog_expected_sessions = 0 + _CIVisibility._datadog_finished_sessions = 0 + _CIVisibility._datadog_expected_sessions += 1 + try: + result = func(*args, **kwargs) + except SystemExit as e: + _CIVisibility._datadog_finished_sessions += 1 + if _CIVisibility._datadog_finished_sessions == _CIVisibility._datadog_expected_sessions: + _finish_test_session_span() + del _CIVisibility._datadog_session_span + raise e + _CIVisibility._datadog_finished_sessions += 1 + if _CIVisibility._datadog_finished_sessions == _CIVisibility._datadog_expected_sessions: + _finish_test_session_span() + del _CIVisibility._datadog_session_span + return result diff --git a/ddtrace/contrib/internal/urllib3/patch.py b/ddtrace/contrib/internal/urllib3/patch.py index 624dd9efbc6..6c10526c125 100644 --- a/ddtrace/contrib/internal/urllib3/patch.py +++ b/ddtrace/contrib/internal/urllib3/patch.py @@ -22,9 +22,9 @@ from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.wrappers import unwrap as _u -from ddtrace.pin import Pin from ddtrace.propagation.http import HTTPPropagator from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin # Ports which, if set, will not be used in hostnames/service names diff --git a/ddtrace/contrib/internal/vertexai/patch.py b/ddtrace/contrib/internal/vertexai/patch.py index 2dbce060234..bc6e46903c3 100644 --- a/ddtrace/contrib/internal/vertexai/patch.py +++ b/ddtrace/contrib/internal/vertexai/patch.py @@ -13,7 +13,7 @@ from ddtrace.contrib.trace_utils import wrap from ddtrace.llmobs._integrations import VertexAIIntegration from ddtrace.llmobs._integrations.utils import extract_model_name_google -from ddtrace.pin import Pin +from ddtrace.trace import Pin config._add( diff --git 
a/ddtrace/contrib/internal/vertica/patch.py b/ddtrace/contrib/internal/vertica/patch.py index 8e820248f14..b365ade8c05 100644 --- a/ddtrace/contrib/internal/vertica/patch.py +++ b/ddtrace/contrib/internal/vertica/patch.py @@ -18,7 +18,7 @@ from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.pin import Pin +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/wsgi/wsgi.py b/ddtrace/contrib/internal/wsgi/wsgi.py index da86aa8f21e..44e1646f5f9 100644 --- a/ddtrace/contrib/internal/wsgi/wsgi.py +++ b/ddtrace/contrib/internal/wsgi/wsgi.py @@ -11,10 +11,10 @@ from typing import Mapping # noqa:F401 from typing import Optional # noqa:F401 - from ddtrace import Pin # noqa:F401 from ddtrace import Span # noqa:F401 from ddtrace import Tracer # noqa:F401 from ddtrace.settings import Config # noqa:F401 + from ddtrace.trace import Pin # noqa:F401 from urllib.parse import quote diff --git a/ddtrace/contrib/internal/yaaredis/patch.py b/ddtrace/contrib/internal/yaaredis/patch.py index eeba29994f6..58c5a47bda4 100644 --- a/ddtrace/contrib/internal/yaaredis/patch.py +++ b/ddtrace/contrib/internal/yaaredis/patch.py @@ -13,7 +13,7 @@ from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import stringify_cache_args from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.pin import Pin +from ddtrace.trace import Pin from ddtrace.vendor.debtcollector import deprecate diff --git a/ddtrace/contrib/kafka/__init__.py b/ddtrace/contrib/kafka/__init__.py index bfb1c569d42..6814e0982a2 100644 --- a/ddtrace/contrib/kafka/__init__.py +++ b/ddtrace/contrib/kafka/__init__.py @@ -31,7 +31,7 @@ To configure the kafka integration using the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin from ddtrace import patch # Make sure to patch before importing confluent_kafka diff 
--git a/ddtrace/contrib/kombu/__init__.py b/ddtrace/contrib/kombu/__init__.py index 8ecc28c4036..9d172d962db 100644 --- a/ddtrace/contrib/kombu/__init__.py +++ b/ddtrace/contrib/kombu/__init__.py @@ -11,7 +11,8 @@ without the whole trace being dropped. :: - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin import kombu # If not patched yet, you can patch kombu specifically diff --git a/ddtrace/contrib/langgraph/__init__.py b/ddtrace/contrib/langgraph/__init__.py new file mode 100644 index 00000000000..4b3696ad803 --- /dev/null +++ b/ddtrace/contrib/langgraph/__init__.py @@ -0,0 +1,12 @@ +from ddtrace.internal.utils.importlib import require_modules + + +required_modules = ["langgraph"] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from ddtrace.contrib.internal.langgraph.patch import get_version + from ddtrace.contrib.internal.langgraph.patch import patch + from ddtrace.contrib.internal.langgraph.patch import unpatch + + __all__ = ["patch", "unpatch", "get_version"] diff --git a/ddtrace/contrib/mariadb/__init__.py b/ddtrace/contrib/mariadb/__init__.py index 66d325bd4b2..4ba3ef128d9 100644 --- a/ddtrace/contrib/mariadb/__init__.py +++ b/ddtrace/contrib/mariadb/__init__.py @@ -34,7 +34,7 @@ To configure the mariadb integration on an per-connection basis use the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin from ddtrace import patch # Make sure to patch before importing mariadb diff --git a/ddtrace/contrib/mongoengine/__init__.py b/ddtrace/contrib/mongoengine/__init__.py index fd6ba008c41..54a06d71253 100644 --- a/ddtrace/contrib/mongoengine/__init__.py +++ b/ddtrace/contrib/mongoengine/__init__.py @@ -3,7 +3,8 @@ ``import ddtrace.auto`` will automatically patch your mongoengine connect method to make it work. 
:: - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin import mongoengine # If not patched yet, you can patch mongoengine specifically diff --git a/ddtrace/contrib/mysql/__init__.py b/ddtrace/contrib/mysql/__init__.py index ec057282e90..88d8b8555f6 100644 --- a/ddtrace/contrib/mysql/__init__.py +++ b/ddtrace/contrib/mysql/__init__.py @@ -41,7 +41,7 @@ To configure the mysql integration on an per-connection basis use the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin # Make sure to import mysql.connector and not the 'connect' function, # otherwise you won't have access to the patched version import mysql.connector diff --git a/ddtrace/contrib/mysqldb/__init__.py b/ddtrace/contrib/mysqldb/__init__.py index 17d0ba3a6c2..fa1312bafb8 100644 --- a/ddtrace/contrib/mysqldb/__init__.py +++ b/ddtrace/contrib/mysqldb/__init__.py @@ -55,7 +55,7 @@ # Make sure to import MySQLdb and not the 'connect' function, # otherwise you won't have access to the patched version - from ddtrace import Pin + from ddtrace.trace import Pin import MySQLdb # This will report a span with the default settings diff --git a/ddtrace/contrib/openai/__init__.py b/ddtrace/contrib/openai/__init__.py index 9a3e3b0bc89..eb90e77370a 100644 --- a/ddtrace/contrib/openai/__init__.py +++ b/ddtrace/contrib/openai/__init__.py @@ -242,7 +242,8 @@ ``Pin`` API:: import openai - from ddtrace import Pin, config + from ddtrace import config + from ddtrace.trace import Pin Pin.override(openai, service="my-openai-service") """ # noqa: E501 diff --git a/ddtrace/contrib/psycopg/__init__.py b/ddtrace/contrib/psycopg/__init__.py index 24b2b1afe29..b528323cd88 100644 --- a/ddtrace/contrib/psycopg/__init__.py +++ b/ddtrace/contrib/psycopg/__init__.py @@ -50,7 +50,7 @@ To configure the psycopg integration on an per-connection basis use the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin import psycopg db = 
psycopg.connect(connection_factory=factory) diff --git a/ddtrace/contrib/pylibmc/__init__.py b/ddtrace/contrib/pylibmc/__init__.py index fe219712780..6c21eab770e 100644 --- a/ddtrace/contrib/pylibmc/__init__.py +++ b/ddtrace/contrib/pylibmc/__init__.py @@ -5,7 +5,8 @@ # Be sure to import pylibmc and not pylibmc.Client directly, # otherwise you won't have access to the patched version - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin import pylibmc # If not patched yet, you can patch pylibmc specifically diff --git a/ddtrace/contrib/pymemcache/__init__.py b/ddtrace/contrib/pymemcache/__init__.py index b6ef0be3ab6..c14f3e23ba0 100644 --- a/ddtrace/contrib/pymemcache/__init__.py +++ b/ddtrace/contrib/pymemcache/__init__.py @@ -2,7 +2,8 @@ ``import ddtrace.auto`` will automatically patch the pymemcache ``Client``:: - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin # If not patched yet, patch pymemcache specifically patch(pymemcache=True) diff --git a/ddtrace/contrib/pymongo/__init__.py b/ddtrace/contrib/pymongo/__init__.py index 9a185f9ae70..b1c9594584c 100644 --- a/ddtrace/contrib/pymongo/__init__.py +++ b/ddtrace/contrib/pymongo/__init__.py @@ -8,7 +8,8 @@ # Be sure to import pymongo and not pymongo.MongoClient directly, # otherwise you won't have access to the patched version - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin import pymongo # If not patched yet, you can patch pymongo specifically diff --git a/ddtrace/contrib/pymysql/__init__.py b/ddtrace/contrib/pymysql/__init__.py index 7f4d8e7a1f2..437928e7045 100644 --- a/ddtrace/contrib/pymysql/__init__.py +++ b/ddtrace/contrib/pymysql/__init__.py @@ -41,7 +41,7 @@ To configure the integration on an per-connection basis use the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin from pymysql import connect # This will report a span with the default settings diff 
--git a/ddtrace/contrib/pyodbc/__init__.py b/ddtrace/contrib/pyodbc/__init__.py index 62f549fd357..efe7d1dc7e8 100644 --- a/ddtrace/contrib/pyodbc/__init__.py +++ b/ddtrace/contrib/pyodbc/__init__.py @@ -41,7 +41,7 @@ To configure the integration on an per-connection basis use the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin import pyodbc # This will report a span with the default settings diff --git a/ddtrace/contrib/pytest/__init__.py b/ddtrace/contrib/pytest/__init__.py index 30be6789602..0037949af50 100644 --- a/ddtrace/contrib/pytest/__init__.py +++ b/ddtrace/contrib/pytest/__init__.py @@ -60,27 +60,15 @@ Default: ``"pytest.test"`` """ +from ddtrace.contrib.internal.pytest.patch import get_version # noqa: F401 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -import os -from ddtrace import config - - -# pytest default settings -config._add( - "pytest", - dict( - _default_service="pytest", - operation_name=os.getenv("DD_PYTEST_OPERATION_NAME", default="pytest.test"), - ), +deprecate( + ("%s is deprecated" % (__name__)), + message="Avoid using this package directly. 
" + "Use ``ddtrace.auto`` or the ``ddtrace-run`` command to enable and configure this integration.", + category=DDTraceDeprecationWarning, + removal_version="3.0.0", ) - - -def get_version(): - # type: () -> str - import pytest - - return pytest.__version__ - - -__all__ = ["get_version"] diff --git a/ddtrace/contrib/pytest/constants.py b/ddtrace/contrib/pytest/constants.py index cc5d768fc38..695c48e5b95 100644 --- a/ddtrace/contrib/pytest/constants.py +++ b/ddtrace/contrib/pytest/constants.py @@ -1,11 +1,14 @@ -FRAMEWORK = "pytest" -KIND = "test" +from ddtrace.contrib.internal.pytest.constants import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -# XFail Reason -XFAIL_REASON = "pytest.xfail.reason" +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, + ) -ITR_MIN_SUPPORTED_VERSION = (7, 2, 0) -RETRIES_MIN_SUPPORTED_VERSION = (7, 0, 0) -EFD_MIN_SUPPORTED_VERSION = RETRIES_MIN_SUPPORTED_VERSION -ATR_MIN_SUPPORTED_VERSION = RETRIES_MIN_SUPPORTED_VERSION + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff --git a/ddtrace/contrib/pytest/newhooks.py b/ddtrace/contrib/pytest/newhooks.py index c44fd0a1535..b54e146fde9 100644 --- a/ddtrace/contrib/pytest/newhooks.py +++ b/ddtrace/contrib/pytest/newhooks.py @@ -1,26 +1,14 @@ -"""pytest-ddtrace hooks. +from ddtrace.contrib.internal.pytest.newhooks import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -These hooks are used to provide extra data used by the Datadog CI Visibility plugin. -For example: module, suite, and test names for a given item. 
+def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, + ) -Note that these names will affect th display and reporting of tests in the Datadog UI, as well as information stored -the Intelligent Test Runner. Differing hook implementations may impact the behavior of Datadog CI Visibility products. -""" - -import pytest - - -@pytest.hookspec(firstresult=True) -def pytest_ddtrace_get_item_module_name(item: pytest.Item) -> str: - """Returns the module name to use when reporting CI Visibility results, should be unique""" - - -@pytest.hookspec(firstresult=True) -def pytest_ddtrace_get_item_suite_name(item: pytest.Item) -> str: - """Returns the suite name to use when reporting CI Visibility result, should be unique""" - - -@pytest.hookspec(firstresult=True) -def pytest_ddtrace_get_item_test_name(item: pytest.Item) -> str: - """Returns the test name to use when reporting CI Visibility result, should be unique""" + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff --git a/ddtrace/contrib/pytest/plugin.py b/ddtrace/contrib/pytest/plugin.py index a09a81be49a..05002fc74d4 100644 --- a/ddtrace/contrib/pytest/plugin.py +++ b/ddtrace/contrib/pytest/plugin.py @@ -1,166 +1,14 @@ -""" -This custom pytest plugin implements tracing for pytest by using pytest hooks. The plugin registers tracing code -to be run at specific points during pytest execution. The most important hooks used are: +from ddtrace.contrib.internal.pytest.plugin import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate - * pytest_sessionstart: during pytest session startup, a custom trace filter is configured to the global tracer to - only send test spans, which are generated by the plugin. - * pytest_runtest_protocol: this wraps around the execution of a pytest test function, which we trace. 
Most span - tags are generated and added in this function. We also store the span on the underlying pytest test item to - retrieve later when we need to report test status/result. - * pytest_runtest_makereport: this hook is used to set the test status/result tag, including skipped tests and - expected failures. -""" -from typing import Dict # noqa:F401 - -import pytest - -from ddtrace.appsec._iast._pytest_plugin import ddtrace_iast # noqa:F401 -from ddtrace.appsec._iast._utils import _is_iast_enabled -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 -from ddtrace.contrib.pytest._utils import _extract_span -from ddtrace.contrib.pytest._utils import _pytest_version_supports_itr - - -DDTRACE_HELP_MSG = "Enable tracing of pytest functions." -NO_DDTRACE_HELP_MSG = "Disable tracing of pytest functions." -DDTRACE_INCLUDE_CLASS_HELP_MSG = "Prepend 'ClassName.' to names of class-based tests." -PATCH_ALL_HELP_MSG = "Call ddtrace.patch_all before running tests." - - -def is_enabled(config): - """Check if the ddtrace plugin is enabled.""" - return (config.getoption("ddtrace") or config.getini("ddtrace")) and not config.getoption("no-ddtrace") - - -def pytest_addoption(parser): - """Add ddtrace options.""" - group = parser.getgroup("ddtrace") - - group._addoption( - "--ddtrace", - action="store_true", - dest="ddtrace", - default=False, - help=DDTRACE_HELP_MSG, +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, ) - group._addoption( - "--no-ddtrace", - action="store_true", - dest="no-ddtrace", - default=False, - help=NO_DDTRACE_HELP_MSG, - ) - - group._addoption( - "--ddtrace-patch-all", - action="store_true", - dest="ddtrace-patch-all", - default=False, - help=PATCH_ALL_HELP_MSG, - ) - - group._addoption( - "--ddtrace-include-class-name", - action="store_true", - dest="ddtrace-include-class-name", - default=False, - help=DDTRACE_INCLUDE_CLASS_HELP_MSG, - ) - - group._addoption( - 
"--ddtrace-iast-fail-tests", - action="store_true", - dest="ddtrace-iast-fail-tests", - default=False, - help=DDTRACE_INCLUDE_CLASS_HELP_MSG, - ) - - parser.addini("ddtrace", DDTRACE_HELP_MSG, type="bool") - parser.addini("no-ddtrace", DDTRACE_HELP_MSG, type="bool") - parser.addini("ddtrace-patch-all", PATCH_ALL_HELP_MSG, type="bool") - parser.addini("ddtrace-include-class-name", DDTRACE_INCLUDE_CLASS_HELP_MSG, type="bool") - if _is_iast_enabled(): - from ddtrace.appsec._iast import _iast_pytest_activation - - _iast_pytest_activation() - - -# Version-specific pytest hooks -if _USE_PLUGIN_V2: - from ddtrace.contrib.pytest._plugin_v2 import pytest_collection_finish # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_configure as _versioned_pytest_configure - from ddtrace.contrib.pytest._plugin_v2 import pytest_ddtrace_get_item_module_name # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_ddtrace_get_item_suite_name # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_ddtrace_get_item_test_name # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_load_initial_conftests # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_report_teststatus # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_runtest_makereport # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_runtest_protocol # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_sessionfinish # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_sessionstart # noqa: F401 - from ddtrace.contrib.pytest._plugin_v2 import pytest_terminal_summary # noqa: F401 -else: - from ddtrace.contrib.pytest._plugin_v1 import pytest_collection_modifyitems # noqa: F401 - from ddtrace.contrib.pytest._plugin_v1 import pytest_configure as _versioned_pytest_configure - from ddtrace.contrib.pytest._plugin_v1 import pytest_ddtrace_get_item_module_name # noqa: F401 - from ddtrace.contrib.pytest._plugin_v1 
import pytest_ddtrace_get_item_suite_name # noqa: F401 - from ddtrace.contrib.pytest._plugin_v1 import pytest_ddtrace_get_item_test_name # noqa: F401 - from ddtrace.contrib.pytest._plugin_v1 import pytest_load_initial_conftests # noqa: F401 - from ddtrace.contrib.pytest._plugin_v1 import pytest_runtest_makereport # noqa: F401 - from ddtrace.contrib.pytest._plugin_v1 import pytest_runtest_protocol # noqa: F401 - from ddtrace.contrib.pytest._plugin_v1 import pytest_sessionfinish # noqa: F401 - from ddtrace.contrib.pytest._plugin_v1 import pytest_sessionstart # noqa: F401 - - # Internal coverage is only used for ITR at the moment, so the hook is only added if the pytest version supports it - if _pytest_version_supports_itr(): - from ddtrace.contrib.pytest._plugin_v1 import pytest_terminal_summary # noqa: F401 - - -def pytest_configure(config): - config.addinivalue_line("markers", "dd_tags(**kwargs): add tags to current span") - if is_enabled(config): - _versioned_pytest_configure(config) - - -@pytest.hookimpl -def pytest_addhooks(pluginmanager): - from ddtrace.contrib.pytest import newhooks - - pluginmanager.add_hookspecs(newhooks) - - -@pytest.fixture(scope="function") -def ddspan(request): - """Return the :class:`ddtrace._trace.span.Span` instance associated with the - current test when Datadog CI Visibility is enabled. - """ - from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility - - if _CIVisibility.enabled: - return _extract_span(request.node) - - -@pytest.fixture(scope="session") -def ddtracer(): - """Return the :class:`ddtrace.tracer.Tracer` instance for Datadog CI - visibility if it is enabled, otherwise return the default Datadog tracer. 
- """ - import ddtrace - from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility - - if _CIVisibility.enabled: - return _CIVisibility._instance.tracer - return ddtrace.tracer - - -@pytest.fixture(scope="session", autouse=True) -def patch_all(request): - """Patch all available modules for Datadog tracing when ddtrace-patch-all - is specified in command or .ini. - """ - import ddtrace - - if request.config.getoption("ddtrace-patch-all") or request.config.getini("ddtrace-patch-all"): - ddtrace.patch_all() + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff --git a/ddtrace/contrib/pytest_bdd/__init__.py b/ddtrace/contrib/pytest_bdd/__init__.py index b1cc6701fda..2e91392914d 100644 --- a/ddtrace/contrib/pytest_bdd/__init__.py +++ b/ddtrace/contrib/pytest_bdd/__init__.py @@ -21,27 +21,18 @@ for more details. """ +from ddtrace.contrib.internal.pytest_bdd.patch import get_version +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -from ddtrace import config - -# pytest-bdd default settings -config._add( - "pytest_bdd", - dict( - _default_service="pytest_bdd", - ), +deprecate( + ("%s is deprecated" % (__name__)), + message="Avoid using this package directly. 
" + "Use ``ddtrace.auto`` or the ``ddtrace-run`` command to enable and configure this integration.", + category=DDTraceDeprecationWarning, + removal_version="3.0.0", ) -def get_version(): - # type: () -> str - try: - import importlib.metadata as importlib_metadata - except ImportError: - import importlib_metadata # type: ignore[no-redef] - - return str(importlib_metadata.version("pytest-bdd")) - - __all__ = ["get_version"] diff --git a/ddtrace/contrib/pytest_bdd/constants.py b/ddtrace/contrib/pytest_bdd/constants.py index 2dd377f7619..9c2e907debd 100644 --- a/ddtrace/contrib/pytest_bdd/constants.py +++ b/ddtrace/contrib/pytest_bdd/constants.py @@ -1,2 +1,14 @@ -FRAMEWORK = "pytest_bdd" -STEP_KIND = "pytest_bdd.step" +from ddtrace.contrib.internal.pytest_bdd.constants import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate + + +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, + ) + + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff --git a/ddtrace/contrib/pytest_bdd/plugin.py b/ddtrace/contrib/pytest_bdd/plugin.py index 1dc714c89c5..88645368d38 100644 --- a/ddtrace/contrib/pytest_bdd/plugin.py +++ b/ddtrace/contrib/pytest_bdd/plugin.py @@ -1,20 +1,14 @@ -from ddtrace import DDTraceDeprecationWarning -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 -from ddtrace.contrib.pytest.plugin import is_enabled as is_ddtrace_enabled +from ddtrace.contrib.internal.pytest_bdd.plugin import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning from ddtrace.vendor.debtcollector import deprecate -def pytest_configure(config): - if config.pluginmanager.hasplugin("pytest-bdd") and config.pluginmanager.hasplugin("ddtrace"): - if not _USE_PLUGIN_V2: - if is_ddtrace_enabled(config): - from ._plugin import 
_PytestBddPlugin +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, + ) - deprecate( - "the ddtrace.pytest_bdd plugin is deprecated", - message="it will be integrated with the main pytest ddtrace plugin", - removal_version="3.0.0", - category=DDTraceDeprecationWarning, - ) - - config.pluginmanager.register(_PytestBddPlugin(), "_datadog-pytest-bdd") + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff --git a/ddtrace/contrib/pytest_benchmark/__init__.py b/ddtrace/contrib/pytest_benchmark/__init__.py index e69de29bb2d..3829deeb38a 100644 --- a/ddtrace/contrib/pytest_benchmark/__init__.py +++ b/ddtrace/contrib/pytest_benchmark/__init__.py @@ -0,0 +1,11 @@ +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate + + +deprecate( + ("%s is deprecated" % (__name__)), + message="Avoid using this package directly. 
" + "Use ``ddtrace.auto`` or the ``ddtrace-run`` command to enable and configure this integration.", + category=DDTraceDeprecationWarning, + removal_version="3.0.0", +) diff --git a/ddtrace/contrib/pytest_benchmark/constants.py b/ddtrace/contrib/pytest_benchmark/constants.py index b4c4f7f5b27..522f664d4b8 100644 --- a/ddtrace/contrib/pytest_benchmark/constants.py +++ b/ddtrace/contrib/pytest_benchmark/constants.py @@ -1,79 +1,14 @@ -BENCHMARK_INFO = "benchmark.duration.info" -BENCHMARK_MEAN = "benchmark.duration.mean" -BENCHMARK_RUN = "benchmark.duration.runs" +from ddtrace.contrib.internal.pytest_benchmark.constants import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -STATISTICS_HD15IQR = "benchmark.duration.statistics.hd15iqr" -STATISTICS_IQR = "benchmark.duration.statistics.iqr" -STATISTICS_IQR_OUTLIERS = "benchmark.duration.statistics.iqr_outliers" -STATISTICS_LD15IQR = "benchmark.duration.statistics.ld15iqr" -STATISTICS_MAX = "benchmark.duration.statistics.max" -STATISTICS_MEAN = "benchmark.duration.statistics.mean" -STATISTICS_MEDIAN = "benchmark.duration.statistics.median" -STATISTICS_MIN = "benchmark.duration.statistics.min" -STATISTICS_N = "benchmark.duration.statistics.n" -STATISTICS_OPS = "benchmark.duration.statistics.ops" -STATISTICS_OUTLIERS = "benchmark.duration.statistics.outliers" -STATISTICS_Q1 = "benchmark.duration.statistics.q1" -STATISTICS_Q3 = "benchmark.duration.statistics.q3" -STATISTICS_STDDEV = "benchmark.duration.statistics.std_dev" -STATISTICS_STDDEV_OUTLIERS = "benchmark.duration.statistics.std_dev_outliers" -STATISTICS_TOTAL = "benchmark.duration.statistics.total" -PLUGIN_HD15IQR = "hd15iqr" -PLUGIN_IQR = "iqr" -PLUGIN_IQR_OUTLIERS = "iqr_outliers" -PLUGIN_LD15IQR = "ld15iqr" -PLUGIN_MAX = "max" -PLUGIN_MEAN = "mean" -PLUGIN_MEDIAN = "median" -PLUGIN_MIN = "min" -PLUGIN_OPS = "ops" -PLUGIN_OUTLIERS = "outliers" -PLUGIN_Q1 = "q1" -PLUGIN_Q3 
= "q3" -PLUGIN_ROUNDS = "rounds" -PLUGIN_STDDEV = "stddev" -PLUGIN_STDDEV_OUTLIERS = "stddev_outliers" -PLUGIN_TOTAL = "total" +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, + ) -PLUGIN_METRICS = { - BENCHMARK_MEAN: PLUGIN_MEAN, - BENCHMARK_RUN: PLUGIN_ROUNDS, - STATISTICS_HD15IQR: PLUGIN_HD15IQR, - STATISTICS_IQR: PLUGIN_IQR, - STATISTICS_IQR_OUTLIERS: PLUGIN_IQR_OUTLIERS, - STATISTICS_LD15IQR: PLUGIN_LD15IQR, - STATISTICS_MAX: PLUGIN_MAX, - STATISTICS_MEAN: PLUGIN_MEAN, - STATISTICS_MEDIAN: PLUGIN_MEDIAN, - STATISTICS_MIN: PLUGIN_MIN, - STATISTICS_OPS: PLUGIN_OPS, - STATISTICS_OUTLIERS: PLUGIN_OUTLIERS, - STATISTICS_Q1: PLUGIN_Q1, - STATISTICS_Q3: PLUGIN_Q3, - STATISTICS_N: PLUGIN_ROUNDS, - STATISTICS_STDDEV: PLUGIN_STDDEV, - STATISTICS_STDDEV_OUTLIERS: PLUGIN_STDDEV_OUTLIERS, - STATISTICS_TOTAL: PLUGIN_TOTAL, -} - -PLUGIN_METRICS_V2 = { - "duration_mean": PLUGIN_MEAN, - "duration_runs": PLUGIN_ROUNDS, - "statistics_hd15iqr": PLUGIN_HD15IQR, - "statistics_iqr": PLUGIN_IQR, - "statistics_iqr_outliers": PLUGIN_IQR_OUTLIERS, - "statistics_ld15iqr": PLUGIN_LD15IQR, - "statistics_max": PLUGIN_MAX, - "statistics_mean": PLUGIN_MEAN, - "statistics_median": PLUGIN_MEDIAN, - "statistics_min": PLUGIN_MIN, - "statistics_n": PLUGIN_ROUNDS, - "statistics_ops": PLUGIN_OPS, - "statistics_outliers": PLUGIN_OUTLIERS, - "statistics_q1": PLUGIN_Q1, - "statistics_q3": PLUGIN_Q3, - "statistics_std_dev": PLUGIN_STDDEV, - "statistics_std_dev_outliers": PLUGIN_STDDEV_OUTLIERS, - "statistics_total": PLUGIN_TOTAL, -} + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff --git a/ddtrace/contrib/pytest_benchmark/plugin.py b/ddtrace/contrib/pytest_benchmark/plugin.py index 4cb76148dbc..7a33bbf838d 100644 --- a/ddtrace/contrib/pytest_benchmark/plugin.py +++ b/ddtrace/contrib/pytest_benchmark/plugin.py @@ -1,19 +1,14 @@ -from ddtrace import 
DDTraceDeprecationWarning -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 -from ddtrace.contrib.pytest.plugin import is_enabled as is_ddtrace_enabled +from ddtrace.contrib.internal.pytest_benchmark.plugin import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning from ddtrace.vendor.debtcollector import deprecate -def pytest_configure(config): - if config.pluginmanager.hasplugin("benchmark") and config.pluginmanager.hasplugin("ddtrace"): - if is_ddtrace_enabled(config): - deprecate( - "this version of the ddtrace.pytest_benchmark plugin is deprecated", - message="it will be integrated with the main pytest ddtrace plugin", - removal_version="3.0.0", - category=DDTraceDeprecationWarning, - ) - if not _USE_PLUGIN_V2: - from ._plugin import _PytestBenchmarkPlugin +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, + ) - config.pluginmanager.register(_PytestBenchmarkPlugin(), "_datadog-pytest-benchmark") + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff --git a/ddtrace/contrib/redis/__init__.py b/ddtrace/contrib/redis/__init__.py index 6276fa96be1..5c29e25c392 100644 --- a/ddtrace/contrib/redis/__init__.py +++ b/ddtrace/contrib/redis/__init__.py @@ -52,10 +52,10 @@ Instance Configuration ~~~~~~~~~~~~~~~~~~~~~~ -To configure particular redis instances use the :class:`Pin ` API:: +To configure particular redis instances use the :class:`Pin ` API:: import redis - from ddtrace import Pin + from ddtrace.trace import Pin client = redis.StrictRedis(host="localhost", port=6379) diff --git a/ddtrace/contrib/rediscluster/__init__.py b/ddtrace/contrib/rediscluster/__init__.py index b7fd204822a..419e30fb515 100644 --- a/ddtrace/contrib/rediscluster/__init__.py +++ b/ddtrace/contrib/rediscluster/__init__.py @@ -3,7 +3,8 @@ ``import ddtrace.auto`` will automatically patch your Redis Cluster client to 
make it work. :: - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin import rediscluster # If not patched yet, you can patch redis specifically diff --git a/ddtrace/contrib/rq/__init__.py b/ddtrace/contrib/rq/__init__.py index ff266454748..dfc4fc4260e 100644 --- a/ddtrace/contrib/rq/__init__.py +++ b/ddtrace/contrib/rq/__init__.py @@ -28,7 +28,7 @@ To override the service name for a queue:: - from ddtrace import Pin + from ddtrace.trace import Pin connection = redis.Redis() queue = rq.Queue(connection=connection) diff --git a/ddtrace/contrib/snowflake/__init__.py b/ddtrace/contrib/snowflake/__init__.py index 2ecb54fabd1..ee35c4ef0dd 100644 --- a/ddtrace/contrib/snowflake/__init__.py +++ b/ddtrace/contrib/snowflake/__init__.py @@ -9,13 +9,19 @@ The integration is not enabled automatically when using :ref:`ddtrace-run` or :ref:`import ddtrace.auto`. -Use :func:`patch()` to manually enable the integration:: +Use ``DD_TRACE_SNOWFLAKE_ENABLED=true`` to enable it with ``ddtrace-run`` - from ddtrace import patch, patch_all +or :func:`patch()` to manually enable the integration:: + + from ddtrace import patch patch(snowflake=True) + +or use :func:`patch_all()` to manually enable the integration:: + + from ddtrace import patch_all patch_all(snowflake=True) -or the ``DD_TRACE_SNOWFLAKE_ENABLED=true`` to enable it with ``ddtrace-run``. 
+ Global Configuration @@ -45,7 +51,7 @@ To configure the integration on an per-connection basis use the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin from snowflake.connector import connect # This will report a span with the default settings diff --git a/ddtrace/contrib/sqlalchemy/__init__.py b/ddtrace/contrib/sqlalchemy/__init__.py index a50b3418a5a..c9b0f47715b 100644 --- a/ddtrace/contrib/sqlalchemy/__init__.py +++ b/ddtrace/contrib/sqlalchemy/__init__.py @@ -7,7 +7,8 @@ using the patch method that **must be called before** importing sqlalchemy:: # patch before importing `create_engine` - from ddtrace import Pin, patch + from ddtrace import patch + from ddtrace.trace import Pin patch(sqlalchemy=True) # use SQLAlchemy as usual diff --git a/ddtrace/contrib/sqlite3/__init__.py b/ddtrace/contrib/sqlite3/__init__.py index 009205fe569..2fc8d6b818c 100644 --- a/ddtrace/contrib/sqlite3/__init__.py +++ b/ddtrace/contrib/sqlite3/__init__.py @@ -41,7 +41,7 @@ To configure the integration on an per-connection basis use the ``Pin`` API:: - from ddtrace import Pin + from ddtrace.trace import Pin import sqlite3 # This will report a span with the default settings diff --git a/ddtrace/contrib/tornado/__init__.py b/ddtrace/contrib/tornado/__init__.py index 49048e708af..0d7019f4cae 100644 --- a/ddtrace/contrib/tornado/__init__.py +++ b/ddtrace/contrib/tornado/__init__.py @@ -76,11 +76,6 @@ def log_exception(self, typ, value, tb): 'default_service': 'my-tornado-app', 'tags': {'env': 'production'}, 'distributed_tracing': False, - 'settings': { - 'FILTERS': [ - FilterRequestsOnUrl(r'http://test\\.example\\.com'), - ], - }, }, } diff --git a/ddtrace/contrib/trace_utils.py b/ddtrace/contrib/trace_utils.py index db8509d8c35..7d2ea0c9986 100644 --- a/ddtrace/contrib/trace_utils.py +++ b/ddtrace/contrib/trace_utils.py @@ -20,7 +20,6 @@ import wrapt -from ddtrace import Pin from ddtrace import config from ddtrace.ext import http from ddtrace.ext import net @@ 
-37,6 +36,7 @@ import ddtrace.internal.utils.wrappers from ddtrace.propagation.http import HTTPPropagator from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin if TYPE_CHECKING: # pragma: no cover @@ -561,13 +561,25 @@ def activate_distributed_headers(tracer, int_config=None, request_headers=None, if override is False: return None + # Only extract and activate if we don't already have an activate span + # DEV: Only do this if there is an active Span, an active Context is fine to override + # DEV: Use _DD_TRACE_EXTRACT_IGNORE_ACTIVE_SPAN env var to override the default behavior + current_span = tracer.current_span() + if current_span and not config._extract_ignore_active_span: + log.debug( + "will not extract distributed headers, a Span(trace_id%d, span_id=%d) is already active", + current_span.trace_id, + current_span.span_id, + ) + return + if override or (int_config and distributed_tracing_enabled(int_config)): context = HTTPPropagator.extract(request_headers) # Only need to activate the new context if something was propagated - if not context.trace_id: + # The new context must have one of these values in order for it to be activated + if not context.trace_id and not context._baggage and not context._span_links: return None - # Do not reactivate a context with the same trace id # DEV: An example could be nested web frameworks, when one layer already # parsed request headers and activated them. 
@@ -577,7 +589,14 @@ def activate_distributed_headers(tracer, int_config=None, request_headers=None, # app = Flask(__name__) # Traced via Flask instrumentation # app = DDWSGIMiddleware(app) # Extra layer on top for WSGI current_context = tracer.current_trace_context() - if current_context and current_context.trace_id == context.trace_id: + + # We accept incoming contexts with only baggage or only span_links, however if we + # already have a current_context then an incoming context not + # containing a trace_id or containing the same trace_id + # should not be activated. + if current_context and ( + not context.trace_id or (context.trace_id and context.trace_id == current_context.trace_id) + ): log.debug( "will not activate extracted Context(trace_id=%r, span_id=%r), a context with that trace id is already active", # noqa: E501 context.trace_id, diff --git a/ddtrace/contrib/trace_utils_async.py b/ddtrace/contrib/trace_utils_async.py index 63a3325db50..f58cc4e34bb 100644 --- a/ddtrace/contrib/trace_utils_async.py +++ b/ddtrace/contrib/trace_utils_async.py @@ -3,8 +3,8 @@ Note that this module should only be imported in Python 3.5+. 
""" -from ddtrace import Pin from ddtrace.internal.logger import get_logger +from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/unittest/__init__.py b/ddtrace/contrib/unittest/__init__.py index 5180b59c959..43a1e8a740c 100644 --- a/ddtrace/contrib/unittest/__init__.py +++ b/ddtrace/contrib/unittest/__init__.py @@ -34,11 +34,18 @@ Default: ``True`` """ +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate +from ..internal.unittest.patch import get_version # noqa: F401 +from ..internal.unittest.patch import patch # noqa: F401 +from ..internal.unittest.patch import unpatch # noqa: F401 -from .patch import get_version -from .patch import patch -from .patch import unpatch - -__all__ = ["patch", "unpatch", "get_version"] +deprecate( + ("%s is deprecated" % (__name__)), + message="Avoid using this package directly. " + "Use ``ddtrace.auto`` or the ``ddtrace-run`` command to enable and configure this integration.", + category=DDTraceDeprecationWarning, + removal_version="3.0.0", +) diff --git a/ddtrace/contrib/unittest/constants.py b/ddtrace/contrib/unittest/constants.py index dc58863a2a5..fc8643d5e06 100644 --- a/ddtrace/contrib/unittest/constants.py +++ b/ddtrace/contrib/unittest/constants.py @@ -1,8 +1,14 @@ -COMPONENT_VALUE = "unittest" -FRAMEWORK = "unittest" -KIND = "test" +from ddtrace.contrib.internal.unittest.constants import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -TEST_OPERATION_NAME = "unittest.test" -SUITE_OPERATION_NAME = "unittest.test_suite" -SESSION_OPERATION_NAME = "unittest.test_session" -MODULE_OPERATION_NAME = "unittest.test_module" + +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, + ) + + if name in globals(): + return globals()[name] + raise AttributeError("%s has no 
attribute %s", __name__, name) diff --git a/ddtrace/contrib/unittest/patch.py b/ddtrace/contrib/unittest/patch.py index 2c8bdd299a6..277b3b421c6 100644 --- a/ddtrace/contrib/unittest/patch.py +++ b/ddtrace/contrib/unittest/patch.py @@ -1,868 +1,14 @@ -import inspect -import os -from typing import Union -import unittest +from ddtrace.contrib.internal.unittest.patch import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -import wrapt -import ddtrace -from ddtrace import config -from ddtrace.constants import SPAN_KIND -from ddtrace.contrib.internal.coverage.data import _coverage_data -from ddtrace.contrib.internal.coverage.patch import patch as patch_coverage -from ddtrace.contrib.internal.coverage.patch import run_coverage_report -from ddtrace.contrib.internal.coverage.patch import unpatch as unpatch_coverage -from ddtrace.contrib.internal.coverage.utils import _is_coverage_invoked_by_coverage_run -from ddtrace.contrib.internal.coverage.utils import _is_coverage_patched -from ddtrace.contrib.unittest.constants import COMPONENT_VALUE -from ddtrace.contrib.unittest.constants import FRAMEWORK -from ddtrace.contrib.unittest.constants import KIND -from ddtrace.contrib.unittest.constants import MODULE_OPERATION_NAME -from ddtrace.contrib.unittest.constants import SESSION_OPERATION_NAME -from ddtrace.contrib.unittest.constants import SUITE_OPERATION_NAME -from ddtrace.ext import SpanTypes -from ddtrace.ext import test -from ddtrace.ext.ci import RUNTIME_VERSION -from ddtrace.ext.ci import _get_runtime_and_os_metadata -from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility -from ddtrace.internal.ci_visibility.constants import EVENT_TYPE as _EVENT_TYPE -from ddtrace.internal.ci_visibility.constants import ITR_CORRELATION_ID_TAG_NAME -from ddtrace.internal.ci_visibility.constants import ITR_UNSKIPPABLE_REASON -from ddtrace.internal.ci_visibility.constants import 
MODULE_ID as _MODULE_ID -from ddtrace.internal.ci_visibility.constants import MODULE_TYPE as _MODULE_TYPE -from ddtrace.internal.ci_visibility.constants import SESSION_ID as _SESSION_ID -from ddtrace.internal.ci_visibility.constants import SESSION_TYPE as _SESSION_TYPE -from ddtrace.internal.ci_visibility.constants import SKIPPED_BY_ITR_REASON -from ddtrace.internal.ci_visibility.constants import SUITE_ID as _SUITE_ID -from ddtrace.internal.ci_visibility.constants import SUITE_TYPE as _SUITE_TYPE -from ddtrace.internal.ci_visibility.constants import TEST -from ddtrace.internal.ci_visibility.coverage import _module_has_dd_coverage_enabled -from ddtrace.internal.ci_visibility.coverage import _report_coverage_to_span -from ddtrace.internal.ci_visibility.coverage import _start_coverage -from ddtrace.internal.ci_visibility.coverage import _stop_coverage -from ddtrace.internal.ci_visibility.coverage import _switch_coverage_context -from ddtrace.internal.ci_visibility.utils import _add_pct_covered_to_span -from ddtrace.internal.ci_visibility.utils import _add_start_end_source_file_path_data_to_span -from ddtrace.internal.ci_visibility.utils import _generate_fully_qualified_test_name -from ddtrace.internal.ci_visibility.utils import get_relative_or_absolute_path_for_path -from ddtrace.internal.constants import COMPONENT -from ddtrace.internal.logger import get_logger -from ddtrace.internal.utils.formats import asbool -from ddtrace.internal.utils.wrappers import unwrap as _u - - -log = get_logger(__name__) -_global_skipped_elements = 0 - -# unittest default settings -config._add( - "unittest", - dict( - _default_service="unittest", - operation_name=os.getenv("DD_UNITTEST_OPERATION_NAME", default="unittest.test"), - strict_naming=asbool(os.getenv("DD_CIVISIBILITY_UNITTEST_STRICT_NAMING", default=True)), - ), -) - - -def get_version(): - # type: () -> str - return "" - - -def _enable_unittest_if_not_started(): - _initialize_unittest_data() - if _CIVisibility.enabled: - return 
- _CIVisibility.enable(config=ddtrace.config.unittest) - - -def _initialize_unittest_data(): - if not hasattr(_CIVisibility, "_unittest_data"): - _CIVisibility._unittest_data = {} - if "suites" not in _CIVisibility._unittest_data: - _CIVisibility._unittest_data["suites"] = {} - if "modules" not in _CIVisibility._unittest_data: - _CIVisibility._unittest_data["modules"] = {} - if "unskippable_tests" not in _CIVisibility._unittest_data: - _CIVisibility._unittest_data["unskippable_tests"] = set() - - -def _set_tracer(tracer: ddtrace.tracer): - """Manually sets the tracer instance to `unittest.`""" - unittest._datadog_tracer = tracer - - -def _is_test_coverage_enabled(test_object) -> bool: - return _CIVisibility._instance._collect_coverage_enabled and not _is_skipped_test(test_object) - - -def _is_skipped_test(test_object) -> bool: - testMethod = getattr(test_object, test_object._testMethodName, "") - return ( - (hasattr(test_object.__class__, "__unittest_skip__") and test_object.__class__.__unittest_skip__) - or (hasattr(testMethod, "__unittest_skip__") and testMethod.__unittest_skip__) - or _is_skipped_by_itr(test_object) - ) - - -def _is_skipped_by_itr(test_object) -> bool: - return hasattr(test_object, "_dd_itr_skip") and test_object._dd_itr_skip - - -def _should_be_skipped_by_itr(args: tuple, test_module_suite_path: str, test_name: str, test_object) -> bool: - return ( - len(args) - and _CIVisibility._instance._should_skip_path(test_module_suite_path, test_name) - and not _is_skipped_test(test_object) - ) - - -def _is_marked_as_unskippable(test_object) -> bool: - test_suite_name = _extract_suite_name_from_test_method(test_object) - test_name = _extract_test_method_name(test_object) - test_module_path = _extract_module_file_path(test_object) - test_module_suite_name = _generate_fully_qualified_test_name(test_module_path, test_suite_name, test_name) - return ( - hasattr(_CIVisibility, "_unittest_data") - and test_module_suite_name in 
_CIVisibility._unittest_data["unskippable_tests"] - ) - - -def _update_skipped_elements_and_set_tags(test_module_span: ddtrace.Span, test_session_span: ddtrace.Span): - global _global_skipped_elements - _global_skipped_elements += 1 - - test_module_span._metrics[test.ITR_TEST_SKIPPING_COUNT] += 1 - test_module_span.set_tag_str(test.ITR_TEST_SKIPPING_TESTS_SKIPPED, "true") - test_module_span.set_tag_str(test.ITR_DD_CI_ITR_TESTS_SKIPPED, "true") - - test_session_span.set_tag_str(test.ITR_TEST_SKIPPING_TESTS_SKIPPED, "true") - test_session_span.set_tag_str(test.ITR_DD_CI_ITR_TESTS_SKIPPED, "true") - - -def _store_test_span(item, span: ddtrace.Span): - """Store datadog span at `unittest` test instance.""" - item._datadog_span = span - - -def _store_module_identifier(test_object: unittest.TextTestRunner): - """Store module identifier at `unittest` module instance, this is useful to classify event types.""" - if hasattr(test_object, "test") and hasattr(test_object.test, "_tests"): - for module in test_object.test._tests: - if len(module._tests) and _extract_module_name_from_module(module): - _set_identifier(module, "module") - - -def _store_suite_identifier(module): - """Store suite identifier at `unittest` suite instance, this is useful to classify event types.""" - if hasattr(module, "_tests"): - for suite in module._tests: - if len(suite._tests) and _extract_module_name_from_module(suite): - _set_identifier(suite, "suite") - - -def _is_test(item) -> bool: - if ( - type(item) == unittest.TestSuite - or not hasattr(item, "_testMethodName") - or (ddtrace.config.unittest.strict_naming and not item._testMethodName.startswith("test")) - ): - return False - return True - - -def _extract_span(item) -> Union[ddtrace.Span, None]: - return getattr(item, "_datadog_span", None) - - -def _extract_command_name_from_session(session: unittest.TextTestRunner) -> str: - if not hasattr(session, "progName"): - return "python -m unittest" - return getattr(session, "progName", "") - - -def 
_extract_test_method_name(test_object) -> str: - """Extract test method name from `unittest` instance.""" - return getattr(test_object, "_testMethodName", "") - - -def _extract_session_span() -> Union[ddtrace.Span, None]: - return getattr(_CIVisibility, "_datadog_session_span", None) - - -def _extract_module_span(module_identifier: str) -> Union[ddtrace.Span, None]: - if hasattr(_CIVisibility, "_unittest_data") and module_identifier in _CIVisibility._unittest_data["modules"]: - return _CIVisibility._unittest_data["modules"][module_identifier].get("module_span") - return None - - -def _extract_suite_span(suite_identifier: str) -> Union[ddtrace.Span, None]: - if hasattr(_CIVisibility, "_unittest_data") and suite_identifier in _CIVisibility._unittest_data["suites"]: - return _CIVisibility._unittest_data["suites"][suite_identifier].get("suite_span") - return None - - -def _update_status_item(item: ddtrace.Span, status: str): - """ - Sets the status for each Span implementing the test FAIL logic override. 
- """ - existing_status = item.get_tag(test.STATUS) - if existing_status and (status == test.Status.SKIP.value or existing_status == test.Status.FAIL.value): - return None - item.set_tag_str(test.STATUS, status) - return None - - -def _extract_suite_name_from_test_method(item) -> str: - item_type = type(item) - return getattr(item_type, "__name__", "") - - -def _extract_module_name_from_module(item) -> str: - if _is_test(item): - return type(item).__module__ - return "" - - -def _extract_test_reason(item: tuple) -> str: - """ - Given a tuple of type [test_class, str], it returns the test failure/skip reason - """ - return item[1] - - -def _extract_test_file_name(item) -> str: - return os.path.basename(inspect.getfile(item.__class__)) - - -def _extract_module_file_path(item) -> str: - if _is_test(item): - try: - test_module_object = inspect.getfile(item.__class__) - except TypeError: - log.debug( - "Tried to collect module file path but it is a built-in Python function", - ) - return "" - return get_relative_or_absolute_path_for_path(test_module_object, os.getcwd()) - - return "" - - -def _generate_test_resource(suite_name: str, test_name: str) -> str: - return "{}.{}".format(suite_name, test_name) - - -def _generate_suite_resource(test_suite: str) -> str: - return "{}".format(test_suite) - - -def _generate_module_resource(test_module: str) -> str: - return "{}".format(test_module) - - -def _generate_session_resource(test_command: str) -> str: - return "{}".format(test_command) - - -def _set_test_skipping_tags_to_span(span: ddtrace.Span): - span.set_tag_str(test.ITR_TEST_SKIPPING_ENABLED, "true") - span.set_tag_str(test.ITR_TEST_SKIPPING_TYPE, TEST) - span.set_tag_str(test.ITR_TEST_SKIPPING_TESTS_SKIPPED, "false") - span.set_tag_str(test.ITR_DD_CI_ITR_TESTS_SKIPPED, "false") - span.set_tag_str(test.ITR_FORCED_RUN, "false") - span.set_tag_str(test.ITR_UNSKIPPABLE, "false") - - -def _set_identifier(item, name: str): - """ - Adds an event type classification to a 
`unittest` test. - """ - item._datadog_object = name - - -def _is_valid_result(instance: unittest.TextTestRunner, args: tuple) -> bool: - return instance and isinstance(instance, unittest.runner.TextTestResult) and args - - -def _is_valid_test_call(kwargs: dict) -> bool: - """ - Validates that kwargs is empty to ensure that `unittest` is running a test - """ - return not len(kwargs) - - -def _is_valid_module_suite_call(func) -> bool: - """ - Validates that the mocked function is an actual function from `unittest` - """ - return type(func).__name__ == "method" or type(func).__name__ == "instancemethod" - - -def _is_invoked_by_cli(instance: unittest.TextTestRunner) -> bool: - return ( - hasattr(instance, "progName") - or hasattr(_CIVisibility, "_datadog_entry") - and _CIVisibility._datadog_entry == "cli" +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, ) - -def _extract_test_method_object(test_object): - if hasattr(test_object, "_testMethodName"): - return getattr(test_object, test_object._testMethodName, None) - return None - - -def _is_invoked_by_text_test_runner() -> bool: - return hasattr(_CIVisibility, "_datadog_entry") and _CIVisibility._datadog_entry == "TextTestRunner" - - -def _generate_module_suite_path(test_module_path: str, test_suite_name: str) -> str: - return "{}.{}".format(test_module_path, test_suite_name) - - -def _populate_suites_and_modules(test_objects: list, seen_suites: dict, seen_modules: dict): - """ - Discovers suites and modules and initializes the seen_suites and seen_modules dictionaries. 
- """ - if not hasattr(test_objects, "__iter__"): - return - for test_object in test_objects: - if not _is_test(test_object): - _populate_suites_and_modules(test_object, seen_suites, seen_modules) - continue - test_module_path = _extract_module_file_path(test_object) - test_suite_name = _extract_suite_name_from_test_method(test_object) - test_module_suite_path = _generate_module_suite_path(test_module_path, test_suite_name) - if test_module_path not in seen_modules: - seen_modules[test_module_path] = { - "module_span": None, - "remaining_suites": 0, - } - if test_module_suite_path not in seen_suites: - seen_suites[test_module_suite_path] = { - "suite_span": None, - "remaining_tests": 0, - } - - seen_modules[test_module_path]["remaining_suites"] += 1 - - seen_suites[test_module_suite_path]["remaining_tests"] += 1 - - -def _finish_remaining_suites_and_modules(seen_suites: dict, seen_modules: dict): - """ - Forces all suite and module spans to finish and updates their statuses. - """ - for suite in seen_suites.values(): - test_suite_span = suite["suite_span"] - if test_suite_span and not test_suite_span.finished: - _finish_span(test_suite_span) - - for module in seen_modules.values(): - test_module_span = module["module_span"] - if test_module_span and not test_module_span.finished: - _finish_span(test_module_span) - del _CIVisibility._unittest_data - - -def _update_remaining_suites_and_modules( - test_module_suite_path: str, test_module_path: str, test_module_span: ddtrace.Span, test_suite_span: ddtrace.Span -): - """ - Updates the remaining test suite and test counter and finishes spans when these have finished their execution. 
- """ - suite_dict = _CIVisibility._unittest_data["suites"][test_module_suite_path] - modules_dict = _CIVisibility._unittest_data["modules"][test_module_path] - - suite_dict["remaining_tests"] -= 1 - if suite_dict["remaining_tests"] == 0: - modules_dict["remaining_suites"] -= 1 - _finish_span(test_suite_span) - if modules_dict["remaining_suites"] == 0: - _finish_span(test_module_span) - - -def _update_test_skipping_count_span(span: ddtrace.Span): - if _CIVisibility.test_skipping_enabled(): - span.set_metric(test.ITR_TEST_SKIPPING_COUNT, _global_skipped_elements) - - -def _extract_skip_if_reason(args, kwargs): - if len(args) >= 2: - return _extract_test_reason(args) - elif kwargs and "reason" in kwargs: - return kwargs["reason"] - return "" - - -def patch(): - """ - Patch the instrumented methods from unittest - """ - if getattr(unittest, "_datadog_patch", False) or _CIVisibility.enabled: - return - _initialize_unittest_data() - - unittest._datadog_patch = True - - _w = wrapt.wrap_function_wrapper - - _w(unittest, "TextTestResult.addSuccess", add_success_test_wrapper) - _w(unittest, "TextTestResult.addFailure", add_failure_test_wrapper) - _w(unittest, "TextTestResult.addError", add_failure_test_wrapper) - _w(unittest, "TextTestResult.addSkip", add_skip_test_wrapper) - _w(unittest, "TextTestResult.addExpectedFailure", add_xfail_test_wrapper) - _w(unittest, "TextTestResult.addUnexpectedSuccess", add_xpass_test_wrapper) - _w(unittest, "skipIf", skip_if_decorator) - _w(unittest, "TestCase.run", handle_test_wrapper) - _w(unittest, "TestSuite.run", collect_text_test_runner_session) - _w(unittest, "TextTestRunner.run", handle_text_test_runner_wrapper) - _w(unittest, "TestProgram.runTests", handle_cli_run) - - -def unpatch(): - """ - Undo patched instrumented methods from unittest - """ - if not getattr(unittest, "_datadog_patch", False): - return - - _u(unittest.TextTestResult, "addSuccess") - _u(unittest.TextTestResult, "addFailure") - _u(unittest.TextTestResult, 
"addError") - _u(unittest.TextTestResult, "addSkip") - _u(unittest.TextTestResult, "addExpectedFailure") - _u(unittest.TextTestResult, "addUnexpectedSuccess") - _u(unittest, "skipIf") - _u(unittest.TestSuite, "run") - _u(unittest.TestCase, "run") - _u(unittest.TextTestRunner, "run") - _u(unittest.TestProgram, "runTests") - - unittest._datadog_patch = False - _CIVisibility.disable() - - -def _set_test_span_status(test_item, status: str, exc_info: str = None, skip_reason: str = None): - span = _extract_span(test_item) - if not span: - log.debug("Tried setting test result for test but could not find span for %s", test_item) - return None - span.set_tag_str(test.STATUS, status) - if exc_info: - span.set_exc_info(exc_info[0], exc_info[1], exc_info[2]) - if status == test.Status.SKIP.value: - span.set_tag_str(test.SKIP_REASON, skip_reason) - - -def _set_test_xpass_xfail_result(test_item, result: str): - """ - Sets `test.result` and `test.status` to a XFAIL or XPASS test. - """ - span = _extract_span(test_item) - if not span: - log.debug("Tried setting test result for an xpass or xfail test but could not find span for %s", test_item) - return None - span.set_tag_str(test.RESULT, result) - status = span.get_tag(test.STATUS) - if result == test.Status.XFAIL.value: - if status == test.Status.PASS.value: - span.set_tag_str(test.STATUS, test.Status.FAIL.value) - elif status == test.Status.FAIL.value: - span.set_tag_str(test.STATUS, test.Status.PASS.value) - - -def add_success_test_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): - if _is_valid_result(instance, args): - _set_test_span_status(test_item=args[0], status=test.Status.PASS.value) - - return func(*args, **kwargs) - - -def add_failure_test_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): - if _is_valid_result(instance, args): - _set_test_span_status(test_item=args[0], exc_info=_extract_test_reason(args), status=test.Status.FAIL.value) - - return func(*args, 
**kwargs) - - -def add_xfail_test_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): - if _is_valid_result(instance, args): - _set_test_xpass_xfail_result(test_item=args[0], result=test.Status.XFAIL.value) - - return func(*args, **kwargs) - - -def add_skip_test_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): - if _is_valid_result(instance, args): - _set_test_span_status(test_item=args[0], skip_reason=_extract_test_reason(args), status=test.Status.SKIP.value) - - return func(*args, **kwargs) - - -def add_xpass_test_wrapper(func, instance, args: tuple, kwargs: dict): - if _is_valid_result(instance, args): - _set_test_xpass_xfail_result(test_item=args[0], result=test.Status.XPASS.value) - - return func(*args, **kwargs) - - -def _mark_test_as_unskippable(obj): - test_name = obj.__name__ - test_suite_name = str(obj).split(".")[0].split()[1] - test_module_path = get_relative_or_absolute_path_for_path(obj.__code__.co_filename, os.getcwd()) - test_module_suite_name = _generate_fully_qualified_test_name(test_module_path, test_suite_name, test_name) - _CIVisibility._unittest_data["unskippable_tests"].add(test_module_suite_name) - return obj - - -def _using_unskippable_decorator(args, kwargs): - return args[0] is False and _extract_skip_if_reason(args, kwargs) == ITR_UNSKIPPABLE_REASON - - -def skip_if_decorator(func, instance, args: tuple, kwargs: dict): - if _using_unskippable_decorator(args, kwargs): - return _mark_test_as_unskippable - return func(*args, **kwargs) - - -def handle_test_wrapper(func, instance, args: tuple, kwargs: dict): - """ - Creates module and suite spans for `unittest` test executions. 
- """ - if _is_valid_test_call(kwargs) and _is_test(instance) and hasattr(_CIVisibility, "_unittest_data"): - test_name = _extract_test_method_name(instance) - test_suite_name = _extract_suite_name_from_test_method(instance) - test_module_path = _extract_module_file_path(instance) - test_module_suite_path = _generate_module_suite_path(test_module_path, test_suite_name) - test_suite_span = _extract_suite_span(test_module_suite_path) - test_module_span = _extract_module_span(test_module_path) - if test_module_span is None and test_module_path in _CIVisibility._unittest_data["modules"]: - test_module_span = _start_test_module_span(instance) - _CIVisibility._unittest_data["modules"][test_module_path]["module_span"] = test_module_span - if test_suite_span is None and test_module_suite_path in _CIVisibility._unittest_data["suites"]: - test_suite_span = _start_test_suite_span(instance) - suite_dict = _CIVisibility._unittest_data["suites"][test_module_suite_path] - suite_dict["suite_span"] = test_suite_span - if not test_module_span or not test_suite_span: - log.debug("Suite and/or module span not found for test: %s", test_name) - return func(*args, **kwargs) - with _start_test_span(instance, test_suite_span) as span: - test_session_span = _CIVisibility._datadog_session_span - root_directory = os.getcwd() - fqn_test = _generate_fully_qualified_test_name(test_module_path, test_suite_name, test_name) - - if _CIVisibility.test_skipping_enabled(): - if ITR_CORRELATION_ID_TAG_NAME in _CIVisibility._instance._itr_meta: - span.set_tag_str( - ITR_CORRELATION_ID_TAG_NAME, _CIVisibility._instance._itr_meta[ITR_CORRELATION_ID_TAG_NAME] - ) - - if _is_marked_as_unskippable(instance): - span.set_tag_str(test.ITR_UNSKIPPABLE, "true") - test_module_span.set_tag_str(test.ITR_UNSKIPPABLE, "true") - test_session_span.set_tag_str(test.ITR_UNSKIPPABLE, "true") - test_module_suite_path_without_extension = "{}/{}".format( - os.path.splitext(test_module_path)[0], test_suite_name - ) - if 
_should_be_skipped_by_itr(args, test_module_suite_path_without_extension, test_name, instance): - if _is_marked_as_unskippable(instance): - span.set_tag_str(test.ITR_FORCED_RUN, "true") - test_module_span.set_tag_str(test.ITR_FORCED_RUN, "true") - test_session_span.set_tag_str(test.ITR_FORCED_RUN, "true") - else: - _update_skipped_elements_and_set_tags(test_module_span, test_session_span) - instance._dd_itr_skip = True - span.set_tag_str(test.ITR_SKIPPED, "true") - span.set_tag_str(test.SKIP_REASON, SKIPPED_BY_ITR_REASON) - - if _is_skipped_by_itr(instance): - result = args[0] - result.startTest(test=instance) - result.addSkip(test=instance, reason=SKIPPED_BY_ITR_REASON) - _set_test_span_status( - test_item=instance, skip_reason=SKIPPED_BY_ITR_REASON, status=test.Status.SKIP.value - ) - result.stopTest(test=instance) - else: - if _is_test_coverage_enabled(instance): - if not _module_has_dd_coverage_enabled(unittest, silent_mode=True): - unittest._dd_coverage = _start_coverage(root_directory) - _switch_coverage_context(unittest._dd_coverage, fqn_test) - result = func(*args, **kwargs) - _update_status_item(test_suite_span, span.get_tag(test.STATUS)) - if _is_test_coverage_enabled(instance): - _report_coverage_to_span(unittest._dd_coverage, span, root_directory) - - _update_remaining_suites_and_modules( - test_module_suite_path, test_module_path, test_module_span, test_suite_span - ) - return result - return func(*args, **kwargs) - - -def collect_text_test_runner_session(func, instance: unittest.TestSuite, args: tuple, kwargs: dict): - """ - Discovers test suites and tests for the current `unittest` `TextTestRunner` execution - """ - if not _is_valid_module_suite_call(func): - return func(*args, **kwargs) - _initialize_unittest_data() - if _is_invoked_by_text_test_runner(): - seen_suites = _CIVisibility._unittest_data["suites"] - seen_modules = _CIVisibility._unittest_data["modules"] - _populate_suites_and_modules(instance._tests, seen_suites, seen_modules) - - result 
= func(*args, **kwargs) - - return result - result = func(*args, **kwargs) - return result - - -def _start_test_session_span(instance) -> ddtrace.Span: - """ - Starts a test session span and sets the required tags for a `unittest` session instance. - """ - tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer) - test_command = _extract_command_name_from_session(instance) - resource_name = _generate_session_resource(test_command) - test_session_span = tracer.trace( - SESSION_OPERATION_NAME, - service=_CIVisibility._instance._service, - span_type=SpanTypes.TEST, - resource=resource_name, - ) - test_session_span.set_tag_str(_EVENT_TYPE, _SESSION_TYPE) - test_session_span.set_tag_str(_SESSION_ID, str(test_session_span.span_id)) - - test_session_span.set_tag_str(COMPONENT, COMPONENT_VALUE) - test_session_span.set_tag_str(SPAN_KIND, KIND) - - test_session_span.set_tag_str(test.COMMAND, test_command) - test_session_span.set_tag_str(test.FRAMEWORK, FRAMEWORK) - test_session_span.set_tag_str(test.FRAMEWORK_VERSION, _get_runtime_and_os_metadata()[RUNTIME_VERSION]) - - test_session_span.set_tag_str(test.TEST_TYPE, SpanTypes.TEST) - test_session_span.set_tag_str( - test.ITR_TEST_CODE_COVERAGE_ENABLED, - "true" if _CIVisibility._instance._collect_coverage_enabled else "false", - ) - - _CIVisibility.set_test_session_name(test_command=test_command) - - if _CIVisibility.test_skipping_enabled(): - _set_test_skipping_tags_to_span(test_session_span) - else: - test_session_span.set_tag_str(test.ITR_TEST_SKIPPING_ENABLED, "false") - _store_module_identifier(instance) - if _is_coverage_invoked_by_coverage_run(): - patch_coverage() - return test_session_span - - -def _start_test_module_span(instance) -> ddtrace.Span: - """ - Starts a test module span and sets the required tags for a `unittest` module instance. 
- """ - tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer) - test_session_span = _extract_session_span() - test_module_name = _extract_module_name_from_module(instance) - resource_name = _generate_module_resource(test_module_name) - test_module_span = tracer._start_span( - MODULE_OPERATION_NAME, - service=_CIVisibility._instance._service, - span_type=SpanTypes.TEST, - activate=True, - child_of=test_session_span, - resource=resource_name, - ) - test_module_span.set_tag_str(_EVENT_TYPE, _MODULE_TYPE) - test_module_span.set_tag_str(_SESSION_ID, str(test_session_span.span_id)) - test_module_span.set_tag_str(_MODULE_ID, str(test_module_span.span_id)) - - test_module_span.set_tag_str(COMPONENT, COMPONENT_VALUE) - test_module_span.set_tag_str(SPAN_KIND, KIND) - - test_module_span.set_tag_str(test.COMMAND, test_session_span.get_tag(test.COMMAND)) - test_module_span.set_tag_str(test.FRAMEWORK, FRAMEWORK) - test_module_span.set_tag_str(test.FRAMEWORK_VERSION, _get_runtime_and_os_metadata()[RUNTIME_VERSION]) - - test_module_span.set_tag_str(test.TEST_TYPE, SpanTypes.TEST) - test_module_span.set_tag_str(test.MODULE, test_module_name) - test_module_span.set_tag_str(test.MODULE_PATH, _extract_module_file_path(instance)) - test_module_span.set_tag_str( - test.ITR_TEST_CODE_COVERAGE_ENABLED, - "true" if _CIVisibility._instance._collect_coverage_enabled else "false", - ) - if _CIVisibility.test_skipping_enabled(): - _set_test_skipping_tags_to_span(test_module_span) - test_module_span.set_metric(test.ITR_TEST_SKIPPING_COUNT, 0) - else: - test_module_span.set_tag_str(test.ITR_TEST_SKIPPING_ENABLED, "false") - _store_suite_identifier(instance) - return test_module_span - - -def _start_test_suite_span(instance) -> ddtrace.Span: - """ - Starts a test suite span and sets the required tags for a `unittest` suite instance. 
- """ - tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer) - test_module_path = _extract_module_file_path(instance) - test_module_span = _extract_module_span(test_module_path) - test_suite_name = _extract_suite_name_from_test_method(instance) - resource_name = _generate_suite_resource(test_suite_name) - test_suite_span = tracer._start_span( - SUITE_OPERATION_NAME, - service=_CIVisibility._instance._service, - span_type=SpanTypes.TEST, - child_of=test_module_span, - activate=True, - resource=resource_name, - ) - test_suite_span.set_tag_str(_EVENT_TYPE, _SUITE_TYPE) - test_suite_span.set_tag_str(_SESSION_ID, test_module_span.get_tag(_SESSION_ID)) - test_suite_span.set_tag_str(_SUITE_ID, str(test_suite_span.span_id)) - test_suite_span.set_tag_str(_MODULE_ID, str(test_module_span.span_id)) - - test_suite_span.set_tag_str(COMPONENT, COMPONENT_VALUE) - test_suite_span.set_tag_str(SPAN_KIND, KIND) - - test_suite_span.set_tag_str(test.COMMAND, test_module_span.get_tag(test.COMMAND)) - test_suite_span.set_tag_str(test.FRAMEWORK, FRAMEWORK) - test_suite_span.set_tag_str(test.FRAMEWORK_VERSION, _get_runtime_and_os_metadata()[RUNTIME_VERSION]) - - test_suite_span.set_tag_str(test.TEST_TYPE, SpanTypes.TEST) - test_suite_span.set_tag_str(test.SUITE, test_suite_name) - test_suite_span.set_tag_str(test.MODULE, test_module_span.get_tag(test.MODULE)) - test_suite_span.set_tag_str(test.MODULE_PATH, test_module_path) - return test_suite_span - - -def _start_test_span(instance, test_suite_span: ddtrace.Span) -> ddtrace.Span: - """ - Starts a test span and sets the required tags for a `unittest` test instance. 
- """ - tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer) - test_name = _extract_test_method_name(instance) - test_method_object = _extract_test_method_object(instance) - test_suite_name = _extract_suite_name_from_test_method(instance) - resource_name = _generate_test_resource(test_suite_name, test_name) - span = tracer._start_span( - ddtrace.config.unittest.operation_name, - service=_CIVisibility._instance._service, - resource=resource_name, - span_type=SpanTypes.TEST, - child_of=test_suite_span, - activate=True, - ) - span.set_tag_str(_EVENT_TYPE, SpanTypes.TEST) - span.set_tag_str(_SESSION_ID, test_suite_span.get_tag(_SESSION_ID)) - span.set_tag_str(_MODULE_ID, test_suite_span.get_tag(_MODULE_ID)) - span.set_tag_str(_SUITE_ID, test_suite_span.get_tag(_SUITE_ID)) - - span.set_tag_str(COMPONENT, COMPONENT_VALUE) - span.set_tag_str(SPAN_KIND, KIND) - - span.set_tag_str(test.COMMAND, test_suite_span.get_tag(test.COMMAND)) - span.set_tag_str(test.FRAMEWORK, FRAMEWORK) - span.set_tag_str(test.FRAMEWORK_VERSION, _get_runtime_and_os_metadata()[RUNTIME_VERSION]) - - span.set_tag_str(test.TYPE, SpanTypes.TEST) - span.set_tag_str(test.NAME, test_name) - span.set_tag_str(test.SUITE, test_suite_name) - span.set_tag_str(test.MODULE, test_suite_span.get_tag(test.MODULE)) - span.set_tag_str(test.MODULE_PATH, test_suite_span.get_tag(test.MODULE_PATH)) - span.set_tag_str(test.STATUS, test.Status.FAIL.value) - span.set_tag_str(test.CLASS_HIERARCHY, test_suite_name) - - _CIVisibility.set_codeowners_of(_extract_test_file_name(instance), span=span) - - _add_start_end_source_file_path_data_to_span(span, test_method_object, test_name, os.getcwd()) - - _store_test_span(instance, span) - return span - - -def _finish_span(current_span: ddtrace.Span): - """ - Finishes active span and populates span status upwards - """ - current_status = current_span.get_tag(test.STATUS) - parent_span = current_span._parent - if current_status and parent_span: - 
_update_status_item(parent_span, current_status) - elif not current_status: - current_span.set_tag_str(test.SUITE, test.Status.FAIL.value) - current_span.finish() - - -def _finish_test_session_span(): - _finish_remaining_suites_and_modules( - _CIVisibility._unittest_data["suites"], _CIVisibility._unittest_data["modules"] - ) - _update_test_skipping_count_span(_CIVisibility._datadog_session_span) - if _CIVisibility._instance._collect_coverage_enabled and _module_has_dd_coverage_enabled(unittest): - _stop_coverage(unittest) - if _is_coverage_patched() and _is_coverage_invoked_by_coverage_run(): - run_coverage_report() - _add_pct_covered_to_span(_coverage_data, _CIVisibility._datadog_session_span) - unpatch_coverage() - _finish_span(_CIVisibility._datadog_session_span) - - -def handle_cli_run(func, instance: unittest.TestProgram, args: tuple, kwargs: dict): - """ - Creates session span and discovers test suites and tests for the current `unittest` CLI execution - """ - if _is_invoked_by_cli(instance): - _enable_unittest_if_not_started() - for parent_module in instance.test._tests: - for module in parent_module._tests: - _populate_suites_and_modules( - module, _CIVisibility._unittest_data["suites"], _CIVisibility._unittest_data["modules"] - ) - - test_session_span = _start_test_session_span(instance) - _CIVisibility._datadog_entry = "cli" - _CIVisibility._datadog_session_span = test_session_span - - try: - result = func(*args, **kwargs) - except SystemExit as e: - if _CIVisibility.enabled and _CIVisibility._datadog_session_span and hasattr(_CIVisibility, "_unittest_data"): - _finish_test_session_span() - - raise e - return result - - -def handle_text_test_runner_wrapper(func, instance: unittest.TextTestRunner, args: tuple, kwargs: dict): - """ - Creates session span if unittest is called through the `TextTestRunner` method - """ - if _is_invoked_by_cli(instance): - return func(*args, **kwargs) - _enable_unittest_if_not_started() - _CIVisibility._datadog_entry = 
"TextTestRunner" - if not hasattr(_CIVisibility, "_datadog_session_span"): - _CIVisibility._datadog_session_span = _start_test_session_span(instance) - _CIVisibility._datadog_expected_sessions = 0 - _CIVisibility._datadog_finished_sessions = 0 - _CIVisibility._datadog_expected_sessions += 1 - try: - result = func(*args, **kwargs) - except SystemExit as e: - _CIVisibility._datadog_finished_sessions += 1 - if _CIVisibility._datadog_finished_sessions == _CIVisibility._datadog_expected_sessions: - _finish_test_session_span() - del _CIVisibility._datadog_session_span - raise e - _CIVisibility._datadog_finished_sessions += 1 - if _CIVisibility._datadog_finished_sessions == _CIVisibility._datadog_expected_sessions: - _finish_test_session_span() - del _CIVisibility._datadog_session_span - return result + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff --git a/ddtrace/contrib/vertexai/__init__.py b/ddtrace/contrib/vertexai/__init__.py index d86db4ce474..6a913412e57 100644 --- a/ddtrace/contrib/vertexai/__init__.py +++ b/ddtrace/contrib/vertexai/__init__.py @@ -77,7 +77,8 @@ ``Pin`` API:: import vertexai - from ddtrace import Pin, config + from ddtrace import config + from ddtrace.trace import Pin Pin.override(vertexai, service="my-vertexai-service") """ # noqa: E501 diff --git a/ddtrace/contrib/vertica/__init__.py b/ddtrace/contrib/vertica/__init__.py index cf1c532e084..d63363436f6 100644 --- a/ddtrace/contrib/vertica/__init__.py +++ b/ddtrace/contrib/vertica/__init__.py @@ -27,16 +27,16 @@ To configure the Vertica integration on an instance-per-instance basis use the ``Pin`` API:: - from ddtrace import Pin, patch, Tracer + from ddtrace import patch + from ddtrace.trace import Pin patch(vertica=True) import vertica_python - custom_tracer = Tracer() conn = vertica_python.connect(**YOUR_VERTICA_CONFIG) - # override the service and tracer to be used - Pin.override(conn, service='myverticaservice', 
tracer=custom_tracer) + # override the service + Pin.override(conn, service='myverticaservice') """ diff --git a/ddtrace/contrib/yaaredis/__init__.py b/ddtrace/contrib/yaaredis/__init__.py index 54f49cbad40..659de7f7bea 100644 --- a/ddtrace/contrib/yaaredis/__init__.py +++ b/ddtrace/contrib/yaaredis/__init__.py @@ -50,10 +50,10 @@ Instance Configuration ~~~~~~~~~~~~~~~~~~~~~~ -To configure particular yaaredis instances use the :class:`Pin ` API:: +To configure particular yaaredis instances use the :class:`Pin ` API:: import yaaredis - from ddtrace import Pin + from ddtrace.trace import Pin client = yaaredis.StrictRedis(host="localhost", port=6379) diff --git a/ddtrace/filters.py b/ddtrace/filters.py index a2e6884f05c..bd6367d5635 100644 --- a/ddtrace/filters.py +++ b/ddtrace/filters.py @@ -1,72 +1,10 @@ -import abc -import re -from typing import TYPE_CHECKING # noqa:F401 -from typing import List # noqa:F401 -from typing import Optional # noqa:F401 -from typing import Union # noqa:F401 +from ddtrace._trace.filters import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -from ddtrace._trace.processor import TraceProcessor -from ddtrace.ext import http - -if TYPE_CHECKING: # pragma: no cover - from ddtrace._trace.span import Span # noqa:F401 - - -class TraceFilter(TraceProcessor): - @abc.abstractmethod - def process_trace(self, trace): - # type: (List[Span]) -> Optional[List[Span]] - """Processes a trace. - - None can be returned to prevent the trace from being exported. - """ - pass - - -class FilterRequestsOnUrl(TraceFilter): - r"""Filter out traces from incoming http requests based on the request's url. - - This class takes as argument a list of regular expression patterns - representing the urls to be excluded from tracing. 
A trace will be excluded - if its root span contains a ``http.url`` tag and if this tag matches any of - the provided regular expression using the standard python regexp match - semantic (https://docs.python.org/3/library/re.html#re.match). - - :param list regexps: a list of regular expressions (or a single string) defining - the urls that should be filtered out. - - Examples: - To filter out http calls to domain api.example.com:: - - FilterRequestsOnUrl(r'http://api\\.example\\.com') - - To filter out http calls to all first level subdomains from example.com:: - - FilterRequestOnUrl(r'http://.*+\\.example\\.com') - - To filter out calls to both http://test.example.com and http://example.com/healthcheck:: - - FilterRequestOnUrl([r'http://test\\.example\\.com', r'http://example\\.com/healthcheck']) - """ - - def __init__(self, regexps: Union[str, List[str]]): - if isinstance(regexps, str): - regexps = [regexps] - self._regexps = [re.compile(regexp) for regexp in regexps] - - def process_trace(self, trace): - # type: (List[Span]) -> Optional[List[Span]] - """ - When the filter is registered in the tracer, process_trace is called by - on each trace before it is sent to the agent, the returned value will - be fed to the next filter in the list. If process_trace returns None, - the whole trace is discarded. 
- """ - for span in trace: - url = span.get_tag(http.URL) - if span.parent_id is None and url is not None: - for regexp in self._regexps: - if regexp.match(url): - return None - return trace +deprecate( + "The ddtrace.filters module and the ``FilterRequestsOnUrl`` class is deprecated and will be removed.", + message="Import ``TraceFilter`` from the ddtrace.trace package.", + category=DDTraceDeprecationWarning, +) diff --git a/ddtrace/internal/ci_visibility/api/_test.py b/ddtrace/internal/ci_visibility/api/_test.py index 0f8a2efd41d..a7ab21dd459 100644 --- a/ddtrace/internal/ci_visibility/api/_test.py +++ b/ddtrace/internal/ci_visibility/api/_test.py @@ -5,7 +5,7 @@ from typing import Optional from typing import Union -from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_INFO +from ddtrace.contrib.internal.pytest_benchmark.constants import BENCHMARK_INFO from ddtrace.ext import SpanTypes from ddtrace.ext import test from ddtrace.ext.test_visibility import ITR_SKIPPING_LEVEL diff --git a/ddtrace/internal/ci_visibility/filters.py b/ddtrace/internal/ci_visibility/filters.py index c90e7324533..f1b22d97e13 100644 --- a/ddtrace/internal/ci_visibility/filters.py +++ b/ddtrace/internal/ci_visibility/filters.py @@ -8,7 +8,7 @@ from ddtrace.constants import AUTO_KEEP from ddtrace.ext import SpanTypes from ddtrace.ext import ci -from ddtrace.filters import TraceFilter +from ddtrace.trace import TraceFilter if TYPE_CHECKING: diff --git a/ddtrace/internal/ci_visibility/recorder.py b/ddtrace/internal/ci_visibility/recorder.py index 609475506d3..34cc543f11d 100644 --- a/ddtrace/internal/ci_visibility/recorder.py +++ b/ddtrace/internal/ci_visibility/recorder.py @@ -145,6 +145,12 @@ def _do_request(method, url, payload, headers, timeout=DEFAULT_TIMEOUT): return result +class CIVisibilityTracer(Tracer): + def __init__(self, *args, **kwargs): + # Allows for multiple instances of the civis tracer to be created without logging a warning + super(CIVisibilityTracer, 
self).__init__(*args, **kwargs) + + class CIVisibility(Service): _instance = None # type: Optional[CIVisibility] enabled = False @@ -166,13 +172,13 @@ def __init__(self, tracer=None, config=None, service=None): log.debug("Using _CI_DD_AGENT_URL for CI Visibility tracer: %s", env_agent_url) url = env_agent_url - self.tracer = Tracer(context_provider=CIContextProvider(), url=url) + self.tracer = CIVisibilityTracer(context_provider=CIContextProvider(), url=url) else: self.tracer = ddtrace.tracer # Partial traces are required for ITR to work in suite-level skipping for long test sessions, but we # assume that a tracer is already configured if it's been passed in. - self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=TRACER_PARTIAL_FLUSH_MIN_SPANS) + self.tracer._configure(partial_flush_enabled=True, partial_flush_min_spans=TRACER_PARTIAL_FLUSH_MIN_SPANS) self._api_client: Optional[_TestVisibilityAPIClientBase] = None @@ -393,7 +399,7 @@ def _configure_writer(self, coverage_enabled=False, requests_mode=None, url=None itr_suite_skipping_mode=self._suite_skipping_mode, ) if writer is not None: - self.tracer.configure(writer=writer) + self.tracer._configure(writer=writer) def _agent_evp_proxy_is_available(self): # type: () -> bool @@ -591,10 +597,10 @@ def disable(cls): def _start_service(self): # type: () -> None - tracer_filters = self.tracer._filters + tracer_filters = self.tracer._user_trace_processors if not any(isinstance(tracer_filter, TraceCiVisibilityFilter) for tracer_filter in tracer_filters): tracer_filters += [TraceCiVisibilityFilter(self._tags, self._service)] # type: ignore[arg-type] - self.tracer.configure(settings={"FILTERS": tracer_filters}) + self.tracer._configure(trace_processors=tracer_filters) if self.test_skipping_enabled(): self._fetch_tests_to_skip() diff --git a/ddtrace/internal/ci_visibility/telemetry/api_request.py b/ddtrace/internal/ci_visibility/telemetry/api_request.py index 076cc0cca77..77f3ea5f626 100644 --- 
a/ddtrace/internal/ci_visibility/telemetry/api_request.py +++ b/ddtrace/internal/ci_visibility/telemetry/api_request.py @@ -1,10 +1,10 @@ import dataclasses from typing import Optional -from ddtrace.internal.ci_visibility.telemetry.constants import CIVISIBILITY_TELEMETRY_NAMESPACE as _NAMESPACE from ddtrace.internal.ci_visibility.telemetry.constants import ERROR_TYPES from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) @@ -32,13 +32,15 @@ def record_api_request( error, ) - telemetry_writer.add_count_metric(_NAMESPACE, f"{metric_names.count}", 1) - telemetry_writer.add_distribution_metric(_NAMESPACE, f"{metric_names.duration}", duration) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, f"{metric_names.count}", 1) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, f"{metric_names.duration}", duration) if response_bytes is not None: if metric_names.response_bytes is not None: # We don't always want to record response bytes (for settings requests), so assume that no metric name # means we don't want to record it. 
- telemetry_writer.add_distribution_metric(_NAMESPACE, f"{metric_names.response_bytes}", response_bytes) + telemetry_writer.add_distribution_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, f"{metric_names.response_bytes}", response_bytes + ) if error is not None: record_api_request_error(metric_names.error, error) @@ -46,4 +48,4 @@ def record_api_request( def record_api_request_error(error_metric_name: str, error: ERROR_TYPES): log.debug("Recording early flake detection request error telemetry: %s", error) - telemetry_writer.add_count_metric(_NAMESPACE, error_metric_name, 1, (("error_type", error),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, error_metric_name, 1, (("error_type", error),)) diff --git a/ddtrace/internal/ci_visibility/telemetry/constants.py b/ddtrace/internal/ci_visibility/telemetry/constants.py index dad54511c04..191338e86e9 100644 --- a/ddtrace/internal/ci_visibility/telemetry/constants.py +++ b/ddtrace/internal/ci_visibility/telemetry/constants.py @@ -1,9 +1,6 @@ from enum import Enum -CIVISIBILITY_TELEMETRY_NAMESPACE = "civisibility" - - class ERROR_TYPES(str, Enum): TIMEOUT = "timeout" NETWORK = "network" diff --git a/ddtrace/internal/ci_visibility/telemetry/coverage.py b/ddtrace/internal/ci_visibility/telemetry/coverage.py index e3370fbee6e..392196f7236 100644 --- a/ddtrace/internal/ci_visibility/telemetry/coverage.py +++ b/ddtrace/internal/ci_visibility/telemetry/coverage.py @@ -3,10 +3,10 @@ from typing import Optional from typing import Tuple -from ddtrace.internal.ci_visibility.telemetry.constants import CIVISIBILITY_TELEMETRY_NAMESPACE as _NAMESPACE from ddtrace.internal.ci_visibility.telemetry.constants import TEST_FRAMEWORKS from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) @@ -30,7 +30,7 @@ def record_code_coverage_started(coverage_library: COVERAGE_LIBRARY, 
test_framew _tags: List[Tuple[str, str]] = [("library", coverage_library)] if test_framework is not None: _tags.append(("test_framework", test_framework)) - telemetry_writer.add_count_metric(_NAMESPACE, COVERAGE_TELEMETRY.STARTED, 1, tuple(_tags)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, COVERAGE_TELEMETRY.STARTED, 1, tuple(_tags)) def record_code_coverage_finished(coverage_library: COVERAGE_LIBRARY, test_framework: Optional[TEST_FRAMEWORKS] = None): @@ -38,19 +38,19 @@ def record_code_coverage_finished(coverage_library: COVERAGE_LIBRARY, test_frame _tags: List[Tuple[str, str]] = [("library", coverage_library)] if test_framework is not None: _tags.append(("test_framework", test_framework)) - telemetry_writer.add_count_metric(_NAMESPACE, COVERAGE_TELEMETRY.FINISHED, 1, tuple(_tags)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, COVERAGE_TELEMETRY.FINISHED, 1, tuple(_tags)) def record_code_coverage_empty(): log.debug("Recording code coverage empty telemetry") - telemetry_writer.add_count_metric(_NAMESPACE, COVERAGE_TELEMETRY.IS_EMPTY, 1) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, COVERAGE_TELEMETRY.IS_EMPTY, 1) def record_code_coverage_files(count_files: int): log.debug("Recording code coverage files telemetry: %s", count_files) - telemetry_writer.add_distribution_metric(_NAMESPACE, COVERAGE_TELEMETRY.FILES, count_files) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, COVERAGE_TELEMETRY.FILES, count_files) def record_code_coverage_error(): log.debug("Recording code coverage error telemetry") - telemetry_writer.add_count_metric(_NAMESPACE, COVERAGE_TELEMETRY.ERRORS, 1) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, COVERAGE_TELEMETRY.ERRORS, 1) diff --git a/ddtrace/internal/ci_visibility/telemetry/early_flake_detection.py b/ddtrace/internal/ci_visibility/telemetry/early_flake_detection.py index f8a512e7048..b9e9e48d021 100644 --- 
a/ddtrace/internal/ci_visibility/telemetry/early_flake_detection.py +++ b/ddtrace/internal/ci_visibility/telemetry/early_flake_detection.py @@ -1,8 +1,8 @@ from enum import Enum -from ddtrace.internal.ci_visibility.telemetry.constants import CIVISIBILITY_TELEMETRY_NAMESPACE as _NAMESPACE from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) @@ -19,5 +19,7 @@ class EARLY_FLAKE_DETECTION_TELEMETRY(str, Enum): def record_early_flake_detection_tests_count(early_flake_detection_count: int): log.debug("Recording early flake detection tests count telemetry: %s", early_flake_detection_count) telemetry_writer.add_distribution_metric( - _NAMESPACE, EARLY_FLAKE_DETECTION_TELEMETRY.RESPONSE_TESTS.value, early_flake_detection_count + TELEMETRY_NAMESPACE.CIVISIBILITY, + EARLY_FLAKE_DETECTION_TELEMETRY.RESPONSE_TESTS.value, + early_flake_detection_count, ) diff --git a/ddtrace/internal/ci_visibility/telemetry/events.py b/ddtrace/internal/ci_visibility/telemetry/events.py index 34c603c3b03..b630ee96413 100644 --- a/ddtrace/internal/ci_visibility/telemetry/events.py +++ b/ddtrace/internal/ci_visibility/telemetry/events.py @@ -3,11 +3,11 @@ from typing import Optional from typing import Tuple -from ddtrace.internal.ci_visibility.telemetry.constants import CIVISIBILITY_TELEMETRY_NAMESPACE as _NAMESPACE from ddtrace.internal.ci_visibility.telemetry.constants import EVENT_TYPES from ddtrace.internal.ci_visibility.telemetry.constants import TEST_FRAMEWORKS from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) @@ -67,7 +67,7 @@ def _record_event( if early_flake_detection_abort_reason and event == EVENTS_TELEMETRY.FINISHED and event_type == EVENT_TYPES.SESSION: 
_tags.append(("early_flake_detection_abort_reason", early_flake_detection_abort_reason)) - telemetry_writer.add_count_metric(_NAMESPACE, event.value, 1, tuple(_tags)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, event.value, 1, tuple(_tags)) def record_event_created( @@ -117,11 +117,19 @@ def record_event_finished( def record_manual_api_event_created(event_type: EVENT_TYPES): # Note: _created suffix is added in cases we were to change the metric name in the future. # The current metric applies to event creation even though it does not specify it - telemetry_writer.add_count_metric(_NAMESPACE, EVENTS_TELEMETRY.MANUAL_API_EVENT, 1, (("event_type", event_type),)) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, + EVENTS_TELEMETRY.MANUAL_API_EVENT, + 1, + (("event_type", event_type),) + ) def record_events_enqueued_for_serialization(events_count: int): - telemetry_writer.add_count_metric(_NAMESPACE, EVENTS_TELEMETRY.ENQUEUED_FOR_SERIALIZATION, events_count) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, + EVENTS_TELEMETRY.ENQUEUED_FOR_SERIALIZATION, + events_count) def record_event_created_test( @@ -139,7 +147,7 @@ def record_event_created_test( if is_benchmark: tags.append(("is_benchmark", "true")) - telemetry_writer.add_count_metric(_NAMESPACE, EVENTS_TELEMETRY.FINISHED, 1, tuple(tags)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, EVENTS_TELEMETRY.FINISHED, 1, tuple(tags)) def record_event_finished_test( @@ -190,4 +198,4 @@ def record_event_finished_test( if is_quarantined: tags.append(("is_quarantined", "true")) - telemetry_writer.add_count_metric(_NAMESPACE, EVENTS_TELEMETRY.FINISHED, 1, tuple(tags)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, EVENTS_TELEMETRY.FINISHED, 1, tuple(tags)) diff --git a/ddtrace/internal/ci_visibility/telemetry/git.py b/ddtrace/internal/ci_visibility/telemetry/git.py index faf01621cde..41bca64a8fd 100644 --- 
a/ddtrace/internal/ci_visibility/telemetry/git.py +++ b/ddtrace/internal/ci_visibility/telemetry/git.py @@ -1,11 +1,11 @@ from typing import Optional -from ddtrace.internal.ci_visibility.telemetry.constants import CIVISIBILITY_TELEMETRY_NAMESPACE as _NAMESPACE from ddtrace.internal.ci_visibility.telemetry.constants import ERROR_TYPES from ddtrace.internal.ci_visibility.telemetry.constants import GIT_TELEMETRY from ddtrace.internal.ci_visibility.telemetry.constants import GIT_TELEMETRY_COMMANDS from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) @@ -14,35 +14,45 @@ def record_git_command(command: GIT_TELEMETRY_COMMANDS, duration: float, exit_code: Optional[int]) -> None: log.debug("Recording git command telemetry: %s, %s, %s", command, duration, exit_code) tags = (("command", command),) - telemetry_writer.add_count_metric(_NAMESPACE, GIT_TELEMETRY.COMMAND_COUNT, 1, tags) - telemetry_writer.add_distribution_metric(_NAMESPACE, GIT_TELEMETRY.COMMAND_MS, duration, tags) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.COMMAND_COUNT, 1, tags) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.COMMAND_MS, duration, tags) if exit_code is not None and exit_code != 0: error_tags = (("command", command), ("exit_code", str(exit_code))) - telemetry_writer.add_count_metric(_NAMESPACE, GIT_TELEMETRY.COMMAND_ERRORS, 1, error_tags) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.COMMAND_ERRORS, 1, error_tags) def record_search_commits(duration: float, error: Optional[ERROR_TYPES] = None) -> None: log.debug("Recording search commits telemetry: %s, %s", duration, error) - telemetry_writer.add_count_metric(_NAMESPACE, GIT_TELEMETRY.SEARCH_COMMITS_COUNT, 1) - telemetry_writer.add_distribution_metric(_NAMESPACE, 
GIT_TELEMETRY.SEARCH_COMMITS_MS, duration) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.SEARCH_COMMITS_COUNT, 1) + telemetry_writer.add_distribution_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.SEARCH_COMMITS_MS, duration + ) if error is not None: error_tags = (("error_type", str(error)),) - telemetry_writer.add_count_metric(_NAMESPACE, GIT_TELEMETRY.SEARCH_COMMITS_ERRORS, 1, error_tags) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.SEARCH_COMMITS_ERRORS, 1, error_tags + ) def record_objects_pack_request(duration: float, error: Optional[ERROR_TYPES] = None) -> None: log.debug("Recording objects pack request telmetry: %s, %s", duration, error) - telemetry_writer.add_count_metric(_NAMESPACE, GIT_TELEMETRY.OBJECTS_PACK_COUNT, 1) - telemetry_writer.add_distribution_metric(_NAMESPACE, GIT_TELEMETRY.OBJECTS_PACK_MS, duration) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.OBJECTS_PACK_COUNT, 1) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.OBJECTS_PACK_MS, duration) if error is not None: error_tags = (("error", error),) - telemetry_writer.add_count_metric(_NAMESPACE, GIT_TELEMETRY.OBJECTS_PACK_ERRORS, 1, error_tags) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.OBJECTS_PACK_ERRORS, 1, error_tags + ) def record_objects_pack_data(num_files: int, num_bytes: int) -> None: log.debug("Recording objects pack data telemetry: %s, %s", num_files, num_bytes) - telemetry_writer.add_distribution_metric(_NAMESPACE, GIT_TELEMETRY.OBJECTS_PACK_BYTES, num_bytes) - telemetry_writer.add_distribution_metric(_NAMESPACE, GIT_TELEMETRY.OBJECTS_PACK_FILES, num_files) + telemetry_writer.add_distribution_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.OBJECTS_PACK_BYTES, num_bytes + ) + telemetry_writer.add_distribution_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, 
GIT_TELEMETRY.OBJECTS_PACK_FILES, num_files + ) def record_settings_response( @@ -87,4 +97,6 @@ def record_settings_response( response_tags.append(("quarantine_enabled", "true")) if response_tags: - telemetry_writer.add_count_metric(_NAMESPACE, GIT_TELEMETRY.SETTINGS_RESPONSE, 1, tuple(response_tags)) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, GIT_TELEMETRY.SETTINGS_RESPONSE, 1, tuple(response_tags) + ) diff --git a/ddtrace/internal/ci_visibility/telemetry/itr.py b/ddtrace/internal/ci_visibility/telemetry/itr.py index 210a4103734..b8bf6889471 100644 --- a/ddtrace/internal/ci_visibility/telemetry/itr.py +++ b/ddtrace/internal/ci_visibility/telemetry/itr.py @@ -2,10 +2,10 @@ import functools from ddtrace.internal.ci_visibility.constants import SUITE -from ddtrace.internal.ci_visibility.telemetry.constants import CIVISIBILITY_TELEMETRY_NAMESPACE as _NAMESPACE from ddtrace.internal.ci_visibility.telemetry.constants import EVENT_TYPES from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) @@ -40,18 +40,24 @@ def wrapper(event_type: str): @_enforce_event_is_test_or_suite def record_itr_skipped(event_type: EVENT_TYPES): log.debug("Recording itr skipped telemetry for %s", event_type) - telemetry_writer.add_count_metric(_NAMESPACE, ITR_TELEMETRY.SKIPPED, 1, (("event_type", event_type.value),)) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, ITR_TELEMETRY.SKIPPED, 1, (("event_type", event_type.value),) + ) @_enforce_event_is_test_or_suite def record_itr_unskippable(event_type: EVENT_TYPES): log.debug("Recording itr unskippable telemetry for %s", event_type) - telemetry_writer.add_count_metric(_NAMESPACE, ITR_TELEMETRY.UNSKIPPABLE, 1, (("event_type", event_type.value),)) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, ITR_TELEMETRY.UNSKIPPABLE, 1, 
(("event_type", event_type.value),) + ) def record_itr_forced_run(event_type: EVENT_TYPES): log.debug("Recording itr forced run telemetry for %s", event_type) - telemetry_writer.add_count_metric(_NAMESPACE, ITR_TELEMETRY.FORCED_RUN, 1, (("event_type", event_type.value),)) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, ITR_TELEMETRY.FORCED_RUN, 1, (("event_type", event_type.value),) + ) def record_skippable_count(skippable_count: int, skipping_level: str): @@ -60,4 +66,4 @@ def record_skippable_count(skippable_count: int, skipping_level: str): if skipping_level == SUITE else SKIPPABLE_TESTS_TELEMETRY.RESPONSE_TESTS ) - telemetry_writer.add_count_metric(_NAMESPACE, skippable_count_metric, skippable_count) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.CIVISIBILITY, skippable_count_metric, skippable_count) diff --git a/ddtrace/internal/ci_visibility/telemetry/payload.py b/ddtrace/internal/ci_visibility/telemetry/payload.py index 1cf41d306ff..f5dd7a9ca00 100644 --- a/ddtrace/internal/ci_visibility/telemetry/payload.py +++ b/ddtrace/internal/ci_visibility/telemetry/payload.py @@ -1,8 +1,8 @@ from enum import Enum -from ddtrace.internal.ci_visibility.telemetry.constants import CIVISIBILITY_TELEMETRY_NAMESPACE as _NAMESPACE from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) @@ -31,38 +31,46 @@ class REQUEST_ERROR_TYPE(str, Enum): def record_endpoint_payload_bytes(endpoint: ENDPOINT, nbytes: int) -> None: log.debug("Recording endpoint payload bytes: %s, %s", endpoint, nbytes) tags = (("endpoint", endpoint.value),) - telemetry_writer.add_distribution_metric(_NAMESPACE, ENDPOINT_PAYLOAD_TELEMETRY.BYTES.value, nbytes, tags) + telemetry_writer.add_distribution_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, ENDPOINT_PAYLOAD_TELEMETRY.BYTES.value, nbytes, tags + ) def 
record_endpoint_payload_request(endpoint: ENDPOINT) -> None: log.debug("Recording endpoint payload request: %s", endpoint) tags = (("endpoint", endpoint.value),) - telemetry_writer.add_count_metric(_NAMESPACE, ENDPOINT_PAYLOAD_TELEMETRY.REQUESTS_COUNT.value, 1, tags) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, ENDPOINT_PAYLOAD_TELEMETRY.REQUESTS_COUNT.value, 1, tags + ) def record_endpoint_payload_request_time(endpoint: ENDPOINT, seconds: float) -> None: log.debug("Recording endpoint payload request time: %s, %s seconds", endpoint, seconds) tags = (("endpoint", endpoint.value),) telemetry_writer.add_distribution_metric( - _NAMESPACE, ENDPOINT_PAYLOAD_TELEMETRY.REQUESTS_MS.value, seconds * 1000, tags + TELEMETRY_NAMESPACE.CIVISIBILITY, ENDPOINT_PAYLOAD_TELEMETRY.REQUESTS_MS.value, seconds * 1000, tags ) def record_endpoint_payload_request_error(endpoint: ENDPOINT, error_type: REQUEST_ERROR_TYPE) -> None: log.debug("Recording endpoint payload request error: %s, %s", endpoint, error_type) tags = (("endpoint", endpoint.value), ("error_type", error_type)) - telemetry_writer.add_count_metric(_NAMESPACE, ENDPOINT_PAYLOAD_TELEMETRY.REQUESTS_ERRORS.value, 1, tags) + telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, ENDPOINT_PAYLOAD_TELEMETRY.REQUESTS_ERRORS.value, 1, tags + ) def record_endpoint_payload_events_count(endpoint: ENDPOINT, count: int) -> None: log.debug("Recording endpoint payload events count: %s, %s", endpoint, count) tags = (("endpoint", endpoint.value),) - telemetry_writer.add_distribution_metric(_NAMESPACE, ENDPOINT_PAYLOAD_TELEMETRY.EVENTS_COUNT.value, count, tags) + telemetry_writer.add_distribution_metric( + TELEMETRY_NAMESPACE.CIVISIBILITY, ENDPOINT_PAYLOAD_TELEMETRY.EVENTS_COUNT.value, count, tags + ) def record_endpoint_payload_events_serialization_time(endpoint: ENDPOINT, seconds: float) -> None: log.debug("Recording endpoint payload serialization time: %s, %s seconds", endpoint, seconds) tags = 
(("endpoint", endpoint.value),) telemetry_writer.add_distribution_metric( - _NAMESPACE, ENDPOINT_PAYLOAD_TELEMETRY.EVENTS_SERIALIZATION_MS.value, seconds * 1000, tags + TELEMETRY_NAMESPACE.CIVISIBILITY, ENDPOINT_PAYLOAD_TELEMETRY.EVENTS_SERIALIZATION_MS.value, seconds * 1000, tags ) diff --git a/ddtrace/internal/constants.py b/ddtrace/internal/constants.py index 4efdc754ef3..c4255035c41 100644 --- a/ddtrace/internal/constants.py +++ b/ddtrace/internal/constants.py @@ -19,6 +19,10 @@ _PROPAGATION_STYLE_NONE, _PROPAGATION_STYLE_BAGGAGE, ) +_PROPAGATION_BEHAVIOR_CONTINUE = "continue" +_PROPAGATION_BEHAVIOR_IGNORE = "ignore" +_PROPAGATION_BEHAVIOR_RESTART = "restart" +_PROPAGATION_BEHAVIOR_DEFAULT = _PROPAGATION_BEHAVIOR_CONTINUE W3C_TRACESTATE_KEY = "tracestate" W3C_TRACEPARENT_KEY = "traceparent" W3C_TRACESTATE_PARENT_ID_KEY = "p" diff --git a/ddtrace/internal/telemetry/constants.py b/ddtrace/internal/telemetry/constants.py index 3298fdd7616..a809b5f2f4f 100644 --- a/ddtrace/internal/telemetry/constants.py +++ b/ddtrace/internal/telemetry/constants.py @@ -1,9 +1,13 @@ from enum import Enum -TELEMETRY_NAMESPACE_TAG_TRACER = "tracers" -TELEMETRY_NAMESPACE_TAG_APPSEC = "appsec" -TELEMETRY_NAMESPACE_TAG_IAST = "iast" +class TELEMETRY_NAMESPACE(Enum): + TRACERS = "tracers" + APPSEC = "appsec" + IAST = "iast" + CIVISIBILITY = "civisibility" + MLOBS = "mlobs" + TELEMETRY_TYPE_GENERATE_METRICS = "generate-metrics" TELEMETRY_TYPE_DISTRIBUTION = "distributions" diff --git a/ddtrace/internal/telemetry/metrics_namespaces.py b/ddtrace/internal/telemetry/metrics_namespaces.py index 927f6de775d..4b432ba330c 100644 --- a/ddtrace/internal/telemetry/metrics_namespaces.py +++ b/ddtrace/internal/telemetry/metrics_namespaces.py @@ -5,6 +5,7 @@ from typing import Type # noqa:F401 from ddtrace.internal import forksafe +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.internal.telemetry.constants import TELEMETRY_TYPE_DISTRIBUTION from 
ddtrace.internal.telemetry.constants import TELEMETRY_TYPE_GENERATE_METRICS from ddtrace.internal.telemetry.metrics import DistributionMetric @@ -34,23 +35,31 @@ def flush(self): } return namespace_metrics - def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None): - # type: (Type[Metric], str, str, float, MetricTagType, Optional[float]) -> None + def add_metric( + self, + metric_class: Type[Metric], + namespace: TELEMETRY_NAMESPACE, + name: str, + value: float = 1.0, + tags: MetricTagType = None, + interval: Optional[float] = None, + ) -> None: """ Telemetry Metrics are stored in DD dashboards, check the metrics in datadoghq.com/metric/explorer. The metric will store in dashboard as "dd.instrumentation_telemetry_data." + namespace + "." + name """ - metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type) + namespace_str = namespace.value + metric_id = Metric.get_id(name, namespace_str, tags, metric_class.metric_type) if metric_class is DistributionMetric: metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION else: metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS with self._lock: - existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id) + existing_metric = self._metrics_data[metrics_type_payload][namespace_str].get(metric_id) if existing_metric: existing_metric.add_point(value) else: - new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval) + new_metric = metric_class(namespace_str, name, tags=tags, common=True, interval=interval) new_metric.add_point(value) - self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric + self._metrics_data[metrics_type_payload][namespace_str][metric_id] = new_metric diff --git a/ddtrace/internal/telemetry/writer.py b/ddtrace/internal/telemetry/writer.py index 2be240c06fd..35a73d5e235 100644 --- a/ddtrace/internal/telemetry/writer.py +++ b/ddtrace/internal/telemetry/writer.py @@ -31,6 +31,7 @@ from . 
import modules from .constants import TELEMETRY_APM_PRODUCT from .constants import TELEMETRY_LOG_LEVEL # noqa:F401 +from .constants import TELEMETRY_NAMESPACE from .constants import TELEMETRY_TYPE_DISTRIBUTION from .constants import TELEMETRY_TYPE_GENERATE_METRICS from .constants import TELEMETRY_TYPE_LOGS @@ -337,7 +338,7 @@ def _app_started(self, register_app_shutdown=True): } # SOABI should help us identify which wheels people are getting from PyPI - self.add_configurations(get_python_config_vars()) # type: ignore + self.add_configurations(get_python_config_vars()) payload = { "configuration": self._flush_configuration_queue(), @@ -474,7 +475,6 @@ def add_configuration(self, configuration_name, configuration_value, origin="unk } def add_configurations(self, configuration_list): - # type: (List[Tuple[str, Union[bool, float, str], str]]) -> None """Creates and queues a list of configurations""" with self._service_lock: for name, value, _origin in configuration_list: @@ -485,7 +485,6 @@ def add_configurations(self, configuration_list): } def add_log(self, level, message, stack_trace="", tags=None): - # type: (TELEMETRY_LOG_LEVEL, str, str, Optional[Dict]) -> None """ Queues log. This event is meant to send library logs to Datadog’s backend through the Telemetry intake. This will make support cycles easier and ensure we know about potentially silent issues in libraries. 
@@ -507,8 +506,7 @@ def add_log(self, level, message, stack_trace="", tags=None): data["stack_trace"] = stack_trace self._logs.add(data) - def add_gauge_metric(self, namespace, name, value, tags=None): - # type: (str,str, float, MetricTagType) -> None + def add_gauge_metric(self, namespace: TELEMETRY_NAMESPACE, name: str, value: float, tags: MetricTagType = None): """ Queues gauge metric """ @@ -522,8 +520,7 @@ def add_gauge_metric(self, namespace, name, value, tags=None): self.interval, ) - def add_rate_metric(self, namespace, name, value=1.0, tags=None): - # type: (str,str, float, MetricTagType) -> None + def add_rate_metric(self, namespace: TELEMETRY_NAMESPACE, name: str, value: float, tags: MetricTagType = None): """ Queues rate metric """ @@ -537,8 +534,7 @@ def add_rate_metric(self, namespace, name, value=1.0, tags=None): self.interval, ) - def add_count_metric(self, namespace, name, value=1.0, tags=None): - # type: (str,str, float, MetricTagType) -> None + def add_count_metric(self, namespace: TELEMETRY_NAMESPACE, name: str, value: int = 1, tags: MetricTagType = None): """ Queues count metric """ @@ -551,8 +547,7 @@ def add_count_metric(self, namespace, name, value=1.0, tags=None): tags, ) - def add_distribution_metric(self, namespace, name, value=1.0, tags=None): - # type: (str,str, float, MetricTagType) -> None + def add_distribution_metric(self, namespace: TELEMETRY_NAMESPACE, name: str, value, tags: MetricTagType = None): """ Queues distributions metric """ @@ -708,7 +703,7 @@ def _telemetry_excepthook(self, tp, value, root_traceback): internal_index = dir_parts.index("internal") integration_name = dir_parts[internal_index + 1] self.add_count_metric( - "tracers", + TELEMETRY_NAMESPACE.TRACERS, "integration_errors", 1, (("integration_name", integration_name), ("error_type", tp.__name__)), diff --git a/ddtrace/llmobs/_constants.py b/ddtrace/llmobs/_constants.py index 27000b36aac..57cbd5bb179 100644 --- a/ddtrace/llmobs/_constants.py +++ 
b/ddtrace/llmobs/_constants.py @@ -61,3 +61,6 @@ FAITHFULNESS_DISAGREEMENTS_METADATA = "_dd.faithfulness_disagreements" EVALUATION_KIND_METADATA = "_dd.evaluation_kind" EVALUATION_SPAN_METADATA = "_dd.evaluation_span" + +SPAN_LINKS = "_ml_obs.span_links" +NAME = "_ml_obs.name" diff --git a/ddtrace/llmobs/_evaluators/ragas/answer_relevancy.py b/ddtrace/llmobs/_evaluators/ragas/answer_relevancy.py new file mode 100644 index 00000000000..9a640e08454 --- /dev/null +++ b/ddtrace/llmobs/_evaluators/ragas/answer_relevancy.py @@ -0,0 +1,146 @@ +import math +from typing import Optional +from typing import Tuple +from typing import Union + +from ddtrace.internal.logger import get_logger +from ddtrace.llmobs._constants import EVALUATION_SPAN_METADATA +from ddtrace.llmobs._evaluators.ragas.base import BaseRagasEvaluator +from ddtrace.llmobs._evaluators.ragas.base import _get_ml_app_for_ragas_trace + + +logger = get_logger(__name__) + + +class RagasAnswerRelevancyEvaluator(BaseRagasEvaluator): + """A class used by EvaluatorRunner to conduct ragas answer relevancy evaluations + on LLM Observability span events. The job of an Evaluator is to take a span and + submit evaluation metrics based on the span's attributes. + """ + + LABEL = "ragas_answer_relevancy" + METRIC_TYPE = "score" + + def __init__(self, llmobs_service): + """ + Initialize an evaluator that uses the ragas library to generate a context precision score on finished LLM spans. + + answer relevancy focuses on assessing how pertinent the generated answer is to a given question. + A lower score is assigned to answers that are incomplete or contain redundant information and higher scores + indicate better relevancy. This metric is computed using the question, contexts, and answer. + + For more information, see https://docs.ragas.io/en/latest/concepts/metrics/available_metrics/answer_relevance/ + + The `ragas.metrics.answer_relevancy` instance is used for answer relevancy scores. 
+ If there is no llm attribute set on this instance, it will be set to the + default `llm_factory()` from ragas which uses openai. + If there is no embedding attribute set on this instance, it will be to to the + default `embedding_factory()` from ragas which uses openai + + :param llmobs_service: An instance of the LLM Observability service used for tracing the evaluation and + submitting evaluation metrics. + + Raises: NotImplementedError if the ragas library is not found or if ragas version is not supported. + """ + super().__init__(llmobs_service) + self.ragas_answer_relevancy_instance = self._get_answer_relevancy_instance() + self.answer_relevancy_output_parser = self.ragas_dependencies.RagasoutputParser( + pydantic_object=self.ragas_dependencies.AnswerRelevanceClassification + ) + + def _get_answer_relevancy_instance(self): + """ + This helper function ensures the answer relevancy instance used in + ragas evaluator is updated with the latest ragas answer relevancy instance + instance AND has an non-null llm + """ + if self.ragas_dependencies.answer_relevancy is None: + return None + ragas_answer_relevancy_instance = self.ragas_dependencies.answer_relevancy + if not ragas_answer_relevancy_instance.llm: + ragas_answer_relevancy_instance.llm = self.ragas_dependencies.llm_factory() + if not ragas_answer_relevancy_instance.embeddings: + ragas_answer_relevancy_instance.embeddings = self.ragas_dependencies.embedding_factory() + return ragas_answer_relevancy_instance + + def evaluate(self, span_event: dict) -> Tuple[Union[float, str], Optional[dict]]: + """ + Performs a answer relevancy evaluation on an llm span event, returning either + - answer relevancy score (float) OR failure reason (str) + - evaluation metadata (dict) + If the ragas answer relevancy instance does not have `llm` set, we set `llm` using the `llm_factory()` + method from ragas which currently defaults to openai's gpt-4o-turbo. 
+ """ + self.ragas_answer_relevancy_instance = self._get_answer_relevancy_instance() + if not self.ragas_answer_relevancy_instance: + return "fail_answer_relevancy_is_none", {} + + evaluation_metadata = {} # type: dict[str, Union[str, dict, list]] + trace_metadata = {} # type: dict[str, Union[str, dict, list]] + + # initialize data we annotate for tracing ragas + score, answer_classifications = (math.nan, None) + + with self.llmobs_service.workflow( + "dd-ragas.answer_relevancy", ml_app=_get_ml_app_for_ragas_trace(span_event) + ) as ragas_ar_workflow: + try: + evaluation_metadata[EVALUATION_SPAN_METADATA] = self.llmobs_service.export_span(span=ragas_ar_workflow) + + answer_relevancy_inputs = self._extract_evaluation_inputs_from_span(span_event) + if answer_relevancy_inputs is None: + logger.debug( + "Failed to extract question and contexts from " + "span sampled for `ragas_answer_relevancy` evaluation" + ) + return "fail_extract_answer_relevancy_inputs", evaluation_metadata + + prompt = self.ragas_answer_relevancy_instance.question_generation.format( + answer=answer_relevancy_inputs["answer"], + context="\n".join(answer_relevancy_inputs["contexts"]), + ) + + trace_metadata["strictness"] = self.ragas_answer_relevancy_instance.strictness + result = self.ragas_answer_relevancy_instance.llm.generate_text( + prompt, n=self.ragas_answer_relevancy_instance.strictness + ) + + try: + answers = [self.answer_relevancy_output_parser.parse(res.text) for res in result.generations[0]] + answers = [answer for answer in answers if answer is not None] + except Exception as e: + logger.debug("Failed to parse answer relevancy output: %s", e) + return "fail_parse_answer_relevancy_output", evaluation_metadata + + gen_questions = [answer.question for answer in answers] + answer_classifications = [ + {"question": answer.question, "noncommittal": answer.noncommittal} for answer in answers + ] + trace_metadata["answer_classifications"] = answer_classifications + if all(q == "" for q in 
gen_questions): + logger.warning("Invalid JSON response. Expected dictionary with key 'question'") + return "fail_parse_answer_relevancy_output", evaluation_metadata + + # calculate cosine similarity between the question and generated questions + with self.llmobs_service.workflow("dd-ragas.calculate_similarity") as ragas_cs_workflow: + cosine_sim = self.ragas_answer_relevancy_instance.calculate_similarity( + answer_relevancy_inputs["question"], gen_questions + ) + self.llmobs_service.annotate( + span=ragas_cs_workflow, + input_data={ + "question": answer_relevancy_inputs["question"], + "generated_questions": gen_questions, + }, + output_data=cosine_sim.mean(), + ) + + score = cosine_sim.mean() * int(not any(answer.noncommittal for answer in answers)) + return score, evaluation_metadata + finally: + self.llmobs_service.annotate( + span=ragas_ar_workflow, + input_data=span_event, + output_data=score, + metadata=trace_metadata, + ) diff --git a/ddtrace/llmobs/_evaluators/ragas/base.py b/ddtrace/llmobs/_evaluators/ragas/base.py index 23aa4cd3caa..798c8e2fccc 100644 --- a/ddtrace/llmobs/_evaluators/ragas/base.py +++ b/ddtrace/llmobs/_evaluators/ragas/base.py @@ -6,8 +6,8 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer -from ddtrace.internal.telemetry.constants import TELEMETRY_APM_PRODUCT from ddtrace.internal.telemetry.constants import TELEMETRY_LOG_LEVEL +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.internal.utils.version import parse_version from ddtrace.llmobs._constants import INTERNAL_CONTEXT_VARIABLE_KEYS from ddtrace.llmobs._constants import INTERNAL_QUERY_VARIABLE_KEYS @@ -26,8 +26,10 @@ class RagasDependencies: def __init__(self): import ragas - self.ragas_version = parse_version(ragas.__version__) - if self.ragas_version >= (0, 2, 0) or self.ragas_version < (0, 1, 10): + self.ragas_version = ragas.__version__ # type: str + + parsed_version = 
parse_version(ragas.__version__) + if parsed_version >= (0, 2, 0) or parsed_version < (0, 1, 10): raise NotImplementedError( "Ragas version: {} is not supported".format(self.ragas_version), ) @@ -56,6 +58,18 @@ def __init__(self): self.get_segmenter = get_segmenter + from ragas.metrics import answer_relevancy + + self.answer_relevancy = answer_relevancy + + from ragas.embeddings import embedding_factory + + self.embedding_factory = embedding_factory + + from ddtrace.llmobs._evaluators.ragas.models import ContextPrecisionVerification + + self.ContextPrecisionVerification = ContextPrecisionVerification + from ddtrace.llmobs._evaluators.ragas.models import StatementFaithfulnessAnswers self.StatementFaithfulnessAnswers = StatementFaithfulnessAnswers @@ -64,6 +78,10 @@ def __init__(self): self.StatementsAnswers = StatementsAnswers + from ddtrace.llmobs._evaluators.ragas.models import AnswerRelevanceClassification + + self.AnswerRelevanceClassification = AnswerRelevanceClassification + def _get_ml_app_for_ragas_trace(span_event: dict) -> str: """ @@ -121,7 +139,7 @@ def __init__(self, llmobs_service): raise NotImplementedError("Failed to load dependencies for `{}` evaluator".format(self.LABEL)) from e finally: telemetry_writer.add_count_metric( - namespace=TELEMETRY_APM_PRODUCT.LLMOBS, + namespace=TELEMETRY_NAMESPACE.MLOBS, name="evaluators.init", value=1, tags=( @@ -143,7 +161,7 @@ def run_and_submit_evaluation(self, span_event: dict): return score_result_or_failure, metric_metadata = self.evaluate(span_event) telemetry_writer.add_count_metric( - TELEMETRY_APM_PRODUCT.LLMOBS, + TELEMETRY_NAMESPACE.MLOBS, "evaluators.run", 1, tags=( diff --git a/ddtrace/llmobs/_evaluators/ragas/context_precision.py b/ddtrace/llmobs/_evaluators/ragas/context_precision.py new file mode 100644 index 00000000000..990302931c8 --- /dev/null +++ b/ddtrace/llmobs/_evaluators/ragas/context_precision.py @@ -0,0 +1,153 @@ +import math +from typing import Optional +from typing import Tuple +from 
typing import Union + +from ddtrace.internal.logger import get_logger +from ddtrace.llmobs._constants import EVALUATION_KIND_METADATA +from ddtrace.llmobs._constants import EVALUATION_SPAN_METADATA +from ddtrace.llmobs._evaluators.ragas.base import BaseRagasEvaluator +from ddtrace.llmobs._evaluators.ragas.base import _get_ml_app_for_ragas_trace + + +logger = get_logger(__name__) + + +class RagasContextPrecisionEvaluator(BaseRagasEvaluator): + """ + A class used by EvaluatorRunner to conduct ragas context precision evaluations + on LLM Observability span events. + """ + + LABEL = "ragas_context_precision" + METRIC_TYPE = "score" + + def __init__(self, llmobs_service): + """ + Initialize an evaluator that uses the ragas library to generate a context precision score on finished LLM spans. + + Context Precision is a metric that verifies if the context was useful in arriving at the given answer. + We compute this by dividing the number of relevant contexts by the total number of contexts. + Note that this is slightly modified from the original context precision metric in ragas, which computes + the mean of the precision @ rank k for each chunk in the context (where k is the number of + retrieved context chunks). + + For more information, see https://docs.ragas.io/en/latest/concepts/metrics/available_metrics/context_precision/ + + The `ragas.metrics.context_precision` instance is used for context precision scores. + If there is no llm attribute set on this instance, it will be set to the + default `llm_factory()` which uses openai. + + :param llmobs_service: An instance of the LLM Observability service used for tracing the evaluation and + submitting evaluation metrics. + + Raises: NotImplementedError if the ragas library is not found or if ragas version is not supported. 
+ """ + super().__init__(llmobs_service) + self.ragas_context_precision_instance = self._get_context_precision_instance() + self.context_precision_output_parser = self.ragas_dependencies.RagasoutputParser( + pydantic_object=self.ragas_dependencies.ContextPrecisionVerification + ) + + def _get_context_precision_instance(self): + """ + This helper function ensures the context precision instance used in + ragas evaluator is updated with the latest ragas context precision instance + instance AND has an non-null llm + """ + if self.ragas_dependencies.context_precision is None: + return None + ragas_context_precision_instance = self.ragas_dependencies.context_precision + if not ragas_context_precision_instance.llm: + ragas_context_precision_instance.llm = self.ragas_dependencies.llm_factory() + return ragas_context_precision_instance + + def evaluate(self, span_event: dict) -> Tuple[Union[float, str], Optional[dict]]: + """ + Performs a context precision evaluation on an llm span event, returning either + - context precision score (float) OR failure reason (str) + - evaluation metadata (dict) + If the ragas context precision instance does not have `llm` set, we set `llm` using the `llm_factory()` + method from ragas which currently defaults to openai's gpt-4o-turbo. 
+ """ + self.ragas_context_precision_instance = self._get_context_precision_instance() + if not self.ragas_context_precision_instance: + return "fail_context_precision_is_none", {} + + evaluation_metadata = {EVALUATION_KIND_METADATA: "context_precision"} # type: dict[str, Union[str, dict, list]] + + # initialize data we annotate for tracing ragas + score = math.nan + + with self.llmobs_service.workflow( + "dd-ragas.context_precision", ml_app=_get_ml_app_for_ragas_trace(span_event) + ) as ragas_cp_workflow: + try: + evaluation_metadata[EVALUATION_SPAN_METADATA] = self.llmobs_service.export_span(span=ragas_cp_workflow) + + ctx_precision_inputs = self._extract_evaluation_inputs_from_span(span_event) + if ctx_precision_inputs is None: + logger.debug( + "Failed to extract evaluation inputs from " + "span sampled for `ragas_context_precision` evaluation" + ) + return "fail_extract_context_precision_inputs", evaluation_metadata + + # create a prompt to evaluate the relevancy of each context chunk + context_precision_prompts = [ + self.ragas_context_precision_instance.context_precision_prompt.format( + question=ctx_precision_inputs["question"], + context=c, + answer=ctx_precision_inputs["answer"], + ) + for c in ctx_precision_inputs["contexts"] + ] + + responses = [] + + for prompt in context_precision_prompts: + result = self.ragas_context_precision_instance.llm.generate_text(prompt) + reproducibility = getattr(self.ragas_context_precision_instance, "_reproducibility", 1) + + results = [result.generations[0][i].text for i in range(reproducibility)] + try: + responses.append( + [ + res.dict() + for res in [self.context_precision_output_parser.parse(text) for text in results] + if res is not None + ] + ) + except Exception as e: + logger.debug( + "Failed to parse context precision verification for `ragas_context_precision`", + exc_info=e, + ) + return "fail_context_precision_parsing", evaluation_metadata + + answers = [] + for response in responses: + agg_answer = 
self.ragas_dependencies.ensembler.from_discrete([response], "verdict") + if agg_answer: + try: + agg_answer = self.ragas_dependencies.ContextPrecisionVerification.parse_obj(agg_answer[0]) + except Exception as e: + logger.debug( + "Failed to parse context precision verification for `ragas_context_precision`", + exc_info=e, + ) + return "fail_context_precision_parsing", evaluation_metadata + answers.append(agg_answer) + + if len(answers) == 0: + return "fail_no_answers", evaluation_metadata + + verdict_list = [1 if ver.verdict else 0 for ver in answers] + score = sum(verdict_list) / len(verdict_list) + return score, evaluation_metadata + finally: + self.llmobs_service.annotate( + span=ragas_cp_workflow, + input_data=span_event, + output_data=score, + ) diff --git a/ddtrace/llmobs/_evaluators/ragas/models.py b/ddtrace/llmobs/_evaluators/ragas/models.py index 5ee4d433c33..c5b37ee2b7f 100644 --- a/ddtrace/llmobs/_evaluators/ragas/models.py +++ b/ddtrace/llmobs/_evaluators/ragas/models.py @@ -11,6 +11,18 @@ """ +class AnswerRelevanceClassification(BaseModel): + question: str + noncommittal: int + + +class ContextPrecisionVerification(BaseModel): + """Answer for the verification task whether the context was useful.""" + + reason: str = Field(..., description="Reason for verification") + verdict: int = Field(..., description="Binary (0/1) verdict of verification") + + class StatementFaithfulnessAnswer(BaseModel): statement: str = Field(..., description="the original statement, word-by-word") reason: str = Field(..., description="the reason of the verdict") diff --git a/ddtrace/llmobs/_evaluators/runner.py b/ddtrace/llmobs/_evaluators/runner.py index 3d26998f1b4..056a80000e4 100644 --- a/ddtrace/llmobs/_evaluators/runner.py +++ b/ddtrace/llmobs/_evaluators/runner.py @@ -7,7 +7,9 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.periodic import PeriodicService from ddtrace.internal.telemetry import telemetry_writer -from 
ddtrace.internal.telemetry.constants import TELEMETRY_APM_PRODUCT +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE +from ddtrace.llmobs._evaluators.ragas.answer_relevancy import RagasAnswerRelevancyEvaluator +from ddtrace.llmobs._evaluators.ragas.context_precision import RagasContextPrecisionEvaluator from ddtrace.llmobs._evaluators.ragas.faithfulness import RagasFaithfulnessEvaluator from ddtrace.llmobs._evaluators.sampler import EvaluatorRunnerSampler @@ -17,6 +19,8 @@ SUPPORTED_EVALUATORS = { RagasFaithfulnessEvaluator.LABEL: RagasFaithfulnessEvaluator, + RagasAnswerRelevancyEvaluator.LABEL: RagasAnswerRelevancyEvaluator, + RagasContextPrecisionEvaluator.LABEL: RagasContextPrecisionEvaluator, } @@ -56,7 +60,7 @@ def __init__(self, interval: float, llmobs_service=None, evaluators=None): raise e finally: telemetry_writer.add_count_metric( - namespace=TELEMETRY_APM_PRODUCT.LLMOBS, + namespace=TELEMETRY_NAMESPACE.MLOBS, name="evaluators.init", value=1, tags=( @@ -111,20 +115,12 @@ def periodic(self, _wait_sync=False) -> None: self._buffer = [] try: - if not _wait_sync: - for evaluator in self.evaluators: - self.executor.map( - lambda span_event: evaluator.run_and_submit_evaluation(span_event), - [ - span_event - for span_event, span in span_events_and_spans - if self.sampler.sample(evaluator.LABEL, span) - ], - ) - else: - for evaluator in self.evaluators: - for span_event, span in span_events_and_spans: - if self.sampler.sample(evaluator.LABEL, span): + for evaluator in self.evaluators: + for span_event, span in span_events_and_spans: + if self.sampler.sample(evaluator.LABEL, span): + if not _wait_sync: + self.executor.submit(evaluator.run_and_submit_evaluation, span_event) + else: evaluator.run_and_submit_evaluation(span_event) except RuntimeError as e: logger.debug("failed to run evaluation: %s", e) diff --git a/ddtrace/llmobs/_evaluators/sampler.py b/ddtrace/llmobs/_evaluators/sampler.py index 9dcb0759724..3598e90f7f3 100644 --- 
a/ddtrace/llmobs/_evaluators/sampler.py +++ b/ddtrace/llmobs/_evaluators/sampler.py @@ -9,8 +9,8 @@ from ddtrace._trace.sampling_rule import SamplingRule from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import telemetry_writer -from ddtrace.internal.telemetry.constants import TELEMETRY_APM_PRODUCT from ddtrace.internal.telemetry.constants import TELEMETRY_LOG_LEVEL +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE logger = get_logger(__name__) @@ -67,7 +67,7 @@ def parsing_failed_because(msg, maybe_throw_this): TELEMETRY_LOG_LEVEL.ERROR, message="Evaluator sampling parsing failure because: {}".format(msg) ) telemetry_writer.add_count_metric( - namespace=TELEMETRY_APM_PRODUCT.LLMOBS, + namespace=TELEMETRY_NAMESPACE.MLOBS, name="evaluators.error", value=1, tags=(("reason", "sampling_rule_parsing_failure"),), @@ -104,7 +104,7 @@ def parsing_failed_because(msg, maybe_throw_this): span_name = rule.get(EvaluatorRunnerSamplingRule.SPAN_NAME_KEY, SamplingRule.NO_RULE) evaluator_label = rule.get(EvaluatorRunnerSamplingRule.EVALUATOR_LABEL_KEY, SamplingRule.NO_RULE) telemetry_writer.add_distribution_metric( - TELEMETRY_APM_PRODUCT.LLMOBS, + TELEMETRY_NAMESPACE.MLOBS, "evaluators.rule_sample_rate", sample_rate, tags=(("evaluator_label", evaluator_label), ("span_name", span_name)), diff --git a/ddtrace/llmobs/_integrations/base.py b/ddtrace/llmobs/_integrations/base.py index f01c19f173a..a171b22867a 100644 --- a/ddtrace/llmobs/_integrations/base.py +++ b/ddtrace/llmobs/_integrations/base.py @@ -6,7 +6,6 @@ from typing import List # noqa:F401 from typing import Optional # noqa:F401 -from ddtrace import Pin from ddtrace import config from ddtrace._trace.sampler import RateSampler from ddtrace._trace.span import Span @@ -24,6 +23,7 @@ from ddtrace.llmobs._log_writer import V2LogWriter from ddtrace.llmobs._utils import _get_llmobs_parent_id from ddtrace.settings import IntegrationConfig +from ddtrace.trace import Pin log = 
get_logger(__name__) @@ -40,7 +40,9 @@ def __init__(self, integration_config: IntegrationConfig) -> None: self._log_writer = None self._statsd = None self.integration_config = integration_config - self._span_pc_sampler = RateSampler(sample_rate=integration_config.span_prompt_completion_sample_rate) + self._span_pc_sampler = RateSampler( + sample_rate=getattr(integration_config, "span_prompt_completion_sample_rate", 1.0) + ) if self.metrics_enabled: self._statsd = get_dogstatsd_client(get_stats_url(), namespace=self._integration_name) diff --git a/ddtrace/llmobs/_integrations/langchain.py b/ddtrace/llmobs/_integrations/langchain.py index 1fce3d11804..6b09038bd90 100644 --- a/ddtrace/llmobs/_integrations/langchain.py +++ b/ddtrace/llmobs/_integrations/langchain.py @@ -28,6 +28,7 @@ from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration +from ddtrace.llmobs._integrations.utils import format_langchain_io from ddtrace.llmobs.utils import Document @@ -312,10 +313,10 @@ def _llmobs_set_meta_tags_from_chain(self, span: Span, args, kwargs, outputs: An inputs = kwargs formatted_inputs = "" if inputs is not None: - formatted_inputs = self.format_io(inputs) + formatted_inputs = format_langchain_io(inputs) formatted_outputs = "" if not span.error and outputs is not None: - formatted_outputs = self.format_io(outputs) + formatted_outputs = format_langchain_io(outputs) span._set_ctx_items({SPAN_KIND: "workflow", INPUT_VALUE: formatted_inputs, OUTPUT_VALUE: formatted_outputs}) def _llmobs_set_meta_tags_from_embedding( @@ -346,7 +347,7 @@ def _llmobs_set_meta_tags_from_embedding( isinstance(input_texts, list) and all(isinstance(text, str) for text in input_texts) ): if is_workflow: - formatted_inputs = self.format_io(input_texts) + formatted_inputs = format_langchain_io(input_texts) span._set_ctx_item(input_tag_key, formatted_inputs) else: if 
isinstance(input_texts, str): @@ -392,7 +393,7 @@ def _llmobs_set_meta_tags_from_similarity_search( ) input_query = get_argument_value(args, kwargs, 0, "query") if input_query is not None: - formatted_inputs = self.format_io(input_query) + formatted_inputs = format_langchain_io(input_query) span._set_ctx_item(INPUT_VALUE, formatted_inputs) if span.error or not output_documents or not isinstance(output_documents, list): span._set_ctx_item(OUTPUT_VALUE, "") @@ -407,7 +408,7 @@ def _llmobs_set_meta_tags_from_similarity_search( metadata = getattr(d, "metadata", {}) doc["name"] = metadata.get("name", doc["id"]) documents.append(doc) - span._set_ctx_item(OUTPUT_DOCUMENTS, self.format_io(documents)) + span._set_ctx_item(OUTPUT_DOCUMENTS, format_langchain_io(documents)) # we set the value as well to ensure that the UI would display it in case the span was the root span._set_ctx_item(OUTPUT_VALUE, "[{} document(s) retrieved]".format(len(documents))) @@ -420,10 +421,10 @@ def _llmobs_set_meta_tags_from_tool(self, span: Span, tool_inputs: Dict[str, Any metadata["tool_config"] = tool_inputs.get("config") if tool_inputs.get("info"): metadata["tool_info"] = tool_inputs.get("info") - formatted_input = self.format_io(tool_input) + formatted_input = format_langchain_io(tool_input) formatted_outputs = "" if not span.error and tool_output is not None: - formatted_outputs = self.format_io(tool_output) + formatted_outputs = format_langchain_io(tool_output) span._set_ctx_items( { SPAN_KIND: "tool", @@ -536,33 +537,3 @@ def check_token_usage_ai_message(self, ai_message): total_tokens = usage.get("total_tokens", input_tokens + output_tokens) return (input_tokens, output_tokens, total_tokens), run_id_base - - def format_io( - self, - messages, - ): - """ - Formats input and output messages for serialization to JSON. - Specifically, makes sure that any schema messages are converted to strings appropriately. 
- """ - if isinstance(messages, dict): - formatted = {} - for key, value in messages.items(): - formatted[key] = self.format_io(value) - return formatted - if isinstance(messages, list): - return [self.format_io(message) for message in messages] - return self.get_content_from_message(messages) - - def get_content_from_message(self, message) -> str: - """ - Attempts to extract the content and role from a message (AIMessage, HumanMessage, SystemMessage) object. - """ - if isinstance(message, str): - return message - try: - content = getattr(message, "__dict__", {}).get("content", str(message)) - role = getattr(message, "role", ROLE_MAPPING.get(getattr(message, "type"), "")) - return (role, content) if role else content - except AttributeError: - return str(message) diff --git a/ddtrace/llmobs/_integrations/langgraph.py b/ddtrace/llmobs/_integrations/langgraph.py new file mode 100644 index 00000000000..a45e822e0a2 --- /dev/null +++ b/ddtrace/llmobs/_integrations/langgraph.py @@ -0,0 +1,174 @@ +from typing import Any +from typing import Dict +from typing import List +from typing import Optional + +from ddtrace import tracer +from ddtrace.ext import SpanTypes +from ddtrace.internal.utils import get_argument_value +from ddtrace.llmobs._constants import INPUT_VALUE +from ddtrace.llmobs._constants import NAME +from ddtrace.llmobs._constants import OUTPUT_VALUE +from ddtrace.llmobs._constants import SPAN_KIND +from ddtrace.llmobs._constants import SPAN_LINKS +from ddtrace.llmobs._integrations.base import BaseLLMIntegration +from ddtrace.llmobs._integrations.utils import format_langchain_io +from ddtrace.llmobs._utils import _get_attr +from ddtrace.llmobs._utils import _get_llmobs_parent_id +from ddtrace.llmobs._utils import _get_nearest_llmobs_ancestor +from ddtrace.span import Span + + +class LangGraphIntegration(BaseLLMIntegration): + _integration_name = "langgraph" + _graph_nodes_by_task_id: Dict[str, Any] = {} # maps task_id to dictionary of name, span, and span_links + 
+ def _llmobs_set_tags( + self, + span: Span, + args: List[Any], + kwargs: Dict[str, Any], + response: Optional[Any] = None, + operation: str = "", # oneof graph, node + ): + if not self.llmobs_enabled: + return + + inputs = get_argument_value(args, kwargs, 0, "input") + config = get_argument_value(args, kwargs, 1, "config", optional=True) + metadata = _get_attr(config, "metadata", {}) + instance_id = metadata.get("langgraph_checkpoint_ns", "").split(":")[-1] + invoked_node = self._graph_nodes_by_task_id.setdefault(instance_id, {}) + invoked_node["span"] = {"trace_id": "{:x}".format(span.trace_id), "span_id": str(span.span_id)} + + span_links = [_default_span_link(span)] + invoked_node_span_links = invoked_node.get("span_links") + if invoked_node_span_links is not None: + span_links = invoked_node_span_links + current_span_links = span._get_ctx_item(SPAN_LINKS) or [] + + span._set_ctx_items( + { + SPAN_KIND: "agent" if operation == "graph" else "task", + INPUT_VALUE: format_langchain_io(inputs), + OUTPUT_VALUE: format_langchain_io(response), + NAME: self._graph_nodes_by_task_id.get(instance_id, {}).get("name") or kwargs.get("name", span.name), + SPAN_LINKS: current_span_links + span_links, + } + ) + if operation == "graph" and not _is_subgraph(span): + self._graph_nodes_by_task_id.clear() + + def llmobs_handle_pregel_loop_tick( + self, finished_tasks: dict, next_tasks: dict, more_tasks: bool, is_subgraph_node: bool = False + ): + """Compute incoming and outgoing span links between finished tasks and queued tasks in the graph.""" + if not self.llmobs_enabled: + return + graph_span = ( + tracer.current_span() + ) # we're running between nodes, so the current span should be the pregel graph + if graph_span is None or graph_span.span_type != SpanTypes.LLM: + return + + if not more_tasks: + self._handle_finished_graph(graph_span, finished_tasks, is_subgraph_node) + return + finished_task_names_to_ids = {task.name: task_id for task_id, task in finished_tasks.items()} + 
for task_id, task in next_tasks.items(): + self._link_task_to_parent(task_id, task, finished_task_names_to_ids) + + def _handle_finished_graph(self, graph_span, finished_tasks, is_subgraph_node): + """Create the span links for a finished pregel graph from all finished tasks as the graph span's outputs. + Generate the output-to-output span links for the last nodes in a pregel graph. + If the graph isn't a subgraph, add a span link from the graph span to the calling LLMObs parent span. + Note: is_subgraph_node denotes whether the graph is a subgraph node, + not whether it is a standalone graph (called internally during a node execution). + """ + graph_caller_span = _get_nearest_llmobs_ancestor(graph_span) if graph_span else None + output_span_links = [ + {**self._graph_nodes_by_task_id[task_id]["span"], "attributes": {"from": "output", "to": "output"}} + for task_id in finished_tasks.keys() + ] + graph_span_span_links = graph_span._get_ctx_item(SPAN_LINKS) or [] + graph_span._set_ctx_item(SPAN_LINKS, graph_span_span_links + output_span_links) + if graph_caller_span is not None and not is_subgraph_node: + graph_caller_span_links = graph_caller_span._get_ctx_item(SPAN_LINKS) or [] + span_links = [ + { + "span_id": str(graph_span.span_id) or "undefined", + "trace_id": "{:x}".format(graph_caller_span.trace_id), + "attributes": {"from": "output", "to": "output"}, + } + ] + graph_caller_span._set_ctx_item(SPAN_LINKS, graph_caller_span_links + span_links) + return + + def _link_task_to_parent(self, task_id, task, finished_task_names_to_ids): + """Create the span links for a queued task from its triggering parent tasks.""" + task_config = getattr(task, "config", {}) + task_triggers = task_config.get("metadata", {}).get("langgraph_triggers", []) + + trigger_node_names = [_extract_parent(trigger) for trigger in task_triggers] + trigger_node_ids: List[str] = [ + finished_task_names_to_ids.get(trigger_node_name, "") for trigger_node_name in trigger_node_names + ] + + for node_id 
in trigger_node_ids: + queued_node = self._graph_nodes_by_task_id.setdefault(task_id, {}) + queued_node["name"] = getattr(task, "name", "") + + trigger_node_span = self._graph_nodes_by_task_id.get(node_id, {}).get("span") + if not trigger_node_span: + # Subgraphs that are called at the start of the graph need to be named, but don't need any span links + continue + + span_link = { + "span_id": trigger_node_span.get("span_id", ""), + "trace_id": trigger_node_span.get("trace_id", ""), + "attributes": {"from": "output", "to": "input"}, + } + span_links = queued_node.setdefault("span_links", []) + span_links.append(span_link) + + +def _extract_parent(trigger: str) -> str: + """ + Extract the parent node name from a trigger string. + + The string could have the format: + - `parent:child` + - `parent:routing_logic:child` + - `branch:parent:routing_logic:child` + """ + split = trigger.split(":") + if len(split) < 3: + return split[0] + return split[1] + + +def _default_span_link(span: Span): + """ + Create a default input-to-input span link for a given span, if there are no + referenced spans that represent the causal link. In this case, we assume + the span is linked to its parent's input. + """ + return { + "span_id": str(_get_llmobs_parent_id(span)) or "undefined", + "trace_id": "{:x}".format(span.trace_id), + "attributes": {"from": "input", "to": "input"}, + } + + +def _is_subgraph(graph_span): + """Helper to denote whether the LangGraph graph this span represents is a sub-graph or a standalone graph. + Note that this only considers if this graph is nested in the execution of a larger graph, + not whether this graph is represented as a single node in the larger graph + (counterexample being a standalone graph called internally during a node execution). 
+ """ + graph_caller_span = _get_nearest_llmobs_ancestor(graph_span) + while graph_caller_span is not None: + if graph_caller_span.resource.endswith("LangGraph"): + return True + graph_caller_span = _get_nearest_llmobs_ancestor(graph_caller_span) + return False diff --git a/ddtrace/llmobs/_integrations/openai.py b/ddtrace/llmobs/_integrations/openai.py index bd727b1a5a2..ea660f53f68 100644 --- a/ddtrace/llmobs/_integrations/openai.py +++ b/ddtrace/llmobs/_integrations/openai.py @@ -24,7 +24,7 @@ from ddtrace.llmobs._integrations.base import BaseLLMIntegration from ddtrace.llmobs._utils import _get_attr from ddtrace.llmobs.utils import Document -from ddtrace.pin import Pin +from ddtrace.trace import Pin class OpenAIIntegration(BaseLLMIntegration): diff --git a/ddtrace/llmobs/_integrations/utils.py b/ddtrace/llmobs/_integrations/utils.py index 2676dce9637..f180e0c1820 100644 --- a/ddtrace/llmobs/_integrations/utils.py +++ b/ddtrace/llmobs/_integrations/utils.py @@ -1,3 +1,6 @@ +from typing import Tuple +from typing import Union + from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY @@ -163,3 +166,41 @@ def get_system_instructions_from_google_model(model_instance): elif Part is not None and isinstance(elem, Part): system_instructions.append(_get_attr(elem, "text", "")) return system_instructions + + +LANGCHAIN_ROLE_MAPPING = { + "human": "user", + "ai": "assistant", + "system": "system", +} + + +def format_langchain_io( + messages, +): + """ + Formats input and output messages for serialization to JSON. + Specifically, makes sure that any schema messages are converted to strings appropriately. 
+ """ + if isinstance(messages, dict): + formatted = {} + for key, value in messages.items(): + formatted[key] = format_langchain_io(value) + return formatted + if isinstance(messages, list): + return [format_langchain_io(message) for message in messages] + return get_content_from_langchain_message(messages) + + +def get_content_from_langchain_message(message) -> Union[str, Tuple[str, str]]: + """ + Attempts to extract the content and role from a message (AIMessage, HumanMessage, SystemMessage) object. + """ + if isinstance(message, str): + return message + try: + content = getattr(message, "__dict__", {}).get("content", str(message)) + role = getattr(message, "role", LANGCHAIN_ROLE_MAPPING.get(getattr(message, "type"), "")) + return (role, content) if role else content + except AttributeError: + return str(message) diff --git a/ddtrace/llmobs/_llmobs.py b/ddtrace/llmobs/_llmobs.py index b4f1dc1b2f6..9fec36a9d10 100644 --- a/ddtrace/llmobs/_llmobs.py +++ b/ddtrace/llmobs/_llmobs.py @@ -50,6 +50,7 @@ from ddtrace.llmobs._constants import PROPAGATED_PARENT_ID_KEY from ddtrace.llmobs._constants import SESSION_ID from ddtrace.llmobs._constants import SPAN_KIND +from ddtrace.llmobs._constants import SPAN_LINKS from ddtrace.llmobs._constants import SPAN_START_WHILE_DISABLED_WARNING from ddtrace.llmobs._constants import TAGS from ddtrace.llmobs._evaluators.runner import EvaluatorRunner @@ -212,6 +213,11 @@ def _llmobs_span_event(cls, span: Span) -> Tuple[Dict[str, Any], bool]: llmobs_span_event["tags"] = cls._llmobs_tags( span, ml_app, session_id, is_ragas_integration_span=is_ragas_integration_span ) + + span_links = span._get_ctx_item(SPAN_LINKS) + if isinstance(span_links, list): + llmobs_span_event["span_links"] = span_links + return llmobs_span_event, is_ragas_integration_span @staticmethod diff --git a/ddtrace/llmobs/_utils.py b/ddtrace/llmobs/_utils.py index dd616db8bef..1799eb5548d 100644 --- a/ddtrace/llmobs/_utils.py +++ b/ddtrace/llmobs/_utils.py @@ -14,6 +14,7 
@@ from ddtrace.llmobs._constants import INTERNAL_QUERY_VARIABLE_KEYS from ddtrace.llmobs._constants import LANGCHAIN_APM_SPAN_NAME from ddtrace.llmobs._constants import ML_APP +from ddtrace.llmobs._constants import NAME from ddtrace.llmobs._constants import OPENAI_APM_SPAN_NAME from ddtrace.llmobs._constants import PARENT_ID_KEY from ddtrace.llmobs._constants import PROPAGATED_PARENT_ID_KEY @@ -124,7 +125,7 @@ def _get_span_name(span: Span) -> str: elif span.name == OPENAI_APM_SPAN_NAME and span.resource != "": client_name = span.get_tag("openai.request.client") or "OpenAI" return "{}.{}".format(client_name, span.resource) - return span.name + return span._get_ctx_item(NAME) or span.name def _get_ml_app(span: Span) -> str: diff --git a/ddtrace/opentracer/tracer.py b/ddtrace/opentracer/tracer.py index 489c025037a..2c8ac4a5d65 100644 --- a/ddtrace/opentracer/tracer.py +++ b/ddtrace/opentracer/tracer.py @@ -18,6 +18,7 @@ from ddtrace.internal.constants import SPAN_API_OPENTRACING from ddtrace.internal.utils.config import get_application_name from ddtrace.settings import ConfigException +from ddtrace.vendor.debtcollector import deprecate from ..internal.logger import get_logger from .propagation import HTTPPropagator @@ -70,8 +71,8 @@ def __init__( If ``None`` is provided, defaults to :class:`opentracing.scope_managers.ThreadLocalScopeManager`. :param dd_tracer: (optional) the Datadog tracer for this tracer to use. This - should only be passed if a custom Datadog tracer is being used. Defaults - to the global ``ddtrace.tracer`` tracer. + parameter is deprecated and will be removed in v3.0.0. The + to the global tracer (``ddtrace.tracer``) should always be used. 
""" # Merge the given config with the default into a new dict self._config = DEFAULT_CONFIG.copy() @@ -99,15 +100,25 @@ def __init__( self._scope_manager = scope_manager or ThreadLocalScopeManager() dd_context_provider = get_context_provider_for_scope_manager(self._scope_manager) - self._dd_tracer = dd_tracer or ddtrace.tracer or DatadogTracer() + if dd_tracer is not None: + deprecate( + "The ``dd_tracer`` parameter is deprecated", + message="The global tracer (``ddtrace.tracer``) will be used instead.", + removal_version="3.0.0", + ) + + self._dd_tracer = dd_tracer or ddtrace.tracer self._dd_tracer.set_tags(self._config.get(keys.GLOBAL_TAGS)) # type: ignore[arg-type] - self._dd_tracer.configure( + trace_processors = None + if keys.SETTINGS in self._config: + trace_processors = self._config[keys.SETTINGS].get("FILTERS") # type: ignore[union-attr] + self._dd_tracer._configure( enabled=self._config.get(keys.ENABLED), hostname=self._config.get(keys.AGENT_HOSTNAME), https=self._config.get(keys.AGENT_HTTPS), port=self._config.get(keys.AGENT_PORT), sampler=self._config.get(keys.SAMPLER), - settings=self._config.get(keys.SETTINGS), + trace_processors=trace_processors, priority_sampling=self._config.get(keys.PRIORITY_SAMPLING), uds_path=self._config.get(keys.UDS_PATH), context_provider=dd_context_provider, # type: ignore[arg-type] diff --git a/ddtrace/pin.py b/ddtrace/pin.py index 926918b6cea..0e683b3b22e 100644 --- a/ddtrace/pin.py +++ b/ddtrace/pin.py @@ -1,209 +1,10 @@ -from typing import TYPE_CHECKING # noqa:F401 -from typing import Any # noqa:F401 -from typing import Dict # noqa:F401 -from typing import Optional # noqa:F401 +from ddtrace._trace.pin import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate -import wrapt -import ddtrace - -from .internal.logger import get_logger - - -log = get_logger(__name__) - - -# To set attributes on wrapt proxy objects use this prefix: -# 
http://wrapt.readthedocs.io/en/latest/wrappers.html -_DD_PIN_NAME = "_datadog_pin" -_DD_PIN_PROXY_NAME = "_self_" + _DD_PIN_NAME - - -class Pin(object): - """Pin (a.k.a Patch INfo) is a small class which is used to - set tracing metadata on a particular traced connection. - This is useful if you wanted to, say, trace two different - database clusters. - - >>> conn = sqlite.connect('/tmp/user.db') - >>> # Override a pin for a specific connection - >>> pin = Pin.override(conn, service='user-db') - >>> conn = sqlite.connect('/tmp/image.db') - """ - - __slots__ = ["tags", "tracer", "_target", "_config", "_initialized"] - - def __init__( - self, - service=None, # type: Optional[str] - tags=None, # type: Optional[Dict[str, str]] - tracer=None, - _config=None, # type: Optional[Dict[str, Any]] - ): - # type: (...) -> None - tracer = tracer or ddtrace.tracer - self.tags = tags - self.tracer = tracer - self._target = None # type: Optional[int] - # keep the configuration attribute internal because the - # public API to access it is not the Pin class - self._config = _config or {} # type: Dict[str, Any] - # [Backward compatibility]: service argument updates the `Pin` config - self._config["service_name"] = service - self._initialized = True - - @property - def service(self): - # type: () -> str - """Backward compatibility: accessing to `pin.service` returns the underlying - configuration value. 
- """ - return self._config["service_name"] - - def __setattr__(self, name, value): - if getattr(self, "_initialized", False) and name != "_target": - raise AttributeError("can't mutate a pin, use override() or clone() instead") - super(Pin, self).__setattr__(name, value) - - def __repr__(self): - return "Pin(service=%s, tags=%s, tracer=%s)" % (self.service, self.tags, self.tracer) - - @staticmethod - def _find(*objs): - # type: (Any) -> Optional[Pin] - """ - Return the first :class:`ddtrace.pin.Pin` found on any of the provided objects or `None` if none were found - - - >>> pin = Pin._find(wrapper, instance, conn) - - :param objs: The objects to search for a :class:`ddtrace.pin.Pin` on - :type objs: List of objects - :rtype: :class:`ddtrace.pin.Pin`, None - :returns: The first found :class:`ddtrace.pin.Pin` or `None` is none was found - """ - for obj in objs: - pin = Pin.get_from(obj) - if pin: - return pin - return None - - @staticmethod - def get_from(obj): - # type: (Any) -> Optional[Pin] - """Return the pin associated with the given object. If a pin is attached to - `obj` but the instance is not the owner of the pin, a new pin is cloned and - attached. This ensures that a pin inherited from a class is a copy for the new - instance, avoiding that a specific instance overrides other pins values. 
- - >>> pin = Pin.get_from(conn) - - :param obj: The object to look for a :class:`ddtrace.pin.Pin` on - :type obj: object - :rtype: :class:`ddtrace.pin.Pin`, None - :returns: :class:`ddtrace.pin.Pin` associated with the object, or None if none was found - """ - if hasattr(obj, "__getddpin__"): - return obj.__getddpin__() - - pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME - pin = getattr(obj, pin_name, None) - # detect if the PIN has been inherited from a class - if pin is not None and pin._target != id(obj): - pin = pin.clone() - pin.onto(obj) - return pin - - @classmethod - def override( - cls, - obj, # type: Any - service=None, # type: Optional[str] - tags=None, # type: Optional[Dict[str, str]] - tracer=None, - ): - # type: (...) -> None - """Override an object with the given attributes. - - That's the recommended way to customize an already instrumented client, without - losing existing attributes. - - >>> conn = sqlite.connect('/tmp/user.db') - >>> # Override a pin for a specific connection - >>> Pin.override(conn, service='user-db') - """ - if not obj: - return - - pin = cls.get_from(obj) - if pin is None: - Pin(service=service, tags=tags, tracer=tracer).onto(obj) - else: - pin.clone(service=service, tags=tags, tracer=tracer).onto(obj) - - def enabled(self): - # type: () -> bool - """Return true if this pin's tracer is enabled.""" - # inline to avoid circular imports - from ddtrace.settings.asm import config as asm_config - - return bool(self.tracer) and (self.tracer.enabled or asm_config._apm_opt_out) - - def onto(self, obj, send=True): - # type: (Any, bool) -> None - """Patch this pin onto the given object. If send is true, it will also - queue the metadata to be sent to the server. - """ - # Actually patch it on the object. 
- try: - if hasattr(obj, "__setddpin__"): - return obj.__setddpin__(self) - - pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME - - # set the target reference; any get_from, clones and retarget the new PIN - self._target = id(obj) - if self.service: - ddtrace.config._add_extra_service(self.service) - return setattr(obj, pin_name, self) - except AttributeError: - log.debug("can't pin onto object. skipping", exc_info=True) - - def remove_from(self, obj): - # type: (Any) -> None - # Remove pin from the object. - try: - pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME - - pin = Pin.get_from(obj) - if pin is not None: - delattr(obj, pin_name) - except AttributeError: - log.debug("can't remove pin from object. skipping", exc_info=True) - - def clone( - self, - service=None, # type: Optional[str] - tags=None, # type: Optional[Dict[str, str]] - tracer=None, - ): - # type: (...) -> Pin - """Return a clone of the pin with the given attributes replaced.""" - # do a shallow copy of Pin dicts - if not tags and self.tags: - tags = self.tags.copy() - - # we use a copy instead of a deepcopy because we expect configurations - # to have only a root level dictionary without nested objects. 
Using - # deepcopy introduces a big overhead: - # - # copy: 0.00654911994934082 - # deepcopy: 0.2787208557128906 - config = self._config.copy() - - return Pin( - service=service or self.service, - tags=tags, - tracer=tracer or self.tracer, # do not clone the Tracer - _config=config, - ) +deprecate( + "The ddtrace.trace.Pin module is deprecated and will be removed.", + message="Import ``Pin`` from the ddtrace.trace package.", + category=DDTraceDeprecationWarning, +) diff --git a/ddtrace/profiling/collector/_memalloc.c b/ddtrace/profiling/collector/_memalloc.c index b55e9ebcfab..1f2b87e0433 100644 --- a/ddtrace/profiling/collector/_memalloc.c +++ b/ddtrace/profiling/collector/_memalloc.c @@ -109,13 +109,19 @@ memalloc_init() } static void -memalloc_add_event(memalloc_context_t* ctx, void* ptr, size_t size) +memalloc_assert_gil() { if (g_crash_on_no_gil && !PyGILState_Check()) { int* p = NULL; *p = 0; abort(); // should never reach here } +} + +static void +memalloc_add_event(memalloc_context_t* ctx, void* ptr, size_t size) +{ + memalloc_assert_gil(); uint64_t alloc_count = atomic_add_clamped(&global_alloc_tracker->alloc_count, 1, ALLOC_TRACKER_MAX_COUNT); @@ -332,6 +338,8 @@ memalloc_stop(PyObject* Py_UNUSED(module), PyObject* Py_UNUSED(args)) return NULL; } + memalloc_assert_gil(); + PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &global_memalloc_ctx.pymem_allocator_obj); memalloc_tb_deinit(); if (memlock_trylock(&g_memalloc_lock)) { @@ -389,6 +397,8 @@ iterevents_new(PyTypeObject* type, PyObject* Py_UNUSED(args), PyObject* Py_UNUSE if (!iestate) return NULL; + memalloc_assert_gil(); + /* reset the current traceback list */ if (memlock_trylock(&g_memalloc_lock)) { iestate->alloc_tracker = global_alloc_tracker; diff --git a/ddtrace/propagation/http.py b/ddtrace/propagation/http.py index 563ee838d84..fdaf97410ad 100644 --- a/ddtrace/propagation/http.py +++ b/ddtrace/propagation/http.py @@ -40,8 +40,8 @@ from ..internal._tagset import decode_tagset_string from ..internal._tagset 
import encode_tagset_values from ..internal.compat import ensure_text +from ..internal.constants import _PROPAGATION_BEHAVIOR_RESTART from ..internal.constants import _PROPAGATION_STYLE_BAGGAGE -from ..internal.constants import _PROPAGATION_STYLE_NONE from ..internal.constants import _PROPAGATION_STYLE_W3C_TRACECONTEXT from ..internal.constants import DD_TRACE_BAGGAGE_MAX_BYTES from ..internal.constants import DD_TRACE_BAGGAGE_MAX_ITEMS @@ -878,20 +878,6 @@ def _inject(span_context, headers): headers[_HTTP_HEADER_TRACESTATE] = span_context._tracestate -class _NOP_Propagator: - @staticmethod - def _extract(headers): - # type: (Dict[str, str]) -> None - return None - - # this method technically isn't needed with the current way we have HTTPPropagator.inject setup - # but if it changes then we might want it - @staticmethod - def _inject(span_context, headers): - # type: (Context , Dict[str, str]) -> Dict[str, str] - return headers - - class _BaggageHeader: """Helper class to inject/extract Baggage Headers""" @@ -963,7 +949,6 @@ def _extract(headers: Dict[str, str]) -> Context: PROPAGATION_STYLE_B3_MULTI: _B3MultiHeader, PROPAGATION_STYLE_B3_SINGLE: _B3SingleHeader, _PROPAGATION_STYLE_W3C_TRACECONTEXT: _TraceContext, - _PROPAGATION_STYLE_NONE: _NOP_Propagator, _PROPAGATION_STYLE_BAGGAGE: _BaggageHeader, } @@ -974,12 +959,12 @@ class HTTPPropagator(object): """ @staticmethod - def _extract_configured_contexts_avail(normalized_headers): + def _extract_configured_contexts_avail(normalized_headers: Dict[str, str]) -> Tuple[List[Context], List[str]]: contexts = [] styles_w_ctx = [] for prop_style in config._propagation_style_extract: propagator = _PROP_STYLES[prop_style] - context = propagator._extract(normalized_headers) + context = propagator._extract(normalized_headers) # type: ignore # baggage is handled separately if prop_style == _PROPAGATION_STYLE_BAGGAGE: continue @@ -988,6 +973,24 @@ def _extract_configured_contexts_avail(normalized_headers): 
styles_w_ctx.append(prop_style) return contexts, styles_w_ctx + @staticmethod + def _context_to_span_link(context: Context, style: str, reason: str) -> Optional[SpanLink]: + # encoding expects at least trace_id and span_id + if context.span_id and context.trace_id: + return SpanLink( + context.trace_id, + context.span_id, + flags=1 if context.sampling_priority and context.sampling_priority > 0 else 0, + tracestate=( + context._meta.get(W3C_TRACESTATE_KEY, "") if style == _PROPAGATION_STYLE_W3C_TRACECONTEXT else None + ), + attributes={ + "reason": reason, + "context_headers": style, + }, + ) + return None + @staticmethod def _resolve_contexts(contexts, styles_w_ctx, normalized_headers): primary_context = contexts[0] @@ -996,23 +999,14 @@ def _resolve_contexts(contexts, styles_w_ctx, normalized_headers): for context in contexts[1:]: style_w_ctx = styles_w_ctx[contexts.index(context)] # encoding expects at least trace_id and span_id - if context.span_id and context.trace_id and context.trace_id != primary_context.trace_id: - links.append( - SpanLink( - context.trace_id, - context.span_id, - flags=1 if context.sampling_priority and context.sampling_priority > 0 else 0, - tracestate=( - context._meta.get(W3C_TRACESTATE_KEY, "") - if style_w_ctx == _PROPAGATION_STYLE_W3C_TRACECONTEXT - else None - ), - attributes={ - "reason": "terminated_context", - "context_headers": style_w_ctx, - }, - ) + if context.trace_id and context.trace_id != primary_context.trace_id: + link = HTTPPropagator._context_to_span_link( + context, + style_w_ctx, + "terminated_context", ) + if link: + links.append(link) # if trace_id matches and the propagation style is tracecontext # add the tracestate to the primary context elif style_w_ctx == _PROPAGATION_STYLE_W3C_TRACECONTEXT: @@ -1058,6 +1052,8 @@ def parent_call(): :param dict headers: HTTP headers to extend with tracing attributes. :param Span non_active_span: Only to be used if injecting a non-active span. 
""" + if not config._propagation_style_inject: + return if non_active_span is not None and non_active_span.context is not span_context: log.error( "span_context and non_active_span.context are not the same, but should be. non_active_span.context " @@ -1130,17 +1126,19 @@ def my_controller(url, headers): :param dict headers: HTTP headers to extract tracing attributes. :return: New `Context` with propagated attributes. """ - if not headers: - return Context() + context = Context() + if not headers or not config._propagation_style_extract: + return context try: + style = "" normalized_headers = {name.lower(): v for name, v in headers.items()} - context = Context() # tracer configured to extract first only if config._propagation_extract_first: # loop through the extract propagation styles specified in order, return whatever context we get first for prop_style in config._propagation_style_extract: propagator = _PROP_STYLES[prop_style] context = propagator._extract(normalized_headers) + style = prop_style if config.propagation_http_baggage_enabled is True: _attach_baggage_to_context(normalized_headers, context) break @@ -1148,6 +1146,9 @@ def my_controller(url, headers): # loop through all extract propagation styles else: contexts, styles_w_ctx = HTTPPropagator._extract_configured_contexts_avail(normalized_headers) + # check that styles_w_ctx is not empty + if styles_w_ctx: + style = styles_w_ctx[0] if contexts: context = HTTPPropagator._resolve_contexts(contexts, styles_w_ctx, normalized_headers) @@ -1159,9 +1160,12 @@ def my_controller(url, headers): baggage_context = _BaggageHeader._extract(normalized_headers) if baggage_context._baggage != {}: if context: - context._baggage = baggage_context._baggage + context._baggage = baggage_context.get_all_baggage_items() else: context = baggage_context + if config._propagation_behavior_extract == _PROPAGATION_BEHAVIOR_RESTART: + link = HTTPPropagator._context_to_span_link(context, style, "propagation_behavior_extract") + 
context = Context(baggage=context.get_all_baggage_items(), span_links=[link] if link else []) return context diff --git a/ddtrace/settings/_otel_remapper.py b/ddtrace/settings/_otel_remapper.py index 8bdb313fdef..d3501c2e3fa 100644 --- a/ddtrace/settings/_otel_remapper.py +++ b/ddtrace/settings/_otel_remapper.py @@ -28,7 +28,7 @@ def __class_getitem__(self, item): from ..constants import VERSION_KEY from ..internal.logger import get_logger from ..internal.telemetry import telemetry_writer -from ..internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_TRACER +from ..internal.telemetry.constants import TELEMETRY_NAMESPACE log = get_logger(__name__) @@ -169,7 +169,7 @@ def otel_remapping(): if otel_env.startswith("OTEL_") and otel_env != "OTEL_PYTHON_CONTEXT": log.warning("OpenTelemetry configuration %s is not supported by Datadog.", otel_env) telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_TRACER, + TELEMETRY_NAMESPACE.TRACERS, "otel.env.unsupported", 1, (("config_opentelemetry", otel_env.lower()),), @@ -185,7 +185,7 @@ def otel_remapping(): otel_value, ) telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_TRACER, + TELEMETRY_NAMESPACE.TRACERS, "otel.env.hiding", 1, (("config_opentelemetry", otel_env.lower()), ("config_datadog", dd_env.lower())), @@ -205,7 +205,7 @@ def otel_remapping(): otel_value, ) telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_TRACER, + TELEMETRY_NAMESPACE.TRACERS, "otel.env.invalid", 1, (("config_opentelemetry", otel_env.lower()), ("config_datadog", dd_env.lower())), diff --git a/ddtrace/settings/config.py b/ddtrace/settings/config.py index 65baf99ccb3..b35975a2e5d 100644 --- a/ddtrace/settings/config.py +++ b/ddtrace/settings/config.py @@ -19,8 +19,12 @@ from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning from ddtrace.vendor.debtcollector import deprecate +from .._trace.pin import Pin from ..internal import gitmetadata +from ..internal.constants import _PROPAGATION_BEHAVIOR_DEFAULT 
+from ..internal.constants import _PROPAGATION_BEHAVIOR_IGNORE from ..internal.constants import _PROPAGATION_STYLE_DEFAULT +from ..internal.constants import _PROPAGATION_STYLE_NONE from ..internal.constants import DEFAULT_BUFFER_SIZE from ..internal.constants import DEFAULT_MAX_PAYLOAD_SIZE from ..internal.constants import DEFAULT_PROCESSING_INTERVAL @@ -34,7 +38,6 @@ from ..internal.serverless import in_aws_lambda from ..internal.utils.formats import asbool from ..internal.utils.formats import parse_tags_str -from ..pin import Pin from ._core import get_config as _get_config from ._inferred_base_service import detect_service from ._otel_remapper import otel_remapping as _otel_remapping @@ -147,12 +150,15 @@ def _parse_propagation_styles(styles_str): category=DDTraceDeprecationWarning, ) style = PROPAGATION_STYLE_B3_SINGLE - if not style: + if not style or style == _PROPAGATION_STYLE_NONE: continue if style not in PROPAGATION_STYLE_ALL: log.warning("Unknown DD_TRACE_PROPAGATION_STYLE: {!r}, allowed values are %r", style, PROPAGATION_STYLE_ALL) continue styles.append(style) + # Remove "none" if it's present since it lacks a propagator + if _PROPAGATION_STYLE_NONE in styles: + styles.remove(_PROPAGATION_STYLE_NONE) return styles @@ -529,17 +535,32 @@ def __init__(self): # Propagation styles # DD_TRACE_PROPAGATION_STYLE_EXTRACT and DD_TRACE_PROPAGATION_STYLE_INJECT # take precedence over DD_TRACE_PROPAGATION_STYLE - self._propagation_style_extract = _parse_propagation_styles( - _get_config( - ["DD_TRACE_PROPAGATION_STYLE_EXTRACT", "DD_TRACE_PROPAGATION_STYLE"], _PROPAGATION_STYLE_DEFAULT - ) + # if DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT is set to ignore + # we set DD_TRACE_PROPAGATION_STYLE_EXTRACT to [_PROPAGATION_STYLE_NONE] since no extraction will be heeded + self._propagation_behavior_extract = _get_config( + ["DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT"], _PROPAGATION_BEHAVIOR_DEFAULT, self._lower ) + if self._propagation_behavior_extract != _PROPAGATION_BEHAVIOR_IGNORE: 
+ self._propagation_style_extract = _parse_propagation_styles( + _get_config( + ["DD_TRACE_PROPAGATION_STYLE_EXTRACT", "DD_TRACE_PROPAGATION_STYLE"], _PROPAGATION_STYLE_DEFAULT + ) + ) + else: + log.debug( + """DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT is set to ignore, + setting DD_TRACE_PROPAGATION_STYLE_EXTRACT to empty list""" + ) + self._propagation_style_extract = [_PROPAGATION_STYLE_NONE] self._propagation_style_inject = _parse_propagation_styles( _get_config(["DD_TRACE_PROPAGATION_STYLE_INJECT", "DD_TRACE_PROPAGATION_STYLE"], _PROPAGATION_STYLE_DEFAULT) ) self._propagation_extract_first = _get_config("DD_TRACE_PROPAGATION_EXTRACT_FIRST", False, asbool) + # When True any active span is ignored when extracting trace context from headers + self._extract_ignore_active_span = asbool(os.getenv("_DD_TRACE_EXTRACT_IGNORE_ACTIVE_SPAN", False)) + # Datadog tracer tags propagation x_datadog_tags_max_length = _get_config("DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH", 512, int) if x_datadog_tags_max_length < 0: @@ -978,3 +999,6 @@ def convert_rc_trace_sampling_rules(self, rc_rules: List[Dict[str, Any]]) -> Opt return json.dumps(rc_rules) else: return None + + def _lower(self, value): + return value.lower() diff --git a/ddtrace/settings/profiling.py b/ddtrace/settings/profiling.py index 94d71f1778b..38a4b49d6d6 100644 --- a/ddtrace/settings/profiling.py +++ b/ddtrace/settings/profiling.py @@ -334,7 +334,7 @@ class ProfilingConfigStack(En): _v2_enabled = En.v( bool, "v2_enabled", - default=False, + default=True, help_type="Boolean", help="Whether to enable the v2 stack profiler. 
Also enables the libdatadog collector.", ) diff --git a/ddtrace/trace/__init__.py b/ddtrace/trace/__init__.py index 90c662cebc7..f709310d589 100644 --- a/ddtrace/trace/__init__.py +++ b/ddtrace/trace/__init__.py @@ -1,7 +1,18 @@ from ddtrace._trace.context import Context +from ddtrace._trace.filters import TraceFilter +from ddtrace._trace.pin import Pin +from ddtrace._trace.span import Span +from ddtrace._trace.tracer import Tracer -# TODO: Move `ddtrace.Pin`, `ddtrace.Tracer`, `ddtrace.Span`, and `ddtrace.tracer` to this module +# a global tracer instance with integration settings +tracer = Tracer() + __all__ = [ "Context", + "Pin", + "TraceFilter", + "Tracer", + "Span", + "tracer", ] diff --git a/docker-compose.yml b/docker-compose.yml index cf40a4a256d..118ad8cc5db 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -142,6 +142,7 @@ services: - DD_DISABLE_ERROR_RESPONSES=true - ENABLED_CHECKS=trace_content_length,trace_stall,meta_tracer_version_header,trace_count_header,trace_peer_service,trace_dd_service - SNAPSHOT_IGNORED_ATTRS=span_id,trace_id,parent_id,duration,start,metrics.system.pid,metrics.system.process_id,metrics.process_id,meta.runtime-id,meta._dd.p.tid,meta.pathway.hash,metrics._dd.tracer_kr,meta._dd.parent_id,meta.kafka.cluster_id + vertica: image: vertica/vertica-ce environment: diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index 309b6178c56..c1c41df00c0 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -332,24 +332,25 @@ configuring the tracer with a filters list. 
For instance, to filter out all traces of incoming requests to a specific url:: from ddtrace import tracer + from ddtrace.trace import TraceFilter + + class FilterbyName(TraceFilter): + def process_trace(self, trace): + for span in trace: + if span.name == "some_name": + # drop the full trace chunk + return None + return trace tracer.configure(settings={ 'FILTERS': [ - FilterRequestsOnUrl(r'http://test\.example\.com'), + FilterbyName(), ], }) The filters in the filters list will be applied sequentially to each trace and the resulting trace will either be sent to the Agent or discarded. -**Built-in filters** - -The library comes with a ``FilterRequestsOnUrl`` filter that can be used to -filter out incoming requests to specific urls: - -.. autoclass:: ddtrace.filters.FilterRequestsOnUrl - :members: - **Writing a custom filter** Create a filter by implementing a class with a ``process_trace`` method and @@ -358,7 +359,7 @@ providing it to the filters parameter of :meth:`ddtrace.Tracer.configure()`. the pipeline or ``None`` if the trace should be discarded:: from ddtrace import Span, tracer - from ddtrace.filters import TraceFilter + from ddtrace.trace import TraceFilter class FilterExample(TraceFilter): def process_trace(self, trace): diff --git a/docs/api.rst b/docs/api.rst index 4c52e37808f..d4b4e80674a 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -19,7 +19,7 @@ Tracing .. autoclass:: ddtrace.Span :members: -.. autoclass:: ddtrace.Pin +.. autoclass:: ddtrace.trace.Pin :members: .. autoclass:: ddtrace.trace.Context diff --git a/docs/configuration.rst b/docs/configuration.rst index 35bb63fac20..455272f318d 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -368,6 +368,26 @@ Trace Context propagation version_added: v1.7.0: The ``b3multi`` propagation style was added and ``b3`` was deprecated in favor it. 
+ DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT: + default: | + ``continue`` + + description: | + String for how to handle incoming request headers that are extracted for propagation of trace info. + + The supported values are ``continue``, ``restart``, and ``ignore``. + + After extracting the headers for propagation, this configuration determines what is done with them. + + The default value is ``continue`` which always propagates valid headers. + ``ignore`` ignores all incoming headers and ``restart`` turns the first extracted valid propagation header + into a span link and propagates baggage if present. + + Example: ``DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT="ignore"`` to ignore all incoming headers and to start a root span without a parent. + + version_added: + v2.20.0: + DD_TRACE_PROPAGATION_STYLE_INJECT: default: | ``tracecontext,datadog`` diff --git a/docs/contributing-integrations.rst b/docs/contributing-integrations.rst index 9d7d2d202ee..0dab68b5053 100644 --- a/docs/contributing-integrations.rst +++ b/docs/contributing-integrations.rst @@ -30,7 +30,7 @@ into the runtime execution of third-party libraries. The essential task of writi the functions in the third-party library that would serve as useful entrypoints and wrapping them with ``wrap_function_wrapper``. There are exceptions, but this is generally a useful starting point. -The Pin API in ``ddtrace.pin`` is used to configure the instrumentation at runtime. It provides a ``Pin`` class +The Pin API in ``ddtrace.trace.Pin`` is used to configure the instrumentation at runtime. It provides a ``Pin`` class that can store configuration data in memory in a manner that is accessible from within functions wrapped by Wrapt. ``Pin`` objects are most often used for storing configuration data scoped to a given integration, such as enable/disable flags and service name overrides. 
diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index c9cc13a5a9e..ff2cfc09c6d 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -31,6 +31,8 @@ autopatching autoreload autoreloading aws +AWS +ARN backend backends backport diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst index 73dde525de9..04bd56d0ba9 100644 --- a/docs/troubleshooting.rst +++ b/docs/troubleshooting.rst @@ -67,7 +67,7 @@ This can be a problem for users who want to see error details from a child span While this is default behavior for integrations, users can add a trace filter to propagate the error details up to the root span:: from ddtrace import Span, tracer - from ddtrace.filters import TraceFilter + from ddtrace.trace import TraceFilter class ErrorFilter(TraceFilter): diff --git a/hatch.toml b/hatch.toml index 41e5d049e60..7269db2d497 100644 --- a/hatch.toml +++ b/hatch.toml @@ -308,7 +308,6 @@ dependencies = [ "pytest-cov", "requests", "hypothesis", - "requests", "astunparse", "flask", "virtualenv-clone" @@ -324,6 +323,83 @@ test = [ [[envs.appsec_iast_packages.matrix]] python = ["3.9", "3.10", "3.11", "3.12"] +## ASM appsec_integrations_django + +[envs.appsec_integrations_django] +template = "appsec_integrations_django" +dependencies = [ + "pytest", + "pytest-cov", + "requests", + "hypothesis", + "pylibmc", + "bcrypt==4.2.1", + "pytest-django[testing]==3.10.0", + "Django{matrix:django}", +] + +[envs.appsec_integrations_django.scripts] +test = [ + "uname -a", + "pip freeze", + "DD_CIVISIBILITY_ITR_ENABLED=0 DD_IAST_REQUEST_SAMPLING=100 _DD_APPSEC_DEDUPLICATION_ENABLED=false python -m pytest -vvv {args:tests/appsec/integrations/django_tests/}", +] + +[[envs.appsec_integrations_django.matrix]] +python = ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] +django = ["~=4.0"] + +[[envs.appsec_integrations_django.matrix]] +python = ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] +django = ["~=3.2"] + +## ASM appsec_integrations_flask + 
+[envs.appsec_integrations_flask] +template = "appsec_integrations_flask" +dependencies = [ + "pytest", + "pytest-cov", + "requests", + "hypothesis", + "gunicorn", + "psycopg2-binary~=2.9.9", + "MarkupSafe{matrix:markupsafe:}", + "itsdangerous{matrix:itsdangerous:}", + "Werkzeug{matrix:werkzeug:}", + "flask{matrix:flask}", +] + +[envs.appsec_integrations_flask.scripts] +test = [ + "uname -a", + "pip freeze", + "DD_TRACE_AGENT_URL=http://localhost:9126 DD_CIVISIBILITY_ITR_ENABLED=0 DD_IAST_REQUEST_SAMPLING=100 _DD_APPSEC_DEDUPLICATION_ENABLED=false python -m pytest -vvv {args:tests/appsec/integrations/flask_tests/}", +] + +[[envs.appsec_integrations_flask.matrix]] +python = ["3.8", "3.9"] +flask = ["~=1.1"] +# https://github.com/pallets/markupsafe/issues/282 +# DEV: Breaking change made in 2.1.0 release +markupsafe = ["~=1.1"] +itsdangerous = ["==2.0.1"] +# DEV: Flask 1.0.x is missing a maximum version for werkzeug dependency +werkzeug = ["==2.0.3"] + +[[envs.appsec_integrations_flask.matrix]] +python = ["3.8", "3.9", "3.10", "3.11"] +flask = ["~=2.2"] + +[[envs.appsec_integrations_flask.matrix]] +python = ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] +flask = ["~=2.2"] + +[[envs.appsec_integrations_flask.matrix]] +python = ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] +flask = ["~=3.0"] + + ## ASM FastAPI diff --git a/lib-injection/sources/sitecustomize.py b/lib-injection/sources/sitecustomize.py index 0f87b770edd..32ab1c31ff3 100644 --- a/lib-injection/sources/sitecustomize.py +++ b/lib-injection/sources/sitecustomize.py @@ -45,6 +45,7 @@ def parse_version(version): TELEMETRY_ENABLED = "DD_INJECTION_ENABLED" in os.environ DEBUG_MODE = os.environ.get("DD_TRACE_DEBUG", "").lower() in ("true", "1", "t") INSTALLED_PACKAGES = {} +DDTRACE_VERSION = "unknown" PYTHON_VERSION = "unknown" PYTHON_RUNTIME = "unknown" PKGS_ALLOW_LIST = {} @@ -133,7 +134,7 @@ def create_count_metric(metric, tags=None): } -def gen_telemetry_payload(telemetry_events, ddtrace_version="unknown"): 
+def gen_telemetry_payload(telemetry_events, ddtrace_version): return { "metadata": { "language_name": "python", @@ -233,6 +234,7 @@ def get_first_incompatible_sysarg(): def _inject(): + global DDTRACE_VERSION global INSTALLED_PACKAGES global PYTHON_VERSION global PYTHON_RUNTIME @@ -353,10 +355,7 @@ def _inject(): if not os.path.exists(site_pkgs_path): _log("ddtrace site-packages not found in %r, aborting" % site_pkgs_path, level="error") TELEMETRY_DATA.append( - gen_telemetry_payload( - [create_count_metric("library_entrypoint.abort", ["reason:missing_" + site_pkgs_path])], - DDTRACE_VERSION, - ) + create_count_metric("library_entrypoint.abort", ["reason:missing_" + site_pkgs_path]), ) return @@ -369,14 +368,9 @@ def _inject(): except BaseException as e: _log("failed to load ddtrace module: %s" % e, level="error") TELEMETRY_DATA.append( - gen_telemetry_payload( - [ - create_count_metric( - "library_entrypoint.error", ["error_type:import_ddtrace_" + type(e).__name__.lower()] - ) - ], - DDTRACE_VERSION, - ) + create_count_metric( + "library_entrypoint.error", ["error_type:import_ddtrace_" + type(e).__name__.lower()] + ), ) return @@ -408,28 +402,18 @@ def _inject(): _log("successfully configured ddtrace package, python path is %r" % os.environ["PYTHONPATH"]) TELEMETRY_DATA.append( - gen_telemetry_payload( + create_count_metric( + "library_entrypoint.complete", [ - create_count_metric( - "library_entrypoint.complete", - [ - "injection_forced:" + str(runtime_incomp or integration_incomp).lower(), - ], - ) + "injection_forced:" + str(runtime_incomp or integration_incomp).lower(), ], - DDTRACE_VERSION, - ) + ), ) except Exception as e: TELEMETRY_DATA.append( - gen_telemetry_payload( - [ - create_count_metric( - "library_entrypoint.error", ["error_type:init_ddtrace_" + type(e).__name__.lower()] - ) - ], - DDTRACE_VERSION, - ) + create_count_metric( + "library_entrypoint.error", ["error_type:init_ddtrace_" + type(e).__name__.lower()] + ), ) _log("failed to load 
ddtrace.bootstrap.sitecustomize: %s" % e, level="error") return @@ -451,12 +435,11 @@ def _inject(): _inject() except Exception as e: TELEMETRY_DATA.append( - gen_telemetry_payload( - [create_count_metric("library_entrypoint.error", ["error_type:main_" + type(e).__name__.lower()])] - ) + create_count_metric("library_entrypoint.error", ["error_type:main_" + type(e).__name__.lower()]) ) finally: if TELEMETRY_DATA: - send_telemetry(TELEMETRY_DATA) + payload = gen_telemetry_payload(TELEMETRY_DATA, DDTRACE_VERSION) + send_telemetry(payload) except Exception: pass # absolutely never allow exceptions to propagate to the app diff --git a/pyproject.toml b/pyproject.toml index df5fbdcdbb2..03560d1b171 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,9 +55,9 @@ ddtrace-run = "ddtrace.commands.ddtrace_run:main" ddcontextvars_context = "ddtrace.internal.opentelemetry.context:DDRuntimeContext" [project.entry-points.pytest11] -ddtrace = "ddtrace.contrib.pytest.plugin" -"ddtrace.pytest_bdd" = "ddtrace.contrib.pytest_bdd.plugin" -"ddtrace.pytest_benchmark" = "ddtrace.contrib.pytest_benchmark.plugin" +ddtrace = "ddtrace.contrib.internal.pytest.plugin" +"ddtrace.pytest_bdd" = "ddtrace.contrib.internal.pytest_bdd.plugin" +"ddtrace.pytest_benchmark" = "ddtrace.contrib.internal.pytest_benchmark.plugin" [project.entry-points.'ddtrace.products'] "code-origin-for-spans" = "ddtrace.debugging._products.code_origin.span" diff --git a/releasenotes/notes/ATO_V3-e7f73ecf00d1474b.yaml b/releasenotes/notes/ATO_V3-e7f73ecf00d1474b.yaml new file mode 100644 index 00000000000..0a2757dba6a --- /dev/null +++ b/releasenotes/notes/ATO_V3-e7f73ecf00d1474b.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + ASM: This introduces full support for Automated user lifecycle tracking for login events (success and failure) diff --git a/releasenotes/notes/chore-reduce-oci-image-ce45f1868ee14415.yaml b/releasenotes/notes/chore-reduce-oci-image-ce45f1868ee14415.yaml new file mode 100644 index 
00000000000..42d4e6d348b --- /dev/null +++ b/releasenotes/notes/chore-reduce-oci-image-ce45f1868ee14415.yaml @@ -0,0 +1,4 @@ +--- +other: + - | + lib-injection: Reduce size of OCI image to improve k8s lib-injection pull and startup times. diff --git a/releasenotes/notes/ddtrace-resourcefilter-deprecated-52b1c92d388b0518.yaml b/releasenotes/notes/ddtrace-resourcefilter-deprecated-52b1c92d388b0518.yaml new file mode 100644 index 00000000000..183249aa688 --- /dev/null +++ b/releasenotes/notes/ddtrace-resourcefilter-deprecated-52b1c92d388b0518.yaml @@ -0,0 +1,4 @@ +--- +deprecations: + - | + tracing: Deprecates ``ddtrace.filters.FilterRequestsOnUrl``. Spans should be filtered/sampled using DD_TRACE_SAMPLING_RULES configuration. diff --git a/releasenotes/notes/deprecate-multiple-tracer-instances-078b920081ba4a36.yaml b/releasenotes/notes/deprecate-multiple-tracer-instances-078b920081ba4a36.yaml new file mode 100644 index 00000000000..7b96d366269 --- /dev/null +++ b/releasenotes/notes/deprecate-multiple-tracer-instances-078b920081ba4a36.yaml @@ -0,0 +1,4 @@ +--- +deprecations: + - | + tracing: Deprecates the use of multiple tracer instances in the same process. The global tracer (``ddtrace.tracer``) should be used instead. diff --git a/releasenotes/notes/fix-bedrock-model-id-parsing-611aea2ca2e00656.yaml b/releasenotes/notes/fix-bedrock-model-id-parsing-611aea2ca2e00656.yaml new file mode 100644 index 00000000000..c3e13ea3d38 --- /dev/null +++ b/releasenotes/notes/fix-bedrock-model-id-parsing-611aea2ca2e00656.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + botocore: Resolves formatting errors in the bedrock integration when parsing request model IDs, which can now accept AWS ARNs. 
diff --git a/releasenotes/notes/fix-ssi-telemetry-events-a0a01ad0b6ef63b5.yaml b/releasenotes/notes/fix-ssi-telemetry-events-a0a01ad0b6ef63b5.yaml new file mode 100644 index 00000000000..a1eba938bb8 --- /dev/null +++ b/releasenotes/notes/fix-ssi-telemetry-events-a0a01ad0b6ef63b5.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + lib-injection: Fixes incorrect telemetry data payload format. diff --git a/releasenotes/notes/fix-unncessary-header-extraction-c1facf2b30331afb.yaml b/releasenotes/notes/fix-unncessary-header-extraction-c1facf2b30331afb.yaml new file mode 100644 index 00000000000..98bd5bf500b --- /dev/null +++ b/releasenotes/notes/fix-unncessary-header-extraction-c1facf2b30331afb.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + tracing: Fix scenarios when distributed tracing headers would be extracted and possibly activated when a trace was already started. diff --git a/releasenotes/notes/make-ci-vis-ints-internal-532bc22d19bb62ab.yaml b/releasenotes/notes/make-ci-vis-ints-internal-532bc22d19bb62ab.yaml new file mode 100644 index 00000000000..334a65b1cfb --- /dev/null +++ b/releasenotes/notes/make-ci-vis-ints-internal-532bc22d19bb62ab.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - | + ci vis: Moves the implementational details of the pytest, pytest_benchmark, pytest_bdd, and unittest integrations + from ``ddtrace.contrib.`` to ``ddtrace.contrib.internal.``. diff --git a/releasenotes/notes/make-tracer-configure-simplier-e13c731b3c937d22.yaml b/releasenotes/notes/make-tracer-configure-simplier-e13c731b3c937d22.yaml new file mode 100644 index 00000000000..927cd9a61f7 --- /dev/null +++ b/releasenotes/notes/make-tracer-configure-simplier-e13c731b3c937d22.yaml @@ -0,0 +1,21 @@ +--- +deprecations: + - | + tracing: Ensures most tracing configurations are only set on application start up. This is done by deprecating the following parameters in ``ddtrace.configure(...)`` function. 
+ These parameters will be removed in ``ddtrace>=3.0.0``: + - enabled + - hostname + - port + - uds_path + - https + - sampler + - settings + - priority_sampling + - settings + - dogstatsd_url + - writer + - partial_flush_enabled + - partial_flush_min_spans + - api_version + - compute_stats_enabled + - wrap_executor diff --git a/releasenotes/notes/move-pin-and-filters-to-trace-package-2f47fa2d2592b413.yaml b/releasenotes/notes/move-pin-and-filters-to-trace-package-2f47fa2d2592b413.yaml new file mode 100644 index 00000000000..29b3ebe277d --- /dev/null +++ b/releasenotes/notes/move-pin-and-filters-to-trace-package-2f47fa2d2592b413.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - | + tracing: Deprecates ``ddtrace.pin`` module and moves the ``Pin`` class to ``ddtrace.trace`` package. In v3.0.0 the ``ddtrace/pin.py`` will be removed. + - | + tracing: Deprecates ``ddtrace.filters`` module and moves the ``TraceFilter`` and ``FilterRequestsOnUrl`` classes to ``ddtrace.trace`` package. In v3.0.0 the ``ddtrace/filters.py`` will be removed. \ No newline at end of file diff --git a/releasenotes/notes/profiling-stack-v2-default-ecd535ccf0c73ce0.yaml b/releasenotes/notes/profiling-stack-v2-default-ecd535ccf0c73ce0.yaml new file mode 100644 index 00000000000..a998fe652fc --- /dev/null +++ b/releasenotes/notes/profiling-stack-v2-default-ecd535ccf0c73ce0.yaml @@ -0,0 +1,19 @@ +--- +features: + - | + profiling: Stack V2 is enabled by default. It is the new stack sampler + implementation for CPython 3.8+. It enhances the performance, accuracy, + and reliability of Python CPU profiling. This feature activates our new + stack sampling, collection and export system. 
+ + The following are known issues and missing features from Stack V2 + + - Services using ``gunicorn`` with Stack V2 results in performance degradation + - Support for ``gevent`` is lacking + - Exception sampling is missing + + If you find these as a blocker for enabling Stack V2 for your services, you + can turn it off via setting ``DD_PROFILING_STACK_V2_ENABLED=0``. If you + find any other issue, then please proceed to escalate using appropriate + support channels or file an issue on the repository. + diff --git a/releasenotes/notes/propagation_behavior_extract-3d16765cfd07485b.yaml b/releasenotes/notes/propagation_behavior_extract-3d16765cfd07485b.yaml new file mode 100644 index 00000000000..6e1def89993 --- /dev/null +++ b/releasenotes/notes/propagation_behavior_extract-3d16765cfd07485b.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + propagation: Introduces the environment variable ``DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT`` + to control the behavior of the extraction of distributed tracing headers. The values, ``continue`` (default), + ``ignore``, and ``restart``, are supported. The default value is ``continue`` which has no change from the current behavior of always propagating valid headers. + ``ignore`` ignores all incoming headers, never propagating the incoming trace information + and ``restart`` turns the first extracted propagation style into a span link and propagates baggage if extracted. + +fixes: + - | + propagation: Fixes an issue where the baggage header was not being propagated when the baggage header was the only header extracted. + With this fix, the baggage header is now propagated when it is the only header extracted. 
diff --git a/releasenotes/notes/remove-multi-tracer-support-from-pin-f2f20ca3fa731929.yaml b/releasenotes/notes/remove-multi-tracer-support-from-pin-f2f20ca3fa731929.yaml new file mode 100644 index 00000000000..18c70a15b04 --- /dev/null +++ b/releasenotes/notes/remove-multi-tracer-support-from-pin-f2f20ca3fa731929.yaml @@ -0,0 +1,4 @@ +--- +deprecations: + - | + tracer: Deprecates the ability to use multiple tracer instances with ddtrace.Pin. In v3.0.0 pin objects will only use the global tracer. diff --git a/riotfile.py b/riotfile.py index b6fde4c1f42..b52fa77cbe1 100644 --- a/riotfile.py +++ b/riotfile.py @@ -228,50 +228,16 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, ), Venv( - name="appsec_integrations", - command="pytest {cmdargs} tests/appsec/integrations/", + name="appsec_integrations_pygoat", + pys=select_pys(min_version="3.10"), + command="pytest {cmdargs} tests/appsec/integrations/pygoat_tests/", pkgs={ "requests": latest, - "gunicorn": latest, - "psycopg2-binary": "~=2.9.9", }, env={ "DD_CIVISIBILITY_ITR_ENABLED": "0", "DD_IAST_REQUEST_SAMPLING": "100", # Override default 30% to analyze all IAST requests }, - venvs=[ - # Flask 1.x.x - Venv( - pys=select_pys(min_version="3.7", max_version="3.9"), - pkgs={ - "flask": "~=1.0", - # https://github.com/pallets/itsdangerous/issues/290 - # DEV: Breaking change made in 2.1.0 release - "itsdangerous": "<2.1.0", - # https://github.com/pallets/markupsafe/issues/282 - # DEV: Breaking change made in 2.1.0 release - "markupsafe": "<2.0", - # DEV: Flask 1.0.x is missing a maximum version for werkzeug dependency - "werkzeug": "<2.0", - }, - ), - # Flask 2.x.x - Venv( - pys=select_pys(min_version="3.7", max_version="3.11"), - pkgs={ - "flask": "~=2.2", - }, - ), - # Flask 3.x.x - Venv( - pys=select_pys(min_version="3.8", max_version="3.12"), - pkgs={ - "flask": "~=3.0", - "langchain": "==0.0.354", - "langchain_experimental": "==0.0.47", - }, - ), - ], ), Venv( name="profile-diff", 
@@ -330,9 +296,6 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT Venv( name="telemetry", command="pytest {cmdargs} tests/telemetry/", - env={ - "DD_PROFILING__FORCE_LEGACY_EXPORTER": "1", - }, pys=select_pys(), pkgs={ "requests": latest, @@ -411,7 +374,6 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT name="internal", env={ "DD_TRACE_AGENT_URL": "http://ddagent:8126", - "DD_PROFILING__FORCE_LEGACY_EXPORTER": "1", "DD_INSTRUMENTATION_TELEMETRY_ENABLED": "0", }, command="pytest -v {cmdargs} tests/internal/", @@ -516,9 +478,6 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( name="ddtracerun", - env={ - "DD_PROFILING__FORCE_LEGACY_EXPORTER": "1", - }, command="pytest {cmdargs} --no-cov tests/commands/test_runner.py", venvs=[ Venv( @@ -2813,6 +2772,15 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), ], ), + Venv( + name="langgraph", + command="pytest {cmdargs} tests/contrib/langgraph", + pys=select_pys(min_version="3.9"), + pkgs={ + "pytest-asyncio": latest, + "langgraph": "~=0.2.60", + }, + ), Venv( name="anthropic", command="pytest {cmdargs} tests/contrib/anthropic", @@ -2950,6 +2918,17 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT env={ "DD_AGENT_PORT": "9126", }, + venvs=[ + # Python 3.8 + Venv( + pys=["3.8"], + pkgs={"greenlet": "==3.1.0"}, + ), + # Python 3.9+ + Venv( + pys=select_pys(min_version="3.9"), + ), + ], ), Venv( name="subprocess", @@ -2974,6 +2953,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT command="python -m tests.profiling.run pytest -v --no-cov --capture=no --benchmark-disable {cmdargs} tests/profiling", # noqa: E501 env={ "DD_PROFILING_ENABLE_ASSERTS": "1", + "DD_PROFILING_STACK_V2_ENABLED": "0", "DD_PROFILING__FORCE_LEGACY_EXPORTER": "1", "CPUCOUNT": "12", }, diff --git a/tests/appsec/app.py b/tests/appsec/app.py index 
eb5beb666cf..e7e5dbaf231 100644 --- a/tests/appsec/app.py +++ b/tests/appsec/app.py @@ -87,7 +87,7 @@ from tests.appsec.iast_packages.packages.pkg_wrapt import pkg_wrapt from tests.appsec.iast_packages.packages.pkg_yarl import pkg_yarl from tests.appsec.iast_packages.packages.pkg_zipp import pkg_zipp -import tests.appsec.integrations.module_with_import_errors as module_with_import_errors +import tests.appsec.integrations.flask_tests.module_with_import_errors as module_with_import_errors app = Flask(__name__) diff --git a/tests/appsec/appsec/test_asm_standalone.py b/tests/appsec/appsec/test_asm_standalone.py index 3c2ed58caf6..16390888055 100644 --- a/tests/appsec/appsec/test_asm_standalone.py +++ b/tests/appsec/appsec/test_asm_standalone.py @@ -118,13 +118,13 @@ def tracer_appsec_standalone(request, tracer): # Remove the environment variables as they are unexpected args for the tracer configure request.param.pop("DD_APPSEC_SCA_ENABLED", None) - tracer.configure(api_version="v0.4", **request.param) + tracer._configure(api_version="v0.4", **request.param) yield tracer, request_param_copy # Reset tracer configuration ddtrace.config._reset() - tracer.configure(api_version="v0.4", appsec_enabled=False, appsec_standalone_enabled=False, iast_enabled=False) + tracer._configure(api_version="v0.4", appsec_enabled=False, appsec_standalone_enabled=False, iast_enabled=False) def test_appsec_standalone_apm_enabled_metric(tracer_appsec_standalone): diff --git a/tests/appsec/appsec/test_processor.py b/tests/appsec/appsec/test_processor.py index 3fec599237b..d3b133b2e21 100644 --- a/tests/appsec/appsec/test_processor.py +++ b/tests/appsec/appsec/test_processor.py @@ -65,6 +65,7 @@ def test_enable(tracer): def test_enable_custom_rules(): with override_global_config(dict(_asm_static_rule_file=rules.RULES_GOOD_PATH)): processor = AppSecSpanProcessor() + processor.delayed_init() assert processor.enabled assert processor.rule_filename == rules.RULES_GOOD_PATH @@ -345,6 +346,7 @@ def 
test_ddwaf_not_raises_exception(): def test_obfuscation_parameter_key_empty(): with override_global_config(dict(_asm_obfuscation_parameter_key_regexp="")): processor = AppSecSpanProcessor() + processor.delayed_init() assert processor.enabled @@ -352,6 +354,7 @@ def test_obfuscation_parameter_key_empty(): def test_obfuscation_parameter_value_empty(): with override_global_config(dict(_asm_obfuscation_parameter_value_regexp="")): processor = AppSecSpanProcessor() + processor.delayed_init() assert processor.enabled @@ -361,6 +364,7 @@ def test_obfuscation_parameter_key_and_value_empty(): dict(_asm_obfuscation_parameter_key_regexp="", _asm_obfuscation_parameter_value_regexp="") ): processor = AppSecSpanProcessor() + processor.delayed_init() assert processor.enabled @@ -368,6 +372,7 @@ def test_obfuscation_parameter_key_and_value_empty(): def test_obfuscation_parameter_key_invalid_regex(): with override_global_config(dict(_asm_obfuscation_parameter_key_regexp="(")): processor = AppSecSpanProcessor() + processor.delayed_init() assert processor.enabled @@ -375,6 +380,7 @@ def test_obfuscation_parameter_key_invalid_regex(): def test_obfuscation_parameter_invalid_regex(): with override_global_config(dict(_asm_obfuscation_parameter_value_regexp="(")): processor = AppSecSpanProcessor() + processor.delayed_init() assert processor.enabled @@ -384,6 +390,7 @@ def test_obfuscation_parameter_key_and_value_invalid_regex(): dict(_asm_obfuscation_parameter_key_regexp="(", _asm_obfuscation_parameter_value_regexp="(") ): processor = AppSecSpanProcessor() + processor.delayed_init() assert processor.enabled @@ -638,7 +645,23 @@ def test_asm_context_registration(tracer): "name": "test required", "tags": {"category": "attack_attempt", "custom": "1", "type": "custom"}, "transformers": [], - } + }, + { + "conditions": [ + { + "operator": "match_regex", + "parameters": { + "inputs": [{"address": "usr.login"}], + "options": {"case_sensitive": False}, + "regex": "GET", + }, + } + ], + "id": 
"32b243c7-26eb-4046-bbbb-custom", + "name": "test required", + "tags": {"category": "attack_attempt", "custom": "1", "type": "custom"}, + "transformers": [], + }, ] } @@ -646,6 +669,7 @@ def test_asm_context_registration(tracer): def test_required_addresses(): with override_global_config(dict(_asm_static_rule_file=rules.RULES_GOOD_PATH)): processor = AppSecSpanProcessor() + processor.delayed_init() assert processor._addresses_to_keep == { "grpc.server.request.message", @@ -672,6 +696,7 @@ def test_required_addresses(): "server.request.query", "server.response.headers.no_cookies", "usr.id", + "usr.login", } diff --git a/tests/appsec/appsec/test_remoteconfiguration.py b/tests/appsec/appsec/test_remoteconfiguration.py index 1d2c47bc190..6f521de457a 100644 --- a/tests/appsec/appsec/test_remoteconfiguration.py +++ b/tests/appsec/appsec/test_remoteconfiguration.py @@ -82,7 +82,7 @@ def test_rc_activation_states_on(tracer, appsec_enabled, rc_value, remote_config dict(_asm_enabled=asbool(appsec_enabled), _remote_config_enabled=True) ): if appsec_enabled: - tracer.configure(appsec_enabled=asbool(appsec_enabled)) + tracer._configure(appsec_enabled=asbool(appsec_enabled)) rc_config = {"config": {"asm": {"enabled": rc_value}}} _appsec_callback(rc_config, tracer) @@ -103,7 +103,7 @@ def test_rc_activation_states_off(tracer, appsec_enabled, rc_value, remote_confi if appsec_enabled == "": del os.environ[APPSEC.ENV] with override_global_config(dict(_asm_enabled=True)): - tracer.configure(appsec_enabled=asbool(appsec_enabled)) + tracer._configure(appsec_enabled=asbool(appsec_enabled)) rc_config = {"config": {"asm": {"enabled": True}}} if rc_value is False: @@ -128,14 +128,14 @@ def test_rc_activation_states_off(tracer, appsec_enabled, rc_value, remote_confi def test_rc_capabilities(rc_enabled, appsec_enabled, capability, tracer): env = {} config = {} - tracer.configure(appsec_enabled=False, api_version="v0.4") + tracer._configure(appsec_enabled=False, api_version="v0.4") if 
appsec_enabled: env[APPSEC.ENV] = appsec_enabled config["appsec_enabled"] = asbool(appsec_enabled) config["api_version"] = "v0.4" with override_env(env): with override_global_config(dict(_remote_config_enabled=rc_enabled)): - tracer.configure(**config) + tracer._configure(**config) assert _appsec_rc_capabilities(test_tracer=tracer) == capability @@ -200,7 +200,7 @@ def test_rc_activation_check_asm_features_product_disables_rest_of_products( rc_config.skip_shutdown = False empty_config = {} with override_global_config(global_config): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") enable_appsec_rc(tracer) assert bool(remoteconfig_poller._client._products.get(PRODUCTS.ASM_DATA)) is expected @@ -246,7 +246,7 @@ def test_rc_activation_with_auto_user_appsec_fixed(tracer, remote_config_worker, api_version="v0.4", ) ): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") enable_appsec_rc(tracer) assert remoteconfig_poller._client._products.get(PRODUCTS.ASM_DATA) @@ -262,7 +262,7 @@ def test_load_new_configurations_dispatch_applied_configs( mock_appsec_rules_data, mock_appsec_1click_activation, remote_config_worker, tracer ): with override_global_config(dict(_asm_enabled=True, _remote_config_enabled=True, api_version="v0.4")): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") enable_appsec_rc(tracer) asm_features_data = b'{"asm":{"enabled":true}}' asm_data_data = b'{"data": [{"test": "data"}]}' @@ -304,7 +304,7 @@ def test_load_new_configurations_empty_config( mock_appsec_rules_data, mock_appsec_1click_activation, remote_config_worker, tracer ): with override_global_config(dict(_asm_enabled=True, _remote_config_enabled=True, api_version="v0.4")): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") 
enable_appsec_rc(tracer) asm_features_data = b'{"asm":{"enabled":true}}' asm_data_data = b'{"data": []}' @@ -451,7 +451,7 @@ def test_load_multiple_targets_file_same_product( mock_appsec_rules_data, mock_appsec_1click_activation, remote_config_worker, tracer ): with override_global_config(dict(_asm_enabled=True, _remote_config_enabled=True, api_version="v0.4")): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") enable_appsec_rc(tracer) asm_features_data = b'{"asm":{"enabled":true}}' asm_data_data1 = b'{"data": [{"a":1}]}' @@ -502,7 +502,7 @@ def test_load_new_config_and_remove_targets_file_same_product( mock_appsec_rules_data, mock_appsec_1click_activation, remote_config_worker, tracer ): with override_global_config(dict(_asm_enabled=True, _remote_config_enabled=True, api_version="v0.4")): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") applied_configs = {} enable_appsec_rc(tracer) asm_features_data = b'{"asm":{"enabled":true}}' @@ -585,7 +585,7 @@ def test_load_new_config_and_remove_targets_file_same_product( @mock.patch.object(AppSecSpanProcessor, "_update_rules") def test_fullpath_appsec_rules_data(mock_update_rules, remote_config_worker, tracer): with override_global_config(dict(_asm_enabled=True, _remote_config_enabled=True, api_version="v0.4")): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") applied_configs = {} enable_appsec_rc(tracer) asm_features_data = b'{"asm":{"enabled":true}}' @@ -670,7 +670,7 @@ def test_fullpath_appsec_rules_data(mock_update_rules, remote_config_worker, tra @mock.patch.object(AppSecSpanProcessor, "_update_rules") def test_fullpath_appsec_rules_data_empty_data(mock_update_rules, remote_config_worker, tracer): with override_global_config(dict(_asm_enabled=True, _remote_config_enabled=True, api_version="v0.4")): - 
tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") applied_configs = {} enable_appsec_rc(tracer) asm_data_data1 = b'{"asm":{"enabled":true}, "exclusions": [{"t":1}]}' @@ -736,7 +736,7 @@ def test_fullpath_appsec_rules_data_empty_data(mock_update_rules, remote_config_ @mock.patch.object(AppSecSpanProcessor, "_update_rules") def test_fullpath_appsec_rules_data_add_delete_file(mock_update_rules, remote_config_worker, tracer): with override_global_config(dict(_asm_enabled=True, _remote_config_enabled=True)): - tracer.configure(appsec_enabled=True) + tracer._configure(appsec_enabled=True) applied_configs = {} enable_appsec_rc(tracer) asm_data_data1 = b'{"asm":{"enabled":true}, "exclusions": [{"b":1}]}' @@ -810,7 +810,7 @@ def test_load_new_empty_config_and_remove_targets_file_same_product( mock_appsec_rules_data, remote_config_worker, tracer ): with override_global_config(dict(_asm_enabled=True, api_version="v0.4", _remote_config_enabled=True)): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") applied_configs = {} enable_appsec_rc(tracer) asm_features_data = b'{"asm":{"enabled":true}}' @@ -900,7 +900,7 @@ def test_load_new_empty_config_and_remove_targets_file_same_product( def test_rc_activation_ip_blocking_data(tracer, remote_config_worker): with override_env({APPSEC.ENV: "true"}), override_global_config({}): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") rc_config = { "config": { "rules_data": [ @@ -933,7 +933,7 @@ def test_rc_activation_ip_blocking_data(tracer, remote_config_worker): def test_rc_activation_ip_blocking_data_expired(tracer, remote_config_worker): with override_env({APPSEC.ENV: "true"}), override_global_config({}): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") rc_config 
= { "config": { "rules_data": [ @@ -962,7 +962,7 @@ def test_rc_activation_ip_blocking_data_expired(tracer, remote_config_worker): def test_rc_activation_ip_blocking_data_not_expired(tracer, remote_config_worker): with override_env({APPSEC.ENV: "true"}), override_global_config({}): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") rc_config = { "config": { "rules_data": [ @@ -1007,7 +1007,7 @@ def test_rc_rules_data(tracer): with override_env({APPSEC.ENV: "true"}), override_global_config( dict(_asm_enabled=True, _asm_static_rule_file=f.name) ): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") config = { "rules_data": [], "custom_rules": [], @@ -1041,14 +1041,14 @@ def test_rc_rules_data(tracer): def test_rc_rules_data_error_empty(tracer): with override_env({APPSEC.ENV: "true"}), override_global_config(dict(_asm_enabled=True)): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") config = {} assert not _appsec_rules_data(config, tracer) def test_rc_rules_data_error_ddwaf(tracer): with override_env({APPSEC.ENV: "true"}), override_global_config(dict(_asm_enabled=True)): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") config = { "rules": [{"invalid": mock.MagicMock()}], } @@ -1057,7 +1057,7 @@ def test_rc_rules_data_error_ddwaf(tracer): def test_rules_never_empty(tracer): with override_global_config(dict(_asm_enabled=True)): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") with mock.patch("ddtrace.appsec._processor.AppSecSpanProcessor._update_rules", autospec=True) as mock_update: mock_update.reset_mock() _appsec_rules_data({"rules": []}, tracer) @@ -1069,7 +1069,7 @@ def test_rules_never_empty(tracer): def 
test_static_rules_never_modified(tracer): with override_global_config(dict(_asm_enabled=True)): - tracer.configure(appsec_enabled=True, api_version="v0.4") + tracer._configure(appsec_enabled=True, api_version="v0.4") processors = str(tracer._appsec_processor._rules["processors"]) scanners = str(tracer._appsec_processor._rules["scanners"]) proc_add = {"id": "new_processor"} diff --git a/tests/appsec/appsec/test_telemetry.py b/tests/appsec/appsec/test_telemetry.py index 8678820e8d6..47b58222dde 100644 --- a/tests/appsec/appsec/test_telemetry.py +++ b/tests/appsec/appsec/test_telemetry.py @@ -12,7 +12,7 @@ from ddtrace.appsec._processor import AppSecSpanProcessor from ddtrace.contrib.trace_utils import set_http_meta from ddtrace.ext import SpanTypes -from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_APPSEC +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.internal.telemetry.constants import TELEMETRY_TYPE_DISTRIBUTION from ddtrace.internal.telemetry.constants import TELEMETRY_TYPE_GENERATE_METRICS import tests.appsec.rules as rules @@ -27,7 +27,7 @@ def _assert_generate_metrics(metrics_result, is_rule_triggered=False, is_blocked_request=False): - generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE_TAG_APPSEC] + generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE.APPSEC.value] assert len(generate_metrics) == 2, "Expected 2 generate_metrics" for _metric_id, metric in generate_metrics.items(): if metric.name == "waf.requests": @@ -44,7 +44,7 @@ def _assert_generate_metrics(metrics_result, is_rule_triggered=False, is_blocked def _assert_distributions_metrics(metrics_result, is_rule_triggered=False, is_blocked_request=False): - distributions_metrics = metrics_result[TELEMETRY_TYPE_DISTRIBUTION][TELEMETRY_NAMESPACE_TAG_APPSEC] + distributions_metrics = metrics_result[TELEMETRY_TYPE_DISTRIBUTION][TELEMETRY_NAMESPACE.APPSEC.value] assert 
len(distributions_metrics) == 2, "Expected 2 distributions_metrics" for _metric_id, metric in distributions_metrics.items(): @@ -61,7 +61,7 @@ def _assert_distributions_metrics(metrics_result, is_rule_triggered=False, is_bl def test_metrics_when_appsec_doesnt_runs(telemetry_writer, tracer): with override_global_config(dict(_asm_enabled=False)): - tracer.configure(api_version="v0.4", appsec_enabled=False) + tracer._configure(api_version="v0.4", appsec_enabled=False) telemetry_writer._namespace.flush() with tracer.trace("test", span_type=SpanTypes.WEB) as span: set_http_meta( @@ -69,8 +69,8 @@ def test_metrics_when_appsec_doesnt_runs(telemetry_writer, tracer): rules.Config(), ) metrics_data = telemetry_writer._namespace._metrics_data - assert len(metrics_data[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE_TAG_APPSEC]) == 0 - assert len(metrics_data[TELEMETRY_TYPE_DISTRIBUTION][TELEMETRY_NAMESPACE_TAG_APPSEC]) == 0 + assert len(metrics_data[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE.APPSEC.value]) == 0 + assert len(metrics_data[TELEMETRY_TYPE_DISTRIBUTION][TELEMETRY_NAMESPACE.APPSEC.value]) == 0 def test_metrics_when_appsec_runs(telemetry_writer, tracer): @@ -108,7 +108,8 @@ def test_log_metric_error_ddwaf_init(telemetry_writer): _asm_static_rule_file=os.path.join(rules.ROOT_DIR, "rules-with-2-errors.json"), ) ): - AppSecSpanProcessor() + processor = AppSecSpanProcessor() + processor.delayed_init() list_metrics_logs = list(telemetry_writer._logs) assert len(list_metrics_logs) == 1 @@ -136,7 +137,7 @@ def test_log_metric_error_ddwaf_timeout(telemetry_writer, tracer): assert len(list_metrics_logs) == 0 generate_metrics = telemetry_writer._namespace._metrics_data[TELEMETRY_TYPE_GENERATE_METRICS][ - TELEMETRY_NAMESPACE_TAG_APPSEC + TELEMETRY_NAMESPACE.APPSEC.value ] timeout_found = False diff --git a/tests/appsec/contrib_appsec/django_app/settings.py b/tests/appsec/contrib_appsec/django_app/settings.py index a7641cdd673..859201f840a 100644 --- 
a/tests/appsec/contrib_appsec/django_app/settings.py +++ b/tests/appsec/contrib_appsec/django_app/settings.py @@ -6,11 +6,7 @@ from tests.webclient import PingFilter -tracer.configure( - settings={ - "FILTERS": [PingFilter()], - } -) +tracer._configure(trace_processors=[PingFilter()]) ALLOWED_HOSTS = [ diff --git a/tests/appsec/contrib_appsec/django_app/urls.py b/tests/appsec/contrib_appsec/django_app/urls.py index aaff69169b5..1b691d43a53 100644 --- a/tests/appsec/contrib_appsec/django_app/urls.py +++ b/tests/appsec/contrib_appsec/django_app/urls.py @@ -196,7 +196,7 @@ def login_user(request): def new_service(request, service_name: str): import ddtrace - ddtrace.Pin.override(django, service=service_name, tracer=ddtrace.tracer) + ddtrace.trace.Pin.override(django, service=service_name, tracer=ddtrace.tracer) return HttpResponse(service_name, status=200) diff --git a/tests/appsec/contrib_appsec/fastapi_app/app.py b/tests/appsec/contrib_appsec/fastapi_app/app.py index c5b765c4bbb..3403df6f844 100644 --- a/tests/appsec/contrib_appsec/fastapi_app/app.py +++ b/tests/appsec/contrib_appsec/fastapi_app/app.py @@ -104,7 +104,7 @@ async def multi_view_no_param(request: Request): # noqa: B008 async def new_service(service_name: str, request: Request): # noqa: B008 import ddtrace - ddtrace.Pin.override(app, service=service_name, tracer=ddtrace.tracer) + ddtrace.trace.Pin.override(app, service=service_name, tracer=ddtrace.tracer) return HTMLResponse(service_name, 200) async def slow_numbers(minimum, maximum): @@ -235,23 +235,25 @@ def authenticate(username: str, password: str) -> Optional[str]: return USERS[username]["id"] else: appsec_trace_utils.track_user_login_failure_event( - tracer, user_id=USERS[username]["id"], exists=True, login_events_mode="auto" + tracer, user_id=USERS[username]["id"], exists=True, login_events_mode="auto", login=username ) return None appsec_trace_utils.track_user_login_failure_event( - tracer, user_id=username, exists=False, 
login_events_mode="auto" + tracer, user_id=username, exists=False, login_events_mode="auto", login=username ) return None - def login(user_id: str) -> None: + def login(user_id: str, username: str) -> None: """login user""" - appsec_trace_utils.track_user_login_success_event(tracer, user_id=user_id, login_events_mode="auto") + appsec_trace_utils.track_user_login_success_event( + tracer, user_id=user_id, login_events_mode="auto", login=username + ) username = request.query_params.get("username") password = request.query_params.get("password") user_id = authenticate(username=username, password=password) if user_id is not None: - login(user_id) + login(user_id, username) return HTMLResponse("OK") return HTMLResponse("login failure", status_code=401) diff --git a/tests/appsec/contrib_appsec/flask_app/app.py b/tests/appsec/contrib_appsec/flask_app/app.py index 939a7cad678..83fb1ce9721 100644 --- a/tests/appsec/contrib_appsec/flask_app/app.py +++ b/tests/appsec/contrib_appsec/flask_app/app.py @@ -13,11 +13,7 @@ from tests.webclient import PingFilter -tracer.configure( - settings={ - "FILTERS": [PingFilter()], - } -) +tracer._configure(trace_processors=[PingFilter()]) cur_dir = os.path.dirname(os.path.realpath(__file__)) tmpl_path = os.path.join(cur_dir, "test_templates") app = Flask(__name__, template_folder=tmpl_path) @@ -60,7 +56,7 @@ def multi_view(param_int=0, param_str=""): def new_service(service_name: str): import ddtrace - ddtrace.Pin.override(Flask, service=service_name, tracer=ddtrace.tracer) + ddtrace.trace.Pin.override(Flask, service=service_name, tracer=ddtrace.tracer) return service_name @@ -188,22 +184,24 @@ def authenticate(username: str, password: str) -> Optional[str]: return USERS[username]["id"] else: appsec_trace_utils.track_user_login_failure_event( - tracer, user_id=USERS[username]["id"], exists=True, login_events_mode="auto" + tracer, user_id=USERS[username]["id"], exists=True, login_events_mode="auto", login=username ) return None 
appsec_trace_utils.track_user_login_failure_event( - tracer, user_id=username, exists=False, login_events_mode="auto" + tracer, user_id=username, exists=False, login_events_mode="auto", login=username ) return None - def login(user_id: str) -> None: + def login(user_id: str, login: str) -> None: """login user""" - appsec_trace_utils.track_user_login_success_event(tracer, user_id=user_id, login_events_mode="auto") + appsec_trace_utils.track_user_login_success_event( + tracer, user_id=user_id, login_events_mode="auto", login=login + ) username = request.args.get("username") password = request.args.get("password") user_id = authenticate(username=username, password=password) if user_id is not None: - login(user_id) + login(user_id, username) return "OK" return "login failure", 401 diff --git a/tests/appsec/contrib_appsec/test_flask.py b/tests/appsec/contrib_appsec/test_flask.py index 90a35ac0c88..b497de98bf9 100644 --- a/tests/appsec/contrib_appsec/test_flask.py +++ b/tests/appsec/contrib_appsec/test_flask.py @@ -1,8 +1,8 @@ from flask.testing import FlaskClient import pytest -from ddtrace import Pin from ddtrace.internal.packages import get_version_for_package +from ddtrace.trace import Pin from tests.appsec.contrib_appsec import utils from tests.utils import TracerTestCase diff --git a/tests/appsec/contrib_appsec/utils.py b/tests/appsec/contrib_appsec/utils.py index d3691e2bea3..5cf5e1da6b1 100644 --- a/tests/appsec/contrib_appsec/utils.py +++ b/tests/appsec/contrib_appsec/utils.py @@ -88,7 +88,7 @@ def check_rules_triggered(self, rule_id: List[str], root_span): assert result == rule_id, f"result={result}, expected={rule_id}" def update_tracer(self, interface): - interface.tracer.configure(api_version="v0.4") + interface.tracer._configure(api_version="v0.4") assert asm_config._asm_libddwaf_available # Only for tests diagnostics @@ -1379,7 +1379,7 @@ def validate_top_function(trace): assert get_tag(http.STATUS_CODE) == str(code), (get_tag(http.STATUS_CODE), code) if 
code == 200: assert self.body(response).startswith(f"{endpoint} endpoint") - telemetry_calls = {(c.__name__, f"{ns}.{nm}", t): v for (c, ns, nm, v, t), _ in mocked.call_args_list} + telemetry_calls = {(c.__name__, f"{ns.value}.{nm}", t): v for (c, ns, nm, v, t), _ in mocked.call_args_list} if asm_enabled and ep_enabled and action_level > 0: self.check_rules_triggered([rule] * (1 if action_level == 2 else 2), root_span) assert self.check_for_stack_trace(root_span) @@ -1478,9 +1478,16 @@ def test_auto_user_events( assert get_tag("_dd.appsec.events.users.login.failure.sdk") == "true" else: assert get_tag("_dd.appsec.events.users.login.success.sdk") is None + if mode == "identification": + assert get_tag("_dd.appsec.usr.login") == user + elif mode == "anonymization": + assert get_tag("_dd.appsec.usr.login") == _hash_user_id(user) else: assert get_tag("appsec.events.users.login.success.track") == "true" assert get_tag("usr.id") == user_id_hash + assert get_tag("_dd.appsec.usr.id") == user_id_hash + if mode == "identification": + assert get_tag("_dd.appsec.usr.login") == user # check for manual instrumentation tag in manual instrumented frameworks if interface.name in ["flask", "fastapi"]: assert get_tag("_dd.appsec.events.users.login.success.sdk") == "true" @@ -1552,7 +1559,7 @@ def test_tracer(): ddtrace.tracer = tracer # Yield to our test - tracer.configure(api_version="v0.4") + tracer._configure(api_version="v0.4") yield tracer tracer.pop() ddtrace.tracer = original_tracer @@ -1560,8 +1567,8 @@ def test_tracer(): @contextmanager def post_tracer(interface): - original_tracer = getattr(ddtrace.Pin.get_from(interface.framework), "tracer", None) - ddtrace.Pin.override(interface.framework, tracer=interface.tracer) + original_tracer = getattr(ddtrace.trace.Pin.get_from(interface.framework), "tracer", None) + ddtrace.trace.Pin.override(interface.framework, tracer=interface.tracer) yield if original_tracer is not None: - ddtrace.Pin.override(interface.framework, 
tracer=original_tracer) + ddtrace.trace.Pin.override(interface.framework, tracer=original_tracer) diff --git a/tests/appsec/iast/_ast/test_ast_patching.py b/tests/appsec/iast/_ast/test_ast_patching.py index d014496942b..213737ecbce 100644 --- a/tests/appsec/iast/_ast/test_ast_patching.py +++ b/tests/appsec/iast/_ast/test_ast_patching.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 import logging +import os import sys import astunparse @@ -20,6 +21,15 @@ _PREFIX = IAST.PATCH_ADDED_SYMBOL_PREFIX +@pytest.fixture(autouse=True, scope="module") +def clear_iast_env_vars(): + if IAST.PATCH_MODULES in os.environ: + os.environ.pop("_DD_IAST_PATCH_MODULES") + if IAST.DENY_MODULES in os.environ: + os.environ.pop("_DD_IAST_DENY_MODULES") + yield + + @pytest.mark.parametrize( "source_text, module_path, module_name", [ @@ -148,15 +158,34 @@ def test_astpatch_source_unchanged(module_name): assert ("", None) == astpatch_module(__import__(module_name, fromlist=[None])) -def test_module_should_iast_patch(): +def test_should_iast_patch_allow_first_party(): + assert _should_iast_patch("tests.appsec.iast.integration.main") + assert _should_iast_patch("tests.appsec.iast.integration.print_str") + + +def test_should_not_iast_patch_if_vendored(): + assert not _should_iast_patch("foobar.vendor.requests") + assert not _should_iast_patch(("vendored.foobar.requests")) + + +def test_should_iast_patch_deny_by_default_if_third_party(): + # note that modules here must be in the ones returned by get_package_distributions() + # but not in ALLOWLIST or DENYLIST. 
So please don't put astunparse there :) + assert not _should_iast_patch("astunparse.foo.bar.not.in.deny.or.allow.list") + + +def test_should_not_iast_patch_if_in_denylist(): assert not _should_iast_patch("ddtrace.internal.module") assert not _should_iast_patch("ddtrace.appsec._iast") + assert not _should_iast_patch("pip.foo.bar") + + +def test_should_not_iast_patch_if_stdlib(): assert not _should_iast_patch("base64") - assert not _should_iast_patch("envier") assert not _should_iast_patch("itertools") assert not _should_iast_patch("http") - assert _should_iast_patch("tests.appsec.iast.integration.main") - assert _should_iast_patch("tests.appsec.iast.integration.print_str") + assert not _should_iast_patch("os.path") + assert not _should_iast_patch("sys.platform") @pytest.mark.parametrize( diff --git a/tests/appsec/iast/conftest.py b/tests/appsec/iast/conftest.py index 5718fed357a..85d516dd154 100644 --- a/tests/appsec/iast/conftest.py +++ b/tests/appsec/iast/conftest.py @@ -2,6 +2,7 @@ import os import re import subprocess +import time import pytest @@ -26,6 +27,8 @@ from ddtrace.appsec._iast.taint_sinks.weak_hash import unpatch_iast as weak_hash_unpatch from ddtrace.contrib.internal.sqlite3.patch import patch as sqli_sqlite_patch from ddtrace.contrib.internal.sqlite3.patch import unpatch as sqli_sqlite_unpatch +from ddtrace.internal.utils.http import Response +from ddtrace.internal.utils.http import get_connection from tests.utils import override_env from tests.utils import override_global_config @@ -162,11 +165,27 @@ def check_native_code_exception_in_each_python_aspect_test(request, caplog): @pytest.fixture(scope="session") def configuration_endpoint(): current_dir = os.path.dirname(__file__) - cmd = [ - "python", - os.path.join(current_dir, "fixtures", "integration", "http_config_server.py"), - CONFIG_SERVER_PORT, - ] - process = subprocess.Popen(cmd, cwd=current_dir) + status = None + retries = 0 + while status != 200 and retries < 5: + cmd = [ + "python", + 
os.path.join(current_dir, "fixtures", "integration", "http_config_server.py"), + CONFIG_SERVER_PORT, + ] + process = subprocess.Popen(cmd, cwd=current_dir) + time.sleep(0.2) + + url = f"http://localhost:{CONFIG_SERVER_PORT}/" + conn = get_connection(url) + conn.request("GET", "/") + response = conn.getresponse() + result = Response.from_http_response(response) + status = result.status + retries += 1 + + if retries == 5: + pytest.skip("Failed to start the configuration server") + yield process.kill() diff --git a/tests/appsec/iast/fixtures/integration/main_configure.py b/tests/appsec/iast/fixtures/integration/main_configure.py index 44bace44af5..aa7de99e22c 100644 --- a/tests/appsec/iast/fixtures/integration/main_configure.py +++ b/tests/appsec/iast/fixtures/integration/main_configure.py @@ -25,7 +25,7 @@ def main(): if __name__ == "__main__": iast_enabled = bool(os.environ.get("DD_IAST_ENABLED", "") == "true") logger.info("IAST env var: %s", iast_enabled) - tracer.configure(iast_enabled=not iast_enabled) + tracer._configure(iast_enabled=not iast_enabled) main() if not iast_enabled: # Disabled by env var but then enabled with ``tracer.configure`` diff --git a/tests/appsec/iast/fixtures/integration/main_configure_right.py b/tests/appsec/iast/fixtures/integration/main_configure_right.py index c941c16ecbc..b1789deef97 100644 --- a/tests/appsec/iast/fixtures/integration/main_configure_right.py +++ b/tests/appsec/iast/fixtures/integration/main_configure_right.py @@ -24,5 +24,5 @@ def main(): if __name__ == "__main__": iast_enabled = bool(os.environ.get("DD_IAST_ENABLED", "") == "true") logger.info("configuring IAST to %s", iast_enabled) - tracer.configure(iast_enabled=iast_enabled) + tracer._configure(iast_enabled=iast_enabled) main() diff --git a/tests/appsec/iast/fixtures/integration/main_configure_wrong.py b/tests/appsec/iast/fixtures/integration/main_configure_wrong.py index a6c52201272..92828790b03 100644 --- 
a/tests/appsec/iast/fixtures/integration/main_configure_wrong.py +++ b/tests/appsec/iast/fixtures/integration/main_configure_wrong.py @@ -24,5 +24,5 @@ def main(): if __name__ == "__main__": iast_enabled = os.environ.get("DD_IAST_ENABLED", "false") logger.info("configuring IAST to %s", iast_enabled) - tracer.configure(iast_enabled=iast_enabled) + tracer._configure(iast_enabled=iast_enabled) main() diff --git a/tests/appsec/iast/fixtures/taint_sinks/code_injection.py b/tests/appsec/iast/fixtures/taint_sinks/code_injection.py index 18fb5d811d3..822ab547434 100644 --- a/tests/appsec/iast/fixtures/taint_sinks/code_injection.py +++ b/tests/appsec/iast/fixtures/taint_sinks/code_injection.py @@ -10,6 +10,34 @@ def pt_eval(origin_string): return r +def pt_eval_globals(origin_string): + context = {"x": 5, "y": 10} + r = eval(origin_string, context) + return r + + +def pt_eval_globals_locals(origin_string): + z = 15 # noqa: F841 + globals_dict = {"x": 10} + locals_dict = {"y": 20} + r = eval(origin_string, globals_dict, locals_dict) + return r + + +def pt_eval_lambda(fun): + return eval("lambda v,fun=fun:not fun(v)") + + +def is_true(value): + return value is True + + +def pt_eval_lambda_globals(origin_string): + globals_dict = {"square": lambda x: x * x} + r = eval(origin_string, globals=globals_dict) + return r + + def pt_literal_eval(origin_string): r = literal_eval(origin_string) return r diff --git a/tests/appsec/iast/taint_sinks/test_code_injection.py b/tests/appsec/iast/taint_sinks/test_code_injection.py index ffd9c156d25..a38600795ff 100644 --- a/tests/appsec/iast/taint_sinks/test_code_injection.py +++ b/tests/appsec/iast/taint_sinks/test_code_injection.py @@ -1,5 +1,7 @@ import os +import pytest + from ddtrace.appsec._iast._taint_tracking import OriginType from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject from ddtrace.appsec._iast.constants import VULN_CODE_INJECTION @@ -9,10 +11,10 @@ ROOT_DIR = 
os.path.dirname(os.path.abspath(__file__)) +mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.code_injection") def test_code_injection_eval(iast_context_defaults): - mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.code_injection") code_string = '"abc" + "def"' tainted_string = taint_pyobject( @@ -35,53 +37,120 @@ def test_code_injection_eval(iast_context_defaults): assert vulnerability["evidence"].get("redacted") is None -# TODO: wrap exec functions is very dangerous because it needs and modifies locals and globals from the original func -# def test_code_injection_exec(iast_context_defaults): -# mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.code_injection") -# code_string = '"abc" + "def"' -# -# tainted_string = taint_pyobject( -# code_string, source_name="path", source_value=code_string, source_origin=OriginType.PATH -# ) -# mod.pt_exec(tainted_string) -# -# data = _get_iast_data() -# -# assert len(data["vulnerabilities"]) == 1 -# vulnerability = data["vulnerabilities"][0] -# source = data["sources"][0] -# assert vulnerability["type"] == VULN_CODE_INJECTION -# assert source["name"] == "path" -# assert source["origin"] == OriginType.PATH -# assert source["value"] == '"abc" + "def"' -# assert vulnerability["evidence"]["valueParts"] == [{"source": 0, "value": '"abc" + "def"'}] -# assert "value" not in vulnerability["evidence"].keys() -# assert vulnerability["evidence"].get("pattern") is None -# assert vulnerability["evidence"].get("redacted") is None -# -# -# def test_code_injection_exec_with_globals(iast_context_defaults): -# mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.code_injection") -# code_string = 'my_var_in_pt_exec_with_globals + "-" + my_var_in_pt_exec_with_globals + "-"' -# -# tainted_string = taint_pyobject( -# code_string, source_name="path", source_value=code_string, source_origin=OriginType.PATH -# ) -# mod.pt_exec_with_globals(tainted_string) -# -# data = _get_iast_data() -# 
-# assert len(data["vulnerabilities"]) == 1 -# vulnerability = data["vulnerabilities"][0] -# source = data["sources"][0] -# assert vulnerability["type"] == VULN_CODE_INJECTION -# assert source["name"] == "path" -# assert source["origin"] == OriginType.PATH -# assert source["value"] == '"abc" + "def"' -# assert vulnerability["evidence"]["valueParts"] == [{"source": 0, "value": '"abc" + "def"'}] -# assert "value" not in vulnerability["evidence"].keys() -# assert vulnerability["evidence"].get("pattern") is None -# assert vulnerability["evidence"].get("redacted") is None +def test_code_injection_eval_globals(iast_context_defaults): + """Validate globals and locals of the function""" + + code_string = "x + y" + + tainted_string = taint_pyobject( + code_string, source_name="path", source_value=code_string, source_origin=OriginType.PATH + ) + mod.pt_eval_globals(tainted_string) + + data = _get_iast_data() + + assert len(data["vulnerabilities"]) == 1 + vulnerability = data["vulnerabilities"][0] + source = data["sources"][0] + assert vulnerability["type"] == VULN_CODE_INJECTION + assert source["name"] == "path" + assert source["origin"] == OriginType.PATH + assert source["value"] == "x + y" + assert vulnerability["evidence"]["valueParts"] == [{"source": 0, "value": "x + y"}] + assert "value" not in vulnerability["evidence"].keys() + assert vulnerability["evidence"].get("pattern") is None + assert vulnerability["evidence"].get("redacted") is None + + +def test_code_injection_eval_globals_locals(iast_context_defaults): + """Validate globals and locals of the function""" + + code_string = "x + y" + + tainted_string = taint_pyobject( + code_string, source_name="path", source_value=code_string, source_origin=OriginType.PATH + ) + + mod.pt_eval_globals_locals(tainted_string) + + data = _get_iast_data() + + assert len(data["vulnerabilities"]) == 1 + vulnerability = data["vulnerabilities"][0] + source = data["sources"][0] + assert vulnerability["type"] == VULN_CODE_INJECTION + 
assert source["name"] == "path" + assert source["origin"] == OriginType.PATH + assert source["value"] == "x + y" + assert vulnerability["evidence"]["valueParts"] == [{"source": 0, "value": "x + y"}] + assert "value" not in vulnerability["evidence"].keys() + assert vulnerability["evidence"].get("pattern") is None + assert vulnerability["evidence"].get("redacted") is None + + +def test_code_injection_eval_globals_locals_override(iast_context_defaults): + """Validate globals and locals of the function""" + + code_string = "x + y + z" + + tainted_string = taint_pyobject( + code_string, source_name="path", source_value=code_string, source_origin=OriginType.PATH + ) + with pytest.raises(NameError): + mod.pt_eval_globals_locals(tainted_string) + + data = _get_iast_data() + + assert len(data["vulnerabilities"]) == 1 + vulnerability = data["vulnerabilities"][0] + source = data["sources"][0] + assert vulnerability["type"] == VULN_CODE_INJECTION + assert source["name"] == "path" + assert source["origin"] == OriginType.PATH + assert source["value"] == "x + y + z" + assert vulnerability["evidence"]["valueParts"] == [{"source": 0, "value": "x + y + z"}] + assert "value" not in vulnerability["evidence"].keys() + assert vulnerability["evidence"].get("pattern") is None + assert vulnerability["evidence"].get("redacted") is None + + +def test_code_injection_eval_lambda(iast_context_defaults): + """Validate globals and locals of the function""" + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.code_injection") + + def pt_eval_lambda_no_tainted(fun): + return eval("lambda v,fun=fun:not fun(v)") + + def is_true_no_tainted(value): + return value is True + + assert mod.pt_eval_lambda(mod.is_true)(True) is pt_eval_lambda_no_tainted(is_true_no_tainted)(True) + + +def test_code_injection_eval_globals_kwargs_lambda(iast_context_defaults): + """Validate globals and locals of the function""" + + code_string = "square(5)" + + tainted_string = taint_pyobject( + code_string, 
source_name="path", source_value=code_string, source_origin=OriginType.PATH + ) + + mod.pt_eval_lambda_globals(tainted_string) + + data = _get_iast_data() + + assert len(data["vulnerabilities"]) == 1 + vulnerability = data["vulnerabilities"][0] + source = data["sources"][0] + assert vulnerability["type"] == VULN_CODE_INJECTION + assert source["name"] == "path" + assert source["origin"] == OriginType.PATH + assert source["value"] == "square(5)" + assert vulnerability["evidence"]["valueParts"] == [{"source": 0, "value": "square(5)"}] + assert "value" not in vulnerability["evidence"].keys() + assert vulnerability["evidence"].get("pattern") is None + assert vulnerability["evidence"].get("redacted") is None def test_code_injection_literal_eval(iast_context_defaults): diff --git a/tests/appsec/iast/test_telemetry.py b/tests/appsec/iast/test_telemetry.py index 106d9408815..139dab79918 100644 --- a/tests/appsec/iast/test_telemetry.py +++ b/tests/appsec/iast/test_telemetry.py @@ -28,7 +28,7 @@ from ddtrace.contrib.internal.sqlalchemy.patch import patch as sqli_sqlalchemy_patch from ddtrace.contrib.internal.sqlite3.patch import patch as sqli_sqlite3_patch from ddtrace.ext import SpanTypes -from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_IAST +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.internal.telemetry.constants import TELEMETRY_TYPE_GENERATE_METRICS from tests.appsec.iast.aspects.conftest import _iast_patched_module from tests.appsec.utils import asm_context @@ -38,7 +38,7 @@ def _assert_instrumented_sink(telemetry_writer, vuln_type): metrics_result = telemetry_writer._namespace._metrics_data - generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE_TAG_IAST] + generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE.IAST.value] assert len(generate_metrics) == 1, "Expected 1 generate_metrics" assert [metric.name for metric in 
generate_metrics.values()] == ["instrumented.sink"] assert [metric._tags for metric in generate_metrics.values()] == [(("vulnerability_type", vuln_type),)] @@ -87,7 +87,7 @@ def test_metric_executed_sink(no_request_sampling, telemetry_writer, caplog): metrics_result = telemetry_writer._namespace._metrics_data - generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE_TAG_IAST].values() + generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE.IAST.value].values() assert len(generate_metrics) == 1 # Remove potential sinks from internal usage of the lib (like http.client, used to communicate with # the agent) @@ -151,7 +151,7 @@ def test_metric_instrumented_propagation(no_request_sampling, telemetry_writer): _iast_patched_module("benchmarks.bm.iast_fixtures.str_methods") metrics_result = telemetry_writer._namespace._metrics_data - generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE_TAG_IAST] + generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE.IAST.value] # Remove potential sinks from internal usage of the lib (like http.client, used to communicate with # the agent) filtered_metrics = [metric.name for metric in generate_metrics.values() if metric.name != "executed.sink"] @@ -175,7 +175,7 @@ def test_metric_request_tainted(no_request_sampling, telemetry_writer): metrics_result = telemetry_writer._namespace._metrics_data - generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE_TAG_IAST] + generate_metrics = metrics_result[TELEMETRY_TYPE_GENERATE_METRICS][TELEMETRY_NAMESPACE.IAST.value] # Remove potential sinks from internal usage of the lib (like http.client, used to communicate with # the agent) filtered_metrics = [metric.name for metric in generate_metrics.values() if metric.name != "executed.sink"] diff --git a/tests/appsec/iast_packages/test_packages.py b/tests/appsec/iast_packages/test_packages.py index 
6da439dbeb5..83e53ae92c9 100644 --- a/tests/appsec/iast_packages/test_packages.py +++ b/tests/appsec/iast_packages/test_packages.py @@ -216,24 +216,29 @@ def uninstall(self, python_cmd): import_module_to_validate="boto3.session", ), PackageForTesting("botocore", "1.34.110", "", "", "", test_e2e=False), - PackageForTesting( - "cffi", "1.16.0", "", 30, "", import_module_to_validate="cffi.model", extras=[("setuptools", "72.1.0")] - ), - PackageForTesting( - "certifi", "2024.2.2", "", "The path to the CA bundle is", "", import_module_to_validate="certifi.core" - ), - PackageForTesting( - "charset-normalizer", - "3.3.2", - "my-bytes-string", - "my-bytes-string", - "", - import_name="charset_normalizer", - import_module_to_validate="charset_normalizer.api", - test_propagation=True, - fixme_propagation_fails=True, - ), - PackageForTesting("click", "8.1.7", "", "Hello World!\nHello World!\n", "", import_module_to_validate="click.core"), + ## Skip due to cffi added to the denylist + # PackageForTesting( + # "cffi", "1.16.0", "", 30, "", import_module_to_validate="cffi.model", extras=[("setuptools", "72.1.0")] + # ), + ## Skip due to certifi added to the denylist + # PackageForTesting( + # "certifi", "2024.2.2", "", "The path to the CA bundle is", "", import_module_to_validate="certifi.core" + # ), + ## Skip due to charset-normalizer added to the denylist + # PackageForTesting( + # "charset-normalizer", + # "3.3.2", + # "my-bytes-string", + # "my-bytes-string", + # "", + # import_name="charset_normalizer", + # import_module_to_validate="charset_normalizer.api", + # test_propagation=True, + # fixme_propagation_fails=True, + # ), + ## Skip due to click added to the denylist + # PackageForTesting("click", "8.1.7", "", "Hello World!\nHello World!\n", "", + # import_module_to_validate="click.core"), PackageForTesting( "cryptography", "42.0.7", @@ -247,15 +252,16 @@ def uninstall(self, python_cmd): PackageForTesting( "distlib", "0.3.8", "", "Name: example-package\nVersion: 0.1", 
"", import_module_to_validate="distlib.util" ), - PackageForTesting( - "exceptiongroup", - "1.2.1", - "foobar", - "ValueError: First error with foobar\nTypeError: Second error with foobar", - "", - import_module_to_validate="exceptiongroup._formatting", - test_propagation=True, - ), + ## Skip due to docopt added to the denylist + # PackageForTesting( + # "exceptiongroup", + # "1.2.1", + # "foobar", + # "ValueError: First error with foobar\nTypeError: Second error with foobar", + # "", + # import_module_to_validate="exceptiongroup._formatting", + # test_propagation=True, + # ), PackageForTesting( "filelock", "3.14.0", @@ -327,14 +333,15 @@ def uninstall(self, python_cmd): "", import_module_to_validate="isodate.duration", ), - PackageForTesting( - "itsdangerous", - "2.2.0", - "foobar", - "Signed value: foobar.generated_signature\nUnsigned value: foobar", - "", - import_module_to_validate="itsdangerous.serializer", - ), + ## Skip due to itsdangerous added to the denylist + # PackageForTesting( + # "itsdangerous", + # "2.2.0", + # "foobar", + # "Signed value: foobar.generated_signature\nUnsigned value: foobar", + # "", + # import_module_to_validate="itsdangerous.serializer", + # ), PackageForTesting( "jinja2", "3.1.4", @@ -424,13 +431,15 @@ def uninstall(self, python_cmd): PackageForTesting( "openpyxl", "3.1.2", "foobar", "Written value: foobar", "", import_module_to_validate="openpyxl.chart.axis" ), - PackageForTesting( - "packaging", - "24.0", - "", - {"is_version_valid": True, "requirement": "example-package>=1.0.0", "specifier": ">=1.0.0", "version": "1.2.3"}, - "", - ), + ## Skip due to packaging added to the denylist + # PackageForTesting( + # "packaging", + # "24.0", + # "", + # {"is_version_valid": True, "requirement": "example-package>=1.0.0", + # "specifier": ">=1.0.0", "version": "1.2.3"}, + # "", + # ), ## Skip due to pandas added to the denylist # Pandas dropped Python 3.8 support in pandas>2.0.3 # PackageForTesting("pandas", "2.2.2", "foobar", "Written 
value: foobar", "", skip_python_version=[(3, 8)]), @@ -443,14 +452,15 @@ def uninstall(self, python_cmd): import_module_to_validate="platformdirs.unix", test_propagation=True, ), - PackageForTesting( - "pluggy", - "1.5.0", - "foobar", - "Hook result: Plugin received: foobar", - "", - import_module_to_validate="pluggy._hooks", - ), + ## Skip due to pluggy added to the denylist + # PackageForTesting( + # "pluggy", + # "1.5.0", + # "foobar", + # "Hook result: Plugin received: foobar", + # "", + # import_module_to_validate="pluggy._hooks", + # ), PackageForTesting( "pyasn1", "0.6.0", @@ -461,7 +471,8 @@ def uninstall(self, python_cmd): test_propagation=True, fixme_propagation_fails=True, ), - PackageForTesting("pycparser", "2.22", "", "", ""), + ## Skip due to pygments added to the denylist + # PackageForTesting("pycparser", "2.22", "", "", ""), PackageForTesting( "pydantic", "2.7.1", @@ -619,15 +630,16 @@ def uninstall(self, python_cmd): test_propagation=True, fixme_propagation_fails=True, ), - PackageForTesting( - "werkzeug", - "3.0.3", - "your-password", - "Original password: your-password\nHashed password: replaced_hashed\nPassword match: True", - "", - import_module_to_validate="werkzeug.http", - skip_python_version=[(3, 6), (3, 7), (3, 8)], - ), + ## Skip due to werkzeug added to the denylist + # PackageForTesting( + # "werkzeug", + # "3.0.3", + # "your-password", + # "Original password: your-password\nHashed password: replaced_hashed\nPassword match: True", + # "", + # import_module_to_validate="werkzeug.http", + # skip_python_version=[(3, 6), (3, 7), (3, 8)], + # ), PackageForTesting( "yarl", "1.9.4", @@ -640,24 +652,26 @@ def uninstall(self, python_cmd): test_propagation=True, fixme_propagation_fails=True, ), - PackageForTesting( - "zipp", - "3.18.2", - "example.zip", - "Contents of example.zip: ['example.zip/example.txt']", - "", - skip_python_version=[(3, 6), (3, 7), (3, 8)], - ), - PackageForTesting( - "typing-extensions", - "4.11.0", - "", - "", - "", - 
import_name="typing_extensions", - test_e2e=False, - skip_python_version=[(3, 6), (3, 7), (3, 8)], - ), + ## Skip due to zipp added to the denylist + # PackageForTesting( + # "zipp", + # "3.18.2", + # "example.zip", + # "Contents of example.zip: ['example.zip/example.txt']", + # "", + # skip_python_version=[(3, 6), (3, 7), (3, 8)], + # ), + ## Skip due to typing-extensions added to the denylist + # PackageForTesting( + # "typing-extensions", + # "4.11.0", + # "", + # "", + # "", + # import_name="typing_extensions", + # test_e2e=False, + # skip_python_version=[(3, 6), (3, 7), (3, 8)], + # ), PackageForTesting( "six", "1.16.0", @@ -687,14 +701,15 @@ def uninstall(self, python_cmd): "", import_name="jwt", ), - PackageForTesting( - "wrapt", - "1.16.0", - "some-value", - "Function executed with param: some-value", - "", - test_propagation=True, - ), + ## Skip due to pyarrow added to the denylist + # PackageForTesting( + # "wrapt", + # "1.16.0", + # "some-value", + # "Function executed with param: some-value", + # "", + # test_propagation=True, + # ), PackageForTesting( "cachetools", "5.3.3", @@ -804,16 +819,17 @@ def uninstall(self, python_cmd): "", import_name="OpenSSL.SSL", ), - PackageForTesting( - "moto[s3]", - "5.0.11", - "some_bucket", - "right_result", - "", - import_name="moto.s3.models", - test_e2e=True, - extras=[("boto3", "1.34.143")], - ), + ## Skip due to pyarrow added to the denylist + # PackageForTesting( + # "moto[s3]", + # "5.0.11", + # "some_bucket", + # "right_result", + # "", + # import_name="moto.s3.models", + # test_e2e=True, + # extras=[("boto3", "1.34.143")], + # ), PackageForTesting("decorator", "5.1.1", "World", "Decorated result: Hello, World!", ""), # TODO: e2e implemented but fails unpatched: "RateLimiter object has no attribute _is_allowed" PackageForTesting( diff --git a/tests/appsec/integrations/django_tests/__init__.py b/tests/appsec/integrations/django_tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/tests/appsec/integrations/django_tests/conftest.py b/tests/appsec/integrations/django_tests/conftest.py new file mode 100644 index 00000000000..76ffa4a3763 --- /dev/null +++ b/tests/appsec/integrations/django_tests/conftest.py @@ -0,0 +1,66 @@ +import os + +import django +from django.conf import settings +import pytest + +from ddtrace import Pin +from ddtrace.appsec._iast import enable_iast_propagation +from ddtrace.contrib.internal.django.patch import patch +from tests.appsec.iast.conftest import _end_iast_context_and_oce +from tests.appsec.iast.conftest import _start_iast_context_and_oce +from tests.utils import DummyTracer +from tests.utils import TracerSpanContainer +from tests.utils import override_global_config + + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.appsec.integrations.django_tests.django_app.settings") + + +# `pytest` automatically calls this function once when tests are run. +def pytest_configure(): + with override_global_config( + dict( + _iast_enabled=True, + _deduplication_enabled=False, + _iast_request_sampling=100.0, + ) + ): + settings.DEBUG = False + enable_iast_propagation() + patch() + django.setup() + + +@pytest.fixture +def tracer(): + tracer = DummyTracer() + # Patch Django and override tracer to be our test tracer + pin = Pin.get_from(django) + original_tracer = pin.tracer + Pin.override(django, tracer=tracer) + + # Yield to our test + yield tracer + tracer.pop() + + # Reset the tracer pinned to Django and unpatch + # DEV: unable to properly unpatch and reload django app with each test + # unpatch() + Pin.override(django, tracer=original_tracer) + + +@pytest.fixture +def test_spans(tracer): + with override_global_config( + dict( + _iast_enabled=True, + _deduplication_enabled=False, + _iast_request_sampling=100.0, + ) + ): + container = TracerSpanContainer(tracer) + _start_iast_context_and_oce() + yield container + _end_iast_context_and_oce() + container.reset() diff --git 
a/tests/appsec/integrations/django_tests/django_app/__init__.py b/tests/appsec/integrations/django_tests/django_app/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/appsec/integrations/django_tests/django_app/settings.py b/tests/appsec/integrations/django_tests/django_app/settings.py new file mode 100644 index 00000000000..9883d69f5ba --- /dev/null +++ b/tests/appsec/integrations/django_tests/django_app/settings.py @@ -0,0 +1,77 @@ +import os + +from ddtrace import tracer +from tests.webclient import PingFilter + + +tracer.configure( + settings={ + "FILTERS": [PingFilter()], + } +) + + +ALLOWED_HOSTS = [ + "testserver", + "localhost", +] + +BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +DATABASES = { + "default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}, +} + + +CACHES = { + "default": { + "BACKEND": "django.core.cache.backends.locmem.LocMemCache", + "LOCATION": "unique-snowflake", + }, + "pylibmc": { + "BACKEND": "django.core.cache.backends.memcached.PyLibMCCache", + "LOCATION": "127.0.0.1:11211", + }, +} + +SITE_ID = 1 +SECRET_KEY = "not_very_secret_in_tests" +USE_I18N = True +USE_L10N = True +STATIC_URL = "/static/" +ROOT_URLCONF = "tests.appsec.integrations.django_tests.django_app.urls" + +TEMPLATES = [ + { + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": [ + os.path.join(BASE_DIR, "templates"), + ], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", + ], + }, + }, +] + +MIDDLEWARE = [ + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.common.CommonMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", 
+ "django.middleware.clickjacking.XFrameOptionsMiddleware", + "django.middleware.security.SecurityMiddleware", +] + +INSTALLED_APPS = [ + "django.contrib.admin", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.sessions", +] diff --git a/tests/appsec/integrations/django_tests/django_app/urls.py b/tests/appsec/integrations/django_tests/django_app/urls.py new file mode 100644 index 00000000000..be2d142baa2 --- /dev/null +++ b/tests/appsec/integrations/django_tests/django_app/urls.py @@ -0,0 +1,84 @@ +import django +from django.http import HttpResponse +from django.urls import path + +from ddtrace import tracer +from tests.appsec.integrations.django_tests.django_app import views + + +# django.conf.urls.url was deprecated in django 3 and removed in django 4 +if django.VERSION < (4, 0, 0): + from django.conf.urls import url as handler +else: + from django.urls import re_path as handler + + +def shutdown(request): + # Endpoint used to flush traces to the agent when doing snapshots. + tracer.shutdown() + return HttpResponse(status=200) + + +urlpatterns = [ + handler(r"^$", views.index), + # This must precede composed-view. 
+ handler("appsec/response-header/$", views.magic_header_key, name="response-header"), + handler("appsec/body/$", views.body_view, name="body_view"), + handler("appsec/view_with_exception/$", views.view_with_exception, name="view_with_exception"), + handler("appsec/weak-hash/$", views.weak_hash_view, name="weak_hash"), + handler("appsec/block/$", views.block_callable_view, name="block"), + handler("appsec/command-injection/$", views.command_injection, name="command_injection"), + handler("appsec/header-injection/$", views.header_injection, name="header_injection"), + handler("appsec/taint-checking-enabled/$", views.taint_checking_enabled_view, name="taint_checking_enabled_view"), + handler( + "appsec/taint-checking-disabled/$", views.taint_checking_disabled_view, name="taint_checking_disabled_view" + ), + handler( + "appsec/sqli_http_request_parameter/$", views.sqli_http_request_parameter, name="sqli_http_request_parameter" + ), + handler( + "appsec/sqli_http_request_parameter_name_get/$", + views.sqli_http_request_parameter_name_get, + name="sqli_http_request_parameter_name_get", + ), + handler( + "appsec/sqli_http_request_parameter_name_post/$", + views.sqli_http_request_parameter_name_post, + name="sqli_http_request_parameter_name_post", + ), + handler( + "appsec/sqli_http_request_header_name/$", + views.sqli_http_request_header_name, + name="sqli_http_request_header_name", + ), + handler( + "appsec/sqli_http_request_header_value/$", + views.sqli_http_request_header_value, + name="sqli_http_request_header_value", + ), + handler( + "appsec/sqli_http_request_cookie_name/$", + views.sqli_http_request_cookie_name, + name="sqli_http_request_cookie_name", + ), + handler( + "appsec/sqli_http_request_cookie_value/$", + views.sqli_http_request_cookie_value, + name="sqli_http_request_cookie_value", + ), + handler("appsec/sqli_http_request_body/$", views.sqli_http_request_body, name="sqli_http_request_body"), + handler("appsec/source/body/$", views.source_body_view, 
name="source_body"), + handler("appsec/insecure-cookie/test_insecure_2_1/$", views.view_insecure_cookies_two_insecure_one_secure), + handler("appsec/insecure-cookie/test_insecure_special/$", views.view_insecure_cookies_insecure_special_chars), + handler("appsec/insecure-cookie/test_insecure/$", views.view_insecure_cookies_insecure), + handler("appsec/insecure-cookie/test_secure/$", views.view_insecure_cookies_secure), + handler("appsec/insecure-cookie/test_empty_cookie/$", views.view_insecure_cookies_empty), + path( + "appsec/sqli_http_path_parameter//", + views.sqli_http_path_parameter, + name="sqli_http_path_parameter", + ), + handler("appsec/validate_querydict/$", views.validate_querydict, name="validate_querydict"), + path("appsec/path-params///", views.path_params_view, name="path-params-view"), + path("appsec/checkuser//", views.checkuser_view, name="checkuser"), +] diff --git a/tests/contrib/django/django_app/appsec_urls.py b/tests/appsec/integrations/django_tests/django_app/views.py similarity index 54% rename from tests/contrib/django/django_app/appsec_urls.py rename to tests/appsec/integrations/django_tests/django_app/views.py index f5b3f359445..ef4fd78b138 100644 --- a/tests/contrib/django/django_app/appsec_urls.py +++ b/tests/appsec/integrations/django_tests/django_app/views.py @@ -1,39 +1,32 @@ +""" +Class based views used for Django tests. 
+""" import hashlib import os -from typing import TYPE_CHECKING # noqa:F401 +from typing import Any -import django from django.db import connection from django.http import HttpResponse from django.http import JsonResponse from ddtrace import tracer from ddtrace.appsec import _asm_request_context -from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect -from ddtrace.appsec._iast._taint_tracking.aspects import decode_aspect -from ddtrace.appsec._iast._utils import _is_python_version_supported as python_supported_by_iast +from ddtrace.appsec._iast._taint_tracking import OriginType +from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted +from ddtrace.appsec._iast.reporter import IastSpanReporter from ddtrace.appsec._trace_utils import block_request_if_user_blocked -from tests.utils import override_env -# django.conf.urls.url was deprecated in django 3 and removed in django 4 -if django.VERSION < (4, 0, 0): - from django.conf.urls import url as handler -else: - from django.urls import re_path as handler +def assert_origin(parameter: Any, origin_type: Any) -> None: + assert is_pyobject_tainted(parameter) + sources, _ = IastSpanReporter.taint_ranges_as_evidence_info(parameter) + assert sources[0].origin == origin_type -if django.VERSION >= (2, 0, 0): - from django.urls import path -else: - from django.conf.urls import url as path - -if TYPE_CHECKING: - from typing import Any # noqa:F401 - - -def include_view(request): - return HttpResponse(status=200) +def index(request): + response = HttpResponse("Hello, test app.") + response["my-response-header"] = "my_response_value" + return response def path_params_view(request, year, month): @@ -52,6 +45,8 @@ def body_view(request): return HttpResponse(data, status=200) else: data = request.POST + first_post_key = list(request.POST.keys())[0] + assert_origin(first_post_key, OriginType.PARAMETER_NAME) return HttpResponse(str(dict(data)), status=200) @@ -81,7 +76,25 @@ def 
sqli_http_request_parameter(request): obj = password_django.encode("i'm a password", bcrypt.gensalt()) with connection.cursor() as cursor: # label iast_enabled_sqli_http_request_parameter - cursor.execute(add_aspect(add_aspect(request.GET["q"], obj), "'")) + cursor.execute(request.GET["q"] + obj + "'") + + return HttpResponse(request.META["HTTP_USER_AGENT"], status=200) + + +def sqli_http_request_parameter_name_get(request): + obj = " 1" + with connection.cursor() as cursor: + # label iast_enabled_sqli_http_request_parameter_name_get + cursor.execute(list(request.GET.keys())[0] + obj) + + return HttpResponse(request.META["HTTP_USER_AGENT"], status=200) + + +def sqli_http_request_parameter_name_post(request): + obj = " 1" + with connection.cursor() as cursor: + # label iast_enabled_sqli_http_request_parameter_name_post + cursor.execute(list(request.POST.keys())[0] + obj) return HttpResponse(request.META["HTTP_USER_AGENT"], status=200) @@ -91,7 +104,7 @@ def sqli_http_request_header_name(request): with connection.cursor() as cursor: # label iast_enabled_sqli_http_request_header_name - cursor.execute(add_aspect("SELECT 1 FROM sqlite_", key)) + cursor.execute("SELECT 1 FROM sqlite_" + key) return HttpResponse(request.META["master"], status=200) @@ -99,7 +112,7 @@ def sqli_http_request_header_name(request): def sqli_http_request_header_value(request): value = [x for x in request.META.values() if x == "master"][0] with connection.cursor() as cursor: - query = add_aspect("SELECT 1 FROM sqlite_", value) + query = "SELECT 1 FROM sqlite_" + value # label iast_enabled_sqli_http_request_header_value cursor.execute(query) @@ -107,11 +120,8 @@ def sqli_http_request_header_value(request): def sqli_http_path_parameter(request, q_http_path_parameter): - with override_env({"DD_IAST_ENABLED": "True"}): - from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect - with connection.cursor() as cursor: - query = add_aspect("SELECT 1 from ", q_http_path_parameter) + query = 
"SELECT 1 from " + q_http_path_parameter # label iast_enabled_full_sqli_http_path_parameter cursor.execute(query) @@ -119,49 +129,28 @@ def sqli_http_path_parameter(request, q_http_path_parameter): def taint_checking_enabled_view(request): - if python_supported_by_iast(): - with override_env({"DD_IAST_ENABLED": "True"}): - from ddtrace.appsec._iast._taint_tracking import OriginType - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - from ddtrace.appsec._iast.reporter import IastSpanReporter - - def assert_origin_path(path): # type: (Any) -> None - assert is_pyobject_tainted(path) - sources, tainted_ranges_to_dict = IastSpanReporter.taint_ranges_as_evidence_info(path) - assert sources[0].origin == OriginType.PATH - - else: - - def assert_origin_path(pyobject): # type: (Any) -> bool - return True - - def is_pyobject_tainted(pyobject): # type: (Any) -> bool - return True - # TODO: Taint request body # assert is_pyobject_tainted(request.body) + first_get_key = list(request.GET.keys())[0] assert is_pyobject_tainted(request.GET["q"]) + assert is_pyobject_tainted(first_get_key) assert is_pyobject_tainted(request.META["QUERY_STRING"]) assert is_pyobject_tainted(request.META["HTTP_USER_AGENT"]) # TODO: Taint request headers # assert is_pyobject_tainted(request.headers["User-Agent"]) - assert_origin_path(request.path_info) - assert_origin_path(request.path) - assert_origin_path(request.META["PATH_INFO"]) + assert_origin(request.path_info, OriginType.PATH) + assert_origin(request.path, OriginType.PATH) + assert_origin(request.META["PATH_INFO"], OriginType.PATH) + assert_origin(request.GET["q"], OriginType.PARAMETER) + assert_origin(first_get_key, OriginType.PARAMETER_NAME) + return HttpResponse(request.META["HTTP_USER_AGENT"], status=200) def taint_checking_disabled_view(request): - if python_supported_by_iast(): - with override_env({"DD_IAST_ENABLED": "True"}): - from ddtrace.appsec._iast._taint_tracking._taint_objects import 
is_pyobject_tainted - else: - - def is_pyobject_tainted(pyobject): # type: (Any) -> bool - return False - assert not is_pyobject_tainted(request.body) assert not is_pyobject_tainted(request.GET["q"]) + assert not is_pyobject_tainted(list(request.GET.keys())[0]) assert not is_pyobject_tainted(request.META["QUERY_STRING"]) assert not is_pyobject_tainted(request.META["HTTP_USER_AGENT"]) assert not is_pyobject_tainted(request.headers["User-Agent"]) @@ -181,7 +170,7 @@ def sqli_http_request_cookie_name(request): with connection.cursor() as cursor: # label iast_enabled_sqli_http_cookies_name - cursor.execute(add_aspect("SELECT 1 FROM sqlite_", key)) + cursor.execute("SELECT 1 FROM sqlite_" + key) return HttpResponse(request.COOKIES["master"], status=200) @@ -191,7 +180,7 @@ def sqli_http_request_cookie_value(request): with connection.cursor() as cursor: # label iast_enabled_sqli_http_cookies_value - cursor.execute(add_aspect("SELECT 1 FROM sqlite_", value)) + cursor.execute("SELECT 1 FROM sqlite_" + value) return HttpResponse(request.COOKIES["master"], status=200) @@ -201,19 +190,19 @@ def sqli_http_request_body(request): if key in request.POST: value = request.POST[key] else: - value = decode_aspect(bytes.decode, 1, request.body) + value = request.body.decode() with connection.cursor() as cursor: # label iast_enabled_sqli_http_body - cursor.execute(add_aspect("SELECT 1 FROM sqlite_", value)) + cursor.execute("SELECT 1 FROM sqlite_" + value) return HttpResponse(value, status=200) def source_body_view(request): - value = decode_aspect(bytes.decode, 1, request.body) + value = request.body.decode() with connection.cursor() as cursor: # label source_body_view - cursor.execute(add_aspect("SELECT 1 FROM sqlite_master WHERE type='1'", value)) + cursor.execute("SELECT 1 FROM sqlite_master WHERE type='1'" + value) return HttpResponse(value, status=200) @@ -260,15 +249,15 @@ def view_insecure_cookies_insecure_special_chars(request): def command_injection(request): - value = 
decode_aspect(bytes.decode, 1, request.body) + value = request.body.decode() # label iast_command_injection - os.system(add_aspect("dir -l ", value)) + os.system("dir -l " + value) return HttpResponse("OK", status=200) def header_injection(request): - value = decode_aspect(bytes.decode, 1, request.body) + value = request.body.decode() response = HttpResponse("OK", status=200) # label iast_header_injection @@ -284,45 +273,3 @@ def validate_querydict(request): return HttpResponse( "x=%s, all=%s, keys=%s, urlencode=%s" % (str(res), str(lres), str(keys), qd.urlencode()), status=200 ) - - -urlpatterns = [ - handler("response-header/$", magic_header_key, name="response-header"), - handler("body/$", body_view, name="body_view"), - handler("view_with_exception/$", view_with_exception, name="view_with_exception"), - handler("weak-hash/$", weak_hash_view, name="weak_hash"), - handler("block/$", block_callable_view, name="block"), - handler("command-injection/$", command_injection, name="command_injection"), - handler("header-injection/$", header_injection, name="header_injection"), - handler("taint-checking-enabled/$", taint_checking_enabled_view, name="taint_checking_enabled_view"), - handler("taint-checking-disabled/$", taint_checking_disabled_view, name="taint_checking_disabled_view"), - handler("sqli_http_request_parameter/$", sqli_http_request_parameter, name="sqli_http_request_parameter"), - handler("sqli_http_request_header_name/$", sqli_http_request_header_name, name="sqli_http_request_header_name"), - handler("sqli_http_request_header_value/$", sqli_http_request_header_value, name="sqli_http_request_header_value"), - handler("sqli_http_request_cookie_name/$", sqli_http_request_cookie_name, name="sqli_http_request_cookie_name"), - handler("sqli_http_request_cookie_value/$", sqli_http_request_cookie_value, name="sqli_http_request_cookie_value"), - handler("sqli_http_request_body/$", sqli_http_request_body, name="sqli_http_request_body"), - handler("source/body/$", 
source_body_view, name="source_body"), - handler("insecure-cookie/test_insecure_2_1/$", view_insecure_cookies_two_insecure_one_secure), - handler("insecure-cookie/test_insecure_special/$", view_insecure_cookies_insecure_special_chars), - handler("insecure-cookie/test_insecure/$", view_insecure_cookies_insecure), - handler("insecure-cookie/test_secure/$", view_insecure_cookies_secure), - handler("insecure-cookie/test_empty_cookie/$", view_insecure_cookies_empty), - path( - "sqli_http_path_parameter//", - sqli_http_path_parameter, - name="sqli_http_path_parameter", - ), - handler("validate_querydict/$", validate_querydict, name="validate_querydict"), -] - -if django.VERSION >= (2, 0, 0): - urlpatterns += [ - path("path-params///", path_params_view, name="path-params-view"), - path("checkuser//", checkuser_view, name="checkuser"), - ] -else: - urlpatterns += [ - path(r"path-params/(?P[0-9]{4})/(?P\w+)/$", path_params_view, name="path-params-view"), - path(r"checkuser/(?P\w+)/$", checkuser_view, name="checkuser"), - ] diff --git a/tests/contrib/django/test_django_appsec.py b/tests/appsec/integrations/django_tests/test_django_appsec.py similarity index 98% rename from tests/contrib/django/test_django_appsec.py rename to tests/appsec/integrations/django_tests/test_django_appsec.py index 3c5cb399739..e8515a75a2c 100644 --- a/tests/contrib/django/test_django_appsec.py +++ b/tests/appsec/integrations/django_tests/test_django_appsec.py @@ -48,7 +48,7 @@ def _aux_appsec_get_root_span( if cookies is None: cookies = {} # Hack: need to pass an argument to configure so that the processors are recreated - tracer.configure(api_version="v0.4") + tracer._configure(api_version="v0.4") # Set cookies client.cookies.load(cookies) if payload is None: @@ -235,7 +235,9 @@ def test_django_login_sucess_anonymization(client, test_spans, tracer, use_login assert login_span.get_tag(user.ID) == "1" assert login_span.get_tag("appsec.events.users.login.success.track") == "true" assert 
login_span.get_tag(APPSEC.AUTO_LOGIN_EVENTS_SUCCESS_MODE) == LOGIN_EVENTS_MODE.ANON - assert login_span.get_tag(APPSEC.USER_LOGIN_EVENT_PREFIX + ".success.login") is None + assert login_span.get_tag(APPSEC.USER_LOGIN_EVENT_PREFIX + ".success.login") == ( + "anon_d1ad1f735a4381c2e8dbed0222db1136" if use_login else None + ) assert login_span.get_tag(APPSEC.USER_LOGIN_EVENT_PREFIX + ".success.email") is None assert login_span.get_tag(APPSEC.USER_LOGIN_EVENT_PREFIX + ".success.username") is None @@ -368,7 +370,10 @@ def test_django_login_sucess_anonymization_but_user_set_login(client, test_spans assert login_span.get_tag(user.ID) == "anon_d1ad1f735a4381c2e8dbed0222db1136" assert login_span.get_tag("appsec.events.users.login.success.track") == "true" assert login_span.get_tag(APPSEC.AUTO_LOGIN_EVENTS_SUCCESS_MODE) == LOGIN_EVENTS_MODE.ANON - assert not login_span.get_tag(APPSEC.USER_LOGIN_EVENT_PREFIX + ".success.login") + assert ( + login_span.get_tag(APPSEC.USER_LOGIN_EVENT_PREFIX + ".success.login") + == "anon_d1ad1f735a4381c2e8dbed0222db1136" + ) assert not login_span.get_tag(APPSEC.USER_LOGIN_EVENT_PREFIX_PUBLIC + ".success.email") assert not login_span.get_tag(APPSEC.USER_LOGIN_EVENT_PREFIX_PUBLIC + ".success.username") diff --git a/tests/contrib/django/test_django_appsec_iast.py b/tests/appsec/integrations/django_tests/test_django_appsec_iast.py similarity index 87% rename from tests/contrib/django/test_django_appsec_iast.py rename to tests/appsec/integrations/django_tests/test_django_appsec_iast.py index ee5cb069331..d2c52337482 100644 --- a/tests/contrib/django/test_django_appsec_iast.py +++ b/tests/appsec/integrations/django_tests/test_django_appsec_iast.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- import json -import logging -import re import pytest @@ -19,7 +17,7 @@ from tests.utils import override_global_config -TEST_FILE = "tests/contrib/django/django_app/appsec_urls.py" +TEST_FILE = "tests/appsec/integrations/django_tests/django_app/views.py" 
@pytest.fixture(autouse=True) @@ -30,29 +28,6 @@ def iast_context(): yield -# The log contains "[IAST]" but "[IAST] create_context" or "[IAST] reset_context" are valid -IAST_VALID_LOG = re.compile(r"(?=.*\[IAST\] )(?!.*\[IAST\] (create_context|reset_context))") - - -@pytest.fixture(autouse=True) -def check_native_code_exception_in_each_django_test(request, caplog, telemetry_writer): - if "skip_iast_check_logs" in request.keywords: - yield - else: - caplog.set_level(logging.DEBUG) - with override_env({"_DD_IAST_USE_ROOT_SPAN": "false"}), override_global_config( - dict(_iast_debug=True) - ), caplog.at_level(logging.DEBUG): - yield - - log_messages = [record.message for record in caplog.get_records("call")] - for message in log_messages: - if IAST_VALID_LOG.search(message): - pytest.fail(message) - list_metrics_logs = list(telemetry_writer._logs) - assert len(list_metrics_logs) == 0 - - def _aux_appsec_get_root_span( client, test_spans, @@ -66,7 +41,7 @@ def _aux_appsec_get_root_span( if cookies is None: cookies = {} # Hack: need to pass an argument to configure so that the processors are recreated - tracer.configure(api_version="v0.4") + tracer._configure(api_version="v0.4") # Set cookies client.cookies.load(cookies) if payload is None: @@ -245,6 +220,105 @@ def test_django_tainted_user_agent_iast_enabled_sqli_http_request_parameter(clie assert loaded["vulnerabilities"][0]["hash"] == hash_value +@pytest.mark.django_db() +@pytest.mark.skipif(not python_supported_by_iast(), reason="Python version not supported by IAST") +def test_django_tainted_user_agent_iast_enabled_sqli_http_request_parameter_name_get(client, test_spans, tracer): + with override_global_config(dict(_iast_enabled=True, _deduplication_enabled=False, _iast_request_sampling=100.0)): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + content_type="application/x-www-form-urlencoded", + url="/appsec/sqli_http_request_parameter_name_get/?SELECT=unused", + 
headers={"HTTP_USER_AGENT": "test/1.2.3"}, + ) + + vuln_type = "SQL_INJECTION" + + assert response.status_code == 200 + assert response.content == b"test/1.2.3" + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + + line, hash_value = get_line_and_hash( + "iast_enabled_sqli_http_request_parameter_name_get", vuln_type, filename=TEST_FILE + ) + + assert loaded["sources"] == [ + { + "name": "SELECT", + "origin": "http.request.parameter.name", + "value": "SELECT", + } + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"source": 0, "value": "SELECT"}, + { + "value": " ", + }, + { + "redacted": True, + }, + ] + } + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + + +@pytest.mark.django_db() +@pytest.mark.skipif(not python_supported_by_iast(), reason="Python version not supported by IAST") +def test_django_tainted_user_agent_iast_enabled_sqli_http_request_parameter_name_post(client, test_spans, tracer): + with override_global_config(dict(_iast_enabled=True, _deduplication_enabled=False, _iast_request_sampling=100.0)): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + payload=urlencode({"SELECT": "unused"}), + content_type="application/x-www-form-urlencoded", + url="/appsec/sqli_http_request_parameter_name_post/", + headers={"HTTP_USER_AGENT": "test/1.2.3"}, + ) + + vuln_type = "SQL_INJECTION" + + assert response.status_code == 200 + assert response.content == b"test/1.2.3" + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + + line, hash_value = get_line_and_hash( + "iast_enabled_sqli_http_request_parameter_name_post", vuln_type, filename=TEST_FILE + ) + + assert loaded["sources"] == [ + { + "name": "SELECT", + "origin": "http.request.parameter.name", + "value": "SELECT", + } + ] + + assert 
loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"source": 0, "value": "SELECT"}, + { + "value": " ", + }, + { + "redacted": True, + }, + ] + } + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + + @pytest.mark.django_db() @pytest.mark.skipif(not python_supported_by_iast(), reason="Python version not supported by IAST") def test_django_tainted_user_agent_iast_enabled_sqli_http_request_header_value(client, test_spans, tracer): @@ -364,37 +438,36 @@ def test_django_tainted_user_agent_iast_disabled_sqli_http_request_header_name(c @pytest.mark.django_db() @pytest.mark.skipif(not python_supported_by_iast(), reason="Python version not supported by IAST") def test_django_iast_enabled_full_sqli_http_path_parameter(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True)): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/sqli_http_path_parameter/sqlite_master/", - headers={"HTTP_USER_AGENT": "test/1.2.3"}, - ) - assert response.status_code == 200 - assert response.content == b"test/1.2.3" - - loaded = json.loads(root_span.get_tag(IAST.JSON)) - - assert loaded["sources"] == [ - {"origin": "http.request.path.parameter", "name": "q_http_path_parameter", "value": "sqlite_master"} + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/sqli_http_path_parameter/sqlite_master/", + headers={"HTTP_USER_AGENT": "test/1.2.3"}, + ) + assert response.status_code == 200 + assert response.content == b"test/1.2.3" + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + + assert loaded["sources"] == [ + {"origin": "http.request.path.parameter", "name": "q_http_path_parameter", "value": "sqlite_master"} + ] + assert loaded["vulnerabilities"][0]["type"] 
== VULN_SQL_INJECTION + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"value": "SELECT "}, + {"redacted": True}, + {"value": " from "}, + {"value": "sqlite_master", "source": 0}, ] - assert loaded["vulnerabilities"][0]["type"] == VULN_SQL_INJECTION - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [ - {"value": "SELECT "}, - {"redacted": True}, - {"value": " from "}, - {"value": "sqlite_master", "source": 0}, - ] - } - line, hash_value = get_line_and_hash( - "iast_enabled_full_sqli_http_path_parameter", VULN_SQL_INJECTION, filename=TEST_FILE - ) - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["hash"] == hash_value + } + line, hash_value = get_line_and_hash( + "iast_enabled_full_sqli_http_path_parameter", VULN_SQL_INJECTION, filename=TEST_FILE + ) + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value @pytest.mark.django_db() diff --git a/tests/appsec/integrations/flask_tests/__init__.py b/tests/appsec/integrations/flask_tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/appsec/integrations/module_with_import_errors.py b/tests/appsec/integrations/flask_tests/module_with_import_errors.py similarity index 100% rename from tests/appsec/integrations/module_with_import_errors.py rename to tests/appsec/integrations/flask_tests/module_with_import_errors.py diff --git a/tests/appsec/integrations/test_flask_remoteconfig.py b/tests/appsec/integrations/flask_tests/test_flask_remoteconfig.py similarity index 94% rename from tests/appsec/integrations/test_flask_remoteconfig.py rename to tests/appsec/integrations/flask_tests/test_flask_remoteconfig.py index eaa69e04182..4ddf1ee8f64 100644 --- 
a/tests/appsec/integrations/test_flask_remoteconfig.py +++ b/tests/appsec/integrations/flask_tests/test_flask_remoteconfig.py @@ -14,9 +14,9 @@ from ddtrace.internal.compat import httplib from ddtrace.internal.compat import parse from tests.appsec.appsec_utils import gunicorn_server -from tests.appsec.integrations.utils import _PORT -from tests.appsec.integrations.utils import _multi_requests -from tests.appsec.integrations.utils import _request_200 +from tests.appsec.integrations.flask_tests.utils import _PORT +from tests.appsec.integrations.flask_tests.utils import _multi_requests +from tests.appsec.integrations.flask_tests.utils import _request_200 from tests.utils import flaky @@ -187,6 +187,7 @@ def _request_403(client, debug_mode=False, max_retries=40, sleep_time=1): raise AssertionError("request_403 failed, max_retries=%d, sleep_time=%f" % (max_retries, sleep_time)) +@flaky(until=1706677200, reason="TODO(avara1986): We need to migrate testagent to gitlab") @pytest.mark.skipif(sys.version_info >= (3, 11), reason="Gunicorn is only supported up to 3.10") def test_load_testing_appsec_ip_blocking_gunicorn_rc_disabled(): token = "test_load_testing_appsec_ip_blocking_gunicorn_rc_disabled_{}".format(str(uuid.uuid4())) @@ -202,6 +203,7 @@ def test_load_testing_appsec_ip_blocking_gunicorn_rc_disabled(): _unblock_ip(token) +@flaky(until=1706677200, reason="TODO(avara1986): We need to migrate testagent to gitlab") @pytest.mark.skipif(sys.version_info >= (3, 11), reason="Gunicorn is only supported up to 3.10") def test_load_testing_appsec_ip_blocking_gunicorn_block(): token = "test_load_testing_appsec_ip_blocking_gunicorn_block_{}".format(str(uuid.uuid4())) @@ -219,6 +221,7 @@ def test_load_testing_appsec_ip_blocking_gunicorn_block(): _request_200(gunicorn_client) +@flaky(until=1706677200, reason="TODO(avara1986): We need to migrate testagent to gitlab") @pytest.mark.skipif(list(sys.version_info[:2]) != [3, 10], reason="Run this tests in python 3.10") def 
test_load_testing_appsec_ip_blocking_gunicorn_block_and_kill_child_worker(): token = "test_load_testing_appsec_ip_blocking_gunicorn_block_and_kill_child_worker_{}".format(str(uuid.uuid4())) @@ -267,7 +270,7 @@ def test_load_testing_appsec_1click_and_ip_blocking_gunicorn_block_and_kill_chil _request_200(gunicorn_client, debug_mode=False) -@pytest.mark.subprocess(ddtrace_run=True, out=b"success") +@pytest.mark.subprocess(ddtrace_run=True, err=None, out=b"success") def test_compatiblity_with_multiprocessing(): import multiprocessing from multiprocessing import Array diff --git a/tests/appsec/integrations/test_gunicorn_handlers.py b/tests/appsec/integrations/flask_tests/test_gunicorn_handlers.py similarity index 100% rename from tests/appsec/integrations/test_gunicorn_handlers.py rename to tests/appsec/integrations/flask_tests/test_gunicorn_handlers.py diff --git a/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py b/tests/appsec/integrations/flask_tests/test_iast_flask_entrypoint_iast_patches.py similarity index 85% rename from tests/appsec/integrations/test_flask_entrypoint_iast_patches.py rename to tests/appsec/integrations/flask_tests/test_iast_flask_entrypoint_iast_patches.py index 4f54bc675c3..7b15868c270 100644 --- a/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py +++ b/tests/appsec/integrations/flask_tests/test_iast_flask_entrypoint_iast_patches.py @@ -1,9 +1,10 @@ -import pytest +import sys -from tests.utils import flaky +import pytest -@pytest.mark.subprocess() +@pytest.mark.skipif(sys.version_info >= (3, 13, 0), reason="Test not compatible with Python 3.13") +@pytest.mark.subprocess(err=None) def test_ddtrace_iast_flask_patch(): import dis import io @@ -35,7 +36,35 @@ def test_ddtrace_iast_flask_patch(): del sys.modules["tests.appsec.iast.fixtures.entrypoint.app_main_patched"] -@pytest.mark.subprocess() +@pytest.mark.skipif(sys.version_info < (3, 13, 0), reason="Test compatible with Python 3.13") 
+@pytest.mark.subprocess(err=None) +def test_ddtrace_iast_flask_patch_py313(): + import dis + import io + import re + import sys + + from tests.utils import override_env + from tests.utils import override_global_config + + PATTERN = r"""LOAD_GLOBAL 0 \(_ddtrace_aspects\)""" + + with override_global_config(dict(_iast_enabled=True)), override_env( + dict(DD_IAST_ENABLED="true", DD_IAST_REQUEST_SAMPLING="100") + ): + import tests.appsec.iast.fixtures.entrypoint.app_main_patched as flask_entrypoint + + dis_output = io.StringIO() + dis.dis(flask_entrypoint, file=dis_output) + str_output = dis_output.getvalue() + # Should have replaced the binary op with the aspect in add_test: + assert re.search(PATTERN, str_output), str_output + # Should have replaced the app.run() with a pass: + # assert "Disassembly of run" not in str_output, str_output + del sys.modules["tests.appsec.iast.fixtures.entrypoint.app_main_patched"] + + +@pytest.mark.subprocess(err=None) def test_ddtrace_iast_flask_patch_iast_disabled(): import dis import io @@ -62,7 +91,7 @@ def _uninstall_watchdog_and_reload(): del sys.modules["tests.appsec.iast.fixtures.entrypoint.app_main_patched"] -@pytest.mark.subprocess() +@pytest.mark.subprocess(err=None) def test_ddtrace_iast_flask_no_patch(): import dis import io @@ -93,7 +122,7 @@ def _uninstall_watchdog_and_reload(): del sys.modules["tests.appsec.iast.fixtures.entrypoint.app"] -@pytest.mark.subprocess() +@pytest.mark.subprocess(err=None) def test_ddtrace_iast_flask_app_create_app_enable_iast_propagation(): import dis import io @@ -125,7 +154,7 @@ def _uninstall_watchdog_and_reload(): del sys.modules["tests.appsec.iast.fixtures.entrypoint.views"] -@pytest.mark.subprocess() +@pytest.mark.subprocess(err=None) def test_ddtrace_iast_flask_app_create_app_patch_all(): import dis import io @@ -155,8 +184,7 @@ def _uninstall_watchdog_and_reload(): del sys.modules["tests.appsec.iast.fixtures.entrypoint.views"] -@flaky(1736035200) 
-@pytest.mark.subprocess(check_logs=False) +@pytest.mark.subprocess(err=None) def test_ddtrace_iast_flask_app_create_app_patch_all_enable_iast_propagation(): import dis import io @@ -187,7 +215,7 @@ def _uninstall_watchdog_and_reload(): del sys.modules["tests.appsec.iast.fixtures.entrypoint.views"] -@pytest.mark.subprocess() +@pytest.mark.subprocess(err=None) def test_ddtrace_iast_flask_app_create_app_patch_all_enable_iast_propagation_disabled(): import dis import io diff --git a/tests/appsec/integrations/test_flask_iast_patching.py b/tests/appsec/integrations/flask_tests/test_iast_flask_patching.py similarity index 93% rename from tests/appsec/integrations/test_flask_iast_patching.py rename to tests/appsec/integrations/flask_tests/test_iast_flask_patching.py index 3291297ea92..8cb3b2e7730 100644 --- a/tests/appsec/integrations/test_flask_iast_patching.py +++ b/tests/appsec/integrations/flask_tests/test_iast_flask_patching.py @@ -2,9 +2,8 @@ from tests.appsec.appsec_utils import flask_server from tests.appsec.appsec_utils import gunicorn_server -from tests.appsec.integrations.utils import _PORT -from tests.appsec.integrations.utils import _request_200 -from tests.utils import flaky +from tests.appsec.integrations.flask_tests.utils import _PORT +from tests.appsec.integrations.flask_tests.utils import _request_200 def test_flask_iast_ast_patching_import_error(): @@ -28,7 +27,6 @@ def test_flask_iast_ast_patching_import_error(): assert response.content == b"False" -@flaky(until=1706677200, reason="TODO(avara1986): Re.Match contains errors. 
APPSEC-55239") @pytest.mark.parametrize("style", ["re_module", "re_object"]) @pytest.mark.parametrize("endpoint", ["re", "non-re"]) @pytest.mark.parametrize( diff --git a/tests/appsec/integrations/test_flask_telemetry.py b/tests/appsec/integrations/flask_tests/test_iast_flask_telemetry.py similarity index 100% rename from tests/appsec/integrations/test_flask_telemetry.py rename to tests/appsec/integrations/flask_tests/test_iast_flask_telemetry.py diff --git a/tests/appsec/integrations/test_langchain.py b/tests/appsec/integrations/flask_tests/test_iast_langchain.py similarity index 100% rename from tests/appsec/integrations/test_langchain.py rename to tests/appsec/integrations/flask_tests/test_iast_langchain.py diff --git a/tests/appsec/integrations/test_psycopg2.py b/tests/appsec/integrations/flask_tests/test_iast_psycopg2.py similarity index 100% rename from tests/appsec/integrations/test_psycopg2.py rename to tests/appsec/integrations/flask_tests/test_iast_psycopg2.py diff --git a/tests/appsec/integrations/utils.py b/tests/appsec/integrations/flask_tests/utils.py similarity index 100% rename from tests/appsec/integrations/utils.py rename to tests/appsec/integrations/flask_tests/utils.py index 18bf20a1608..3935a9447b4 100644 --- a/tests/appsec/integrations/utils.py +++ b/tests/appsec/integrations/flask_tests/utils.py @@ -2,6 +2,9 @@ import time +_PORT = 8040 + + def _multi_requests(client, url="/", debug_mode=False): if debug_mode: results = [ @@ -47,9 +50,6 @@ def _request_200( raise AssertionError("request_200 failed, max_retries=%d, sleep_time=%f" % (max_retries, sleep_time)) -_PORT = 8040 - - def _request(client, url="/"): response = client.get(url, headers={"X-Forwarded-For": "123.45.67.88"}) return response diff --git a/tests/appsec/suitespec.yml b/tests/appsec/suitespec.yml index 07769271cb9..f075ba2da4a 100644 --- a/tests/appsec/suitespec.yml +++ b/tests/appsec/suitespec.yml @@ -81,7 +81,7 @@ suites: retry: 2 runner: hatch timeout: 50m - 
appsec_integrations: + appsec_integrations_pygoat: parallelism: 7 paths: - '@bootstrap' @@ -90,11 +90,37 @@ suites: - '@appsec' - '@appsec_iast' - '@remoteconfig' - - tests/appsec/* + - tests/appsec/integrations/pygoat_tests/* - tests/snapshots/tests.appsec.* retry: 2 runner: riot snapshot: true + appsec_integrations_flask: + parallelism: 6 + paths: + - '@bootstrap' + - '@core' + - '@tracing' + - '@appsec' + - '@appsec_iast' + - '@remoteconfig' + - tests/appsec/integrations/flask_tests/* + retry: 2 + runner: hatch + timeout: 30m + appsec_integrations_django: + parallelism: 6 + paths: + - '@bootstrap' + - '@core' + - '@tracing' + - '@appsec' + - '@appsec_iast' + - '@remoteconfig' + - tests/appsec/integrations/django_tests/* + retry: 2 + runner: hatch + timeout: 30m appsec_threats_django: parallelism: 12 paths: diff --git a/tests/appsec/utils.py b/tests/appsec/utils.py index 9df1065f005..7d79455361f 100644 --- a/tests/appsec/utils.py +++ b/tests/appsec/utils.py @@ -35,7 +35,7 @@ def asm_context( if tracer is None: tracer = default_tracer if config: - tracer.configure(api_version="v0.4") + tracer._configure(api_version="v0.4") with core.context_with_data( "test.asm", diff --git a/tests/ci_visibility/test_ci_visibility.py b/tests/ci_visibility/test_ci_visibility.py index ba9c7b5359d..9eee9eb43da 100644 --- a/tests/ci_visibility/test_ci_visibility.py +++ b/tests/ci_visibility/test_ci_visibility.py @@ -123,7 +123,10 @@ def test_ci_visibility_service_enable(): assert ci_visibility_instance._service == "test-service" assert ci_visibility_instance._api_settings.coverage_enabled is False assert ci_visibility_instance._api_settings.skipping_enabled is False - assert any(isinstance(tracer_filter, TraceCiVisibilityFilter) for tracer_filter in dummy_tracer._filters) + assert any( + isinstance(tracer_filter, TraceCiVisibilityFilter) + for tracer_filter in dummy_tracer._user_trace_processors + ) CIVisibility.disable() @@ -150,7 +153,10 @@ def 
test_ci_visibility_service_enable_without_service(): assert ci_visibility_instance._service == "test-repo" # Inherited from environment assert ci_visibility_instance._api_settings.coverage_enabled is False assert ci_visibility_instance._api_settings.skipping_enabled is False - assert any(isinstance(tracer_filter, TraceCiVisibilityFilter) for tracer_filter in dummy_tracer._filters) + assert any( + isinstance(tracer_filter, TraceCiVisibilityFilter) + for tracer_filter in dummy_tracer._user_trace_processors + ) CIVisibility.disable() @@ -1114,7 +1120,7 @@ def test_civisibility_enable_respects_passed_in_tracer(): "ddtrace.internal.ci_visibility.recorder.ddconfig", _get_default_civisibility_ddconfig() ), mock.patch("ddtrace.internal.ci_visibility.writer.config", ddtrace.settings.Config()): tracer = ddtrace.Tracer() - tracer.configure(partial_flush_enabled=False, partial_flush_min_spans=100) + tracer._configure(partial_flush_enabled=False, partial_flush_min_spans=100) CIVisibility.enable(tracer=tracer) assert CIVisibility._instance.tracer._partial_flush_enabled is False assert CIVisibility._instance.tracer._partial_flush_min_spans == 100 diff --git a/tests/commands/ddtrace_run_integration.py b/tests/commands/ddtrace_run_integration.py index e52a0c9b8b0..c6cd9320777 100644 --- a/tests/commands/ddtrace_run_integration.py +++ b/tests/commands/ddtrace_run_integration.py @@ -5,7 +5,7 @@ import redis -from ddtrace import Pin +from ddtrace.trace import Pin from tests.contrib.config import REDIS_CONFIG from tests.utils import DummyWriter @@ -16,7 +16,7 @@ assert pin writer = DummyWriter() - pin.tracer.configure(writer=writer) + pin.tracer._configure(writer=writer) r.flushall() spans = writer.pop() diff --git a/tests/contrib/aiobotocore/utils.py b/tests/contrib/aiobotocore/utils.py index 061f8d37847..b51b6550327 100644 --- a/tests/contrib/aiobotocore/utils.py +++ b/tests/contrib/aiobotocore/utils.py @@ -3,7 +3,7 @@ from async_generator import asynccontextmanager from 
async_generator import yield_ -from ddtrace import Pin +from ddtrace.trace import Pin LOCALSTACK_ENDPOINT_URL = { diff --git a/tests/contrib/aiohttp/test_aiohttp_client.py b/tests/contrib/aiohttp/test_aiohttp_client.py index f490bd85ada..2b2b51c2650 100644 --- a/tests/contrib/aiohttp/test_aiohttp_client.py +++ b/tests/contrib/aiohttp/test_aiohttp_client.py @@ -3,10 +3,10 @@ import aiohttp import pytest -from ddtrace import Pin from ddtrace.contrib.internal.aiohttp.patch import extract_netloc_and_query_info_from_url from ddtrace.contrib.internal.aiohttp.patch import patch from ddtrace.contrib.internal.aiohttp.patch import unpatch +from ddtrace.trace import Pin from tests.utils import override_config from tests.utils import override_http_config @@ -101,7 +101,7 @@ async def test_distributed_tracing_disabled(ddtrace_run_python_code_in_subproces import asyncio import sys import aiohttp -from ddtrace import Pin +from ddtrace.trace import Pin from tests.contrib.aiohttp.test_aiohttp_client import URL async def test(): @@ -184,7 +184,7 @@ def test_configure_service_name_pin(ddtrace_run_python_code_in_subprocess): import asyncio import sys import aiohttp -from ddtrace import Pin +from ddtrace.trace import Pin from tests.contrib.aiohttp.test_aiohttp_client import URL_200 async def test(): diff --git a/tests/contrib/aiohttp/test_request_safety.py b/tests/contrib/aiohttp/test_request_safety.py index 5fbdc13ce3a..8b82f611394 100644 --- a/tests/contrib/aiohttp/test_request_safety.py +++ b/tests/contrib/aiohttp/test_request_safety.py @@ -13,7 +13,7 @@ async def test_full_request(patched_app_tracer, aiohttp_client, loop): app, tracer = patched_app_tracer - tracer.configure(context_provider=DefaultContextProvider()) + tracer._configure(context_provider=DefaultContextProvider()) client = await aiohttp_client(app) # it should create a root span when there is a handler hit # with the proper tags @@ -38,7 +38,7 @@ async def test_multiple_full_request(patched_app_tracer, aiohttp_client, 
loop): responses = [] app, tracer = patched_app_tracer - tracer.configure(context_provider=DefaultContextProvider()) + tracer._configure(context_provider=DefaultContextProvider()) client = await aiohttp_client(app) # it should produce a wrong trace, but the Context must diff --git a/tests/contrib/aiohttp_jinja2/conftest.py b/tests/contrib/aiohttp_jinja2/conftest.py index 92c49f015f3..a58b72f7f49 100644 --- a/tests/contrib/aiohttp_jinja2/conftest.py +++ b/tests/contrib/aiohttp_jinja2/conftest.py @@ -3,7 +3,7 @@ from ddtrace.contrib.internal.aiohttp_jinja2.patch import patch from ddtrace.contrib.internal.aiohttp_jinja2.patch import unpatch -from ddtrace.pin import Pin +from ddtrace.trace import Pin from tests.contrib.aiohttp.conftest import app_tracer # noqa:F401 from tests.contrib.aiohttp.conftest import patched_app_tracer # noqa:F401 from tests.contrib.aiohttp.conftest import untraced_app_tracer # noqa:F401 diff --git a/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py b/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py index 222522d66f6..8889d828752 100644 --- a/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py +++ b/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py @@ -1,9 +1,9 @@ import aiohttp_jinja2 import pytest -from ddtrace import Pin from ddtrace import tracer from ddtrace.constants import ERROR_MSG +from ddtrace.trace import Pin from tests.contrib.aiohttp.app.web import set_filesystem_loader from tests.contrib.aiohttp.app.web import set_package_loader import tests.contrib.aiohttp.conftest # noqa:F401 diff --git a/tests/contrib/aiomysql/test_aiomysql.py b/tests/contrib/aiomysql/test_aiomysql.py index 9bd898c52ae..0bf8839dc96 100644 --- a/tests/contrib/aiomysql/test_aiomysql.py +++ b/tests/contrib/aiomysql/test_aiomysql.py @@ -5,11 +5,11 @@ import pymysql import pytest -from ddtrace import Pin from ddtrace import Tracer from ddtrace.contrib.internal.aiomysql.patch import patch from ddtrace.contrib.internal.aiomysql.patch import unpatch from 
ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.contrib import shared_tests_async as shared_tests from tests.contrib.asyncio.utils import AsyncioTestCase from tests.contrib.asyncio.utils import mark_asyncio diff --git a/tests/contrib/aiopg/test.py b/tests/contrib/aiopg/test.py index da741bacca3..eb738e009d8 100644 --- a/tests/contrib/aiopg/test.py +++ b/tests/contrib/aiopg/test.py @@ -4,11 +4,12 @@ from psycopg2 import extras import pytest -# project -from ddtrace import Pin from ddtrace.contrib.internal.aiopg.patch import patch from ddtrace.contrib.internal.aiopg.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME + +# project +from ddtrace.trace import Pin from tests.contrib.asyncio.utils import AsyncioTestCase from tests.contrib.config import POSTGRES_CONFIG from tests.opentracer.utils import init_tracer diff --git a/tests/contrib/algoliasearch/test.py b/tests/contrib/algoliasearch/test.py index 3d563e11035..87f5f7b6910 100644 --- a/tests/contrib/algoliasearch/test.py +++ b/tests/contrib/algoliasearch/test.py @@ -3,7 +3,7 @@ from ddtrace.contrib.internal.algoliasearch.patch import algoliasearch_version from ddtrace.contrib.internal.algoliasearch.patch import patch from ddtrace.contrib.internal.algoliasearch.patch import unpatch -from ddtrace.pin import Pin +from ddtrace.trace import Pin from ddtrace.vendor.packaging.version import parse as parse_version from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/anthropic/conftest.py b/tests/contrib/anthropic/conftest.py index d975204ef7c..3e5dac0a442 100644 --- a/tests/contrib/anthropic/conftest.py +++ b/tests/contrib/anthropic/conftest.py @@ -3,10 +3,10 @@ import mock import pytest -from ddtrace import Pin from ddtrace.contrib.internal.anthropic.patch import patch from ddtrace.contrib.internal.anthropic.patch import unpatch from ddtrace.llmobs import LLMObs +from ddtrace.trace 
import Pin from tests.contrib.anthropic.utils import get_request_vcr from tests.utils import DummyTracer from tests.utils import DummyWriter @@ -37,7 +37,7 @@ def mock_tracer(ddtrace_global_config, anthropic): pin = Pin.get_from(anthropic) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin.override(anthropic, tracer=mock_tracer) - pin.tracer.configure() + pin.tracer._configure() if ddtrace_global_config.get("_llmobs_enabled", False): # Have to disable and re-enable LLMObs to use to mock tracer. LLMObs.disable() diff --git a/tests/contrib/aredis/test_aredis.py b/tests/contrib/aredis/test_aredis.py index c0e5719b80e..e62cfa974be 100644 --- a/tests/contrib/aredis/test_aredis.py +++ b/tests/contrib/aredis/test_aredis.py @@ -5,9 +5,9 @@ import pytest from wrapt import ObjectProxy -from ddtrace import Pin from ddtrace.contrib.internal.aredis.patch import patch from ddtrace.contrib.internal.aredis.patch import unpatch +from ddtrace.trace import Pin from tests.conftest import DEFAULT_DDTRACE_SUBPROCESS_TEST_SERVICE_NAME from tests.opentracer.utils import init_tracer from tests.utils import override_config @@ -152,7 +152,7 @@ def test_schematization_of_service_and_operation(ddtrace_run_python_code_in_subp import pytest import sys from tests.conftest import * -from ddtrace.pin import Pin +from ddtrace.trace import Pin import aredis from tests.contrib.config import REDIS_CONFIG from tests.contrib.aredis.test_aredis import traced_aredis diff --git a/tests/contrib/asyncpg/test_asyncpg.py b/tests/contrib/asyncpg/test_asyncpg.py index 539f64f10ac..032a0f91731 100644 --- a/tests/contrib/asyncpg/test_asyncpg.py +++ b/tests/contrib/asyncpg/test_asyncpg.py @@ -5,11 +5,11 @@ import mock import pytest -from ddtrace import Pin from ddtrace import tracer from ddtrace.contrib.internal.asyncpg.patch import patch from ddtrace.contrib.internal.asyncpg.patch import unpatch from ddtrace.contrib.trace_utils import iswrapped +from ddtrace.trace import Pin from 
tests.contrib.asyncio.utils import AsyncioTestCase from tests.contrib.asyncio.utils import mark_asyncio from tests.contrib.config import POSTGRES_CONFIG diff --git a/tests/contrib/asynctest/test_asynctest.py b/tests/contrib/asynctest/test_asynctest.py index 44e0c0c2387..24680cb2875 100644 --- a/tests/contrib/asynctest/test_asynctest.py +++ b/tests/contrib/asynctest/test_asynctest.py @@ -5,7 +5,7 @@ import pytest import ddtrace -from ddtrace.contrib.pytest.plugin import is_enabled +from ddtrace.contrib.internal.pytest.plugin import is_enabled from ddtrace.ext import test from ddtrace.internal.ci_visibility import CIVisibility from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings @@ -46,7 +46,7 @@ def pytest_configure(config): CIVisibility.enable(tracer=self.tracer, config=ddtrace.config.pytest) with override_env(dict(DD_API_KEY="foobar.baz")): - self.tracer.configure(writer=DummyCIVisibilityWriter("https://citestcycle-intake.banana")) + self.tracer._configure(writer=DummyCIVisibilityWriter("https://citestcycle-intake.banana")) return self.testdir.inline_run(*args, plugins=[CIVisibilityPlugin()]) @pytest.mark.skipif( diff --git a/tests/contrib/avro/test_avro.py b/tests/contrib/avro/test_avro.py index 7be5e2f6351..3db10460a23 100644 --- a/tests/contrib/avro/test_avro.py +++ b/tests/contrib/avro/test_avro.py @@ -4,11 +4,11 @@ from avro.io import DatumWriter from wrapt import ObjectProxy -from ddtrace import Pin from ddtrace.constants import AUTO_KEEP from ddtrace.contrib.internal.avro.patch import patch from ddtrace.contrib.internal.avro.patch import unpatch from ddtrace.ext import schema as SCHEMA_TAGS +from ddtrace.trace import Pin OPENAPI_USER_SCHEMA_DEF = ( diff --git a/tests/contrib/boto/test.py b/tests/contrib/boto/test.py index cd2520be5b9..2570ca9c65c 100644 --- a/tests/contrib/boto/test.py +++ b/tests/contrib/boto/test.py @@ -14,12 +14,13 @@ from moto import mock_s3 from moto import mock_sts -# project -from ddtrace import Pin from 
ddtrace.contrib.internal.boto.patch import patch from ddtrace.contrib.internal.boto.patch import unpatch from ddtrace.ext import http from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME + +# project +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/botocore/bedrock_cassettes/amazon_invoke_model_arn.yaml b/tests/contrib/botocore/bedrock_cassettes/amazon_invoke_model_arn.yaml new file mode 100644 index 00000000000..cd2283c0ce7 --- /dev/null +++ b/tests/contrib/botocore/bedrock_cassettes/amazon_invoke_model_arn.yaml @@ -0,0 +1,52 @@ +interactions: +- request: + body: '{"inputText": "Command: can you explain what Datadog is to someone not + in the tech industry?", "textGenerationConfig": {"maxTokenCount": 50, "stopSequences": + [], "temperature": 0, "topP": 0.9}}' + headers: + Content-Length: + - '193' + User-Agent: + - !!binary | + Qm90bzMvMS4zNC40OSBtZC9Cb3RvY29yZSMxLjM0LjQ5IHVhLzIuMCBvcy9tYWNvcyMyNC4yLjAg + bWQvYXJjaCNhcm02NCBsYW5nL3B5dGhvbiMzLjEwLjUgbWQvcHlpbXBsI0NQeXRob24gY2ZnL3Jl + dHJ5LW1vZGUjbGVnYWN5IEJvdG9jb3JlLzEuMzQuNDk= + X-Amz-Date: + - !!binary | + MjAyNTAxMTRUMjIwNDAyWg== + amz-sdk-invocation-id: + - !!binary | + YjY5NGZlNDgtNDBmNy00YTJlLWI1YTgtYjRiZGVhZTU5MjQ0 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/arn%3Aaws%3Abedrock%3Aus-east-1%3A%3Afoundation-model%2Famazon.titan-tg1-large/invoke + response: + body: + string: '{"inputTextTokenCount":18,"results":[{"tokenCount":50,"outputText":"\n\nDatadog + is a monitoring and analytics platform for IT operations, DevOps, and software + development teams. It provides real-time monitoring of infrastructure, applications, + and services, allowing users to identify and resolve issues quickly. 
Datadog + collects","completionReason":"LENGTH"}]}' + headers: + Connection: + - keep-alive + Content-Length: + - '361' + Content-Type: + - application/json + Date: + - Tue, 14 Jan 2025 22:04:05 GMT + X-Amzn-Bedrock-Input-Token-Count: + - '18' + X-Amzn-Bedrock-Invocation-Latency: + - '2646' + X-Amzn-Bedrock-Output-Token-Count: + - '50' + x-amzn-RequestId: + - b2d0fd44-c29a-4cd4-a97a-6901a48f6264 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/contrib/botocore/test.py b/tests/contrib/botocore/test.py index af70c453d19..9e4f91cb61f 100644 --- a/tests/contrib/botocore/test.py +++ b/tests/contrib/botocore/test.py @@ -33,7 +33,6 @@ except ImportError: from moto import mock_kinesis as mock_firehose -from ddtrace import Pin from ddtrace import config from ddtrace.constants import ERROR_MSG from ddtrace.constants import ERROR_STACK @@ -47,6 +46,7 @@ from ddtrace.internal.utils.version import parse_version from ddtrace.propagation.http import HTTP_HEADER_PARENT_ID from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/botocore/test_bedrock.py b/tests/contrib/botocore/test_bedrock.py index 1001aff0dac..1cf5618bd0e 100644 --- a/tests/contrib/botocore/test_bedrock.py +++ b/tests/contrib/botocore/test_bedrock.py @@ -4,9 +4,9 @@ import mock import pytest -from ddtrace import Pin from ddtrace.contrib.internal.botocore.patch import patch from ddtrace.contrib.internal.botocore.patch import unpatch +from ddtrace.trace import Pin from tests.contrib.botocore.bedrock_utils import _MODELS from tests.contrib.botocore.bedrock_utils import _REQUEST_BODIES from tests.contrib.botocore.bedrock_utils import get_request_vcr @@ -222,6 +222,15 @@ def test_meta_invoke(bedrock_client, request_vcr): json.loads(response.get("body").read()) +@pytest.mark.snapshot +def 
test_invoke_model_using_aws_arn_model_id(bedrock_client, request_vcr): + body = json.dumps(_REQUEST_BODIES["amazon"]) + model = "arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-tg1-large" + with request_vcr.use_cassette("amazon_invoke_model_arn.yaml"): + response = bedrock_client.invoke_model(body=body, modelId=model) + json.loads(response.get("body").read()) + + @pytest.mark.snapshot def test_amazon_invoke_stream(bedrock_client, request_vcr): body, model = json.dumps(_REQUEST_BODIES["amazon"]), _MODELS["amazon"] diff --git a/tests/contrib/botocore/test_bedrock_llmobs.py b/tests/contrib/botocore/test_bedrock_llmobs.py index c4af15100f2..790b86f0704 100644 --- a/tests/contrib/botocore/test_bedrock_llmobs.py +++ b/tests/contrib/botocore/test_bedrock_llmobs.py @@ -4,10 +4,10 @@ import mock import pytest -from ddtrace import Pin from ddtrace.contrib.internal.botocore.patch import patch from ddtrace.contrib.internal.botocore.patch import unpatch from ddtrace.llmobs import LLMObs +from ddtrace.trace import Pin from tests.contrib.botocore.bedrock_utils import _MODELS from tests.contrib.botocore.bedrock_utils import _REQUEST_BODIES from tests.contrib.botocore.bedrock_utils import get_request_vcr diff --git a/tests/contrib/botocore/test_stepfunctions.py b/tests/contrib/botocore/test_stepfunctions.py index aaf17eb6051..f350e967d4c 100644 --- a/tests/contrib/botocore/test_stepfunctions.py +++ b/tests/contrib/botocore/test_stepfunctions.py @@ -1,9 +1,9 @@ import json -from ddtrace import Pin from ddtrace.contrib.internal.botocore.services.stepfunctions import update_stepfunction_input from ddtrace.ext import SpanTypes from ddtrace.internal import core +from ddtrace.trace import Pin def test_update_stepfunction_input(): diff --git a/tests/contrib/cassandra/test.py b/tests/contrib/cassandra/test.py index b6f5664ac72..21b98d6396f 100644 --- a/tests/contrib/cassandra/test.py +++ b/tests/contrib/cassandra/test.py @@ -9,7 +9,6 @@ from cassandra.query import SimpleStatement 
import mock -from ddtrace import Pin from ddtrace import config from ddtrace.constants import ERROR_MSG from ddtrace.constants import ERROR_TYPE @@ -19,6 +18,7 @@ from ddtrace.ext import cassandra as cassx from ddtrace.ext import net from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.contrib.config import CASSANDRA_CONFIG from tests.opentracer.utils import init_tracer from tests.utils import DummyTracer diff --git a/tests/contrib/celery/autopatch.py b/tests/contrib/celery/autopatch.py index 128ad9f8415..08eeed5acad 100644 --- a/tests/contrib/celery/autopatch.py +++ b/tests/contrib/celery/autopatch.py @@ -1,4 +1,4 @@ -from ddtrace import Pin +from ddtrace.trace import Pin if __name__ == "__main__": diff --git a/tests/contrib/celery/base.py b/tests/contrib/celery/base.py index 4e89b77a94b..c2b7de22a54 100644 --- a/tests/contrib/celery/base.py +++ b/tests/contrib/celery/base.py @@ -3,9 +3,9 @@ import celery import pytest -from ddtrace import Pin from ddtrace.contrib.internal.celery.patch import patch from ddtrace.contrib.internal.celery.patch import unpatch +from ddtrace.trace import Pin from tests.utils import TracerTestCase from ..config import RABBITMQ_CONFIG diff --git a/tests/contrib/celery/test_app.py b/tests/contrib/celery/test_app.py index 6f43a04a9bf..6218d77f061 100644 --- a/tests/contrib/celery/test_app.py +++ b/tests/contrib/celery/test_app.py @@ -1,7 +1,7 @@ import celery -from ddtrace import Pin from ddtrace.contrib.internal.celery.patch import unpatch_app +from ddtrace.trace import Pin from .base import CeleryBaseTestCase diff --git a/tests/contrib/celery/test_integration.py b/tests/contrib/celery/test_integration.py index 8b676fa1108..717ed1de359 100644 --- a/tests/contrib/celery/test_integration.py +++ b/tests/contrib/celery/test_integration.py @@ -8,13 +8,13 @@ import mock import pytest -from ddtrace import Pin from ddtrace._trace.context import Context from ddtrace.constants import ERROR_MSG from 
ddtrace.contrib.internal.celery.patch import patch from ddtrace.contrib.internal.celery.patch import unpatch import ddtrace.internal.forksafe as forksafe from ddtrace.propagation.http import HTTPPropagator +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import flaky diff --git a/tests/contrib/celery/test_patch.py b/tests/contrib/celery/test_patch.py index fe0e5b97b2b..3892fc79fb1 100644 --- a/tests/contrib/celery/test_patch.py +++ b/tests/contrib/celery/test_patch.py @@ -1,6 +1,6 @@ import unittest -from ddtrace import Pin +from ddtrace.trace import Pin from tests.contrib.patch import emit_integration_and_version_to_test_agent diff --git a/tests/contrib/celery/test_tagging.py b/tests/contrib/celery/test_tagging.py index 22e8b539c50..6b88acf9434 100644 --- a/tests/contrib/celery/test_tagging.py +++ b/tests/contrib/celery/test_tagging.py @@ -5,9 +5,9 @@ from celery.contrib.testing.worker import start_worker import pytest -from ddtrace import Pin from ddtrace.contrib.internal.celery.patch import patch from ddtrace.contrib.internal.celery.patch import unpatch +from ddtrace.trace import Pin from tests.utils import DummyTracer from .base import AMQP_BROKER_URL diff --git a/tests/contrib/consul/test.py b/tests/contrib/consul/test.py index c5eec48369d..285287f9e95 100644 --- a/tests/contrib/consul/test.py +++ b/tests/contrib/consul/test.py @@ -1,11 +1,11 @@ import consul from wrapt import BoundFunctionWrapper -from ddtrace import Pin from ddtrace.contrib.internal.consul.patch import patch from ddtrace.contrib.internal.consul.patch import unpatch from ddtrace.ext import consul as consulx from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/dbapi/test_dbapi.py b/tests/contrib/dbapi/test_dbapi.py index 80135734a6a..1f6be1d66f5 100644 --- a/tests/contrib/dbapi/test_dbapi.py +++ 
b/tests/contrib/dbapi/test_dbapi.py @@ -1,7 +1,6 @@ import mock import pytest -from ddtrace import Pin from ddtrace._trace.span import Span # noqa:F401 from ddtrace.contrib.dbapi import FetchTracedCursor from ddtrace.contrib.dbapi import TracedConnection @@ -9,6 +8,7 @@ from ddtrace.propagation._database_monitoring import _DBM_Propagator from ddtrace.settings import Config from ddtrace.settings.integration import IntegrationConfig +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured from tests.utils import assert_is_not_measured diff --git a/tests/contrib/dbapi/test_dbapi_appsec.py b/tests/contrib/dbapi/test_dbapi_appsec.py index b60b3ac05c0..d43d9c37e3c 100644 --- a/tests/contrib/dbapi/test_dbapi_appsec.py +++ b/tests/contrib/dbapi/test_dbapi_appsec.py @@ -1,12 +1,12 @@ import mock import pytest -from ddtrace import Pin from ddtrace.appsec._iast import oce from ddtrace.appsec._iast._utils import _is_python_version_supported from ddtrace.contrib.dbapi import TracedCursor from ddtrace.settings import Config from ddtrace.settings.integration import IntegrationConfig +from ddtrace.trace import Pin from tests.appsec.iast.conftest import _end_iast_context_and_oce from tests.appsec.iast.conftest import _start_iast_context_and_oce from tests.utils import TracerTestCase diff --git a/tests/contrib/dbapi_async/test_dbapi_async.py b/tests/contrib/dbapi_async/test_dbapi_async.py index f7151fe1390..7343e875829 100644 --- a/tests/contrib/dbapi_async/test_dbapi_async.py +++ b/tests/contrib/dbapi_async/test_dbapi_async.py @@ -1,7 +1,6 @@ import mock import pytest -from ddtrace import Pin from ddtrace._trace.span import Span # noqa:F401 from ddtrace.contrib.dbapi_async import FetchTracedAsyncCursor from ddtrace.contrib.dbapi_async import TracedAsyncConnection @@ -9,6 +8,7 @@ from ddtrace.propagation._database_monitoring import _DBM_Propagator from ddtrace.settings import Config from ddtrace.settings.integration import 
IntegrationConfig +from ddtrace.trace import Pin from tests.contrib.asyncio.utils import AsyncioTestCase from tests.contrib.asyncio.utils import mark_asyncio from tests.utils import assert_is_measured diff --git a/tests/contrib/django/conftest.py b/tests/contrib/django/conftest.py index a2de59753fb..3dd992681b4 100644 --- a/tests/contrib/django/conftest.py +++ b/tests/contrib/django/conftest.py @@ -4,8 +4,8 @@ from django.conf import settings import pytest -from ddtrace import Pin from ddtrace.contrib.internal.django.patch import patch +from ddtrace.trace import Pin from tests.utils import DummyTracer from tests.utils import TracerSpanContainer diff --git a/tests/contrib/django/django_app/settings.py b/tests/contrib/django/django_app/settings.py index 5bd53d693e2..664690d71e1 100644 --- a/tests/contrib/django/django_app/settings.py +++ b/tests/contrib/django/django_app/settings.py @@ -6,11 +6,7 @@ from tests.webclient import PingFilter -tracer.configure( - settings={ - "FILTERS": [PingFilter()], - } -) +tracer._configure(trace_processors=[PingFilter()]) ALLOWED_HOSTS = [ diff --git a/tests/contrib/django/django_app/urls.py b/tests/contrib/django/django_app/urls.py index caa7b33653f..523a250d0c8 100644 --- a/tests/contrib/django/django_app/urls.py +++ b/tests/contrib/django/django_app/urls.py @@ -78,7 +78,6 @@ def shutdown(request): re_path(r"re-path.*/", repath_view), path("path/", path_view), path("include/", include("tests.contrib.django.django_app.extra_urls")), - path("appsec/", include("tests.contrib.django.django_app.appsec_urls")), # This must precede composed-view. 
handler(r"^some-static-view/$", TemplateView.as_view(template_name="my-template.html")), handler(r"^composed-template-view/$", views.ComposedTemplateView.as_view(), name="composed-template-view"), diff --git a/tests/contrib/django/test_django_dbm.py b/tests/contrib/django/test_django_dbm.py index 00edf1c0815..d44f90f3208 100644 --- a/tests/contrib/django/test_django_dbm.py +++ b/tests/contrib/django/test_django_dbm.py @@ -1,7 +1,7 @@ from django.db import connections import mock -from ddtrace import Pin +from ddtrace.trace import Pin from tests.contrib import shared_tests from tests.utils import DummyTracer from tests.utils import override_config diff --git a/tests/contrib/djangorestframework/test_appsec.py b/tests/contrib/djangorestframework/test_appsec.py index 68489f99be0..0a7d099943c 100644 --- a/tests/contrib/djangorestframework/test_appsec.py +++ b/tests/contrib/djangorestframework/test_appsec.py @@ -12,7 +12,7 @@ def test_djangorest_request_body_urlencoded(client, test_spans, tracer): with override_global_config(dict(_asm_enabled=True)): # Hack: need to pass an argument to configure so that the processors are recreated - tracer.configure(api_version="v0.4") + tracer._configure(api_version="v0.4") payload = urlencode({"mytestingbody_key": "mytestingbody_value"}) client.post("/users/", payload, content_type="application/x-www-form-urlencoded") root_span = test_spans.spans[0] @@ -30,7 +30,7 @@ def test_djangorest_request_body_custom_parser(client, test_spans, tracer): with override_global_config(dict(_asm_enabled=True)): tracer._asm_enabled = True # Hack: need to pass an argument to configure so that the processors are recreated - tracer.configure(api_version="v0.4") + tracer._configure(api_version="v0.4") payload, content_type = ( '--52d1fb4eb9c021e53ac2846190e4ac72\r\nContent-Disposition: form-data; name="json"\r\n' 'Content-Type: application/json\r\n\r\n{"value": "yqrweytqwreasldhkuqwgervflnmlnli"}\r\n' diff --git 
a/tests/contrib/dogpile_cache/test_tracing.py b/tests/contrib/dogpile_cache/test_tracing.py index d6b26bb1b58..fec78818eda 100644 --- a/tests/contrib/dogpile_cache/test_tracing.py +++ b/tests/contrib/dogpile_cache/test_tracing.py @@ -3,9 +3,9 @@ import dogpile import pytest -from ddtrace import Pin from ddtrace.contrib.internal.dogpile_cache.patch import patch from ddtrace.contrib.internal.dogpile_cache.patch import unpatch +from ddtrace.trace import Pin from tests.conftest import DEFAULT_DDTRACE_SUBPROCESS_TEST_SERVICE_NAME from tests.utils import DummyTracer from tests.utils import TracerSpanContainer diff --git a/tests/contrib/dramatiq/test_integration.py b/tests/contrib/dramatiq/test_integration.py index 7d80d554390..ba8d836181c 100644 --- a/tests/contrib/dramatiq/test_integration.py +++ b/tests/contrib/dramatiq/test_integration.py @@ -5,7 +5,7 @@ from ddtrace.contrib.internal.dramatiq.patch import patch from ddtrace.contrib.internal.dramatiq.patch import unpatch -from ddtrace.pin import Pin +from ddtrace.trace import Pin from tests.utils import DummyTracer from tests.utils import snapshot diff --git a/tests/contrib/elasticsearch/test_elasticsearch.py b/tests/contrib/elasticsearch/test_elasticsearch.py index 091fe4b6901..6e381bc1e31 100644 --- a/tests/contrib/elasticsearch/test_elasticsearch.py +++ b/tests/contrib/elasticsearch/test_elasticsearch.py @@ -6,7 +6,6 @@ import pytest -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.internal.elasticsearch.patch import get_version from ddtrace.contrib.internal.elasticsearch.patch import get_versions @@ -14,6 +13,7 @@ from ddtrace.contrib.internal.elasticsearch.patch import unpatch from ddtrace.ext import http from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.contrib.patch import emit_integration_and_version_to_test_agent from tests.utils import TracerTestCase diff --git a/tests/contrib/falcon/test_suite.py 
b/tests/contrib/falcon/test_suite.py index cdf26d401ac..96768505328 100644 --- a/tests/contrib/falcon/test_suite.py +++ b/tests/contrib/falcon/test_suite.py @@ -226,7 +226,7 @@ def test_200_ot(self): """OpenTracing version of test_200.""" writer = self.tracer._writer ot_tracer = init_tracer("my_svc", self.tracer) - ot_tracer._dd_tracer.configure(writer=writer) + ot_tracer._dd_tracer._configure(writer=writer) with ot_tracer.start_active_span("ot_span"): out = self.make_test_call("/200", expected_status_code=200) diff --git a/tests/contrib/fastapi/test_fastapi_appsec.py b/tests/contrib/fastapi/test_fastapi_appsec.py index d2f1b6492f9..a6124299f96 100644 --- a/tests/contrib/fastapi/test_fastapi_appsec.py +++ b/tests/contrib/fastapi/test_fastapi_appsec.py @@ -10,7 +10,7 @@ def _aux_appsec_prepare_tracer(tracer, asm_enabled=True): # Hack: need to pass an argument to configure so that the processors are recreated - tracer.configure(api_version="v0.4") + tracer._configure(api_version="v0.4") def get_response_body(response): diff --git a/tests/contrib/fastapi/test_fastapi_appsec_iast.py b/tests/contrib/fastapi/test_fastapi_appsec_iast.py index 19502912b8d..91963f21297 100644 --- a/tests/contrib/fastapi/test_fastapi_appsec_iast.py +++ b/tests/contrib/fastapi/test_fastapi_appsec_iast.py @@ -45,7 +45,7 @@ def _aux_appsec_prepare_tracer(tracer): oce.reconfigure() # Hack: need to pass an argument to configure so that the processors are recreated - tracer.configure(api_version="v0.4") + tracer._configure(api_version="v0.4") def get_response_body(response): diff --git a/tests/contrib/flask/__init__.py b/tests/contrib/flask/__init__.py index 216b3676e93..a512a79f196 100644 --- a/tests/contrib/flask/__init__.py +++ b/tests/contrib/flask/__init__.py @@ -2,9 +2,9 @@ from flask.testing import FlaskClient import wrapt -from ddtrace import Pin from ddtrace.contrib.internal.flask.patch import patch from ddtrace.contrib.internal.flask.patch import unpatch +from ddtrace.trace import Pin 
from tests.utils import TracerTestCase diff --git a/tests/contrib/flask/app.py b/tests/contrib/flask/app.py index 82059ce0eaa..c19b34c4216 100644 --- a/tests/contrib/flask/app.py +++ b/tests/contrib/flask/app.py @@ -12,11 +12,7 @@ from tests.webclient import PingFilter -tracer.configure( - settings={ - "FILTERS": [PingFilter()], - } -) +tracer._configure(trace_processors=[PingFilter()]) cur_dir = os.path.dirname(os.path.realpath(__file__)) tmpl_path = os.path.join(cur_dir, "test_templates") app = Flask(__name__, template_folder=tmpl_path) diff --git a/tests/contrib/flask/test_blueprint.py b/tests/contrib/flask/test_blueprint.py index db069d1bf04..96401dfa1a9 100644 --- a/tests/contrib/flask/test_blueprint.py +++ b/tests/contrib/flask/test_blueprint.py @@ -1,7 +1,7 @@ import flask -from ddtrace import Pin from ddtrace.contrib.internal.flask.patch import unpatch +from ddtrace.trace import Pin from . import BaseFlaskTestCase diff --git a/tests/contrib/flask/test_flask_appsec.py b/tests/contrib/flask/test_flask_appsec.py index 642585fc035..6937b9e4c03 100644 --- a/tests/contrib/flask/test_flask_appsec.py +++ b/tests/contrib/flask/test_flask_appsec.py @@ -31,7 +31,7 @@ def setUp(self): def _aux_appsec_prepare_tracer(self, appsec_enabled=True): # Hack: need to pass an argument to configure so that the processors are recreated - self.tracer.configure(api_version="v0.4") + self.tracer._configure(api_version="v0.4") def test_flask_ipblock_manually_json(self): # Most tests of flask blocking are in the test_flask_snapshot, this just diff --git a/tests/contrib/flask/test_flask_appsec_iast.py b/tests/contrib/flask/test_flask_appsec_iast.py index c49dca7c29f..bedf2b58bdc 100644 --- a/tests/contrib/flask/test_flask_appsec_iast.py +++ b/tests/contrib/flask/test_flask_appsec_iast.py @@ -48,7 +48,7 @@ def setUp(self): patch_header_injection() patch_json() - self.tracer.configure(api_version="v0.4", appsec_enabled=True, iast_enabled=True) + self.tracer._configure(api_version="v0.4", 
appsec_enabled=True, iast_enabled=True) oce.reconfigure() @pytest.mark.skipif(not python_supported_by_iast(), reason="Python version not supported by IAST") @@ -1496,7 +1496,7 @@ def setUp(self): ) ): super(FlaskAppSecIASTDisabledTestCase, self).setUp() - self.tracer.configure(api_version="v0.4") + self.tracer._configure(api_version="v0.4") @pytest.mark.skipif(not python_supported_by_iast(), reason="Python version not supported by IAST") def test_flask_full_sqli_iast_disabled_http_request_cookies_name(self): diff --git a/tests/contrib/flask/test_flask_appsec_telemetry.py b/tests/contrib/flask/test_flask_appsec_telemetry.py index df21a2b508b..27e0fa509be 100644 --- a/tests/contrib/flask/test_flask_appsec_telemetry.py +++ b/tests/contrib/flask/test_flask_appsec_telemetry.py @@ -17,7 +17,7 @@ def inject_fixtures(self, telemetry_writer): # noqa: F811 def _aux_appsec_prepare_tracer(self, appsec_enabled=True): # Hack: need to pass an argument to configure so that the processors are recreated - self.tracer.configure(api_version="v0.4") + self.tracer._configure(api_version="v0.4") def test_telemetry_metrics_block(self): with override_global_config(dict(_asm_enabled=True, _asm_static_rule_file=rules.RULES_GOOD_PATH)): diff --git a/tests/contrib/flask/test_flask_helpers.py b/tests/contrib/flask/test_flask_helpers.py index a02f25eb4af..d3672213a0a 100644 --- a/tests/contrib/flask/test_flask_helpers.py +++ b/tests/contrib/flask/test_flask_helpers.py @@ -2,10 +2,10 @@ import flask -from ddtrace import Pin from ddtrace.contrib.internal.flask.patch import flask_version from ddtrace.contrib.internal.flask.patch import unpatch from ddtrace.internal.compat import StringIO +from ddtrace.trace import Pin from . 
import BaseFlaskTestCase diff --git a/tests/contrib/flask/test_signals.py b/tests/contrib/flask/test_signals.py index e16cf84157e..b86e8989047 100644 --- a/tests/contrib/flask/test_signals.py +++ b/tests/contrib/flask/test_signals.py @@ -1,9 +1,9 @@ import flask import mock -from ddtrace import Pin from ddtrace.contrib.internal.flask.patch import flask_version from ddtrace.contrib.internal.flask.patch import unpatch +from ddtrace.trace import Pin from . import BaseFlaskTestCase diff --git a/tests/contrib/flask/test_template.py b/tests/contrib/flask/test_template.py index 7231548642e..a38311d3b86 100644 --- a/tests/contrib/flask/test_template.py +++ b/tests/contrib/flask/test_template.py @@ -1,8 +1,8 @@ import flask -from ddtrace import Pin from ddtrace.contrib.internal.flask.patch import flask_version from ddtrace.contrib.internal.flask.patch import unpatch +from ddtrace.trace import Pin from . import BaseFlaskTestCase diff --git a/tests/contrib/flask_autopatch/test_flask_autopatch.py b/tests/contrib/flask_autopatch/test_flask_autopatch.py index 4da8fc921f0..27c4b47e2d0 100644 --- a/tests/contrib/flask_autopatch/test_flask_autopatch.py +++ b/tests/contrib/flask_autopatch/test_flask_autopatch.py @@ -2,9 +2,9 @@ import flask import wrapt -from ddtrace import Pin from ddtrace.contrib.internal.flask.patch import flask_version from ddtrace.ext import http +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured from tests.utils import assert_span_http_status_code diff --git a/tests/contrib/google_generativeai/conftest.py b/tests/contrib/google_generativeai/conftest.py index 6c370a07452..64b2eb83d1b 100644 --- a/tests/contrib/google_generativeai/conftest.py +++ b/tests/contrib/google_generativeai/conftest.py @@ -6,7 +6,7 @@ from ddtrace.contrib.internal.google_generativeai.patch import patch from ddtrace.contrib.internal.google_generativeai.patch import unpatch from ddtrace.llmobs import LLMObs -from ddtrace.pin 
import Pin +from ddtrace.trace import Pin from tests.contrib.google_generativeai.utils import MockGenerativeModelAsyncClient from tests.contrib.google_generativeai.utils import MockGenerativeModelClient from tests.utils import DummyTracer @@ -36,7 +36,7 @@ def mock_tracer(ddtrace_global_config, genai): pin = Pin.get_from(genai) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin.override(genai, tracer=mock_tracer) - pin.tracer.configure() + pin.tracer._configure() if ddtrace_global_config.get("_llmobs_enabled", False): # Have to disable and re-enable LLMObs to use to mock tracer. LLMObs.disable() diff --git a/tests/contrib/grpc/common.py b/tests/contrib/grpc/common.py index 668a99d2584..e67e4f32a92 100644 --- a/tests/contrib/grpc/common.py +++ b/tests/contrib/grpc/common.py @@ -2,10 +2,10 @@ from grpc._grpcio_metadata import __version__ as _GRPC_VERSION from grpc.framework.foundation import logging_pool -from ddtrace import Pin from ddtrace.contrib.internal.grpc import constants from ddtrace.contrib.internal.grpc.patch import patch from ddtrace.contrib.internal.grpc.patch import unpatch +from ddtrace.trace import Pin from tests.utils import TracerTestCase from .hello_pb2_grpc import add_HelloServicer_to_server diff --git a/tests/contrib/grpc/test_grpc.py b/tests/contrib/grpc/test_grpc.py index 8f6bd00b4e3..1ad7900d33b 100644 --- a/tests/contrib/grpc/test_grpc.py +++ b/tests/contrib/grpc/test_grpc.py @@ -5,16 +5,16 @@ from grpc.framework.foundation import logging_pool import pytest -from ddtrace import Pin from ddtrace._trace.span import _get_64_highest_order_bits_as_hex from ddtrace.constants import ERROR_MSG from ddtrace.constants import ERROR_STACK from ddtrace.constants import ERROR_TYPE -from ddtrace.contrib.grpc import constants +from ddtrace.contrib.internal.grpc import constants from ddtrace.contrib.internal.grpc.patch import _unpatch_server from ddtrace.contrib.internal.grpc.patch import patch from 
ddtrace.contrib.internal.grpc.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import flaky from tests.utils import snapshot @@ -216,7 +216,7 @@ def _test_secure_channel(self, secure_channel_function): self._check_server_span(server_span, "grpc-server", "SayHello", "unary") def test_pin_not_activated(self): - self.tracer.configure(enabled=False) + self.tracer._configure(enabled=False) with grpc.insecure_channel("127.0.0.1:%d" % (_GRPC_PORT)) as channel: stub = HelloStub(channel) stub.SayHello(HelloRequest(name="test")) diff --git a/tests/contrib/grpc_aio/test_grpc_aio.py b/tests/contrib/grpc_aio/test_grpc_aio.py index 1296e027486..0606bcc3db2 100644 --- a/tests/contrib/grpc_aio/test_grpc_aio.py +++ b/tests/contrib/grpc_aio/test_grpc_aio.py @@ -7,7 +7,6 @@ from grpc import aio import pytest -from ddtrace import Pin from ddtrace.constants import ERROR_MSG from ddtrace.constants import ERROR_STACK from ddtrace.constants import ERROR_TYPE @@ -16,6 +15,7 @@ from ddtrace.contrib.internal.grpc.patch import patch from ddtrace.contrib.internal.grpc.patch import unpatch from ddtrace.contrib.internal.grpc.utils import _parse_rpc_repr_string +from ddtrace.trace import Pin import ddtrace.vendor.packaging.version as packaging_version from tests.contrib.grpc.hello_pb2 import HelloReply from tests.contrib.grpc.hello_pb2 import HelloRequest @@ -340,7 +340,7 @@ async def test_invalid_target(server_info, tracer): @pytest.mark.parametrize("server_info", [_CoroHelloServicer(), _SyncHelloServicer()], indirect=True) async def test_pin_not_activated(server_info, tracer): - tracer.configure(enabled=False) + tracer._configure(enabled=False) async with aio.insecure_channel(server_info.target) as channel: stub = HelloStub(channel) await stub.SayHello(HelloRequest(name="test")) diff --git a/tests/contrib/gunicorn/wsgi_mw_app.py b/tests/contrib/gunicorn/wsgi_mw_app.py index 
ac3931923c7..9cf9927ff34 100644 --- a/tests/contrib/gunicorn/wsgi_mw_app.py +++ b/tests/contrib/gunicorn/wsgi_mw_app.py @@ -17,11 +17,7 @@ from tests.webclient import PingFilter -tracer.configure( - settings={ - "FILTERS": [PingFilter()], - } -) +tracer._configure(trace_processors=[PingFilter()]) SCHEDULER_SENTINEL = -1 assert bootstrap.profiler._scheduler._last_export not in (None, SCHEDULER_SENTINEL) diff --git a/tests/contrib/httplib/test_httplib.py b/tests/contrib/httplib/test_httplib.py index 05977b53cc5..24a5fe3f051 100644 --- a/tests/contrib/httplib/test_httplib.py +++ b/tests/contrib/httplib/test_httplib.py @@ -17,7 +17,7 @@ from ddtrace.internal.compat import parse from ddtrace.internal.constants import _HTTPLIB_NO_TRACE_REQUEST from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME -from ddtrace.pin import Pin +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase from tests.utils import assert_span_http_status_code diff --git a/tests/contrib/httplib/test_httplib_distributed.py b/tests/contrib/httplib/test_httplib_distributed.py index 3552f65a51a..40e5e891662 100644 --- a/tests/contrib/httplib/test_httplib_distributed.py +++ b/tests/contrib/httplib/test_httplib_distributed.py @@ -7,7 +7,7 @@ from ddtrace import config from ddtrace._trace.span import _get_64_highest_order_bits_as_hex from ddtrace.internal.compat import httplib -from ddtrace.pin import Pin +from ddtrace.trace import Pin from tests.utils import TracerTestCase from .test_httplib import SOCKET diff --git a/tests/contrib/httpx/test_httpx.py b/tests/contrib/httpx/test_httpx.py index 426099dcf04..33ecadb825f 100644 --- a/tests/contrib/httpx/test_httpx.py +++ b/tests/contrib/httpx/test_httpx.py @@ -6,8 +6,8 @@ from ddtrace.contrib.internal.httpx.patch import HTTPX_VERSION from ddtrace.contrib.internal.httpx.patch import patch from ddtrace.contrib.internal.httpx.patch import unpatch -from ddtrace.pin import Pin from 
ddtrace.settings.http import HttpConfig +from ddtrace.trace import Pin from tests.utils import flaky from tests.utils import override_config from tests.utils import override_http_config diff --git a/tests/contrib/httpx/test_httpx_pre_0_11.py b/tests/contrib/httpx/test_httpx_pre_0_11.py index fc9c003328c..315c53cb29c 100644 --- a/tests/contrib/httpx/test_httpx_pre_0_11.py +++ b/tests/contrib/httpx/test_httpx_pre_0_11.py @@ -6,8 +6,8 @@ from ddtrace.contrib.internal.httpx.patch import HTTPX_VERSION from ddtrace.contrib.internal.httpx.patch import patch from ddtrace.contrib.internal.httpx.patch import unpatch -from ddtrace.pin import Pin from ddtrace.settings.http import HttpConfig +from ddtrace.trace import Pin from tests.utils import override_config from tests.utils import override_http_config diff --git a/tests/contrib/jinja2/test_jinja2.py b/tests/contrib/jinja2/test_jinja2.py index 985c1114383..64002fd6555 100644 --- a/tests/contrib/jinja2/test_jinja2.py +++ b/tests/contrib/jinja2/test_jinja2.py @@ -4,10 +4,10 @@ # 3rd party import jinja2 -from ddtrace import Pin from ddtrace import config from ddtrace.contrib.internal.jinja2.patch import patch from ddtrace.contrib.internal.jinja2.patch import unpatch +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured from tests.utils import assert_is_not_measured diff --git a/tests/contrib/kafka/test_kafka.py b/tests/contrib/kafka/test_kafka.py index f1afc2dbe89..d5858574ef9 100644 --- a/tests/contrib/kafka/test_kafka.py +++ b/tests/contrib/kafka/test_kafka.py @@ -11,18 +11,18 @@ import mock import pytest -from ddtrace import Pin from ddtrace import Tracer from ddtrace.contrib.internal.kafka.patch import TracedConsumer from ddtrace.contrib.internal.kafka.patch import patch from ddtrace.contrib.internal.kafka.patch import unpatch -from ddtrace.filters import TraceFilter import ddtrace.internal.datastreams # noqa: F401 - used as part of mock patching from 
ddtrace.internal.datastreams.processor import PROPAGATION_KEY_BASE_64 from ddtrace.internal.datastreams.processor import ConsumerPartitionKey from ddtrace.internal.datastreams.processor import DataStreamsCtx from ddtrace.internal.datastreams.processor import PartitionKey from ddtrace.internal.utils.retry import fibonacci_backoff_with_jitter +from ddtrace.trace import Pin +from ddtrace.trace import TraceFilter from tests.contrib.config import KAFKA_CONFIG from tests.datastreams.test_public_api import MockedTracer from tests.utils import DummyTracer @@ -108,7 +108,7 @@ def tracer(should_filter_empty_polls): patch() t = Tracer() if should_filter_empty_polls: - t.configure(settings={"FILTERS": [KafkaConsumerPollFilter()]}) + t._configure(trace_processors=[KafkaConsumerPollFilter()]) # disable backoff because it makes these tests less reliable t._writer._send_payload_with_backoff = t._writer._send_payload try: @@ -505,7 +505,7 @@ def _generate_in_subprocess(random_topic): PAYLOAD = bytes("hueh hueh hueh", encoding="utf-8") - ddtrace.tracer.configure(settings={"FILTERS": [KafkaConsumerPollFilter()]}) + ddtrace.tracer._configure(trace_processors=[KafkaConsumerPollFilter()]) # disable backoff because it makes these tests less reliable ddtrace.tracer._writer._send_payload_with_backoff = ddtrace.tracer._writer._send_payload patch() @@ -518,8 +518,8 @@ def _generate_in_subprocess(random_topic): "auto.offset.reset": "earliest", } ) - ddtrace.Pin.override(producer, tracer=ddtrace.tracer) - ddtrace.Pin.override(consumer, tracer=ddtrace.tracer) + ddtrace.trace.Pin.override(producer, tracer=ddtrace.tracer) + ddtrace.trace.Pin.override(consumer, tracer=ddtrace.tracer) # We run all of these commands with retry attempts because the kafka-confluent API # sys.exits on connection failures, which causes the test to fail. 
We want to retry @@ -799,7 +799,7 @@ def test_tracing_context_is_propagated_when_enabled(ddtrace_run_python_code_in_s import random import sys -from ddtrace import Pin +from ddtrace.trace import Pin from ddtrace.contrib.internal.kafka.patch import patch from tests.contrib.kafka.test_kafka import consumer @@ -1039,7 +1039,7 @@ def test_does_not_trace_empty_poll_when_disabled(ddtrace_run_python_code_in_subp import random import sys -from ddtrace import Pin +from ddtrace.trace import Pin from ddtrace.contrib.internal.kafka.patch import patch from ddtrace import config diff --git a/tests/contrib/kombu/test.py b/tests/contrib/kombu/test.py index ef27d3a09ac..b56ecdf0d0f 100644 --- a/tests/contrib/kombu/test.py +++ b/tests/contrib/kombu/test.py @@ -2,13 +2,13 @@ import kombu import mock -from ddtrace import Pin from ddtrace.contrib.internal.kombu import utils from ddtrace.contrib.internal.kombu.patch import patch from ddtrace.contrib.internal.kombu.patch import unpatch from ddtrace.ext import kombu as kombux from ddtrace.internal.datastreams.processor import PROPAGATION_KEY_BASE_64 from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/langchain/conftest.py b/tests/contrib/langchain/conftest.py index f54b040bba7..2de8acd19e0 100644 --- a/tests/contrib/langchain/conftest.py +++ b/tests/contrib/langchain/conftest.py @@ -3,9 +3,9 @@ import mock import pytest -from ddtrace import Pin from ddtrace.contrib.internal.langchain.patch import patch from ddtrace.contrib.internal.langchain.patch import unpatch +from ddtrace.trace import Pin from tests.utils import DummyTracer from tests.utils import DummyWriter from tests.utils import override_config @@ -59,7 +59,7 @@ def mock_tracer(langchain, mock_logs, mock_metrics): pin = Pin.get_from(langchain) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) 
pin.override(langchain, tracer=mock_tracer) - pin.tracer.configure() + pin.tracer._configure() yield mock_tracer mock_logs.reset_mock() diff --git a/tests/contrib/langgraph/__init__.py b/tests/contrib/langgraph/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/contrib/langgraph/conftest.py b/tests/contrib/langgraph/conftest.py new file mode 100644 index 00000000000..19d01c018aa --- /dev/null +++ b/tests/contrib/langgraph/conftest.py @@ -0,0 +1,158 @@ +import operator +from typing import Annotated +from typing import TypedDict + +from langgraph.graph import END +from langgraph.graph import START +from langgraph.graph import StateGraph +import pytest + +from ddtrace import Pin +from ddtrace.contrib.internal.langgraph.patch import patch +from ddtrace.contrib.internal.langgraph.patch import unpatch +from ddtrace.llmobs import LLMObs as llmobs_service +from ddtrace.llmobs._writer import LLMObsSpanWriter +from tests.utils import DummyTracer +from tests.utils import override_global_config + + +@pytest.fixture +def mock_tracer(): + yield DummyTracer() + + +@pytest.fixture +def langgraph(monkeypatch, mock_tracer): + monkeypatch.setenv("_DD_TRACE_LANGGRAPH_ENABLED", "true") + patch() + import langgraph + + pin = Pin.get_from(langgraph) + pin.override(langgraph, tracer=mock_tracer) + yield langgraph + unpatch() + + +def default_global_config(): + return {"_dd_api_key": "", "_llmobs_ml_app": "unnamed-ml-app", "service": "tests.llmobs"} + + +class TestLLMObsSpanWriter(LLMObsSpanWriter): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.events = [] + + def enqueue(self, event): + self.events.append(event) + + +@pytest.fixture +def llmobs_span_writer(): + yield TestLLMObsSpanWriter(interval=1.0, timeout=1.0) + + +@pytest.fixture +def llmobs(tracer, llmobs_span_writer): + with override_global_config(default_global_config()): + llmobs_service.enable(_tracer=tracer) + llmobs_service._instance._llmobs_span_writer = 
llmobs_span_writer + yield llmobs_service + llmobs_service.disable() + + +@pytest.fixture +def llmobs_events(llmobs, llmobs_span_writer): + return llmobs_span_writer.events + + +class State(TypedDict): + a_list: Annotated[list, operator.add] + which: str + + +def _do_op(name): + def op(state: State): + return {"a_list": [name]} + + return op + + +@pytest.fixture +def simple_graph(langgraph): + graph_builder = StateGraph(State) + graph_builder.add_node("a", _do_op("a")) + graph_builder.add_node("b", _do_op("b")) + graph_builder.add_edge(START, "a") + graph_builder.add_edge("a", "b") + graph_builder.add_edge("b", END) + graph = graph_builder.compile() + + yield graph + + +@pytest.fixture +def conditional_graph(langgraph): + def which(state): + if state["which"] not in ("b", "c"): + return "b" + return state["which"] + + graph_builder = StateGraph(State) + graph_builder.add_node("a", _do_op("a")) + graph_builder.add_node("b", _do_op("b")) + graph_builder.add_node("c", _do_op("c")) + graph_builder.add_edge(START, "a") + graph_builder.add_conditional_edges("a", which) + graph_builder.add_edge("b", END) + graph_builder.add_edge("c", END) + graph = graph_builder.compile() + + yield graph + + +@pytest.fixture +def complex_graph(langgraph): + def which(state): + if state["which"] not in ("b", "c"): + return "b" + return state["which"] + + subgraph_builder_b = StateGraph(State) + subgraph_builder_b.add_node("b1", _do_op("b1")) + subgraph_builder_b.add_node("b2", _do_op("b2")) + subgraph_builder_b.add_node("b3", _do_op("b3")) + subgraph_builder_b.add_edge(START, "b1") + subgraph_builder_b.add_edge("b1", "b2") + subgraph_builder_b.add_edge("b2", "b3") + subgraph_builder_b.add_edge("b3", END) + subgraph_b = subgraph_builder_b.compile() + + graph_builder = StateGraph(State) + graph_builder.add_node("a", _do_op("a")) + graph_builder.add_node("b", subgraph_b) + graph_builder.add_node("c", _do_op("c")) + graph_builder.add_edge(START, "a") + graph_builder.add_conditional_edges("a", 
which) + graph_builder.add_edge("b", END) + graph_builder.add_edge("c", END) + graph = graph_builder.compile() + + yield graph + + +@pytest.fixture +def fanning_graph(langgraph): + graph_builder = StateGraph(State) + graph_builder.add_node("a", _do_op("a")) + graph_builder.add_node("b", _do_op("b")) + graph_builder.add_node("c", _do_op("c")) + graph_builder.add_node("d", _do_op("d")) + graph_builder.add_edge(START, "a") + graph_builder.add_edge("a", "b") + graph_builder.add_edge("a", "c") + graph_builder.add_edge("b", "d") + graph_builder.add_edge("c", "d") + graph_builder.add_edge("d", END) + graph = graph_builder.compile() + + yield graph diff --git a/tests/contrib/langgraph/test_langgraph.py b/tests/contrib/langgraph/test_langgraph.py new file mode 100644 index 00000000000..1894b8b9420 --- /dev/null +++ b/tests/contrib/langgraph/test_langgraph.py @@ -0,0 +1,135 @@ +def assert_simple_graph_spans(spans): + assert len(spans) == 3 + assert spans[0].resource == "langgraph.graph.state.CompiledStateGraph.LangGraph" + assert spans[1].resource == "langgraph.utils.runnable.RunnableSeq.a" + assert spans[2].resource == "langgraph.utils.runnable.RunnableSeq.b" + + +def assert_conditional_graph_spans(spans, which): + assert len(spans) == 3 + assert spans[0].resource == "langgraph.graph.state.CompiledStateGraph.LangGraph" + assert spans[1].resource == "langgraph.utils.runnable.RunnableSeq.a" + assert spans[2].resource == f"langgraph.utils.runnable.RunnableSeq.{which}" + + +def assert_subgraph_spans(spans): + assert len(spans) == 6 + assert spans[0].resource == "langgraph.graph.state.CompiledStateGraph.LangGraph" + assert spans[1].resource == "langgraph.utils.runnable.RunnableSeq.a" + assert spans[2].resource == "langgraph.graph.state.CompiledStateGraph.LangGraph" + assert spans[3].resource == "langgraph.utils.runnable.RunnableSeq.b1" + assert spans[4].resource == "langgraph.utils.runnable.RunnableSeq.b2" + assert spans[5].resource == "langgraph.utils.runnable.RunnableSeq.b3" + 
+ +def assert_fanning_graph_spans(spans): + assert len(spans) == 5 + assert spans[0].resource == "langgraph.graph.state.CompiledStateGraph.LangGraph" + assert spans[1].resource == "langgraph.utils.runnable.RunnableSeq.a" + assert spans[2].resource == "langgraph.utils.runnable.RunnableSeq.b" + assert spans[3].resource == "langgraph.utils.runnable.RunnableSeq.c" + assert spans[4].resource == "langgraph.utils.runnable.RunnableSeq.d" + + +def test_simple_graph(langgraph, simple_graph, mock_tracer): + simple_graph.invoke({"a_list": [], "which": "a"}) + spans = mock_tracer.pop_traces()[0] + assert_simple_graph_spans(spans) + + +async def test_simple_graph_async(langgraph, simple_graph, mock_tracer): + await simple_graph.ainvoke({"a_list": [], "which": "a"}) + spans = mock_tracer.pop_traces()[0] + assert_simple_graph_spans(spans) + + +def test_simple_graph_stream(langgraph, simple_graph, mock_tracer): + for _ in simple_graph.stream({"a_list": [], "which": "a"}): + pass + spans = mock_tracer.pop_traces()[0] + assert_simple_graph_spans(spans) + + +async def test_simple_graph_stream_async(langgraph, simple_graph, mock_tracer): + async for _ in simple_graph.astream({"a_list": [], "which": "a"}): + pass + spans = mock_tracer.pop_traces()[0] + assert_simple_graph_spans(spans) + + +def test_conditional_graph(langgraph, conditional_graph, mock_tracer): + conditional_graph.invoke({"a_list": [], "which": "c"}) + spans = mock_tracer.pop_traces()[0] + assert_conditional_graph_spans(spans, which="c") + + +async def test_conditional_graph_async(langgraph, conditional_graph, mock_tracer): + await conditional_graph.ainvoke({"a_list": [], "which": "b"}) + spans = mock_tracer.pop_traces()[0] + assert_conditional_graph_spans(spans, which="b") + + +def test_conditional_graph_stream(langgraph, conditional_graph, mock_tracer): + for _ in conditional_graph.stream({"a_list": [], "which": "c"}): + pass + spans = mock_tracer.pop_traces()[0] + assert_conditional_graph_spans(spans, which="c") + + 
+async def test_conditional_graph_stream_async(langgraph, conditional_graph, mock_tracer): + async for _ in conditional_graph.astream({"a_list": [], "which": "b"}): + pass + spans = mock_tracer.pop_traces()[0] + assert_conditional_graph_spans(spans, which="b") + + +def test_subgraph(langgraph, complex_graph, mock_tracer): + complex_graph.invoke({"a_list": [], "which": "b"}) + spans = mock_tracer.pop_traces()[0] + assert_subgraph_spans(spans) + + +async def test_subgraph_async(langgraph, complex_graph, mock_tracer): + await complex_graph.ainvoke({"a_list": [], "which": "b"}) + spans = mock_tracer.pop_traces()[0] + assert_subgraph_spans(spans) + + +def test_subgraph_stream(langgraph, complex_graph, mock_tracer): + for _ in complex_graph.stream({"a_list": [], "which": "b"}): + pass + spans = mock_tracer.pop_traces()[0] + assert_subgraph_spans(spans) + + +async def test_subgraph_stream_async(langgraph, complex_graph, mock_tracer): + async for _ in complex_graph.astream({"a_list": [], "which": "b"}): + pass + spans = mock_tracer.pop_traces()[0] + assert_subgraph_spans(spans) + + +def test_fanning_graph(langgraph, fanning_graph, mock_tracer): + fanning_graph.invoke({"a_list": [], "which": "b"}) + spans = mock_tracer.pop_traces()[0] + assert_fanning_graph_spans(spans) + + +async def test_fanning_graph_async(langgraph, fanning_graph, mock_tracer): + await fanning_graph.ainvoke({"a_list": [], "which": "b"}) + spans = mock_tracer.pop_traces()[0] + assert_fanning_graph_spans(spans) + + +def test_fanning_graph_stream(langgraph, fanning_graph, mock_tracer): + for _ in fanning_graph.stream({"a_list": [], "which": "b"}): + pass + spans = mock_tracer.pop_traces()[0] + assert_fanning_graph_spans(spans) + + +async def test_fanning_graph_stream_async(langgraph, fanning_graph, mock_tracer): + async for _ in fanning_graph.astream({"a_list": [], "which": "b"}): + pass + spans = mock_tracer.pop_traces()[0] + assert_fanning_graph_spans(spans) diff --git 
a/tests/contrib/langgraph/test_langgraph_llmobs.py b/tests/contrib/langgraph/test_langgraph_llmobs.py new file mode 100644 index 00000000000..968a98bdb26 --- /dev/null +++ b/tests/contrib/langgraph/test_langgraph_llmobs.py @@ -0,0 +1,171 @@ +import json + + +def _assert_span_link(from_span_event, to_span_event, from_io, to_io): + """ + Assert that a span link exists between two span events, specifically the correct span ID and from/to specification. + """ + found = False + expected_to_span_id = "undefined" if not to_span_event else to_span_event["span_id"] + for span_link in from_span_event["span_links"]: + if span_link["span_id"] == expected_to_span_id: + assert span_link["attributes"] == {"from": from_io, "to": to_io} + found = True + break + assert found + + +class TestLangGraphLLMObs: + def test_simple_graph(self, llmobs_events, simple_graph): + simple_graph.invoke({"a_list": [], "which": "a"}, stream_mode=["values"]) + graph_span = llmobs_events[2] + a_span = llmobs_events[0] + b_span = llmobs_events[1] + + assert graph_span["name"] == "LangGraph" + _assert_span_link(graph_span, None, "input", "input") + _assert_span_link(graph_span, b_span, "output", "output") + assert a_span["name"] == "a" + _assert_span_link(a_span, graph_span, "input", "input") + assert b_span["name"] == "b" + _assert_span_link(b_span, a_span, "output", "input") + + # Check that the graph span has the appropriate output + # stream_mode=["values"] should result in the last yield being a tuple + assert graph_span["meta"]["output"]["value"] == json.dumps({"a_list": ["a", "b"], "which": "a"}) + + async def test_simple_graph_async(self, llmobs_events, simple_graph): + await simple_graph.ainvoke({"a_list": [], "which": "a"}) + graph_span = llmobs_events[2] + a_span = llmobs_events[0] + b_span = llmobs_events[1] + + assert graph_span["name"] == "LangGraph" + _assert_span_link(graph_span, None, "input", "input") + _assert_span_link(graph_span, b_span, "output", "output") + assert a_span["name"] == 
"a" + _assert_span_link(a_span, graph_span, "input", "input") + assert b_span["name"] == "b" + _assert_span_link(b_span, a_span, "output", "input") + + # Check that the graph span has the appropriate output + # default stream_mode of "values" should result in the last yield being an object + assert graph_span["meta"]["output"]["value"] == json.dumps({"a_list": ["a", "b"], "which": "a"}) + + def test_conditional_graph(self, llmobs_events, conditional_graph): + conditional_graph.invoke({"a_list": [], "which": "c"}) + graph_span = llmobs_events[2] + a_span = llmobs_events[0] + c_span = llmobs_events[1] + + assert graph_span["name"] == "LangGraph" + _assert_span_link(graph_span, None, "input", "input") + _assert_span_link(graph_span, c_span, "output", "output") + assert a_span["name"] == "a" + _assert_span_link(a_span, graph_span, "input", "input") + assert c_span["name"] == "c" + _assert_span_link(c_span, a_span, "output", "input") + + async def test_conditional_graph_async(self, llmobs_events, conditional_graph): + await conditional_graph.ainvoke({"a_list": [], "which": "b"}) + graph_span = llmobs_events[2] + a_span = llmobs_events[0] + c_span = llmobs_events[1] + + assert graph_span["name"] == "LangGraph" + _assert_span_link(graph_span, None, "input", "input") + _assert_span_link(graph_span, c_span, "output", "output") + assert a_span["name"] == "a" + _assert_span_link(a_span, graph_span, "input", "input") + assert c_span["name"] == "b" + _assert_span_link(c_span, a_span, "output", "input") + + def test_subgraph(self, llmobs_events, complex_graph): + complex_graph.invoke({"a_list": [], "which": "b"}) + graph_span = llmobs_events[5] + a_span = llmobs_events[0] + b_span = llmobs_events[4] + b1_span = llmobs_events[1] + b2_span = llmobs_events[2] + b3_span = llmobs_events[3] + + assert graph_span["name"] == "LangGraph" + _assert_span_link(graph_span, None, "input", "input") + _assert_span_link(graph_span, b_span, "output", "output") + assert a_span["name"] == "a" + 
_assert_span_link(a_span, graph_span, "input", "input") + assert b_span["name"] == "b" + _assert_span_link(b_span, a_span, "output", "input") + _assert_span_link(b_span, b3_span, "output", "output") + assert b1_span["name"] == "b1" + _assert_span_link(b1_span, b_span, "input", "input") + assert b2_span["name"] == "b2" + _assert_span_link(b2_span, b1_span, "output", "input") + assert b3_span["name"] == "b3" + _assert_span_link(b3_span, b2_span, "output", "input") + + async def test_subgraph_async(self, llmobs_events, complex_graph): + await complex_graph.ainvoke({"a_list": [], "which": "b"}) + graph_span = llmobs_events[5] + a_span = llmobs_events[0] + b_span = llmobs_events[4] + b1_span = llmobs_events[1] + b2_span = llmobs_events[2] + b3_span = llmobs_events[3] + + assert graph_span["name"] == "LangGraph" + _assert_span_link(graph_span, None, "input", "input") + _assert_span_link(graph_span, b_span, "output", "output") + assert a_span["name"] == "a" + _assert_span_link(a_span, graph_span, "input", "input") + assert b_span["name"] == "b" + _assert_span_link(b_span, a_span, "output", "input") + _assert_span_link(b_span, b3_span, "output", "output") + assert b1_span["name"] == "b1" + _assert_span_link(b1_span, b_span, "input", "input") + assert b2_span["name"] == "b2" + _assert_span_link(b2_span, b1_span, "output", "input") + assert b3_span["name"] == "b3" + _assert_span_link(b3_span, b2_span, "output", "input") + + def test_fanning_graph(self, llmobs_events, fanning_graph): + fanning_graph.invoke({"a_list": [], "which": ""}) + graph_span = llmobs_events[4] + a_span = llmobs_events[0] + b_span = llmobs_events[1] + c_span = llmobs_events[2] + d_span = llmobs_events[3] + + assert graph_span["name"] == "LangGraph" + _assert_span_link(graph_span, None, "input", "input") + _assert_span_link(graph_span, d_span, "output", "output") + assert a_span["name"] == "a" + _assert_span_link(a_span, graph_span, "input", "input") + assert b_span["name"] == "b" + 
_assert_span_link(b_span, a_span, "output", "input") + assert c_span["name"] == "c" + _assert_span_link(c_span, a_span, "output", "input") + assert d_span["name"] == "d" + _assert_span_link(d_span, b_span, "output", "input") + _assert_span_link(d_span, c_span, "output", "input") + + async def test_fanning_graph_async(self, llmobs_events, fanning_graph): + await fanning_graph.ainvoke({"a_list": [], "which": ""}) + graph_span = llmobs_events[4] + a_span = llmobs_events[0] + b_span = llmobs_events[1] + c_span = llmobs_events[2] + d_span = llmobs_events[3] + + assert graph_span["name"] == "LangGraph" + _assert_span_link(graph_span, None, "input", "input") + _assert_span_link(graph_span, d_span, "output", "output") + assert a_span["name"] == "a" + _assert_span_link(a_span, graph_span, "input", "input") + assert b_span["name"] == "b" + _assert_span_link(b_span, a_span, "output", "input") + assert c_span["name"] == "c" + _assert_span_link(c_span, a_span, "output", "input") + assert d_span["name"] == "d" + _assert_span_link(d_span, b_span, "output", "input") + _assert_span_link(d_span, c_span, "output", "input") diff --git a/tests/contrib/langgraph/test_langgraph_patch.py b/tests/contrib/langgraph/test_langgraph_patch.py new file mode 100644 index 00000000000..5e57cbb3f63 --- /dev/null +++ b/tests/contrib/langgraph/test_langgraph_patch.py @@ -0,0 +1,102 @@ +import os +import sys +from tempfile import NamedTemporaryFile + +from ddtrace.contrib.langgraph import get_version +from ddtrace.contrib.langgraph import patch +from ddtrace.contrib.langgraph import unpatch +from tests.contrib.patch import PatchTestCase +from tests.utils import call_program + + +os.environ["_DD_TRACE_LANGGRAPH_ENABLED"] = "true" + + +class TestLangGraphPatch(PatchTestCase.Base): + __integration_name__ = "langgraph" + __module_name__ = "langgraph" + __patch_func__ = patch + __unpatch_func__ = unpatch + __get_version__ = get_version + + def assert_module_patched(self, langgraph): + from langgraph.pregel 
import Pregel + from langgraph.pregel.loop import PregelLoop + from langgraph.utils.runnable import RunnableSeq + + self.assert_wrapped(RunnableSeq.invoke) + self.assert_wrapped(RunnableSeq.ainvoke) + self.assert_wrapped(Pregel.stream) + self.assert_wrapped(Pregel.astream) + self.assert_wrapped(PregelLoop.tick) + + def assert_not_module_patched(self, langgraph): + from langgraph.pregel import Pregel + from langgraph.pregel.loop import PregelLoop + from langgraph.utils.runnable import RunnableSeq + + self.assert_not_wrapped(RunnableSeq.invoke) + self.assert_not_wrapped(RunnableSeq.ainvoke) + self.assert_not_wrapped(Pregel.stream) + self.assert_not_wrapped(Pregel.astream) + self.assert_not_wrapped(PregelLoop.tick) + + def assert_not_module_double_patched(self, langgraph): + from langgraph.pregel import Pregel + from langgraph.pregel.loop import PregelLoop + from langgraph.utils.runnable import RunnableSeq + + self.assert_not_double_wrapped(RunnableSeq.invoke) + self.assert_not_double_wrapped(RunnableSeq.ainvoke) + self.assert_not_double_wrapped(Pregel.stream) + self.assert_not_double_wrapped(Pregel.astream) + self.assert_not_double_wrapped(PregelLoop.tick) + + def test_ddtrace_run_patch_on_import(self): + # Overriding the base test case due to langgraph's code structure not allowing + # langgraph to be patched by a direct import. 
+ with NamedTemporaryFile(mode="w", suffix=".py") as f: + f.write( + """ +import sys + +from ddtrace.internal.module import ModuleWatchdog + +from wrapt import wrap_function_wrapper as wrap + +patched = False + +def patch_hook(module): + def patch_wrapper(wrapped, _, args, kwrags): + global patched + + result = wrapped(*args, **kwrags) + sys.stdout.write("K") + patched = True + return result + + wrap(module.__name__, module.patch.__name__, patch_wrapper) + +ModuleWatchdog.register_module_hook("ddtrace.contrib..patch", patch_hook) + +sys.stdout.write("O") + +import langgraph as mod +from langgraph import graph + +# If the module was already loaded during the sitecustomize +# we check that the module was marked as patched. +if not patched and ( + getattr(mod, "__datadog_patch", False) or getattr(mod, "_datadog_patch", False) +): + sys.stdout.write("K") +""" + ) + f.flush() + + env = os.environ.copy() + env["DD_TRACE_%s_ENABLED" % self.__integration_name__.upper()] = "1" + + out, err, _, _ = call_program("ddtrace-run", sys.executable, f.name, env=env) + + self.assertEqual(out, b"OK", "stderr:\n%s" % err.decode()) diff --git a/tests/contrib/mako/test_mako.py b/tests/contrib/mako/test_mako.py index 9acfaf066ae..7e690b04a43 100644 --- a/tests/contrib/mako/test_mako.py +++ b/tests/contrib/mako/test_mako.py @@ -4,13 +4,13 @@ from mako.runtime import Context from mako.template import Template -from ddtrace import Pin from ddtrace.contrib.internal.mako.constants import DEFAULT_TEMPLATE_NAME from ddtrace.contrib.internal.mako.patch import patch from ddtrace.contrib.internal.mako.patch import unpatch from ddtrace.internal.compat import StringIO from ddtrace.internal.compat import to_unicode from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/mariadb/test_mariadb.py b/tests/contrib/mariadb/test_mariadb.py index 
05a3718d219..7ea8cd27feb 100644 --- a/tests/contrib/mariadb/test_mariadb.py +++ b/tests/contrib/mariadb/test_mariadb.py @@ -4,9 +4,9 @@ import mariadb import pytest -from ddtrace import Pin from ddtrace.contrib.internal.mariadb.patch import patch from ddtrace.contrib.internal.mariadb.patch import unpatch +from ddtrace.trace import Pin from tests.contrib.config import MARIADB_CONFIG from tests.utils import DummyTracer from tests.utils import assert_dict_issuperset diff --git a/tests/contrib/molten/test_molten.py b/tests/contrib/molten/test_molten.py index d0b1eb1648f..cc73ceef861 100644 --- a/tests/contrib/molten/test_molten.py +++ b/tests/contrib/molten/test_molten.py @@ -2,7 +2,6 @@ from molten.testing import TestClient import pytest -from ddtrace import Pin from ddtrace import config from ddtrace.constants import ERROR_MSG from ddtrace.contrib.internal.molten.patch import MOLTEN_VERSION @@ -12,6 +11,7 @@ from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME from ddtrace.propagation.http import HTTP_HEADER_PARENT_ID from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured from tests.utils import assert_span_http_status_code diff --git a/tests/contrib/molten/test_molten_di.py b/tests/contrib/molten/test_molten_di.py index f320dd01e64..d360698f4cb 100644 --- a/tests/contrib/molten/test_molten_di.py +++ b/tests/contrib/molten/test_molten_di.py @@ -3,9 +3,9 @@ import molten from molten import DependencyInjector -from ddtrace import Pin from ddtrace.contrib.internal.molten.patch import patch from ddtrace.contrib.internal.molten.patch import unpatch +from ddtrace.trace import Pin from tests.utils import TracerTestCase diff --git a/tests/contrib/mongoengine/test.py b/tests/contrib/mongoengine/test.py index 0f41ac61d8b..b3961e3808c 100644 --- a/tests/contrib/mongoengine/test.py +++ b/tests/contrib/mongoengine/test.py @@ -3,11 +3,11 @@ import 
mongoengine import pymongo -from ddtrace import Pin from ddtrace.contrib.internal.mongoengine.patch import patch from ddtrace.contrib.internal.mongoengine.patch import unpatch from ddtrace.ext import mongo as mongox from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import DummyTracer from tests.utils import TracerTestCase diff --git a/tests/contrib/mysql/test_mysql.py b/tests/contrib/mysql/test_mysql.py index 0c80d4a2c57..08626890fac 100644 --- a/tests/contrib/mysql/test_mysql.py +++ b/tests/contrib/mysql/test_mysql.py @@ -1,9 +1,9 @@ import mock import mysql -from ddtrace import Pin from ddtrace.contrib.internal.mysql.patch import patch from ddtrace.contrib.internal.mysql.patch import unpatch +from ddtrace.trace import Pin from tests.contrib import shared_tests from tests.contrib.config import MYSQL_CONFIG from tests.opentracer.utils import init_tracer diff --git a/tests/contrib/mysqldb/test_mysqldb.py b/tests/contrib/mysqldb/test_mysqldb.py index e27163b649a..5d2c98a752c 100644 --- a/tests/contrib/mysqldb/test_mysqldb.py +++ b/tests/contrib/mysqldb/test_mysqldb.py @@ -2,10 +2,10 @@ import MySQLdb import pytest -from ddtrace import Pin from ddtrace.contrib.internal.mysqldb.patch import patch from ddtrace.contrib.internal.mysqldb.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.contrib import shared_tests from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase diff --git a/tests/contrib/openai/conftest.py b/tests/contrib/openai/conftest.py index 0d36a5803bf..d25cae38e0d 100644 --- a/tests/contrib/openai/conftest.py +++ b/tests/contrib/openai/conftest.py @@ -7,11 +7,11 @@ import mock import pytest -from ddtrace import Pin from ddtrace.contrib.internal.openai.patch import patch from ddtrace.contrib.internal.openai.patch import unpatch -from ddtrace.filters 
import TraceFilter from ddtrace.llmobs import LLMObs +from ddtrace.trace import Pin +from ddtrace.trace import TraceFilter from tests.utils import DummyTracer from tests.utils import DummyWriter from tests.utils import override_config @@ -173,7 +173,7 @@ def patch_openai(ddtrace_global_config, ddtrace_config_openai, openai_api_key, o @pytest.fixture def snapshot_tracer(openai, patch_openai, mock_logs, mock_metrics): pin = Pin.get_from(openai) - pin.tracer.configure(settings={"FILTERS": [FilterOrg()]}) + pin.tracer._configure(trace_processors=[FilterOrg()]) yield pin.tracer @@ -186,7 +186,7 @@ def mock_tracer(ddtrace_global_config, openai, patch_openai, mock_logs, mock_met pin = Pin.get_from(openai) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin.override(openai, tracer=mock_tracer) - pin.tracer.configure(settings={"FILTERS": [FilterOrg()]}) + pin.tracer._configure(trace_processors=[FilterOrg()]) if ddtrace_global_config.get("_llmobs_enabled", False): # Have to disable and re-enable LLMObs to use to mock tracer. 
diff --git a/tests/contrib/openai/test_openai_v0.py b/tests/contrib/openai/test_openai_v0.py index 04654f4a4cf..3fa262e8a4a 100644 --- a/tests/contrib/openai/test_openai_v0.py +++ b/tests/contrib/openai/test_openai_v0.py @@ -1476,8 +1476,8 @@ def test_integration_sync(openai_api_key, ddtrace_run_python_code_in_subprocess) import ddtrace from tests.contrib.openai.conftest import FilterOrg from tests.contrib.openai.test_openai_v0 import get_openai_vcr -pin = ddtrace.Pin.get_from(openai) -pin.tracer.configure(settings={"FILTERS": [FilterOrg()]}) +pin = ddtrace.trace.Pin.get_from(openai) +pin.tracer._configure(trace_processors=[FilterOrg()]) with get_openai_vcr(subdirectory_name="v0").use_cassette("completion.yaml"): resp = openai.Completion.create(model="ada", prompt="Hello world", temperature=0.8, n=2, stop=".", max_tokens=10) """, @@ -1527,8 +1527,8 @@ def test_integration_async(openai_api_key, ddtrace_run_python_code_in_subprocess import ddtrace from tests.contrib.openai.conftest import FilterOrg from tests.contrib.openai.test_openai_v0 import get_openai_vcr -pin = ddtrace.Pin.get_from(openai) -pin.tracer.configure(settings={"FILTERS": [FilterOrg()]}) +pin = ddtrace.trace.Pin.get_from(openai) +pin.tracer._configure(trace_processors=[FilterOrg()]) async def task(): with get_openai_vcr(subdirectory_name="v0").use_cassette("completion_async.yaml"): resp = await openai.Completion.acreate( @@ -1900,8 +1900,8 @@ def test_integration_service_name(openai_api_key, ddtrace_run_python_code_in_sub import ddtrace from tests.contrib.openai.conftest import FilterOrg from tests.contrib.openai.test_openai_v0 import get_openai_vcr -pin = ddtrace.Pin.get_from(openai) -pin.tracer.configure(settings={"FILTERS": [FilterOrg()]}) +pin = ddtrace.trace.Pin.get_from(openai) +pin.tracer._configure(trace_processors=[FilterOrg()]) with get_openai_vcr(subdirectory_name="v0").use_cassette("completion.yaml"): resp = openai.Completion.create(model="ada", prompt="Hello world", temperature=0.8, n=2, 
stop=".", max_tokens=10) """, diff --git a/tests/contrib/openai/test_openai_v1.py b/tests/contrib/openai/test_openai_v1.py index 91737d9e5eb..918d3eadae9 100644 --- a/tests/contrib/openai/test_openai_v1.py +++ b/tests/contrib/openai/test_openai_v1.py @@ -1111,8 +1111,8 @@ def test_integration_sync(openai_api_key, ddtrace_run_python_code_in_subprocess) import ddtrace from tests.contrib.openai.conftest import FilterOrg from tests.contrib.openai.test_openai_v1 import get_openai_vcr -pin = ddtrace.Pin.get_from(openai) -pin.tracer.configure(settings={"FILTERS": [FilterOrg()]}) +pin = ddtrace.trace.Pin.get_from(openai) +pin.tracer._configure(trace_processors=[FilterOrg()]) with get_openai_vcr(subdirectory_name="v1").use_cassette("completion.yaml"): client = openai.OpenAI() resp = client.completions.create( @@ -1159,8 +1159,8 @@ def test_integration_async(openai_api_key, ddtrace_run_python_code_in_subprocess import ddtrace from tests.contrib.openai.conftest import FilterOrg from tests.contrib.openai.test_openai_v1 import get_openai_vcr -pin = ddtrace.Pin.get_from(openai) -pin.tracer.configure(settings={"FILTERS": [FilterOrg()]}) +pin = ddtrace.trace.Pin.get_from(openai) +pin.tracer._configure(trace_processors=[FilterOrg()]) async def task(): with get_openai_vcr(subdirectory_name="v1").use_cassette("completion.yaml"): client = openai.AsyncOpenAI() @@ -1547,8 +1547,8 @@ def test_integration_service_name(openai_api_key, ddtrace_run_python_code_in_sub import ddtrace from tests.contrib.openai.conftest import FilterOrg from tests.contrib.openai.test_openai_v1 import get_openai_vcr -pin = ddtrace.Pin.get_from(openai) -pin.tracer.configure(settings={"FILTERS": [FilterOrg()]}) +pin = ddtrace.trace.Pin.get_from(openai) +pin.tracer._configure(trace_processors=[FilterOrg()]) with get_openai_vcr(subdirectory_name="v1").use_cassette("completion.yaml"): client = openai.OpenAI() resp = client.completions.create(model="ada", prompt="hello world") diff --git 
a/tests/contrib/psycopg/test_psycopg.py b/tests/contrib/psycopg/test_psycopg.py index d7821ca8905..8e13ecc4128 100644 --- a/tests/contrib/psycopg/test_psycopg.py +++ b/tests/contrib/psycopg/test_psycopg.py @@ -8,11 +8,11 @@ from psycopg.sql import Identifier from psycopg.sql import Literal -from ddtrace import Pin from ddtrace.contrib.internal.psycopg.patch import patch from ddtrace.contrib.internal.psycopg.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME from ddtrace.internal.utils.version import parse_version +from ddtrace.trace import Pin from tests.contrib.config import POSTGRES_CONFIG from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase diff --git a/tests/contrib/psycopg/test_psycopg_async.py b/tests/contrib/psycopg/test_psycopg_async.py index 7a8654f1626..7e4fbd59624 100644 --- a/tests/contrib/psycopg/test_psycopg_async.py +++ b/tests/contrib/psycopg/test_psycopg_async.py @@ -5,9 +5,9 @@ from psycopg.sql import SQL from psycopg.sql import Literal -from ddtrace import Pin from ddtrace.contrib.internal.psycopg.patch import patch from ddtrace.contrib.internal.psycopg.patch import unpatch +from ddtrace.trace import Pin from tests.contrib.asyncio.utils import AsyncioTestCase from tests.contrib.config import POSTGRES_CONFIG from tests.opentracer.utils import init_tracer diff --git a/tests/contrib/psycopg2/test_psycopg.py b/tests/contrib/psycopg2/test_psycopg.py index eeea555d396..902d24d3c0e 100644 --- a/tests/contrib/psycopg2/test_psycopg.py +++ b/tests/contrib/psycopg2/test_psycopg.py @@ -7,11 +7,11 @@ from psycopg2 import extensions from psycopg2 import extras -from ddtrace import Pin from ddtrace.contrib.internal.psycopg.patch import patch from ddtrace.contrib.internal.psycopg.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME from ddtrace.internal.utils.version import parse_version +from ddtrace.trace import Pin from tests.contrib.config import POSTGRES_CONFIG 
from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase diff --git a/tests/contrib/pylibmc/test.py b/tests/contrib/pylibmc/test.py index 7a112677c27..9de012439dc 100644 --- a/tests/contrib/pylibmc/test.py +++ b/tests/contrib/pylibmc/test.py @@ -5,12 +5,13 @@ # 3p import pylibmc -# project -from ddtrace import Pin from ddtrace.contrib.internal.pylibmc.client import TracedClient from ddtrace.contrib.internal.pylibmc.patch import patch from ddtrace.contrib.internal.pylibmc.patch import unpatch from ddtrace.ext import memcached + +# project +from ddtrace.trace import Pin from tests.contrib.config import MEMCACHED_CONFIG as cfg from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase diff --git a/tests/contrib/pymemcache/test_client.py b/tests/contrib/pymemcache/test_client.py index 07eba3766ed..19a7a93d523 100644 --- a/tests/contrib/pymemcache/test_client.py +++ b/tests/contrib/pymemcache/test_client.py @@ -9,12 +9,13 @@ import pytest import wrapt -# project -from ddtrace import Pin from ddtrace.contrib.internal.pymemcache.client import WrappedClient from ddtrace.contrib.internal.pymemcache.patch import patch from ddtrace.contrib.internal.pymemcache.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME + +# project +from ddtrace.trace import Pin from tests.utils import DummyTracer from tests.utils import TracerTestCase from tests.utils import override_config diff --git a/tests/contrib/pymemcache/test_client_defaults.py b/tests/contrib/pymemcache/test_client_defaults.py index b16414c3c9f..0b5e44aa3b0 100644 --- a/tests/contrib/pymemcache/test_client_defaults.py +++ b/tests/contrib/pymemcache/test_client_defaults.py @@ -2,10 +2,11 @@ import pymemcache import pytest -# project -from ddtrace import Pin from ddtrace.contrib.internal.pymemcache.patch import patch from ddtrace.contrib.internal.pymemcache.patch import unpatch + +# project +from ddtrace.trace import Pin from tests.utils 
import override_config from .test_client_mixin import TEST_HOST diff --git a/tests/contrib/pymemcache/test_client_mixin.py b/tests/contrib/pymemcache/test_client_mixin.py index 78162fcf93c..2d471765e1f 100644 --- a/tests/contrib/pymemcache/test_client_mixin.py +++ b/tests/contrib/pymemcache/test_client_mixin.py @@ -2,12 +2,13 @@ import pymemcache import pytest -# project -from ddtrace import Pin from ddtrace.contrib.internal.pymemcache.patch import patch from ddtrace.contrib.internal.pymemcache.patch import unpatch from ddtrace.ext import memcached as memcachedx from ddtrace.ext import net + +# project +from ddtrace.trace import Pin from tests.utils import DummyTracer from tests.utils import TracerTestCase from tests.utils import override_config diff --git a/tests/contrib/pymongo/test.py b/tests/contrib/pymongo/test.py index f439036fcc1..b6669d40ac0 100644 --- a/tests/contrib/pymongo/test.py +++ b/tests/contrib/pymongo/test.py @@ -3,13 +3,14 @@ import pymongo -# project -from ddtrace import Pin from ddtrace.contrib.internal.pymongo.client import normalize_filter from ddtrace.contrib.internal.pymongo.patch import _CHECKOUT_FN_NAME from ddtrace.contrib.internal.pymongo.patch import patch from ddtrace.contrib.internal.pymongo.patch import unpatch from ddtrace.ext import SpanTypes + +# project +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import DummyTracer from tests.utils import TracerTestCase @@ -690,7 +691,7 @@ def test_peer_service_tagging(self): def test_patch_with_disabled_tracer(self): tracer, client = self.get_tracer_and_client() - tracer.configure(enabled=False) + tracer._configure(enabled=False) db = client.testdb db.drop_collection("teams") diff --git a/tests/contrib/pymysql/test_pymysql.py b/tests/contrib/pymysql/test_pymysql.py index b8a11e5afa7..e94e03c8395 100644 --- a/tests/contrib/pymysql/test_pymysql.py +++ b/tests/contrib/pymysql/test_pymysql.py @@ -1,10 +1,10 @@ import mock import pymysql -from 
ddtrace import Pin from ddtrace.contrib.internal.pymysql.patch import patch from ddtrace.contrib.internal.pymysql.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.contrib import shared_tests from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase diff --git a/tests/contrib/pynamodb/test_pynamodb.py b/tests/contrib/pynamodb/test_pynamodb.py index 8f474419bc0..33b4e4c2c14 100644 --- a/tests/contrib/pynamodb/test_pynamodb.py +++ b/tests/contrib/pynamodb/test_pynamodb.py @@ -4,10 +4,10 @@ from pynamodb.connection.base import Connection import pytest -from ddtrace import Pin from ddtrace.contrib.internal.pynamodb.patch import patch from ddtrace.contrib.internal.pynamodb.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/pyodbc/test_pyodbc.py b/tests/contrib/pyodbc/test_pyodbc.py index 8703b0b0391..4c965aede7b 100644 --- a/tests/contrib/pyodbc/test_pyodbc.py +++ b/tests/contrib/pyodbc/test_pyodbc.py @@ -1,9 +1,9 @@ import pyodbc -from ddtrace import Pin from ddtrace.contrib.internal.pyodbc.patch import patch from ddtrace.contrib.internal.pyodbc.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/pyramid/app/app.py b/tests/contrib/pyramid/app/app.py index 037fb7e08e6..e83acbe5197 100644 --- a/tests/contrib/pyramid/app/app.py +++ b/tests/contrib/pyramid/app/app.py @@ -7,11 +7,7 @@ from tests.webclient import PingFilter -tracer.configure( - settings={ - "FILTERS": [PingFilter()], - } -) +tracer._configure(trace_processors=[PingFilter()]) def hello_world(request): diff --git a/tests/contrib/pyramid/pserve_app/app/__init__.py 
b/tests/contrib/pyramid/pserve_app/app/__init__.py index 13b4b58b6ab..dd10de6c5ad 100644 --- a/tests/contrib/pyramid/pserve_app/app/__init__.py +++ b/tests/contrib/pyramid/pserve_app/app/__init__.py @@ -2,7 +2,7 @@ from pyramid.response import Response from ddtrace import tracer -from ddtrace.filters import TraceFilter +from ddtrace.trace import TraceFilter class PingFilter(TraceFilter): @@ -13,11 +13,7 @@ def process_trace(self, trace): return None if trace and trace[0].trace_id == 1 else trace -tracer.configure( - settings={ - "FILTERS": [PingFilter()], - } -) +tracer._configure(trace_processors=[PingFilter()]) def tracer_shutdown(request): diff --git a/tests/contrib/pytest/test_coverage_per_suite.py b/tests/contrib/pytest/test_coverage_per_suite.py index a9c985fa7b4..adb2a710c76 100644 --- a/tests/contrib/pytest/test_coverage_per_suite.py +++ b/tests/contrib/pytest/test_coverage_per_suite.py @@ -4,8 +4,8 @@ import pytest -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 -from ddtrace.contrib.pytest._utils import _pytest_version_supports_itr +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_itr from ddtrace.ext.test_visibility import ITR_SKIPPING_LEVEL from ddtrace.internal.ci_visibility._api_client import ITRData from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings diff --git a/tests/contrib/pytest/test_pytest.py b/tests/contrib/pytest/test_pytest.py index eeefa59f714..267a9d97eac 100644 --- a/tests/contrib/pytest/test_pytest.py +++ b/tests/contrib/pytest/test_pytest.py @@ -9,10 +9,10 @@ import ddtrace from ddtrace.constants import ERROR_MSG from ddtrace.constants import SAMPLING_PRIORITY_KEY -from ddtrace.contrib.pytest import get_version -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 -from ddtrace.contrib.pytest.constants import XFAIL_REASON -from ddtrace.contrib.pytest.plugin import is_enabled +from 
ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest.constants import XFAIL_REASON +from ddtrace.contrib.internal.pytest.patch import get_version +from ddtrace.contrib.internal.pytest.plugin import is_enabled from ddtrace.ext import ci from ddtrace.ext import git from ddtrace.ext import test @@ -724,7 +724,7 @@ def test_dd_origin_tag_propagated_to_every_span(self): """ import pytest import ddtrace - from ddtrace import Pin + from ddtrace.trace import Pin def test_service(ddtracer): with ddtracer.trace("SPAN2") as span2: diff --git a/tests/contrib/pytest/test_pytest_atr.py b/tests/contrib/pytest/test_pytest_atr.py index 3e526e8cdf7..ebb4f8421d8 100644 --- a/tests/contrib/pytest/test_pytest_atr.py +++ b/tests/contrib/pytest/test_pytest_atr.py @@ -10,8 +10,8 @@ import pytest -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 -from ddtrace.contrib.pytest._utils import _pytest_version_supports_atr +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_atr from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings from tests.ci_visibility.util import _get_default_civisibility_ddconfig from tests.contrib.pytest.test_pytest import PytestTestCaseBase diff --git a/tests/contrib/pytest/test_pytest_efd.py b/tests/contrib/pytest/test_pytest_efd.py index 2affcec3585..e2a2fa08cab 100644 --- a/tests/contrib/pytest/test_pytest_efd.py +++ b/tests/contrib/pytest/test_pytest_efd.py @@ -10,8 +10,8 @@ import pytest -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 -from ddtrace.contrib.pytest._utils import _pytest_version_supports_efd +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_efd from ddtrace.internal.ci_visibility._api_client import EarlyFlakeDetectionSettings from ddtrace.internal.ci_visibility._api_client import 
TestVisibilityAPISettings from tests.ci_visibility.api_client._util import _make_fqdn_test_ids diff --git a/tests/contrib/pytest/test_pytest_quarantine.py b/tests/contrib/pytest/test_pytest_quarantine.py index 93b0b07eade..52e5c5a393c 100644 --- a/tests/contrib/pytest/test_pytest_quarantine.py +++ b/tests/contrib/pytest/test_pytest_quarantine.py @@ -10,8 +10,8 @@ import pytest -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 -from ddtrace.contrib.pytest._utils import _pytest_version_supports_efd +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_efd from ddtrace.internal.ci_visibility._api_client import QuarantineSettings from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings from tests.contrib.pytest.test_pytest import PytestTestCaseBase diff --git a/tests/contrib/pytest/test_pytest_snapshot.py b/tests/contrib/pytest/test_pytest_snapshot.py index f8c4090c33d..8b298c28c95 100644 --- a/tests/contrib/pytest/test_pytest_snapshot.py +++ b/tests/contrib/pytest/test_pytest_snapshot.py @@ -3,7 +3,7 @@ import pytest -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings from tests.ci_visibility.util import _get_default_ci_env_vars from tests.utils import TracerTestCase diff --git a/tests/contrib/pytest/test_pytest_snapshot_v2.py b/tests/contrib/pytest/test_pytest_snapshot_v2.py index 85d70d4c38e..dad546df23b 100644 --- a/tests/contrib/pytest/test_pytest_snapshot_v2.py +++ b/tests/contrib/pytest/test_pytest_snapshot_v2.py @@ -3,7 +3,7 @@ import pytest -from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 +from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings from tests.ci_visibility.util import 
_get_default_ci_env_vars from tests.utils import TracerTestCase diff --git a/tests/contrib/pytest_bdd/test_pytest_bdd.py b/tests/contrib/pytest_bdd/test_pytest_bdd.py index edf3ab90454..25dff3c8bee 100644 --- a/tests/contrib/pytest_bdd/test_pytest_bdd.py +++ b/tests/contrib/pytest_bdd/test_pytest_bdd.py @@ -2,8 +2,8 @@ import os from ddtrace.constants import ERROR_MSG -from ddtrace.contrib.pytest_bdd._plugin import _get_step_func_args_json -from ddtrace.contrib.pytest_bdd._plugin import get_version +from ddtrace.contrib.internal.pytest_bdd._plugin import _get_step_func_args_json +from ddtrace.contrib.internal.pytest_bdd._plugin import get_version from ddtrace.ext import test from tests.contrib.patch import emit_integration_and_version_to_test_agent from tests.contrib.pytest.test_pytest import PytestTestCaseBase @@ -197,20 +197,23 @@ def test_simple(): assert spans[0].get_tag(ERROR_MSG) def test_get_step_func_args_json_empty(self): - self.monkeypatch.setattr("ddtrace.contrib.pytest_bdd._plugin._extract_step_func_args", lambda *args: None) + self.monkeypatch.setattr( + "ddtrace.contrib.internal.pytest_bdd._plugin._extract_step_func_args", lambda *args: None + ) assert _get_step_func_args_json(None, lambda: None, None) is None def test_get_step_func_args_json_valid(self): self.monkeypatch.setattr( - "ddtrace.contrib.pytest_bdd._plugin._extract_step_func_args", lambda *args: {"func_arg": "test string"} + "ddtrace.contrib.internal.pytest_bdd._plugin._extract_step_func_args", + lambda *args: {"func_arg": "test string"}, ) assert _get_step_func_args_json(None, lambda: None, None) == '{"func_arg": "test string"}' def test_get_step_func_args_json_invalid(self): self.monkeypatch.setattr( - "ddtrace.contrib.pytest_bdd._plugin._extract_step_func_args", lambda *args: {"func_arg": set()} + "ddtrace.contrib.internal.pytest_bdd._plugin._extract_step_func_args", lambda *args: {"func_arg": set()} ) expected = '{"error_serializing_args": "Object of type set is not JSON 
serializable"}' diff --git a/tests/contrib/pytest_benchmark/test_pytest_benchmark.py b/tests/contrib/pytest_benchmark/test_pytest_benchmark.py index ba55659b8f8..233a389855c 100644 --- a/tests/contrib/pytest_benchmark/test_pytest_benchmark.py +++ b/tests/contrib/pytest_benchmark/test_pytest_benchmark.py @@ -1,24 +1,24 @@ import os -from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_INFO -from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_MEAN -from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_RUN -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_HD15IQR -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_IQR -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_IQR_OUTLIERS -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_LD15IQR -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_MAX -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_MEAN -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_MEDIAN -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_MIN -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_N -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_OPS -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_OUTLIERS -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_Q1 -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_Q3 -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_STDDEV -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_STDDEV_OUTLIERS -from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_TOTAL +from ddtrace.contrib.internal.pytest_benchmark.constants import BENCHMARK_INFO +from ddtrace.contrib.internal.pytest_benchmark.constants import BENCHMARK_MEAN +from ddtrace.contrib.internal.pytest_benchmark.constants import BENCHMARK_RUN +from 
ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_HD15IQR +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_IQR +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_IQR_OUTLIERS +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_LD15IQR +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_MAX +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_MEAN +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_MEDIAN +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_MIN +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_N +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_OPS +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_OUTLIERS +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_Q1 +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_Q3 +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_STDDEV +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_STDDEV_OUTLIERS +from ddtrace.contrib.internal.pytest_benchmark.constants import STATISTICS_TOTAL from ddtrace.ext.test import TEST_TYPE from tests.contrib.pytest.test_pytest import PytestTestCaseBase diff --git a/tests/contrib/redis/test_redis.py b/tests/contrib/redis/test_redis.py index 4bd4f896686..fb83f6f53fc 100644 --- a/tests/contrib/redis/test_redis.py +++ b/tests/contrib/redis/test_redis.py @@ -5,10 +5,10 @@ import redis import ddtrace -from ddtrace import Pin from ddtrace.contrib.internal.redis.patch import patch from ddtrace.contrib.internal.redis.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import DummyTracer from tests.utils import 
TracerTestCase @@ -547,7 +547,7 @@ def test_opentracing(self): ot_tracer = init_tracer("redis_svc", ddtrace.tracer) # FIXME: OpenTracing always overrides the hostname/port and creates a new # writer so we have to reconfigure with the previous one - ddtrace.tracer.configure(writer=writer) + ddtrace.tracer._configure(writer=writer) with ot_tracer.start_active_span("redis_get"): us = self.r.get("cheese") diff --git a/tests/contrib/redis/test_redis_asyncio.py b/tests/contrib/redis/test_redis_asyncio.py index 116b0dd2784..77a809392cd 100644 --- a/tests/contrib/redis/test_redis_asyncio.py +++ b/tests/contrib/redis/test_redis_asyncio.py @@ -7,10 +7,10 @@ import redis.asyncio from wrapt import ObjectProxy -from ddtrace import Pin from ddtrace import tracer from ddtrace.contrib.internal.redis.patch import patch from ddtrace.contrib.internal.redis.patch import unpatch +from ddtrace.trace import Pin from tests.utils import override_config from ..config import REDIS_CONFIG diff --git a/tests/contrib/redis/test_redis_cluster.py b/tests/contrib/redis/test_redis_cluster.py index dcf58c2d00a..2731a18fcee 100644 --- a/tests/contrib/redis/test_redis_cluster.py +++ b/tests/contrib/redis/test_redis_cluster.py @@ -2,10 +2,10 @@ import pytest import redis -from ddtrace import Pin from ddtrace.contrib.internal.redis.patch import patch from ddtrace.contrib.internal.redis.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerTestCase diff --git a/tests/contrib/redis/test_redis_cluster_asyncio.py b/tests/contrib/redis/test_redis_cluster_asyncio.py index 03815073723..b8624c533aa 100644 --- a/tests/contrib/redis/test_redis_cluster_asyncio.py +++ b/tests/contrib/redis/test_redis_cluster_asyncio.py @@ -2,9 +2,9 @@ import pytest import redis -from ddtrace import Pin from ddtrace.contrib.internal.redis.patch import patch 
from ddtrace.contrib.internal.redis.patch import unpatch +from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import assert_is_measured @@ -164,9 +164,9 @@ def test_default_service_name_v1(): import redis - from ddtrace import Pin from ddtrace.contrib.internal.redis.patch import patch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME + from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerSpanContainer @@ -210,9 +210,9 @@ def test_user_specified_service_v0(): import redis - from ddtrace import Pin from ddtrace import config from ddtrace.contrib.internal.redis.patch import patch + from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerSpanContainer @@ -259,9 +259,9 @@ def test_user_specified_service_v1(): import redis - from ddtrace import Pin from ddtrace import config from ddtrace.contrib.internal.redis.patch import patch + from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerSpanContainer @@ -304,8 +304,8 @@ def test_env_user_specified_rediscluster_service_v0(): import redis - from ddtrace import Pin from ddtrace.contrib.internal.redis.patch import patch + from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerSpanContainer @@ -345,8 +345,8 @@ def test_env_user_specified_rediscluster_service_v1(): import redis - from ddtrace import Pin from ddtrace.contrib.internal.redis.patch import patch + from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerSpanContainer @@ -390,9 +390,9 @@ def 
test_service_precedence_v0(): import redis - from ddtrace import Pin from ddtrace import config from ddtrace.contrib.internal.redis.patch import patch + from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerSpanContainer @@ -435,9 +435,9 @@ def test_service_precedence_v1(): import redis - from ddtrace import Pin from ddtrace import config from ddtrace.contrib.internal.redis.patch import patch + from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerSpanContainer diff --git a/tests/contrib/rediscluster/test.py b/tests/contrib/rediscluster/test.py index 1ba8881e161..a2c5ac5c6b2 100644 --- a/tests/contrib/rediscluster/test.py +++ b/tests/contrib/rediscluster/test.py @@ -2,11 +2,11 @@ import pytest import rediscluster -from ddtrace import Pin from ddtrace.contrib.internal.rediscluster.patch import REDISCLUSTER_VERSION from ddtrace.contrib.internal.rediscluster.patch import patch from ddtrace.contrib.internal.rediscluster.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.contrib.config import REDISCLUSTER_CONFIG from tests.utils import DummyTracer from tests.utils import TracerTestCase diff --git a/tests/contrib/rq/test_rq.py b/tests/contrib/rq/test_rq.py index d2efb996227..d72871823da 100644 --- a/tests/contrib/rq/test_rq.py +++ b/tests/contrib/rq/test_rq.py @@ -6,10 +6,10 @@ import redis import rq -from ddtrace import Pin from ddtrace.contrib.internal.rq.patch import get_version from ddtrace.contrib.internal.rq.patch import patch from ddtrace.contrib.internal.rq.patch import unpatch +from ddtrace.trace import Pin from tests.contrib.patch import emit_integration_and_version_to_test_agent from tests.utils import override_config from tests.utils import snapshot diff --git a/tests/contrib/sanic/run_server.py 
b/tests/contrib/sanic/run_server.py index ca3cc668aa7..cf8d1142d05 100644 --- a/tests/contrib/sanic/run_server.py +++ b/tests/contrib/sanic/run_server.py @@ -9,11 +9,7 @@ from tests.webclient import PingFilter -tracer.configure( - settings={ - "FILTERS": [PingFilter()], - } -) +tracer._configure(trace_processors=[PingFilter()]) app = Sanic("test_sanic_server") diff --git a/tests/contrib/shared_tests.py b/tests/contrib/shared_tests.py index a7659374693..cf647a15628 100644 --- a/tests/contrib/shared_tests.py +++ b/tests/contrib/shared_tests.py @@ -1,4 +1,4 @@ -from ddtrace import Pin +from ddtrace.trace import Pin # DBM Shared Tests diff --git a/tests/contrib/shared_tests_async.py b/tests/contrib/shared_tests_async.py index 97d1df32cfa..0d49f09d608 100644 --- a/tests/contrib/shared_tests_async.py +++ b/tests/contrib/shared_tests_async.py @@ -1,4 +1,4 @@ -from ddtrace import Pin +from ddtrace.trace import Pin # DBM Shared Tests diff --git a/tests/contrib/snowflake/test_snowflake.py b/tests/contrib/snowflake/test_snowflake.py index cdb735fb947..9762804651d 100644 --- a/tests/contrib/snowflake/test_snowflake.py +++ b/tests/contrib/snowflake/test_snowflake.py @@ -6,10 +6,10 @@ import responses import snowflake.connector -from ddtrace import Pin from ddtrace import tracer from ddtrace.contrib.internal.snowflake.patch import patch from ddtrace.contrib.internal.snowflake.patch import unpatch +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import override_config from tests.utils import snapshot diff --git a/tests/contrib/sqlalchemy/test_patch.py b/tests/contrib/sqlalchemy/test_patch.py index f33c4bdd8a5..a6f08bb5f46 100644 --- a/tests/contrib/sqlalchemy/test_patch.py +++ b/tests/contrib/sqlalchemy/test_patch.py @@ -1,10 +1,10 @@ import sqlalchemy from sqlalchemy import text -from ddtrace import Pin from ddtrace.contrib.internal.sqlalchemy.patch import get_version from ddtrace.contrib.internal.sqlalchemy.patch import patch from 
ddtrace.contrib.internal.sqlalchemy.patch import unpatch +from ddtrace.trace import Pin from tests.contrib.patch import emit_integration_and_version_to_test_agent from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/sqlite3/test_sqlite3.py b/tests/contrib/sqlite3/test_sqlite3.py index f10452cb8da..6101dcfa081 100644 --- a/tests/contrib/sqlite3/test_sqlite3.py +++ b/tests/contrib/sqlite3/test_sqlite3.py @@ -13,7 +13,6 @@ import pytest import ddtrace -from ddtrace import Pin from ddtrace.constants import ERROR_MSG from ddtrace.constants import ERROR_STACK from ddtrace.constants import ERROR_TYPE @@ -21,6 +20,7 @@ from ddtrace.contrib.internal.sqlite3.patch import patch from ddtrace.contrib.internal.sqlite3.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase from tests.utils import assert_is_measured diff --git a/tests/contrib/starlette/test_starlette.py b/tests/contrib/starlette/test_starlette.py index 7d56c71c3f8..f290ade8ea7 100644 --- a/tests/contrib/starlette/test_starlette.py +++ b/tests/contrib/starlette/test_starlette.py @@ -8,13 +8,13 @@ from starlette.testclient import TestClient import ddtrace -from ddtrace import Pin from ddtrace.constants import ERROR_MSG from ddtrace.contrib.internal.sqlalchemy.patch import patch as sql_patch from ddtrace.contrib.internal.sqlalchemy.patch import unpatch as sql_unpatch from ddtrace.contrib.internal.starlette.patch import patch as starlette_patch from ddtrace.contrib.internal.starlette.patch import unpatch as starlette_unpatch from ddtrace.propagation import http as http_propagation +from ddtrace.trace import Pin from tests.contrib.starlette.app import get_app from tests.utils import DummyTracer from tests.utils import TracerSpanContainer diff --git a/tests/contrib/subprocess/test_subprocess.py 
b/tests/contrib/subprocess/test_subprocess.py index a6ff5a3a9bd..40e7ab67431 100644 --- a/tests/contrib/subprocess/test_subprocess.py +++ b/tests/contrib/subprocess/test_subprocess.py @@ -4,13 +4,13 @@ import pytest -from ddtrace import Pin from ddtrace.contrib.internal.subprocess.constants import COMMANDS from ddtrace.contrib.internal.subprocess.patch import SubprocessCmdLine from ddtrace.contrib.internal.subprocess.patch import patch from ddtrace.contrib.internal.subprocess.patch import unpatch from ddtrace.ext import SpanTypes from ddtrace.internal import core +from ddtrace.trace import Pin from tests.utils import override_config from tests.utils import override_global_config diff --git a/tests/contrib/tornado/test_config.py b/tests/contrib/tornado/test_config.py index d91a2e95912..fba648b23d7 100644 --- a/tests/contrib/tornado/test_config.py +++ b/tests/contrib/tornado/test_config.py @@ -1,5 +1,5 @@ from ddtrace._trace.tracer import Tracer -from ddtrace.filters import TraceFilter +from ddtrace.trace import TraceFilter from tests.utils import DummyWriter from .utils import TornadoTestCase @@ -48,7 +48,7 @@ def test_tracer_is_properly_configured(self): assert self.tracer.agent_trace_url == "http://dd-agent.service.consul:8126" writer = DummyWriter() - self.tracer.configure(enabled=True, writer=writer) + self.tracer._configure(enabled=True, writer=writer) with self.tracer.trace("keep"): pass spans = writer.pop() diff --git a/tests/contrib/unittest/test_unittest.py b/tests/contrib/unittest/test_unittest.py index b2af61b47d8..cd24c26f3c0 100644 --- a/tests/contrib/unittest/test_unittest.py +++ b/tests/contrib/unittest/test_unittest.py @@ -7,15 +7,15 @@ from ddtrace.constants import ERROR_MSG from ddtrace.constants import ERROR_TYPE from ddtrace.constants import SPAN_KIND -from ddtrace.contrib.unittest.constants import COMPONENT_VALUE -from ddtrace.contrib.unittest.constants import FRAMEWORK -from ddtrace.contrib.unittest.constants import KIND -from 
ddtrace.contrib.unittest.constants import MODULE_OPERATION_NAME -from ddtrace.contrib.unittest.constants import SESSION_OPERATION_NAME -from ddtrace.contrib.unittest.constants import SUITE_OPERATION_NAME -from ddtrace.contrib.unittest.constants import TEST_OPERATION_NAME -from ddtrace.contrib.unittest.patch import _set_tracer -from ddtrace.contrib.unittest.patch import patch +from ddtrace.contrib.internal.unittest.constants import COMPONENT_VALUE +from ddtrace.contrib.internal.unittest.constants import FRAMEWORK +from ddtrace.contrib.internal.unittest.constants import KIND +from ddtrace.contrib.internal.unittest.constants import MODULE_OPERATION_NAME +from ddtrace.contrib.internal.unittest.constants import SESSION_OPERATION_NAME +from ddtrace.contrib.internal.unittest.constants import SUITE_OPERATION_NAME +from ddtrace.contrib.internal.unittest.constants import TEST_OPERATION_NAME +from ddtrace.contrib.internal.unittest.patch import _set_tracer +from ddtrace.contrib.internal.unittest.patch import patch from ddtrace.ext import SpanTypes from ddtrace.ext import test from ddtrace.ext.ci import RUNTIME_VERSION diff --git a/tests/contrib/unittest/test_unittest_patch.py b/tests/contrib/unittest/test_unittest_patch.py index e8996fc573a..ccb8668fa12 100644 --- a/tests/contrib/unittest/test_unittest_patch.py +++ b/tests/contrib/unittest/test_unittest_patch.py @@ -3,12 +3,12 @@ # removed the ``_generated`` suffix from the file name, to prevent the content # from being overwritten by future re-generations. 
-from ddtrace.contrib.unittest.patch import get_version -from ddtrace.contrib.unittest.patch import patch +from ddtrace.contrib.internal.unittest.patch import get_version +from ddtrace.contrib.internal.unittest.patch import patch try: - from ddtrace.contrib.unittest.patch import unpatch + from ddtrace.contrib.internal.unittest.patch import unpatch except ImportError: unpatch = None from tests.contrib.patch import PatchTestCase diff --git a/tests/contrib/urllib3/test_urllib3.py b/tests/contrib/urllib3/test_urllib3.py index 256104a80f5..24ba7815e56 100644 --- a/tests/contrib/urllib3/test_urllib3.py +++ b/tests/contrib/urllib3/test_urllib3.py @@ -11,8 +11,8 @@ from ddtrace.contrib.internal.urllib3.patch import unpatch from ddtrace.ext import http from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME -from ddtrace.pin import Pin from ddtrace.settings.asm import config as asm_config +from ddtrace.trace import Pin from tests.contrib.config import HTTPBIN_CONFIG from tests.opentracer.utils import init_tracer from tests.utils import TracerTestCase @@ -536,7 +536,7 @@ def test_distributed_tracing_apm_opt_out_true(self): config.urllib3["distributed_tracing"] = True self.tracer.enabled = False # Ensure the ASM SpanProcessor is set - self.tracer.configure(appsec_standalone_enabled=True, appsec_enabled=True) + self.tracer._configure(appsec_standalone_enabled=True, appsec_enabled=True) assert asm_config._apm_opt_out with mock.patch( "urllib3.connectionpool.HTTPConnectionPool._make_request", side_effect=ValueError @@ -586,7 +586,7 @@ def test_distributed_tracing_apm_opt_out_false(self): config.urllib3["distributed_tracing"] = True self.tracer.enabled = False # Ensure the ASM SpanProcessor is set. 
- self.tracer.configure(appsec_standalone_enabled=False, appsec_enabled=True) + self.tracer._configure(appsec_standalone_enabled=False, appsec_enabled=True) assert not asm_config._apm_opt_out with mock.patch( "urllib3.connectionpool.HTTPConnectionPool._make_request", side_effect=ValueError diff --git a/tests/contrib/vertexai/conftest.py b/tests/contrib/vertexai/conftest.py index d048683a5c7..0b2b976b610 100644 --- a/tests/contrib/vertexai/conftest.py +++ b/tests/contrib/vertexai/conftest.py @@ -5,7 +5,7 @@ from ddtrace.contrib.internal.vertexai.patch import patch from ddtrace.contrib.internal.vertexai.patch import unpatch from ddtrace.llmobs import LLMObs -from ddtrace.pin import Pin +from ddtrace.trace import Pin from tests.contrib.vertexai.utils import MockAsyncPredictionServiceClient from tests.contrib.vertexai.utils import MockPredictionServiceClient from tests.utils import DummyTracer @@ -44,7 +44,7 @@ def mock_tracer(ddtrace_global_config, vertexai): pin = Pin.get_from(vertexai) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin.override(vertexai, tracer=mock_tracer) - pin.tracer.configure() + pin.tracer._configure() if ddtrace_global_config.get("_llmobs_enabled", False): # Have to disable and re-enable LLMObs to use the mock tracer. 
LLMObs.disable() diff --git a/tests/contrib/vertica/test_vertica.py b/tests/contrib/vertica/test_vertica.py index f0c47887f9e..196e1621ee5 100644 --- a/tests/contrib/vertica/test_vertica.py +++ b/tests/contrib/vertica/test_vertica.py @@ -2,7 +2,6 @@ import wrapt import ddtrace -from ddtrace import Pin from ddtrace import config from ddtrace.constants import ERROR_MSG from ddtrace.constants import ERROR_STACK @@ -11,6 +10,7 @@ from ddtrace.contrib.internal.vertica.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME from ddtrace.settings.config import _deepmerge +from ddtrace.trace import Pin from tests.contrib.config import VERTICA_CONFIG from tests.opentracer.utils import init_tracer from tests.utils import DummyTracer diff --git a/tests/contrib/wsgi/test_wsgi.py b/tests/contrib/wsgi/test_wsgi.py index de2b7bb0935..f98be18a769 100644 --- a/tests/contrib/wsgi/test_wsgi.py +++ b/tests/contrib/wsgi/test_wsgi.py @@ -8,6 +8,7 @@ from ddtrace.contrib.internal.wsgi.wsgi import _DDWSGIMiddlewareBase from ddtrace.contrib.internal.wsgi.wsgi import get_request_headers from tests.utils import override_config +from tests.utils import override_global_config from tests.utils import override_http_config from tests.utils import snapshot @@ -411,3 +412,77 @@ def test_schematization(ddtrace_run_python_code_in_subprocess, service_name, sch env["DD_SERVICE"] = service_name _, stderr, status, _ = ddtrace_run_python_code_in_subprocess(code, env=env) assert status == 0, stderr + + +def test_distributed_tracing_existing_parent(tracer, test_spans): + """We should not parse and activate distributed context if there is already an active span""" + + # Middleware that starts a root trace, but doesn't parse headers + def broken_middleware(app, tracer): + def middleware(environ, start_response): + with tracer.trace("broken_middleware"): + return app(environ, start_response) + + return middleware + + app = TestApp(broken_middleware(DDWSGIMiddleware(application, 
tracer=tracer), tracer)) + resp = app.get("/", headers={"X-Datadog-Parent-Id": "1234", "X-Datadog-Trace-Id": "4321"}) + + assert config.wsgi.distributed_tracing is True + assert resp.status == "200 OK" + assert resp.status_int == 200 + + spans = test_spans.pop() + assert len(spans) == 5 + + # The root should NOT inherit from distributed headers + root = spans[0] + assert root.name == "broken_middleware" + assert root.trace_id != 4321 + assert root.parent_id != 1234 + + # The rest of the spans should inherit from the root + for span in spans[1:]: + assert span.trace_id == root.trace_id + if span.name == "wsgi.request": + assert span.parent_id == root.span_id + + +def test_distributed_tracing_existing_parent_ff_enabled(tracer, test_spans): + """We should parse and activate distributed context even if there is already an active span""" + + # Middleware that starts a root trace, but doesn't parse headers + def broken_middleware(app, tracer): + def middleware(environ, start_response): + with tracer.trace("broken_middleware"): + return app(environ, start_response) + + return middleware + + # DEV: Default is False (ignore distributed headers when there is an active span) + with override_global_config(dict(_extract_ignore_active_span=True)): + app = TestApp(broken_middleware(DDWSGIMiddleware(application, tracer=tracer), tracer)) + resp = app.get("/", headers={"X-Datadog-Parent-Id": "1234", "X-Datadog-Trace-Id": "4321"}) + + assert config.wsgi.distributed_tracing is True + assert resp.status == "200 OK" + assert resp.status_int == 200 + + spans = test_spans.pop() + assert len(spans) == 5 + + # The root should NOT inherit from distributed headers + root = spans[0] + assert root.name == "broken_middleware" + assert root.trace_id != 4321 + assert root.parent_id != 1234 + + # The rest of the spans should inherit from distributed tracing headers + wsgi_request = spans[1] + assert wsgi_request.name == "wsgi.request" + assert wsgi_request.trace_id == 4321 + assert 
wsgi_request.parent_id == 1234 + + for span in spans[3:]: + assert span.trace_id == wsgi_request.trace_id + assert span.parent_id != root.parent_id diff --git a/tests/contrib/yaaredis/test_yaaredis.py b/tests/contrib/yaaredis/test_yaaredis.py index 17903e4ec3d..350b323de9c 100644 --- a/tests/contrib/yaaredis/test_yaaredis.py +++ b/tests/contrib/yaaredis/test_yaaredis.py @@ -6,9 +6,9 @@ from wrapt import ObjectProxy import yaaredis -from ddtrace import Pin from ddtrace.contrib.internal.yaaredis.patch import patch from ddtrace.contrib.internal.yaaredis.patch import unpatch +from ddtrace.trace import Pin from tests.opentracer.utils import init_tracer from tests.utils import override_config diff --git a/tests/integration/test_context_snapshots.py b/tests/integration/test_context_snapshots.py index 4ed44bd558e..612422064a0 100644 --- a/tests/integration/test_context_snapshots.py +++ b/tests/integration/test_context_snapshots.py @@ -1,9 +1,8 @@ import pytest +from tests.integration.utils import AGENT_VERSION from tests.utils import snapshot -from .test_integration import AGENT_VERSION - pytestmark = pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent") diff --git a/tests/integration/test_debug.py b/tests/integration/test_debug.py index f01ff1edfb3..18c28e51620 100644 --- a/tests/integration/test_debug.py +++ b/tests/integration/test_debug.py @@ -1,10 +1,8 @@ -from datetime import datetime import json import logging import os import re import subprocess -import sys from typing import List from typing import Optional @@ -17,11 +15,10 @@ from ddtrace.internal import debug from ddtrace.internal.writer import AgentWriter from ddtrace.internal.writer import TraceWriter +from tests.integration.utils import AGENT_VERSION from tests.subprocesstest import SubprocessTestCase from tests.subprocesstest import run_in_subprocess -from .test_integration import AGENT_VERSION - pytestmark = pytest.mark.skipif(AGENT_VERSION == "testagent", 
reason="The test agent doesn't support startup logs.") @@ -36,7 +33,14 @@ def __eq__(self, other): return Match() +@pytest.mark.subprocess() def test_standard_tags(): + from datetime import datetime + import sys + + import ddtrace + from ddtrace.internal import debug + f = debug.collect(ddtrace.tracer) date = f.get("date") @@ -94,7 +98,7 @@ def test_standard_tags(): assert f.get("tracer_enabled") is True assert f.get("sampler_type") == "DatadogSampler" assert f.get("priority_sampler_type") == "N/A" - assert f.get("service") == "tests.integration" + assert f.get("service") == "ddtrace_subprocess_dir" assert f.get("dd_version") == "" assert f.get("debug") is False assert f.get("enabled_cli") is False @@ -110,9 +114,14 @@ def test_standard_tags(): assert icfg["flask"] == "N/A" +@pytest.mark.subprocess() def test_debug_post_configure(): - tracer = ddtrace.Tracer() - tracer.configure( + import re + + from ddtrace import tracer + from ddtrace.internal import debug + + tracer._configure( hostname="0.0.0.0", port=1234, ) @@ -122,17 +131,22 @@ def test_debug_post_configure(): agent_url = f.get("agent_url") assert agent_url == "http://0.0.0.0:1234" - assert f.get("is_global_tracer") is False + assert f.get("is_global_tracer") is True assert f.get("tracer_enabled") is True agent_error = f.get("agent_error") # Error code can differ between Python version assert re.match("^Agent not reachable.*Connection refused", agent_error) - # Tracer doesn't support re-configure()-ing with a UDS after an initial - # configure with normal http settings. So we need a new tracer instance. 
- tracer = ddtrace.Tracer() - tracer.configure(uds_path="/file.sock") + +@pytest.mark.subprocess() +def test_debug_post_configure_uds(): + import re + + from ddtrace import tracer + from ddtrace.internal import debug + + tracer._configure(uds_path="/file.sock") f = debug.collect(tracer) @@ -193,7 +207,7 @@ def test_tracer_loglevel_info_connection(self): # shove an unserializable object into the config log output # regression: this used to cause an exception to be raised ddtrace.config.version = AgentWriter(agent_url="foobar") - tracer.configure() + tracer._configure() assert mock.call(logging.INFO, re_matcher("- DATADOG TRACER CONFIGURATION - ")) in mock_logger.mock_calls @run_in_subprocess( @@ -206,7 +220,7 @@ def test_tracer_loglevel_info_no_connection(self): tracer = ddtrace.Tracer() logging.basicConfig(level=logging.INFO) with mock.patch.object(logging.Logger, "log") as mock_logger: - tracer.configure() + tracer._configure() assert mock.call(logging.INFO, re_matcher("- DATADOG TRACER CONFIGURATION - ")) in mock_logger.mock_calls assert mock.call(logging.WARNING, re_matcher("- DATADOG TRACER DIAGNOSTIC - ")) in mock_logger.mock_calls @@ -219,7 +233,7 @@ def test_tracer_loglevel_info_no_connection(self): def test_tracer_log_disabled_error(self): tracer = ddtrace.Tracer() with mock.patch.object(logging.Logger, "log") as mock_logger: - tracer.configure() + tracer._configure() assert mock_logger.mock_calls == [] @run_in_subprocess( @@ -231,7 +245,7 @@ def test_tracer_log_disabled_error(self): def test_tracer_log_disabled(self): tracer = ddtrace.Tracer() with mock.patch.object(logging.Logger, "log") as mock_logger: - tracer.configure() + tracer._configure() assert mock_logger.mock_calls == [] @run_in_subprocess( @@ -243,7 +257,7 @@ def test_tracer_info_level_log(self): logging.basicConfig(level=logging.INFO) tracer = ddtrace.Tracer() with mock.patch.object(logging.Logger, "log") as mock_logger: - tracer.configure() + tracer._configure() assert mock_logger.mock_calls 
== [] @@ -317,7 +331,7 @@ def flush_queue(self) -> None: def test_different_samplers(): tracer = ddtrace.Tracer() - tracer.configure(sampler=ddtrace._trace.sampler.RateSampler()) + tracer._configure(sampler=ddtrace._trace.sampler.RateSampler()) info = debug.collect(tracer) assert info.get("sampler_type") == "RateSampler" @@ -326,7 +340,7 @@ def test_different_samplers(): def test_startup_logs_sampling_rules(): tracer = ddtrace.Tracer() sampler = ddtrace._trace.sampler.DatadogSampler(rules=[ddtrace._trace.sampler.SamplingRule(sample_rate=1.0)]) - tracer.configure(sampler=sampler) + tracer._configure(sampler=sampler) f = debug.collect(tracer) assert f.get("sampler_rules") == [ @@ -337,7 +351,7 @@ def test_startup_logs_sampling_rules(): sampler = ddtrace._trace.sampler.DatadogSampler( rules=[ddtrace._trace.sampler.SamplingRule(sample_rate=1.0, service="xyz", name="abc")] ) - tracer.configure(sampler=sampler) + tracer._configure(sampler=sampler) f = debug.collect(tracer) assert f.get("sampler_rules") == [ @@ -415,7 +429,7 @@ def test_debug_span_log(): def test_partial_flush_log(): tracer = ddtrace.Tracer() - tracer.configure( + tracer._configure( partial_flush_enabled=True, partial_flush_min_spans=300, ) diff --git a/tests/integration/test_encoding.py b/tests/integration/test_encoding.py index 43c47ac4840..7138ff94e00 100644 --- a/tests/integration/test_encoding.py +++ b/tests/integration/test_encoding.py @@ -18,7 +18,7 @@ def test_simple_trace_accepted_by_agent(self): for _ in range(999): with tracer.trace("child"): pass - tracer.shutdown() + tracer.flush() log.warning.assert_not_called() log.error.assert_not_called() @@ -39,7 +39,7 @@ def test_trace_with_meta_accepted_by_agent(self, tags): for _ in range(999): with tracer.trace("child") as child: child.set_tags(tags) - tracer.shutdown() + tracer.flush() log.warning.assert_not_called() log.error.assert_not_called() @@ -60,7 +60,7 @@ def test_trace_with_metrics_accepted_by_agent(self, metrics): for _ in range(999): 
with tracer.trace("child") as child: child.set_metrics(metrics) - tracer.shutdown() + tracer.flush() log.warning.assert_not_called() log.error.assert_not_called() @@ -79,6 +79,6 @@ def test_trace_with_links_accepted_by_agent(self, span_links_kwargs): for _ in range(10): with tracer.trace("child") as child: child.set_link(**span_links_kwargs) - tracer.shutdown() + tracer.flush() log.warning.assert_not_called() log.error.assert_not_called() diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py index 78606bbde14..32484ac65c0 100644 --- a/tests/integration/test_integration.py +++ b/tests/integration/test_integration.py @@ -10,12 +10,8 @@ from ddtrace import Tracer from ddtrace.internal.atexit import register_on_exit_signal from ddtrace.internal.runtime import container -from ddtrace.internal.writer import AgentWriter -from tests.integration.utils import AGENT_VERSION -from tests.integration.utils import BadEncoder from tests.integration.utils import import_ddtrace_in_subprocess from tests.integration.utils import parametrize_with_all_encodings -from tests.integration.utils import send_invalid_payload_and_get_logs from tests.integration.utils import skip_if_testagent from tests.utils import call_program @@ -23,12 +19,15 @@ FOUR_KB = 1 << 12 +@pytest.mark.subprocess() def test_configure_keeps_api_hostname_and_port(): - tracer = Tracer() + from ddtrace import tracer + from tests.integration.utils import AGENT_VERSION + assert tracer._writer.agent_url == "http://localhost:{}".format("9126" if AGENT_VERSION == "testagent" else "8126") - tracer.configure(hostname="127.0.0.1", port=8127) + tracer._configure(hostname="127.0.0.1", port=8127) assert tracer._writer.agent_url == "http://127.0.0.1:8127" - tracer.configure(api_version="v0.5") + tracer._configure(api_version="v0.5") assert ( tracer._writer.agent_url == "http://127.0.0.1:8127" ), "Previous overrides of hostname and port are retained after a configure() call without those arguments" 
@@ -100,7 +99,7 @@ def test_single_trace_uds(): from ddtrace import tracer as t sockdir = "/tmp/ddagent/trace.sock" - t.configure(uds_path=sockdir) + t._configure(uds_path=sockdir) with mock.patch("ddtrace.internal.writer.writer.log") as log: t.trace("client.testing").finish() @@ -118,7 +117,7 @@ def test_uds_wrong_socket_path(): from ddtrace import tracer as t encoding = os.environ["DD_TRACE_API_VERSION"] - t.configure(uds_path="/tmp/ddagent/nosockethere") + t._configure(uds_path="/tmp/ddagent/nosockethere") with mock.patch("ddtrace.internal.writer.writer.log") as log: t.trace("client.testing").finish() t.shutdown() @@ -292,7 +291,7 @@ def test_metrics_partial_flush_disabled(): from tests.utils import AnyInt from tests.utils import override_global_config - t.configure( + t._configure( partial_flush_enabled=False, ) @@ -392,7 +391,7 @@ def test_trace_generates_error_logs_when_hostname_invalid(): from ddtrace import tracer as t - t.configure(hostname="bad", port=1111) + t._configure(hostname="bad", port=1111) with mock.patch("ddtrace.internal.writer.writer.log") as log: t.trace("op").finish() @@ -506,8 +505,12 @@ def test_validate_headers_in_payload_to_intake_with_nested_spans(): assert headers.get("X-Datadog-Trace-Count") == "10" +@parametrize_with_all_encodings def test_trace_with_invalid_client_endpoint_generates_error_log(): - t = Tracer() + import mock + + from ddtrace import tracer as t + for client in t._writer._clients: client.ENDPOINT = "/bad" with mock.patch("ddtrace.internal.writer.writer.log") as log: @@ -526,7 +529,12 @@ def test_trace_with_invalid_client_endpoint_generates_error_log(): @skip_if_testagent +@pytest.mark.subprocess(err=None) def test_trace_with_invalid_payload_generates_error_log(): + import mock + + from tests.integration.utils import send_invalid_payload_and_get_logs + log = send_invalid_payload_and_get_logs() log.error.assert_has_calls( [ @@ -541,11 +549,11 @@ def test_trace_with_invalid_payload_generates_error_log(): 
@skip_if_testagent -@pytest.mark.subprocess(env={"_DD_TRACE_WRITER_LOG_ERROR_PAYLOADS": "true", "DD_TRACE_API_VERSION": "v0.5"}) +@pytest.mark.subprocess(env={"_DD_TRACE_WRITER_LOG_ERROR_PAYLOADS": "true", "DD_TRACE_API_VERSION": "v0.5"}, err=None) def test_trace_with_invalid_payload_logs_payload_when_LOG_ERROR_PAYLOADS(): import mock - from tests.integration.test_integration import send_invalid_payload_and_get_logs + from tests.integration.utils import send_invalid_payload_and_get_logs log = send_invalid_payload_and_get_logs() log.error.assert_has_calls( @@ -562,12 +570,12 @@ def test_trace_with_invalid_payload_logs_payload_when_LOG_ERROR_PAYLOADS(): @skip_if_testagent -@pytest.mark.subprocess(env={"_DD_TRACE_WRITER_LOG_ERROR_PAYLOADS": "true", "DD_TRACE_API_VERSION": "v0.5"}) +@pytest.mark.subprocess(env={"_DD_TRACE_WRITER_LOG_ERROR_PAYLOADS": "true", "DD_TRACE_API_VERSION": "v0.5"}, err=None) def test_trace_with_non_bytes_payload_logs_payload_when_LOG_ERROR_PAYLOADS(): import mock - from tests.integration.test_integration import send_invalid_payload_and_get_logs from tests.integration.utils import BadEncoder + from tests.integration.utils import send_invalid_payload_and_get_logs class NonBytesBadEncoder(BadEncoder): def encode(self): @@ -590,7 +598,11 @@ def encode_traces(self, traces): ) +@pytest.mark.subprocess(err=None) def test_trace_with_failing_encoder_generates_error_log(): + from tests.integration.utils import BadEncoder + from tests.integration.utils import send_invalid_payload_and_get_logs + class ExceptionBadEncoder(BadEncoder): def encode(self): raise Exception() @@ -620,9 +632,12 @@ def test_api_version_downgrade_generates_no_warning_logs(): log.error.assert_not_called() +@pytest.mark.subprocess() def test_synchronous_writer_shutdown_raises_no_exception(): - tracer = Tracer() - tracer.configure(writer=AgentWriter(tracer._writer.agent_url, sync_mode=True)) + from ddtrace import tracer + from ddtrace.internal.writer import AgentWriter + + 
tracer._configure(writer=AgentWriter(tracer._writer.agent_url, sync_mode=True)) tracer.shutdown() @@ -746,7 +761,7 @@ def test_partial_flush_log(): from ddtrace import tracer as t partial_flush_min_spans = 2 - t.configure( + t._configure( partial_flush_min_spans=partial_flush_min_spans, ) diff --git a/tests/integration/test_integration_civisibility.py b/tests/integration/test_integration_civisibility.py index 8a504f6a220..a95bcc2bccb 100644 --- a/tests/integration/test_integration_civisibility.py +++ b/tests/integration/test_integration_civisibility.py @@ -3,17 +3,14 @@ import mock import pytest -from ddtrace._trace.tracer import Tracer from ddtrace.internal import agent from ddtrace.internal.ci_visibility import CIVisibility from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings from ddtrace.internal.ci_visibility.constants import AGENTLESS_ENDPOINT -from ddtrace.internal.ci_visibility.constants import COVERAGE_TAG_NAME from ddtrace.internal.ci_visibility.constants import EVP_PROXY_AGENT_ENDPOINT from ddtrace.internal.ci_visibility.constants import EVP_SUBDOMAIN_HEADER_EVENT_VALUE from ddtrace.internal.ci_visibility.constants import EVP_SUBDOMAIN_HEADER_NAME -from ddtrace.internal.ci_visibility.writer import CIVisibilityWriter -from ddtrace.internal.utils.http import Response +from ddtrace.internal.ci_visibility.recorder import CIVisibilityTracer as Tracer from tests.ci_visibility.util import _get_default_civisibility_ddconfig from tests.utils import override_env @@ -74,10 +71,18 @@ def test_civisibility_intake_with_apikey(): CIVisibility.disable() +@pytest.mark.subprocess() def test_civisibility_intake_payloads(): + import mock + + from ddtrace import tracer as t + from ddtrace.internal.ci_visibility.constants import COVERAGE_TAG_NAME + from ddtrace.internal.ci_visibility.recorder import CIVisibilityWriter + from ddtrace.internal.utils.http import Response + from tests.utils import override_env + with 
override_env(dict(DD_API_KEY="foobar.baz")): - t = Tracer() - t.configure(writer=CIVisibilityWriter(reuse_connections=True, coverage_enabled=True)) + t._configure(writer=CIVisibilityWriter(reuse_connections=True, coverage_enabled=True)) t._writer._conn = mock.MagicMock() with mock.patch("ddtrace.internal.writer.Response.from_http_response") as from_http_response: from_http_response.return_value.__class__ = Response diff --git a/tests/integration/test_integration_snapshots.py b/tests/integration/test_integration_snapshots.py index bd48faa34a6..3c8bae602f4 100644 --- a/tests/integration/test_integration_snapshots.py +++ b/tests/integration/test_integration_snapshots.py @@ -7,23 +7,21 @@ from ddtrace import Tracer from ddtrace import tracer -from ddtrace.constants import AUTO_KEEP -from ddtrace.constants import SAMPLING_PRIORITY_KEY -from ddtrace.constants import USER_KEEP -from ddtrace.internal.writer import AgentWriter +from tests.integration.utils import AGENT_VERSION from tests.integration.utils import mark_snapshot from tests.integration.utils import parametrize_with_all_encodings from tests.utils import override_global_config from tests.utils import snapshot -from .test_integration import AGENT_VERSION - pytestmark = pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent") @snapshot(include_tracer=True) +@pytest.mark.subprocess() def test_single_trace_single_span(tracer): + from ddtrace import tracer + s = tracer.trace("operation", service="my-svc") s.set_tag("k", "v") # numeric tag @@ -31,11 +29,14 @@ def test_single_trace_single_span(tracer): s.set_metric("float_metric", 12.34) s.set_metric("int_metric", 4321) s.finish() - tracer.shutdown() + tracer.flush() @snapshot(include_tracer=True) +@pytest.mark.subprocess() def test_multiple_traces(tracer): + from ddtrace import tracer + with tracer.trace("operation1", service="my-svc") as s: s.set_tag("k", "v") s.set_tag("num", 1234) @@ -49,15 +50,22 @@ def 
test_multiple_traces(tracer): s.set_metric("float_metric", 12.34) s.set_metric("int_metric", 4321) tracer.trace("child").finish() - tracer.shutdown() + tracer.flush() -@pytest.mark.parametrize( - "writer", - ("default", "sync"), -) @snapshot(include_tracer=True) -def test_filters(writer, tracer): +@pytest.mark.subprocess( + parametrize={"DD_WRITER_MODE": ["default", "sync"]}, + token="tests.integration.test_integration_snapshots.test_filters", +) +def test_filters(): + import os + + from ddtrace import tracer + from ddtrace.internal.writer import AgentWriter + + writer = os.environ.get("DD_WRITER_MODE", "default") + if writer == "sync": writer = AgentWriter( tracer.agent_trace_url, @@ -79,27 +87,25 @@ def process_trace(self, trace): s.set_tag(self.key, self.value) return trace - tracer.configure( - settings={ - "FILTERS": [FilterMutate("boop", "beep")], - }, - writer=writer, - ) + tracer._configure(trace_processors=[FilterMutate("boop", "beep")], writer=writer) with tracer.trace("root"): with tracer.trace("child"): pass - tracer.shutdown() + tracer.flush() # Have to use sync mode snapshot so that the traces are associated to this # test case since we use a custom writer (that doesn't have the trace headers # injected). +@pytest.mark.subprocess() @snapshot(async_mode=False) def test_synchronous_writer(): - tracer = Tracer() + from ddtrace import tracer + from ddtrace.internal.writer import AgentWriter + writer = AgentWriter(tracer._writer.agent_url, sync_mode=True) - tracer.configure(writer=writer) + tracer._configure(writer=writer) with tracer.trace("operation1", service="my-svc"): with tracer.trace("child1"): pass @@ -117,19 +123,18 @@ def test_tracer_trace_across_popen(): the child span has does not have '_dd.p.dm' shows that sampling was run before fork automatically. 
""" - tracer = Tracer() def task(tracer): with tracer.trace("child"): pass - tracer.shutdown() + tracer.flush() with tracer.trace("parent"): p = multiprocessing.Process(target=task, args=(tracer,)) p.start() p.join() - tracer.shutdown() + tracer.flush() @snapshot(async_mode=False) @@ -140,31 +145,34 @@ def test_tracer_trace_across_multiple_popens(): the child span has does not have '_dd.p.dm' shows that sampling was run before fork automatically. """ - tracer = Tracer() def task(tracer): def task2(tracer): with tracer.trace("child2"): pass - tracer.shutdown() + tracer.flush() with tracer.trace("child1"): p = multiprocessing.Process(target=task2, args=(tracer,)) p.start() p.join() - tracer.shutdown() + tracer.flush() with tracer.trace("parent"): p = multiprocessing.Process(target=task, args=(tracer,)) p.start() p.join() - tracer.shutdown() + tracer.flush() @snapshot() +@pytest.mark.subprocess() def test_wrong_span_name_type_not_sent(): """Span names should be a text type.""" - tracer = Tracer() + import mock + + from ddtrace import tracer + with mock.patch("ddtrace._trace.span.log") as log: with tracer.trace(123): pass @@ -180,11 +188,9 @@ def test_wrong_span_name_type_not_sent(): ], ) @pytest.mark.parametrize("encoding", ["v0.4", "v0.5"]) -@snapshot() def test_trace_with_wrong_meta_types_not_sent(encoding, meta, monkeypatch): """Wrong meta types should raise TypeErrors during encoding and fail to send to the agent.""" with override_global_config(dict(_trace_api=encoding)): - tracer = Tracer() with mock.patch("ddtrace._trace.span.log") as log: with tracer.trace("root") as root: root._meta = meta @@ -218,14 +224,19 @@ def test_trace_with_wrong_metrics_types_not_sent(encoding, metrics, monkeypatch) log.exception.assert_called_once_with("error closing trace") -@snapshot() +@pytest.mark.subprocess() +@pytest.mark.snapshot() def test_tracetagsprocessor_only_adds_new_tags(): - tracer = Tracer() + from ddtrace import tracer + from ddtrace.constants import AUTO_KEEP + from 
ddtrace.constants import SAMPLING_PRIORITY_KEY + from ddtrace.constants import USER_KEEP + with tracer.trace(name="web.request") as span: span.context.sampling_priority = AUTO_KEEP span.set_metric(SAMPLING_PRIORITY_KEY, USER_KEEP) - tracer.shutdown() + tracer.flush() # Override the token so that both parameterizations of the test use the same snapshot diff --git a/tests/integration/test_priority_sampling.py b/tests/integration/test_priority_sampling.py index 59177be57cb..653ef96d49e 100644 --- a/tests/integration/test_priority_sampling.py +++ b/tests/integration/test_priority_sampling.py @@ -9,12 +9,11 @@ from ddtrace.internal.encoding import MsgpackEncoderV04 as Encoder from ddtrace.internal.writer import AgentWriter from ddtrace.tracer import Tracer +from tests.integration.utils import AGENT_VERSION from tests.integration.utils import parametrize_with_all_encodings from tests.integration.utils import skip_if_testagent from tests.utils import override_global_config -from .test_integration import AGENT_VERSION - def _turn_tracer_into_dummy(tracer): """Override tracer's writer's write() method to keep traces instead of sending them away""" diff --git a/tests/integration/test_propagation.py b/tests/integration/test_propagation.py index 5bd0a122a8c..bcad0ed4432 100644 --- a/tests/integration/test_propagation.py +++ b/tests/integration/test_propagation.py @@ -1,44 +1,15 @@ import pytest -from ddtrace import Tracer +from ddtrace import tracer from ddtrace.constants import MANUAL_DROP_KEY from ddtrace.propagation.http import HTTPPropagator -from tests.utils import override_global_config - -from .test_integration import AGENT_VERSION +from tests.integration.utils import AGENT_VERSION pytestmark = pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent") -@pytest.fixture( - params=[ - dict(global_config=dict()), - dict( - global_config=dict(_x_datadog_tags_max_length="0", _x_datadog_tags_enabled=False), - ), - 
dict(global_config=dict(), partial_flush_enabled=True, partial_flush_min_spans=2), - ] -) -def tracer(request): - global_config = request.param.get("global_config", dict()) - partial_flush_enabled = request.param.get("partial_flush_enabled") - partial_flush_min_spans = request.param.get("partial_flush_min_spans") - with override_global_config(global_config): - tracer = Tracer() - kwargs = dict() - if partial_flush_enabled: - kwargs["partial_flush_enabled"] = partial_flush_enabled - if partial_flush_min_spans: - kwargs["partial_flush_min_spans"] = partial_flush_min_spans - tracer.configure(**kwargs) - yield tracer - tracer.shutdown() - - -@pytest.mark.snapshot() def test_trace_tags_multispan(): - tracer = Tracer() headers = { "x-datadog-trace-id": "1234", "x-datadog-parent-id": "5678", @@ -61,15 +32,8 @@ def test_trace_tags_multispan(): gc.finish() -@pytest.fixture -def downstream_tracer(): - tracer = Tracer() - yield tracer - tracer.shutdown() - - @pytest.mark.snapshot() -def test_sampling_decision_downstream(downstream_tracer): +def test_sampling_decision_downstream(): """ Ensures that set_tag(MANUAL_DROP_KEY) on a span causes the sampling decision meta and sampling priority metric to be set appropriately indicating rejection @@ -81,7 +45,7 @@ def test_sampling_decision_downstream(downstream_tracer): "x-datadog-tags": "_dd.p.dm=-1", } kept_trace_context = HTTPPropagator.extract(headers_indicating_kept_trace) - downstream_tracer.context_provider.activate(kept_trace_context) + tracer.context_provider.activate(kept_trace_context) - with downstream_tracer.trace("p", service="downstream") as span_to_reject: + with tracer.trace("p", service="downstream") as span_to_reject: span_to_reject.set_tag(MANUAL_DROP_KEY) diff --git a/tests/integration/test_sampling.py b/tests/integration/test_sampling.py index 234d12d283c..442ed6e4d88 100644 --- a/tests/integration/test_sampling.py +++ b/tests/integration/test_sampling.py @@ -1,4 +1,3 @@ -import mock import pytest from 
ddtrace._trace.sampler import DatadogSampler @@ -7,10 +6,9 @@ from ddtrace.constants import MANUAL_DROP_KEY from ddtrace.constants import MANUAL_KEEP_KEY from ddtrace.internal.writer import AgentWriter +from tests.integration.utils import AGENT_VERSION from tests.utils import snapshot -from .test_integration import AGENT_VERSION - pytestmark = pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent") RESOURCE = "mycoolre$ource" # codespell:ignore @@ -19,6 +17,9 @@ def snapshot_parametrized_with_writers(f): def _patch(writer, tracer): + old_sampler = tracer._sampler + old_writer = tracer._writer + old_tags = tracer._tags if writer == "sync": writer = AgentWriter( tracer.agent_trace_url, @@ -29,11 +30,13 @@ def _patch(writer, tracer): writer._headers = tracer._writer._headers else: writer = tracer._writer - tracer.configure(writer=writer) try: return f(writer, tracer) finally: - tracer.shutdown() + tracer.flush() + # Reset tracer configurations to avoid leaking state between tests + tracer._configure(sampler=old_sampler, writer=old_writer) + tracer._tags = old_tags wrapped = snapshot(include_tracer=True, token_override=f.__name__)(_patch) return pytest.mark.parametrize( @@ -51,7 +54,7 @@ def test_sampling_with_defaults(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_default_sample_rate_1(writer, tracer): sampler = DatadogSampler(default_sample_rate=1.0) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace2"): tracer.trace("child").finish() @@ -59,7 +62,7 @@ def test_sampling_with_default_sample_rate_1(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_default_sample_rate_tiny(writer, tracer): sampler = DatadogSampler(default_sample_rate=0.000001) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace3"): 
tracer.trace("child").finish() @@ -67,7 +70,7 @@ def test_sampling_with_default_sample_rate_tiny(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_default_sample_rate_1_and_rule_1(writer, tracer): sampler = DatadogSampler(default_sample_rate=1, rules=[SamplingRule(1.0)]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace4"): tracer.trace("child").finish() @@ -75,7 +78,7 @@ def test_sampling_with_default_sample_rate_1_and_rule_1(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_default_sample_rate_1_and_rule_0(writer, tracer): sampler = DatadogSampler(default_sample_rate=1, rules=[SamplingRule(0)]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace5"): tracer.trace("child").finish() @@ -83,7 +86,7 @@ def test_sampling_with_default_sample_rate_1_and_rule_0(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_default_sample_rate_1_and_manual_drop(writer, tracer): sampler = DatadogSampler(default_sample_rate=1) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace6"): with tracer.trace("child") as span: span.set_tag(MANUAL_DROP_KEY) @@ -92,7 +95,7 @@ def test_sampling_with_default_sample_rate_1_and_manual_drop(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_default_sample_rate_1_and_manual_keep(writer, tracer): sampler = DatadogSampler(default_sample_rate=1) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace7"): with tracer.trace("child") as span: span.set_tag(MANUAL_KEEP_KEY) @@ -101,7 +104,7 @@ def test_sampling_with_default_sample_rate_1_and_manual_keep(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_rate_sampler_with_tiny_rate(writer, 
tracer): sampler = RateSampler(0.0000000001) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace8"): tracer.trace("child").finish() @@ -109,7 +112,7 @@ def test_sampling_with_rate_sampler_with_tiny_rate(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_sample_rate_1_and_rate_limit_0(writer, tracer): sampler = DatadogSampler(default_sample_rate=1, rate_limit=0) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace5"): tracer.trace("child").finish() @@ -117,7 +120,7 @@ def test_sampling_with_sample_rate_1_and_rate_limit_0(writer, tracer): @snapshot_parametrized_with_writers def test_sampling_with_sample_rate_1_and_rate_limit_3_and_rule_0(writer, tracer): sampler = DatadogSampler(default_sample_rate=1, rules=[SamplingRule(0)], rate_limit=3) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace5"): tracer.trace("child").finish() @@ -125,7 +128,7 @@ def test_sampling_with_sample_rate_1_and_rate_limit_3_and_rule_0(writer, tracer) @snapshot_parametrized_with_writers def test_sampling_with_rate_limit_3(writer, tracer): sampler = DatadogSampler(rate_limit=3) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace("trace5"): tracer.trace("child").finish() @@ -133,7 +136,7 @@ def test_sampling_with_rate_limit_3(writer, tracer): @snapshot_parametrized_with_writers def test_extended_sampling_resource(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, resource=RESOURCE)]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer.trace("should_not_send", resource=RESOURCE).finish() tracer.trace("should_send", resource="something else").finish() @@ -141,7 +144,7 @@ def 
test_extended_sampling_resource(writer, tracer): @snapshot_parametrized_with_writers def test_extended_sampling_tags(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, tags=TAGS)]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer._tags = TAGS tracer.trace("should_not_send").finish() tracer._tags = {"banana": "True"} @@ -157,7 +160,7 @@ def test_extended_sampling_tags_glob(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, tags=rule_tags)]) assert sampler.rules[0].tags == {tag_key: "my*"} - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer._tags = TAGS tracer.trace("should_not_send").finish() @@ -168,7 +171,7 @@ def test_extended_sampling_tags_glob(writer, tracer): @snapshot_parametrized_with_writers def test_extended_sampling_tags_glob_insensitive_case_match(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, resource="BANANA")]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer._tags = TAGS tracer.trace("should_not_send", resource="bananA").finish() @@ -178,7 +181,7 @@ def test_extended_sampling_tags_glob_insensitive_case_match(writer, tracer): @snapshot_parametrized_with_writers def test_extended_sampling_tags_and_resource(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, tags=TAGS, resource=RESOURCE)]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer._tags = TAGS tracer.trace("should_not_send", resource=RESOURCE).finish() @@ -192,7 +195,7 @@ def test_extended_sampling_tags_and_resource(writer, tracer): @snapshot_parametrized_with_writers def test_extended_sampling_w_None_meta(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, tags={"test": None}, resource=RESOURCE)]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, 
writer=writer) tracer._tags = {"test": None} tracer.trace("should_not_send", resource=RESOURCE).finish() @@ -206,7 +209,7 @@ def test_extended_sampling_w_None_meta(writer, tracer): @snapshot() def test_extended_sampling_w_metrics(tracer): sampler = DatadogSampler(rules=[SamplingRule(0, tags={"test": 123}, resource=RESOURCE)]) - tracer.configure(sampler=sampler) + tracer._configure(sampler=sampler) tracer._tags = {"test": 123} tracer.trace("should_not_send", resource=RESOURCE).finish() @@ -228,7 +231,7 @@ def test_extended_sampling_glob_multi_rule(writer, tracer): SamplingRule(1, service="webserv?r", name="web.req*"), ] ) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer._tags = {"test": "tag"} tracer.trace(name="web.reqUEst", service="wEbServer").finish() @@ -237,7 +240,7 @@ def test_extended_sampling_glob_multi_rule(writer, tracer): @snapshot_parametrized_with_writers def test_extended_sampling_tags_and_resource_glob(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, tags=TAGS, resource="mycoolre$ou*")]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer._tags = TAGS tracer.trace("should_not_send", resource=RESOURCE).finish() @@ -251,7 +254,7 @@ def test_extended_sampling_tags_and_resource_glob(writer, tracer): @snapshot_parametrized_with_writers def test_extended_sampling_tags_and_service_glob(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, tags=TAGS, service="mycoolser????")]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer._tags = TAGS tracer.trace("should_not_send", service="mycoolservice").finish() @@ -265,7 +268,7 @@ def test_extended_sampling_tags_and_service_glob(writer, tracer): @snapshot_parametrized_with_writers def test_extended_sampling_tags_and_name_glob(writer, tracer): sampler = DatadogSampler(rules=[SamplingRule(0, tags=TAGS, 
name="mycoolna*")]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) tracer._tags = TAGS tracer.trace(name="mycoolname").finish() @@ -282,7 +285,7 @@ def test_extended_sampling_float_special_case_do_not_match(writer, tracer): # should not match the rule, and should therefore be kept """ sampler = DatadogSampler(rules=[SamplingRule(0, tags={"tag": "2*"})]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace(name="should_send") as span: span.set_tag("tag", 20.1) @@ -293,17 +296,21 @@ def test_extended_sampling_float_special_case_match_star(writer, tracer): # should match the rule, and should therefore should be dropped """ sampler = DatadogSampler(rules=[SamplingRule(0, tags={"tag": "*"})]) - tracer.configure(sampler=sampler, writer=writer) + tracer._configure(sampler=sampler, writer=writer) with tracer.trace(name="should_send") as span: span.set_tag("tag", 20.1) +@pytest.mark.subprocess() def test_rate_limiter_on_spans(tracer): """ Ensure that the rate limiter is applied to spans """ + from ddtrace import tracer + from ddtrace.sampler import DatadogSampler + # Rate limit is only applied if a sample rate or trace sample rule is set - tracer.configure(sampler=DatadogSampler(default_sample_rate=1, rate_limit=10)) + tracer._configure(sampler=DatadogSampler(default_sample_rate=1, rate_limit=10)) spans = [] # Generate 10 spans with the start and finish time in same second for x in range(10): @@ -325,11 +332,17 @@ def test_rate_limiter_on_spans(tracer): assert dropped_span.context.sampling_priority < 0 +@pytest.mark.subprocess() def test_rate_limiter_on_long_running_spans(tracer): """ Ensure that the rate limiter is applied on increasing time intervals """ - tracer.configure(sampler=DatadogSampler(rate_limit=5)) + import mock + + from ddtrace import tracer + from ddtrace.sampler import DatadogSampler + + 
tracer._configure(sampler=DatadogSampler(rate_limit=5)) with mock.patch("ddtrace.internal.rate_limiter.time.monotonic_ns", return_value=1617333414): span_m30 = tracer.trace(name="march 30") diff --git a/tests/integration/test_settings.py b/tests/integration/test_settings.py index 55e8d1e76d8..249b0211bb4 100644 --- a/tests/integration/test_settings.py +++ b/tests/integration/test_settings.py @@ -2,7 +2,7 @@ import pytest -from .test_integration import AGENT_VERSION +from tests.integration.utils import AGENT_VERSION def _get_telemetry_config_items(events, item_name): diff --git a/tests/integration/test_trace_stats.py b/tests/integration/test_trace_stats.py index 0e19a44f1dd..46c153bc8d5 100644 --- a/tests/integration/test_trace_stats.py +++ b/tests/integration/test_trace_stats.py @@ -5,25 +5,23 @@ import mock import pytest -from ddtrace import Tracer from ddtrace._trace.sampler import DatadogSampler from ddtrace._trace.sampler import SamplingRule from ddtrace.constants import SPAN_MEASURED_KEY from ddtrace.ext import http from ddtrace.internal.processor.stats import SpanStatsProcessorV06 +from tests.integration.utils import AGENT_VERSION +from tests.utils import DummyTracer from tests.utils import override_global_config -from .test_integration import AGENT_VERSION - pytestmark = pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent") @pytest.fixture def stats_tracer(): - # type: (float) -> Generator[Tracer, None, None] with override_global_config(dict(_trace_compute_stats=True)): - tracer = Tracer() + tracer = DummyTracer() yield tracer tracer.shutdown() @@ -70,10 +68,10 @@ def test_compute_stats_default_and_configure(run_python_code_in_subprocess, envv """Ensure stats computation can be enabled.""" # Test enabling via `configure` - t = Tracer() + t = DummyTracer() assert not t._compute_stats assert not any(isinstance(p, SpanStatsProcessorV06) for p in t._span_processors) - t.configure(compute_stats_enabled=True) + 
t._configure(compute_stats_enabled=True) assert any(isinstance(p, SpanStatsProcessorV06) for p in t._span_processors) assert t._compute_stats @@ -100,24 +98,27 @@ def test_compute_stats_default_and_configure(run_python_code_in_subprocess, envv assert status == 0, out + err -def test_apm_opt_out_compute_stats_and_configure(run_python_code_in_subprocess): +@pytest.mark.subprocess(err=None) +def test_apm_opt_out_compute_stats_and_configure(): """ Ensure stats computation is disabled, but reported as enabled, if APM is opt-out. """ + from ddtrace import tracer as t + from ddtrace.internal.processor.stats import SpanStatsProcessorV06 # Test via `configure` - t = Tracer() assert not t._compute_stats assert not any(isinstance(p, SpanStatsProcessorV06) for p in t._span_processors) - t.configure(appsec_enabled=True, appsec_standalone_enabled=True) + t._configure(appsec_enabled=True, appsec_standalone_enabled=True) assert not any(isinstance(p, SpanStatsProcessorV06) for p in t._span_processors) # the stats computation is disabled assert not t._compute_stats # but it's reported as enabled assert t._writer._headers.get("Datadog-Client-Computed-Stats") == "yes" - t.configure(appsec_enabled=False, appsec_standalone_enabled=False) + +def test_apm_opt_out_compute_stats_and_configure_env(run_python_code_in_subprocess): # Test via environment variable env = os.environ.copy() env.update({"DD_EXPERIMENTAL_APPSEC_STANDALONE_ENABLED": "true", "DD_APPSEC_ENABLED": "true"}) @@ -239,7 +240,7 @@ def test_top_level(send_once_stats_tracer): @pytest.mark.snapshot() def test_single_span_sampling(stats_tracer, sampling_rule): sampler = DatadogSampler([sampling_rule]) - stats_tracer.configure(sampler=sampler) + stats_tracer._configure(sampler=sampler) with stats_tracer.trace("parent", service="test"): with stats_tracer.trace("child") as child: # FIXME: Replace with span sampling rule diff --git a/tests/integration/test_tracemethods.py b/tests/integration/test_tracemethods.py index 
8568cbc3737..15129c56161 100644 --- a/tests/integration/test_tracemethods.py +++ b/tests/integration/test_tracemethods.py @@ -5,7 +5,7 @@ import pytest -from .test_integration import AGENT_VERSION +from tests.integration.utils import AGENT_VERSION pytestmark = pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent") diff --git a/tests/integration/utils.py b/tests/integration/utils.py index dea4a091ed4..21822ea6e59 100644 --- a/tests/integration/utils.py +++ b/tests/integration/utils.py @@ -33,7 +33,7 @@ def send_invalid_payload_and_get_logs(encoder_cls=BadEncoder): client.encoder = encoder_cls() with mock.patch("ddtrace.internal.writer.writer.log") as log: t.trace("asdf").finish() - t.shutdown() + t.flush() return log diff --git a/tests/internal/crashtracker/test_crashtracker.py b/tests/internal/crashtracker/test_crashtracker.py index a4074745f83..96dc81c6a5f 100644 --- a/tests/internal/crashtracker/test_crashtracker.py +++ b/tests/internal/crashtracker/test_crashtracker.py @@ -3,8 +3,6 @@ import pytest -from ddtrace.settings.profiling import config as profiling_config -from ddtrace.settings.profiling import config_str import tests.internal.crashtracker.utils as utils @@ -506,7 +504,7 @@ def test_crashtracker_user_tags_envvar(run_python_code_in_subprocess): @pytest.mark.skipif(not sys.platform.startswith("linux"), reason="Linux only") -@pytest.mark.skipif(sys.version_info > (3, 12), reason="Fails on 3.13") +@pytest.mark.skipif(sys.version_info >= (3, 13), reason="Fails on 3.13") def test_crashtracker_set_tag_profiler_config(run_python_code_in_subprocess): port, sock = utils.crashtracker_receiver_bind() assert sock @@ -528,7 +526,11 @@ def test_crashtracker_set_tag_profiler_config(run_python_code_in_subprocess): # Now check for the profiler_config tag assert b"profiler_config" in data - profiler_config = config_str(profiling_config) + py_version = sys.version_info[:2] + if py_version >= (3, 8): + profiler_config = 
"stack_v2_lock_mem_heap_exp_dd_CAP1.0_MAXF64" + else: + profiler_config = "stack_lock_mem_heap_exp_dd_CAP1.0_MAXF64" assert profiler_config.encode() in data diff --git a/tests/internal/test_module.py b/tests/internal/test_module.py index 885f796af81..c84c2c740d6 100644 --- a/tests/internal/test_module.py +++ b/tests/internal/test_module.py @@ -578,17 +578,6 @@ def __getattr__(name): "ddtrace.contrib.trace_utils", "ddtrace.contrib.trace_utils_async", "ddtrace.contrib.trace_utils_redis", - # TODO: The following contrib modules are part of the public API (unlike most integrations). - # We should consider privatizing the internals of these integrations. - "ddtrace.contrib.unittest.patch", - "ddtrace.contrib.unittest.constants", - "ddtrace.contrib.pytest.constants", - "ddtrace.contrib.pytest.newhooks", - "ddtrace.contrib.pytest.plugin", - "ddtrace.contrib.pytest_benchmark.constants", - "ddtrace.contrib.pytest_benchmark.plugin", - "ddtrace.contrib.pytest_bdd.constants", - "ddtrace.contrib.pytest_bdd.plugin", ] ) diff --git a/tests/internal/test_settings.py b/tests/internal/test_settings.py index 97bdced47d9..9010e5f044a 100644 --- a/tests/internal/test_settings.py +++ b/tests/internal/test_settings.py @@ -251,7 +251,7 @@ def test_remoteconfig_sampling_rate_user(run_python_code_in_subprocess): assert span.get_metric("_dd.rule_psr") == 0.1 custom_sampler = DatadogSampler(default_sample_rate=0.3) -tracer.configure(sampler=custom_sampler) +tracer._configure(sampler=custom_sampler) with tracer.trace("test") as span: pass assert span.get_metric("_dd.rule_psr") == 0.3 @@ -316,7 +316,7 @@ def test_remoteconfig_sampling_rules(run_python_code_in_subprocess): assert span.get_metric("_dd.rule_psr") == 0.1 custom_sampler = DatadogSampler(DatadogSampler._parse_rules_from_str('[{"sample_rate":0.3, "name":"test"}]')) -tracer.configure(sampler=custom_sampler) +tracer._configure(sampler=custom_sampler) with tracer.trace("test") as span: pass assert span.get_metric("_dd.rule_psr") == 0.3 
diff --git a/tests/llmobs/_utils.py b/tests/llmobs/_utils.py index 4e60a8f3996..3583516538c 100644 --- a/tests/llmobs/_utils.py +++ b/tests/llmobs/_utils.py @@ -531,28 +531,27 @@ def _llm_span_with_expected_ragas_inputs_in_messages(ragas_inputs=None): class DummyEvaluator: - LABEL = "dummy" - - def __init__(self, llmobs_service): + def __init__(self, llmobs_service, label="dummy"): self.llmobs_service = llmobs_service + self.LABEL = label def run_and_submit_evaluation(self, span): self.llmobs_service.submit_evaluation( span_context=span, - label=DummyEvaluator.LABEL, + label=self.LABEL, value=1.0, metric_type="score", ) -def _dummy_evaluator_eval_metric_event(span_id, trace_id): +def _dummy_evaluator_eval_metric_event(span_id, trace_id, label=None): return LLMObsEvaluationMetricEvent( join_on={"span": {"span_id": span_id, "trace_id": trace_id}}, score_value=1.0, ml_app="unnamed-ml-app", timestamp_ms=mock.ANY, metric_type="score", - label=DummyEvaluator.LABEL, + label=label or "dummy", tags=["ddtrace.version:{}".format(ddtrace.__version__), "ml_app:unnamed-ml-app"], ) @@ -573,6 +572,7 @@ def _expected_ragas_context_precision_spans(ragas_inputs=None): "span.kind": "workflow", "input": {"value": mock.ANY}, "output": {"value": "1.0"}, + "metadata": {}, }, "metrics": {}, "tags": expected_ragas_trace_tags(), @@ -589,6 +589,7 @@ def _expected_ragas_context_precision_spans(ragas_inputs=None): "span.kind": "workflow", "input": {"value": mock.ANY}, "output": {"value": mock.ANY}, + "metadata": {}, }, "metrics": {}, "tags": expected_ragas_trace_tags(), @@ -712,3 +713,61 @@ def _expected_ragas_faithfulness_spans(ragas_inputs=None): "tags": expected_ragas_trace_tags(), }, ] + + +def _expected_ragas_answer_relevancy_spans(ragas_inputs=None): + if not ragas_inputs: + ragas_inputs = default_ragas_inputs + return [ + { + "trace_id": mock.ANY, + "span_id": mock.ANY, + "parent_id": "undefined", + "name": "dd-ragas.answer_relevancy", + "start_ns": mock.ANY, + "duration": mock.ANY, + 
"status": "ok", + "meta": { + "span.kind": "workflow", + "input": {"value": mock.ANY}, + "output": {"value": mock.ANY}, + "metadata": {"answer_classifications": mock.ANY, "strictness": mock.ANY}, + }, + "metrics": {}, + "tags": expected_ragas_trace_tags(), + }, + { + "trace_id": mock.ANY, + "span_id": mock.ANY, + "parent_id": mock.ANY, + "name": "dd-ragas.extract_evaluation_inputs_from_span", + "start_ns": mock.ANY, + "duration": mock.ANY, + "status": "ok", + "meta": { + "span.kind": "workflow", + "input": {"value": mock.ANY}, + "output": {"value": mock.ANY}, + "metadata": {}, + }, + "metrics": {}, + "tags": expected_ragas_trace_tags(), + }, + { + "trace_id": mock.ANY, + "span_id": mock.ANY, + "parent_id": mock.ANY, + "name": "dd-ragas.calculate_similarity", + "start_ns": mock.ANY, + "duration": mock.ANY, + "status": "ok", + "meta": { + "span.kind": "workflow", + "input": {"value": mock.ANY}, + "output": {"value": mock.ANY}, + "metadata": {}, + }, + "metrics": {}, + "tags": expected_ragas_trace_tags(), + }, + ] diff --git a/tests/llmobs/conftest.py b/tests/llmobs/conftest.py index 108f7dfe55d..61a028e5caf 100644 --- a/tests/llmobs/conftest.py +++ b/tests/llmobs/conftest.py @@ -150,6 +150,15 @@ def reset_ragas_faithfulness_llm(): ragas.metrics.faithfulness.llm = previous_llm +@pytest.fixture +def reset_ragas_answer_relevancy_llm(): + import ragas + + previous_llm = ragas.metrics.answer_relevancy.llm + yield + ragas.metrics.answer_relevancy.llm = previous_llm + + @pytest.fixture def mock_ragas_evaluator(mock_llmobs_eval_metric_writer, ragas): patcher = mock.patch("ddtrace.llmobs._evaluators.ragas.faithfulness.RagasFaithfulnessEvaluator.evaluate") @@ -159,6 +168,17 @@ def mock_ragas_evaluator(mock_llmobs_eval_metric_writer, ragas): patcher.stop() +@pytest.fixture +def mock_ragas_answer_relevancy_calculate_similarity(): + import numpy + + patcher = mock.patch("ragas.metrics.answer_relevancy.calculate_similarity") + MockRagasCalcSim = patcher.start() + 
MockRagasCalcSim.return_value = numpy.array([1.0, 1.0, 1.0]) + yield MockRagasCalcSim + patcher.stop() + + @pytest.fixture def tracer(): return DummyTracer() diff --git a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.answer_relevancy_inference.yaml b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.answer_relevancy_inference.yaml new file mode 100644 index 00000000000..1f537a977b8 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.answer_relevancy_inference.yaml @@ -0,0 +1,557 @@ +interactions: +- request: + body: '{"messages": [{"content": "Generate a question for the given answer and + Identify if answer is noncommittal. Give noncommittal as 1 if the answer is + noncommittal and 0 if the answer is committal. A noncommittal answer is one + that is evasive, vague, or ambiguous. For example, \"I don''t know\" or \"I''m + not sure\" are noncommittal answers\n\nThe output should be a well-formatted + JSON instance that conforms to the JSON schema below.\n\nAs an example, for + the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": + \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, + \"required\": [\"foo\"]}\nthe object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted + instance of the schema. 
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} + is not well-formatted.\n\nHere is the output JSON schema:\n```\n{\"type\": \"object\", + \"properties\": {\"question\": {\"title\": \"Question\", \"type\": \"string\"}, + \"noncommittal\": {\"title\": \"Noncommittal\", \"type\": \"integer\"}}, \"required\": + [\"question\", \"noncommittal\"]}\n```\n\nDo not return any preamble or explanations, + return only a pure JSON string surrounded by triple backticks (```).\n\nExamples:\n\nanswer: + \"Albert Einstein was born in Germany.\"\ncontext: \"Albert Einstein was a German-born + theoretical physicist who is widely held to be one of the greatest and most + influential scientists of all time\"\noutput: ```{\"question\": \"Where was + Albert Einstein born?\", \"noncommittal\": 0}```\n\nanswer: \"It can change + its skin color based on the temperature of its environment.\"\ncontext: \"A + recent scientific study has discovered a new species of frog in the Amazon rainforest + that has the unique ability to change its skin color based on the temperature + of its environment.\"\noutput: ```{\"question\": \"What unique ability does + the newly discovered species of frog have?\", \"noncommittal\": 0}```\n\nanswer: + \"Everest\"\ncontext: \"The tallest mountain on Earth, measured from sea level, + is a renowned peak located in the Himalayas.\"\noutput: ```{\"question\": \"What + is the tallest mountain on Earth?\", \"noncommittal\": 0}```\n\nanswer: \"I + don''t know about the groundbreaking feature of the smartphone invented in + 2023 as am unaware of information beyond 2022. 
\"\ncontext: \"In 2023, a groundbreaking + invention was announced: a smartphone with a battery life of one month, revolutionizing + the way people use mobile technology.\"\noutput: ```{\"question\": \"What was + the groundbreaking feature of the smartphone invented in 2023?\", \"noncommittal\": + 1}```\n\nYour actual task:\n\nanswer: \"The capital of France is Paris\"\ncontext: + \"The capital of France is Paris.\"\noutput: \n", "role": "user"}], "model": + "gpt-4o-mini", "n": 3, "stream": false, "temperature": 0.3}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2795' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.52.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.52.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.13 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//7FTBjpswEL3zFdacQ0VoNmS5VJWyvfTWSttDqcAxA7hrbK89rLaN8u+V + CRuItpV6by8c5s17vHn2+BgxBrKGnIHoOIneqvi9/bjf3iVPSI/74ZP87H/e7++e7x8ytc+2sAoM + c/iOgl5Yb4TprUKSRp9h4ZATBtV19nab3d5ku2QEelOjCrTWUrwxcS+1jNMk3cRJFq93E7szUqCH + nH2NGGPsOH6DT13jM+Rs1BorPXrPW4T80sQYOKNCBbj30hPXBKsZFEYT6tF6VVXHAh4H9MF5ATkr + 4EvHiUnPqEMmuJXEFTMN++C4FviugBUrQBstTN9LIq4CKzlVVbX8h8Nm8DzMqQelpvrpYlqZ1jpz + 8BN+qTdSS9+VDrk3Ohj0ZCxEC/KrJNb/k5iSSP+9JCLGvo0LM1zNC9aZ3lJJ5gF1ENym08LAvKcL + 9HYCyRBXi/ruBbjSK2skLpVfxAuCiw7rmTrvJx9qaRbA8ghfu/md9nlyqdu/kZ8BIdAS1qV1WEtx + PfHc5jA8Y39qu6Q8GgaP7kkKLEmiCydRY8MHdb5I4H94wr5spG7RWSfPt6mxZZZinfHDzUZAdIp+ + AQAA//8DAHIOLnJvBQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 9017b85e9bb772c2-EWR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 13 Jan 
2025 19:16:21 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=jNCWyg.Vq7.UysipP_0ZTDahpo.QhHWQvZ5Biaue6Bs-1736795781-1.0.1.1-C.GEfp7jlmfkY9qIXtsRjf9L9W5MzQ2OSXUpBOB0jIjYNSrJBlVnNwuHbaPYKT9.DjEjgPPIK69hkYhC0UtQZA; + path=/; expires=Mon, 13-Jan-25 19:46:21 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=g4nQujMCF8pE0TfAWPiY5rkdnbhtDE1kP7w0dFQq.v4-1736795781566-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + openai-organization: + - datadog-staging + openai-processing-ms: + - '541' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999320' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_b5c84c23586be35a5fc602178e84a812 + status: + code: 200 + message: OK +- request: + body: '{"input": [[3923, 374, 279, 6864, 315, 9822, 30]], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '114' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.52.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.52.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.13 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + 
H4sIAAAAAAAAA1R62a6zurbm/X6KpXWb2gpd8GDdESCQ0NhJIF2pVAKSECCE0NiAj867l8h/dKrq + ZkqT6QkGD3/d8H/866+//q6T4pH2f//z19/vvOv//h/ztXvcx3//89f//Ndff/3113/8fv5/Ix9V + 8rjf80/2G/77Y/65P8a///lL+O8r/3fQfO/usyW6cQiS8fqNqTqZDmFB9CzQZNaOAvxD18wig+yx + MtBK1X4rAQvr8BNO1X4QQBKbG1uX1xwNXnmJIVDSKxUgH/kkaS4FIigf4ir9J+GqqVbI3199XDcX + uajvtNRBWz01LMjftzeRozeg9egZzOXXtJjug5tDdO1dshW/q7BfSw2F69jrDBfHVzIF8SNCmVc/ + mL4LcsR72gpoO778eT4GF+8HyYLbsz6zLc6WbX8/SDZoi5YQm5Kl16XjykCR1Lzo8hI5RVfULAM1 + 2G7IzY8HNKqrAqB1rSs7ewprp+Pxe4TmuxswYGInw+pRZmgp1BzLrn3xuHURclAP+ZbgbKd4NIjP + Z7ADd0s2q3MecnzWDY0a4gMvF/d12Itn24fxmafEum1eaMhbIQMosxgP6XFop29oK7DgboPDe/nm + LTkiHRbZ4stcFB9bmpzUCC3Ec4ulnVeGVBTiWs1PTko2b/Hc8kXxluCwLUeC3fZk8hUyUgir20hc + +WO34+OUVdDot454R68xR6hfEaiK7WHqLb5FfT/YErqXR5eZPdTeoCaXXG1k9UIb59qF3f2NFogU + yyWehvclpO/ltVkthYYTV09lkwdsopqrZjqV8WntyYq/2cL0bY8UoQdGbDc1GDmP+kAOe10vpD3L + AGx1T5hX+0Eo58s8RocycpjFYuRNMOmNhjaWShfcKcKBrr0jSj+uybzrpQ07qoRbKMs6IoHSbc2h + iFcKxGKeM3N5jdCQfLcA/jhpZLMtP6hL754CR/oA4q2igvPL7WCjGj22xKlXZTH4bklhNEeHrkb3 + lPAKJkXL9T4mdo9q3pP7oQTZEnfMuMY7k+/umgVoVUVUk6tPMoivSIfs3QKe+pNXDBtf6lB/YhdC + FvdX+JsPOpRnh0o33U46NfiqoGp9hYXjrkvGg/wawNAvE7HwSi/kkzMA7CSjYuZRLBO2/FwjuCnX + hJGV+jK77NBF4B0si0QK26BJ2NwjEI6NQzbd2HlNRNI92iySFyO7rV0Mb9G1kJVkH7IzxTgsvFOe + L/Hm6VCl1HQkLfREQOI2Vsn6lQYhI0dkqJFd7EjwRkPLa6GOwPPpmhmXaJuMKdqfNdnHKV4mDm+n + ek0n+IaHN7HXHW0nenMewLRNSzxjtQ0nFLUpNF9voJNweJn9a31dgHBPBeK2730xkGVg/96fLpxk + F7JPoWNNJl+J6Dgswm67vrjgasuY6HJVmyOEgKERuzM5FMfcZLa9UqCJRkqHtpzMjjRPUKVjMeBX + ugq8QTkJDzgEg0n2R6kouHG5GMjZSyFxLwENK8/3MBDZrtl6FEjBPxdvCxcteJMgepqc2/aoqoHy + uBLjs2vCaeKiC+XKOOM6887tdHZ2HcpUc4EXix6F7QoZD8iEa8G274Ns1h9yPGt1qWyIrTiNyR26 + pKvSUwgzdlsnZGm9pkAVd0vco9Nzattuoy4e04f9vudwkKYc+lfas7un9uFUSlkFl4PusHsqDt7o + R76urks3JM7tY6DJ3Ly30F0/e+Ipa5IMd9oZiDlnhVlRvi6kgcY24CEJ6FJaeO34ukg1Mn3/Sfzd + 7o24WcsuCrreINaFdHzisftAy4fNiHO6Knys9WUGzW2pM33zunnTXntUEOXHJ1vbyo7TzAkG9BGm + ngVy5YSC2PiWGu6HM0mXyiX5LIq38MMnKqtbLfxIX0sCpjktIWRzLwZwIxWg39nM80jUStWjSuGj + 
3W5kw921KT3WOUWy76fMJ9LB5O1XH7R5PkR3P6t2mBYhVd/de8vcgpTJsGkSFZ6ED5h3Nk+4/zEa + 4EZyYetndU2+mEYYRAoaO+jPiPfyxiyRmkYi2ZzuUTLgqyxAYvVHit5c8frLRa1QZwWYal8KRXXK + IIagtM94WHyffDSavoEvrL+41eTSm97La41mvCdOMvacviTTBvuQe+yHRxRUhYK+8gO2Nk67gq+m + +Lq6hZuR2A/b5h13TAmdxf3IrHD58oa5vlGhah+2U/enGe90QMNbHIgrPO4JP6yHK0o/W5NE+Jwl + o/42qj//T8SthNg3lFxYVaXBnO+AvL4kpQWumuvETOie88zRDJj5izj7oxLyASSANIl7YvSntpi2 + WSyo5OYdGF59YjQ9h9oADpZJ9u7nVvSP4uErvPUSKq7rYzI879/rSr4FHhZeCTVHSLs9qOrRp+Vr + gUP2TJstXCXlSMILsvjI+UYFGk42nfdTMpk1UVD6WoyY73yzEOOX7v7Gs3BPUDJMi4SixhmWbK5/ + 3rqWe0XZIVTx9OncduyH+KzW93zEKrfsZGrwOtLm/Ukcq8vC0ZJ1DDSNJ7J5fsWEBUztQKQLjZjT + s04YOXIdSJa9mad6cTJdrl8BUhgfdNVZEZomrrmoR/p6Xt+A860fATwmbhD9bJOQS+S4Rdp9/2B4 + eKlhP6UZRjN/Mt0WgnaQK9lCuegLWFQDBY3hYTK0UhJOzCovZjH9+LIS0zddbt9VMp605gy9q36Z + +VYnj7pqq4OxciT6JQ54Y/Aut3/W/6C6iPft2XWhX9wTtl6Fm5BXm8KAnKQ6Od6zb9uJixVW90zq + mDPlm2S0gh5UiJcbFsRpzKdpqWL1JSodHoZm4j1b9A91o4LDHm9ybSksF0d43k4bcp+yindusKLI + P1YB0+fx7My9CcS4RMRrvz7njtkpqPArh+DmdDOnS/VM0WN4r5m/bgaTf3BoaX2zuWExY2dzEnI1 + h9Pic2KbjJ29SX+vBM0n94I4eV8kfb7wFHgvxh378X2PvTH94dWMTyuTyW9ZAIo+OV6N6redmqG0 + EVvVB0KWkcwHS7oC0m1FwEhCuJUFDA0CiSZ4afsHs/vhJyVXE8tYWYfSimxj+ICrE3LEjin/7u+9 + HzYe01Xv8X1S2LAynzcKKebtRB66BMaFbSg/b/Ymm/EfJe3hROwqDU2mRBpe7fBLxSvl6/3RUyAN + T+cPf0vJST2jmY+Zva0tc3hnESC1oDuq7OKl2Vd3GUMXJgu2uYQ7ky7yRYnE0+1LgtM5Tj5ttqew + xnLNtt6eI66xRoWL31jM0kQ7mVQ1xDCowURlwbMT+dMqe7hR2aYvWhZoSCjO1PvR3hEcFbonGYK1 + AH3d58SxpTZkD+O2gMXWL3/43fapfloAkx/7mS/dhF2bEcOsD9nNXSft6LRZo23Kq0XuZrk2pdvD + V9BBoTvmXxTHHGtdzkB9Ky6GT/81OdN9A/pTf2Hrm702aSllJUh41xMrMFvEhzjFgBFRyWaRGmhi + 0VqFX/2706sy+ft57xA+3iViuqbqDT89+7X3EsGz/h12DziDcqdvWijLJ6f1upqAkWWJl69QQH2K + 9pGGfSkmjmikiAfJpgLho+9YPL1sc/KyIdLyQ2OwgAp3czzkUKtG1Ats565R24W8z1RZ3uZk1qec + B0o+we5+fLFgF+TelCh3+/e+FLldFA7iYsTa092esOQngzeeDAZoutwPDIM+eNOq5ramaqxivn8b + i6nolQztPDuj8OPDqK8pDHl8xiLv14nEllEHStNHbE3Lgg/S/nCF5aexKA+vVjFCWu7RXB9ke2Tf + oqduXmpO/WVU/qYW4hWoCjoOzYKOVR6g6XJ9CQiWF4f4EdP4GFNt+NUXrt8H2aMyzlXYmSwg7kUz + 
2p8+VvPv88Hs82f0Jns8PWAtRRGz3Obcsvchs9G8n5i3P50Stu6sLeDjU6K587TD7iR4OqoXKCd+ + EwvtRCpFgR/fbe+P9re+FKTOsSjM+oUJ3UFSYSx2xNIfYjuZhxyD4mUl01/XgznzcQNqvBWIJ2Az + /P2O/FCsCDb958yXbqnScLCZs312Cdc3kor4bmiIP66akF/ytatlOc6JX66X7bgLaf7DF3pByQGN + 1++xg+z89HE58iGcDliaYPmpLaa/0SHpnx+9g3Pbp4RIi7ad9aWi5aFk4iF6lu0An8nW0p3fsV/9 + jmc7fiB6KDfMlN81H6wrH+BZv0SGT9oZsXCqjrA/H65Y+N4tUzCdsIHn7bJhO2NRoK8V9Atgfu4y + 67jzw17bfzDQ98KhXf6pi9eNZIb2yauOEOGgJ4JZKhJgsXoS91AG7XSO5AjG+zn55QV8WLt1rbru + tcalLTctj1dWBJ6TMSxeb18+WfiEwYdPw5zyXCajvPFKNNcb8RLB5aOVhzY4/mNPJ8XqPTbq3xwg + fE/ErEMn6X56zZkCi+2WRuZxS70coe1bl1lhqvP+KUgR2DRf4aftH7wRy/4RDZ50JMGhwOH0coc9 + zH6GFju/KLhE4i1Y0qOf13+c/cKo/PASa8aha0dxey0BPRqM5bJR227jLzrYbZUnlhTHNen3AwYa + 7FYnJA2icKybwwBFD8qvPkIuZumgktTNmTNOB69/dcUEr8vyRHT/TovvrL9U5/ZYEt/JLCQhD10h + PQ5rEv/8eVTEW4D74sx8zeu8qXWFMwjJkeOBXxbetBHyB4xHTWbWofK9sRRXZ/TzM/aHOcUAH9UG + vU9lrJRaxsdfHtJMbMMs+bvxaPrIr0g/OyWz62Pt0R9/zX533m9iwrVA32pz/kOHXWAgoTHVCah6 + 3lH5FVM0deOuRHJWPpn57R/mcCn0q5ZZoUd2yrf1OiPVJ3hx2pKAq1oxblrTheB0YmT7PlzMge3Q + +ee/sZxGOOGRhR8w5ynM7Rdv3j8f+wY+qVETXbg/+FTzlY0OBCyW2DvBG/GN2Wjej3TqJi+UZr0A + wijJbBf073CclgbA/VQU9JentHHCtuhWGjdmH15Dy81iVan7D9aZkQYnr5StNILiHflszg+8Yb9/ + H5FUrSZGivsTDUa4p4hegoz98pUhkvc6xLfRZ2czN5PJ30o+0rZq+ke/98jzXTjxqSbePF/uf9wG + wuOa0ekS1UnTlMxYbXTDIORxWYa/74+qlGbEQw/MqU5fqha6/pGYh5yh4apdK6QwpWLnh6aF03lK + M6jP3oPtiJN6Ux5m5z/7ybfMTTjtxgl+zyMVfideb6SDASTvKnLYRxLqBWm4auBNp59eCtnX1nNg + maWx3WPjtHxdRJaq+KPGdhZKTN56baWW2N4zz1jVyR+/O+drBOtPwRyuF++MZv1HZj1m0tn/o2ZT + LoivTmc++5cY0cA32HaRmIV494cMpOJxZYZxe/OfPljtz+H1D58OperUq1Y0HsxaVWk4xHgY4FxZ + a5LwbV5M+xhJSHraJ7azR8bH45PVaBnuDdrOfDHeJndCUfhSmGGAbcoZJxKU9YYS57JYc+k+GJmG + VLEnbpQVHg+SoIR0dbaIJ25uqBduCYa1kp3YmiqqOeXpNYZQ5Zx4UJJicq/6XnuScWDnZhmGTTeu + S0itdUyp3JhFPShD/cefWupq4N9N67molKQTXdCyMCdjq25V6fgayCZyl3xa32BYzXkDFY9B3bJT + k3QIbWwVPx+XZzImfqhDHgomHo5S0fLFafDR+01EvErJEU276DTB7D+ZmTdVSNPPMUUnPwQqaGIV + jqOnWwi84TTzd4lm/Smgnx7DylAmza2CBVx2Nad8etZhc0F+CXL/CahqvvVkADdVoWNQMGOlvlBf + 
OYdcrSq2IIbLJo8txQDQzrMyOqmjEtLIfGJI2vCERfrJ0LD6pNUPb7Hs3Lxi/OU9j6I8stCyTXPc + V+sGvp/28wePR398+QAfTcCLN7kWw2VV2aB+TyXZpshFP/6FwrmKs/6XTP6r35N/AGZadmG23ahI + 6EZFm2bu59ZOPtL/5Lu4TsTGaz4WmmBYJtasf0wkf9phDwdVcdn+sUz4hDwUr7YSfbA5X/Ck+XvC + 1puuZNNHXTI2QWyjk6cX5GaKccLybZwjQz9NeJrzMPUcLSOY8YbY+fdj9tV+V8KKYUq2p+Up5PZk + UQiX2ob5QyXysU96/4cnP7+IeG/edHXDS+EPvrYNHiqYXkZFtt4+5EN7ihXIK8MiplkYrSxfXR+G + q7gm61N8SfhGPFA4TtKSCh+hMMflssuRtHmtGaZl4fHi4FY/viVWtl0V0/K+MtBRazs8rGnvtWTY + Y4DwMzHTX5Ji6GMNYPdqSmJ1Pfnjb8F14/qnr/hYN7cBZv1HF20dFNOqRjaSDvsLuSW7ozdMmZSp + b8+/MEcTPuYvf4doeQjwoOzqYnpJngWOn+6ZIRxeHteC7RZumFFi3tbv4nub3AHN+Qlztb3TDsZX + itHmbgxso+1wyGvtGqveuX8yf7nOeDPsQQVciQrbGKji32NbUnDT6svcm2kmLe7eHcz4y3amqIaD + GrxU6M43guUqI8m4fbYlisJCweN4YOFYlAcM9eroUJ4exoTepsnQNEkviLUpJY+yhpWwxmJN3Ju6 + Ql14UwVIjOLAtv5ZaEdOt5GS7nBHtehpIumGGgvNeS+JZ74b/GG1QMGKjdh6Glf+yw8BvfGRLmc/ + 2q9cWoN0YQaxb3qVTPPz0CXzZYLnPGQoiy+AHJkrgs+9Z4rCLfShkZULVYWHFvbOQDHo80yFC+mQ + vPxczwD22ySmbNTFuKdjB/V6TEkadLY5ZvYjA7jDmZinU8L7U7bZ/vIcdrucbXNizadEkZBmZK0M + G3MULGqhGxJF9vNf7+s37kCNtJwu5jyIo1Pcodk/sY0aXPn3l9ekq8hiIdnF7ZDeTQUkWYiZ03X7 + lmvHDdWW02PCe8vJk3Hsc+EPHox8obacRqMLiy0uCTmvuTeJwrEBub359PHwdS4WtmBov/6CQcRj + 8TX36kL96enF5h0V/Lz/GjCa3JnzrzMabSiOEL7d66z34mIKqiRGw1seqDAVZdHoB5+iZNle6DTz + CSefevvLn+f+k4EmJfN02CePZq4XklDk8RguGnnP/atb8sdfzHiCFcIjr3Py9RmqdrVn6VCJiN9L + oOjZM4f88pgpXyBXLfuDzjaNpbd8rlckk1aiy5kPvkPqdnBxHtPcz6nasezXexQ6ZM2M5nY2R3FZ + pyD5SvTL39rx6ok5XFaDRx7fh1FIeWMb6Gwsr1gLdGp2IX/nYGcdop2hla34UMwIrHtXM2PW4xNM + 2xp839ixTWg9zcmfrim6RPcX2T02n6KLL5n0618Qa+5fdGryzOB8OcVse726RacqQgfvYpESN1j3 + 4RCn1RWSLK2ZJ11p0pFlYMFRLyqc5Lrvydz2S2B7NaDyzcqLcd2rzQ9PmbVcjiFF32sFM96yzeps + hPJmsZ5++TxV3J63060CgDE+uWTjPD7mSN2mgvjy8dgvvxdyoazgbp/5f/k9nb4Udc1PJubye8vF + 5Dv4YFm7iHnLg8p/eh42zfrJgt2QJ5NcXA1IUbZh21EKwqkLyxjNeRPVrGbNpa0h1nDB0oaQw+PI + x/ah+pAK7sTW4T3zJi9Ce7Ukp+/8fQNPVjL1CMWKYqwutSMfjHQ7gN48KzLXF+8Eab1F2+XJZT9/ + SX/8fKZdzwz/Gno8HcZJ0ySjIKYqRgW/weEMefpgzMdF2g5Xzz5qanoWiXmuh5DZk9+huT+J5Tnf + 
43mj7ZErRpTM+iHpnNvtiAJd63E989VEe6EC1EsL5qYSS4bzpsVgq0fCdmfzUPCkJ3skX9YuO+z1 + rOgzpRbgHd8cZm3KsylfmxVG83qTzRR3nG++aQMvUe3+9HO50N0k+IbhG6ufqSyGrLqliOS0IltN + KcJuBesOZn1I37vFo+0sab/Qon7jk91bCYuRtA4GQj4w8xPj3f2ABfRbz+/MP115KRW4JIJATGW7 + S3hvHnTNiQ495pZ2K9jpVJXoW8UYo7XJC77TLwqc3ajFE611T0qP7zO00zqY+ZdzbhxvKcz3p6K2 + MlrR80KAm3eMmOPasjnOeAOHYDLJr98z+7UUAQkYRXx5M9kznGwt/94fdIi4yofzITsi4f4QsJYe + CZpefWige9yJdPlsOBpzMZHAIajBYuQ+0bgLq3xFacWZY4StSdNjH8ExsL0/7zPi6JsjQ5bucx50 + aFkVX7cw62UMs974ujhbwN+/UwH/+a+//vpfvxMGVX1/vOeDAf1j7P/930cF/h3f438LgvTnGALt + 4uzx9z//dQLh729bV9/+f/d1+fh0f//zF/pz1ODvvu7j9/9z+V/zg/7zX/8HAAD//wMAiTuukd4g + AAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 9017b864b936de9b-EWR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 13 Jan 2025 19:16:22 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=s1ISh4iUNLQ2Ya1kD2H7qoQ8Iu5E35cNV7LByFeP6S4-1736795782-1.0.1.1-Yiw0sA3z8E8DIIhiXr_yjzMDz_ePLu1gwWVxoqFOBJDcDhE91jZRgy2Kp6FYdcBcS950vyAqL1y9LPmOMIGJxA; + path=/; expires=Mon, 13-Jan-25 19:46:22 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=beMBNvOtq78fFCvDki.8wgKumD_EQhiZMEDa05JaT7s-1736795782543-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - text-embedding-ada-002 + openai-organization: + - datadog-staging + openai-processing-ms: + - '80' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-678875496d-6fjmx + x-envoy-upstream-service-time: + - '59' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999993' 
+ x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_e589af7ff963668ef0b2722e99fcde70 + status: + code: 200 + message: OK +- request: + body: '{"input": [[3923, 374, 279, 6864, 315, 9822, 30], [3923, 374, 279, 6864, + 315, 9822, 30], [3923, 374, 279, 6864, 315, 9822, 30]], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '192' + content-type: + - application/json + cookie: + - __cf_bm=s1ISh4iUNLQ2Ya1kD2H7qoQ8Iu5E35cNV7LByFeP6S4-1736795782-1.0.1.1-Yiw0sA3z8E8DIIhiXr_yjzMDz_ePLu1gwWVxoqFOBJDcDhE91jZRgy2Kp6FYdcBcS950vyAqL1y9LPmOMIGJxA; + _cfuvid=beMBNvOtq78fFCvDki.8wgKumD_EQhiZMEDa05JaT7s-1736795782543-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.52.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.52.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.13 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA+x6SxOyOpfuvH/Frj21q+QmWXwzBATlkqggao8EFQERuSRAuvq/d+HbdU6d6Rn1 + YE+sUiMGsvLcVv7z3/766+86KR5p//e//vr7nXf93/8+f3a/9be///XXf/zbX3/99dd//l7/n5GP + Knnc7/kn+w3/fZl/7o/x73/9JfyfT/7voPna3WdLdOMQJOPle6PqZDqEBdGzQJNZOwrwD10ziwyy + x8pAK1X7rQQsrMNPOFX7QQBJbK5sXV5yNHjl+QaBkl6oAPnIJ0lzKRBB+RBX6T8JV021Qv7+4uO6 + OctFfaelDtrqqWFB/r69iRy9Aa1Hz2Auv6TFdB/cHKJL75Kt+F2F/VpqKFzGXme4OL6SKbg9IpR5 + 9YPpuyBHvKetgLbjy5/nY3DxfpAsuD7rmG1xtmz7+0GyQVu0hNiULL0uHVcGiqTmRZfnyCm6omYZ + qMF2Q67+bUCjuioAWte6sNhTWDsdj98jNN/dgAETOxlWjzJDS6HmWHbts8ets5CDesi3BGc7xaPB + LY7BDtwt2aziPOQ41g2NGuIDLxf3ddiLse3D+MxTYl03LzTkrZABlNkND+lxaKdvaCuw4G6Dw3v5 + 5i05Ih0W2eLLXHQ7tjQ5qRFaiHGLpZ1XhlQUbrWan5yUbN5i3PJF8ZbgsC1Hgt32ZPIVMlIIq+tI + 
XPljt+PjlFXQ6NeOeEevMUeoXxGoiu1h6i2+RX0/2BK6l0eXmT3U3qAm51xtZPVMG+fShd39jRaI + FMslnob3OaTv5aVZLYWGE1dPZZMHbKKaq2Y6lfFp7cmKv9nC9G2PFKEHRmw3NRg5j/pADntdL6Q9 + ywBsdU+YV/tBKOfL/IYOZeQwi92QN8GkNxraWCpdcKcIB7r2jij9uCbzLuc27KgSbqEs64gESrc1 + h+K2UuAm5jkzl5cIDcl3C+CPk0Y22/KDuvTuKXCkDyDeKio4P18PNqrRY0ucelUWg++WFEZzdOhq + dE8Jr2BStFzvb8TuUc17cj+UIFvijhmX287ku7tmAVpVEdXk6pMM4ivSIXu3gKf+5BXDxpc61J/Y + mZDF/RX+5oMOZexQ6arbSacGXxVUra+wcNx1yXiQXwMY+nkiFl7phXxyBoCdZFTMPIplwpafSwRX + 5ZIwslJfZpcdugi8g2WRSGEbNAmbewTCsXHIphs7r4lIukebRfJiZLe1i+EtuhaykuxDdqZ4Cwvv + lOdLvHk6VCk1HUkLPRGQuL2pZP1Kg5CRIzLUyC52JHijoeW1UEfg+XTNjHO0TcYU7WNN9nGKl4nD + 26le0wm+4eFN7HVH24lenQcwbdMSz1htwwlFbQrN1xvoJBxeZv9aXxYg3FOBuO17XwxkGdi/+6cL + J9mF7FPoWJPJVyI6Douw267PLrja8kZ0uarNEULA0IhdTA7FMTeZba8UaKKR0qEtJ7MjzRNU6VgM + +JWuAm9QTsIDDsFgkv1RKgpunM8GcvZSSNxzQMPK8z0MRLZrth4FUvDP2dvCWQveJIieJue2Papq + oDwuxPjsmnCauOhCuTJiXGde3E6xs+tQppoLvFj0KGxXyHhAJlwKtn0fZLP+kGOs1aWyIbbiNCZ3 + 6JKuSk8hzNhtnZCl9ZoCVdwtcY9Oz6ltu426eEwf9nuew0Gacuhfac/untqHUyllFZwPusPuqTh4 + ox/5urou3ZA414+BJnPz3kJ3+eyJp6xJMtxpZyDmxAqzonxdSAO92YCHJKBLaeG14+ss1cj0/Sfx + d7s34mYtuyjoeoNYZ9Lxid/cB1o+bEac00XhY60vM2iuS53pm9fVm/bao4IoPz7Z2lZ2nGZOMKCP + MPUskCsnFMTGt9RwP8QkXSrn5LMo3sIPn6isbrXwI30tCZjmtISQzb0YwI1UgH5nM88jUStVjyqF + j3a9kg1316b0WOcUyb6fMp9IB5O3X33Q5vkQ3f2s2mFahFR9d+8tcwtSJsOmSVR4Ej5g3tk84f7H + aIAbyZmtn9Ul+WIaYRApaOygPyPeyxuzRGoaiWRzukfJgC+yAInVHyl6c8Xrz2e1Qp0VYKp9KRTV + KYMbBKUd42HxffLRaPoGvrD+4laTS296Ly81mvGeOMnYc/qSTBvsQ+6xHx5RUBUK+soP2No47Qq+ + mm6X1TXcjMR+2DbvuGNKKBb3I7PC5csb5vpGhap92E7dn2a80wENb3EgrvC4J/ywHi4o/WxNEuE4 + S0b9bVR/fk/ErYTYN5RcWFWlwZzvgLy+JKUFrprrxEzonvPM0QyY+Ys4+6MS8gEkgDS59cToT20x + bbOboJKrd2B49bmh6TnUBnCwTLJ3P9eifxQPX+Gtl1BxXR+T4Xn/XlbyNfCw8EqoOULa7UFVjz4t + XwscsmfabOEiKUcSnpHFR843KtBwsum8n5LJrImC0tdixHznm4V4e+nubzwL9wQlw7RIKGqcYcnm + +ueta7kXlB1CFU+fzm3HfrjFan3PR6xyy06mBq8jbd6fxLG6LBwtWcdA09tENs+vmLCAqR2IdKER + c3rWCSNHrgPJsjfzVO+WTOfLV4AUxgdddVaEpolrLuqRvp7XN+B860cAj4kbRI9tEnKJHLdIu+8f + 
DA8vNeynNMNo5k+m20LQDnIlWygXfQGLaqCgMTxMhlZKwolZ5dksph9fVmL6psvtu0rGk9bE0Lvq + l5lvdfKoq7Y6GCtHol/igDcG73L7Z/0Pqot438auC/3inrD1KtyEvNoUBuQk1cnxnn3bTlyssLpn + UsecKd8koxX0oMJtuWHBLb3xaVqqWH2JSoeHoZl4zxb9Q92o4LDHm1xaCsvFEZ7X04bcp6zinRus + KPKPVcD0eTyLuTeBeCsR8dqvz7ljdgoq/MohuDldzelcPVP0GN5r5q+bweQfHFpa32yuWMxYbE5C + ruZwWnxObJOx2Jv090rQfHIviJP3RdLnC0+B92LcsR/f99gb0x9ezfi0Mpn8lgWg6JPj1ah+26kZ + ShuxVX0gZBnJfLCkCyDdVgSMJIRbWcDQIJBogpe2fzC7H35ScjGxjJV1KK3I9gYfcHVCjtgx5d/1 + vffDxmO66j2+TwobVubzSiHFvJ3IQ5fAOLMN5fFmb7IZ/1HSHk7ErtLQZEqk4dUOv1S8Ur7eHz0F + 0vB0/vC3lJzUGM18zOxtbZnDO4sAqQXdUWV3W5p9dZcxdGGyYJtzuDPpIl+USDxdvyQ4xbfk02Z7 + Cmss12zr7TniGmtUOPuNxSxNtJNJVUMMgxpMVBY8O5E/rbKHK5Vt+qJlgYaE4ky9H+0dwVGhe5Ih + WAvQ131OHFtqQ/YwrgtYbP3yh99tn+qnBTD5sZ/50k3YpRkxzPqQXd110o5OmzXaprxY5G6Wa1O6 + PnwFHRS6Y/5Zccyx1uUM1LfiYvj0X5Mz3TegP/Vntr7aa5OWUlaChHc9sQKzRXy4pRgwIirZLFID + TSxaq/Crf3d6VSZ/P+8dwse7REzXVL3hp2e/9l4ieNa/w+4BMSh3+qaFsnxyWq+rCRhZlnj5CgXU + p2gfadiXbsQRjRTxINlUIHz0HbtNL9ucvGyItPzQGCygwt0cDznUqhH1Atu5a9R2Ie8zVZa3OZn1 + KeeBkk+wux9fLNgFuTclyt3+3S9FbheFg7gYsfZ0tycs+cngjSeDAZrO9wPDoA/etKq5rakaq5jv + X8diKnolQzvPzij8+DDqawpDfouxyPt1IrFl1IHS9BFb07Lgg7Q/XGD5aSzKw4tVjJCWezTXB9ke + 2bfoqZuXmlN/GZW/qYV4BaqCjkOzoGOVB2g6X14CguXZIX7END7eqDb86gvX74PsURnnKuxMFhD3 + rBntTx+r+ff5YHb8Gb3JHk8PWEtRxCy3iVv2PmQ2mvcT8/anU8LWnbUFfHxKNHeedtidBE9H9QLl + xG9uQjuRSlHgx3fb+6P9rS8FqXMsCrN+YUJ3kFQYix2x9IfYTuYhx6B4Wcn01+VgznzcgHrbCsQT + sBn+3iM/FCuCTf8586VbqjQcbOZsn13C9Y2kIr4bGuKPqybk53ztalmOc+KX62U77kKa//CFnlFy + QOPle+wgi58+Lkc+hNMBSxMsP7XF9Dc6JP3zo3cQt31KiLRo21lfKloeSiYeomfZDvCZbC3d+R37 + 1e8Y27cHoodyw0z5XfPBuvABnvVLZPikxYiFU3WEfXy4YOF7t0zBdMIGntfzhu2MRYG+VtAvgPm5 + y6zjzg97bf/BQN8Lh3b5py5eV5IZ2ievOkKEg54IZqlIgMXqSdxDGbRTHMkRjPc4+eUFfFi7da26 + 7qXGpS03Lb+trAg8J2NYvFy/fLLwCYMPn4Y5ZVwmo7zxSjTXG/ESweWjlYc2OP5jTyfF6j026t8c + IHxPxKxDJ+l+es2ZAovtlkbmcUs9H6HtW5dZYarz/ilIEdg0X+Gn7R+8Ecv+EQ2edCTBocDh9HKH + Pcx+hhY7vyi4RG5bsKRHP6//OPuFUfnhJdaMQ9eO4vZSAno0GMtlo7bdxl90sNsqTywpjmvS7wcM + 
NNitTkgaROFYN4cBih6UX32EXMzSQSWpmzNnnA5e/+qKCV7n5Yno/p0W31l/qc71sSS+k1lIQh66 + QHoc1uT28+dRcdsC3Bcx8zWv86bWFWIQkiPHAz8vvGkj5A8Yj5rMrEPle2MprmL08zP2hznFAB/V + Br1PZayUWsbHXx7STGzDLPm78Wj6yC9Ij52S2fWx9uiPv2a/O+83MeFaoG+1Of+hwy4wkNCY6gRU + jXdUft0omrpxVyI5K5/M/PYPczgX+kXLrNAjO+Xbep2R6hO8OG1JwFWtGDet6UJwOjGyfR/O5sB2 + KP75byynEU54ZOEHzHkKc/vFm/fPx76BT2rURBfuDz7VfGWjAwGLJfZO8EZ8ZTaa9yOduskLpVkv + gDBKMtsF/Tscp6UBcD8VBf3lKe0tYVt0LY0rsw+voeVmsarU/QfrzEiDk1fKVhpB8Y58NucH3rDf + v49IqlYTI8X9iQYj3FNEz0HGfvnKEMl7HW7X0WexmZvJ5G8lH2lbNf2j33vk+S6c+FQTb54v9z9u + A+Fxzeh0juqkaUpmrDa6YRDyOC/D3/NHVUoz4qEH5lSnL1ULXf9IzEPO0HDRLhVSmFKx+KFp4RRP + aQZ17D3YjjipN+VhFv/ZT75lbsJpN07w+z9S4Xfi9UY6GEDyriKHfSShXpCGiwbedPrppZB9bT0H + llka2z02TsvXRWSpij9qbGehxOSt11Zqie0984xVnfzxu3O+RrD+FMzhcvZiNOs/Musxk87+HzWb + ckF8dYr57F9uiAa+wbaLxCzEuz9kIBWPCzOM65v/9MFqH4eXP3w6lKpTr1rReDBrVaXhcMPDAHFl + rUnCt3kx7W9IQtLTPrGdPTI+Hp+sRstwb9B25ovxOrkTisKXwgwDbFPOOJGgrDeUOOfFmkv3wcg0 + pIo9caOs8HiQBCWkq9ginri5ol64JhjWSnZia6qo5pSnlxuEKufEg5IUk3vR99qTjAOLm2UYNt24 + LiG11jdK5cYs6kEZ6j/+1FJXA/9uWs9FpSSd6IKWhTkZW3WrSsfXQDaRu+TT+grDas4bqHgM6pad + mqRDaGOr+Pk4P5Mx8UMd8lAw8XCUipYvToOP3m8i4lVKjmjaRacJZv/JzLypQpp+jik6+SFQQROr + cBw93ULgDaeZv0s0608B/fQYVoYyaa4VLOC8qznl07MOmzPyS5D7T0BV860nA7ipCh2Dghkr9YX6 + yjnkalWxBTFcNnlsKQaAdp6V0UkdlZBG5hND0oYnLNJPhobVJ61+eItl5+oV4y/veRTlkYWWbZrj + vlo38P20nz94PPrjywf4aAJevMmlGM6rygb1eyrJNkUu+vEvFM5FnPW/ZPJf/Z78AzDTsguz7UZF + Qlcq2jRzP9d28pH+J9/FdSI2XvOx0ATDMrFm/WMi+dMOezioisv2j2XCJ+Sh22or0Qeb8wVPmp8n + bL3pQjZ91CVjE9xsdPL0glxN8ZawfHvLkaGfJjzNeZgaR8sIZrwhdv79mH2135WwYpiS7Wl5Crk9 + WRTCpbZh/lCJfOyT3v/hyc8vIt6bV13d8FL4g69tg4cKppdRka23D/nQnm4K5JVhEdMsjFaWL64P + w0Vck/Xpdk74RjxQOE7SkgofoTDH5bLLkbR5rRmmZeHx4uBWP74lVrZdFdPyvjLQUWs7PKxp77Vk + 2GOA8DMx01+SYuhvGsDu1ZTE6nryx9+C697qn77iY91cB5j1H120dVBMqxrZSDrsz+Sa7I7eMGVS + pr49/8wcTfiYv/wdouUhwIOyq4vpJXkWOH66Z4ZweHlcC7ZbuGJGiXldv4vvdXIHNOcnzNX2TjsY + X+mGNndjYBtth0Nea5eb6sX9k/nLdcabYQ8q4EpU2MZAFf8e25KCm1Zf5l5NM2lx9+5gxl+2M0U1 + 
HNTgpUIXXwmWq4wk4/bZligKCwWP44GFY1EeMNSro0N5ehgTep0mQ9MkvSDWppQ8yhpWwhqLNXGv + 6gp14VUVIDGKA9v6sdCOnG4jJd3hjmrR00TSFTUWmvNecpv5bvCH1QIFKzZi62lc+C8/BPTGR7qc + /Wi/cmkN0pkZxL7qVTLN/4fOmS8TPOchQ1l8AeTIXBEc954pCtfQh0ZWzlQVHlrYOwPFoM8zFc6k + Q/Lyc4kB7LdJTNmoi3FPxw7q9ZiSNOhsc8zsRwZwh5iYp1PC+1O22f7yHHY9x7Y5seZTokhIM7JW + ho05Cha10BWJIvv5r/fle+tAjbScLuY8iKPTrUOzf2IbNbjw7y+vSVeRxUKyu7VDejcVkGThxpyu + 27dcO26otpweE95bTp6MY58Lf/Bg5Au15TQaXVhscUlIvObeJArHBuT26tPHw9e5WNiCof36CwYR + j8XX3KsL9aenF5t3VPB4/zVgNLkz518xGm0ojhC+3cus927FFFTJDQ1veaDCVJRFox98ipJle6bT + zCecfOrtL3+e+08GmpTM02GfPJq5XkhCkcdvcNbIe+5fXZM//mLGE6wQHnmdk69jqNrVnqVDJSJ+ + L4GiZ88c8stjpnyBXLXsDzrbNJbe8rlekUxaiS5nPvgOqdvB2XlMcz+naseyX+9R6JA1M5prbI7i + sk5B8pXol7+148UTczivBo88vg+jkPLGNlBsLC9YC3RqdiF/52BnHaKdoZWt+FDMCKx7VzNj1uMT + TNsafN/YsU1oPc3Jny4pOkf3F9k9Np+iu50z6de/INbcv+jU5JlBfD7d2PZycYtOVYQO3sUiJW6w + 7sPhllYXSLK0Zp50oUlHloEFR72ocJLrvidz2y+B7dWAylcrL8Z1rzY/PGXWcjmGFH0vFcx4yzar + 2AjlzWI9/fJ5qrg9b6drBQDj7eSSjfP4mCN1mwpu54/Hfvm9kAtlBXc75v/j93T6UtQ1P5mYy+8t + F5Pv4INl7SLmLQ8q/+l52DTrJwt2Q55McnExIEXZhm1HKQinLixvaM6bqGY1ay5tDbGGM5Y2hBwe + Rz62D9WHVHAntg7vmTd5EdqrJTl95+cbeLKSqUcoVhRjdakd+WCk2wH05lmRub54J0jrLdouTy77 + +Uv64+eYdj0z/Evo8XQYJ02TjIKYqhgV/AqHGPL0wZiPi7QdLp591NQ0FokZ10PI7Mnv0NyfxPKc + 7/G80fbIFSNKZv2QdM71ekSBrvW4nvlqor1QAeqlBXNTiSVDvGkx2OqRsF1sHgqe9GSP5PPaZYe9 + nhV9ptQCvG9Xh1mbMjblS7PCaF5vspluHeebb9rAS1S7P/1cLnRXCb5h+MbqZyqLIauuKSI5rchW + U4qwW8G6g1kf0vdu8Wg7S9ovtKjf+GT3VsJiJK2DgZAPzPzEeHc/YAH91vM7809XnksFzokgEFPZ + 7hLemwddc6JDj7mlXQt2OlUl+lY3jNHa5AXf6WcFYjdq8URr3ZPS4zuGdloHM/9yzo3jNYX5+lTU + VkYrel4IcPWOEXNcWzbHGW/gEEwm+fV7Zr+WIiABo4gvryZ7hpOt5d/7gw4RV/kQH7IjEu4PAWvp + kaDp1YcGut86kS6fDUdjLiYSOAQ1WIzcJxp3YZWvKK04c4ywNWl67CM4Brb3535GHH1zZMjSfc6D + Di2rbpctzHoZw6w3vi7OFvD371TAf/37/8eJAvGfEwX/nCj450TBPycK/jlR8L/mRMF/AwAA///s + 3TEKwkAABMA+rwjXB0JK/yJykMPCxAvmBBv/LqdBfEKQabfZDwy7RAFRQBQQBUQBUUAUEAVEAVFA + FBAFRAFRQBQQBUQBUUAUEAVEAVFAFBAFRAFRsB9RMBAFRAFRQBQQBUQBUUAUEAVEAVFAFBAFRAFR + 
QBQQBUQBUUAUEAVEAVFAFBAFRAFRQBQQBUQBUfDnoqBp2+P7BWHOY5oqDCjpUbovFejiGLu+Hz5X + Cfc1nlM4bAIhLLc8L+VU8iVd10oNtvWCUHKJ02/e1Kpn8wIAAP//AwArUdmvhGEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 9017b86929d9de9b-EWR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 13 Jan 2025 19:16:22 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - text-embedding-ada-002 + openai-organization: + - datadog-staging + openai-processing-ms: + - '56' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-867d7fff98-jp2xz + x-envoy-upstream-service-time: + - '34' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999979' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_0664830d8f95882874d90777108542e2 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.test_ragas_context_precision_multiple_context.yaml b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.test_ragas_context_precision_multiple_context.yaml new file mode 100644 index 00000000000..a78852c8d64 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.test_ragas_context_precision_multiple_context.yaml @@ -0,0 +1,312 @@ +interactions: +- request: + body: '{"messages": [{"content": "Given question, answer and context verify if + the context was useful in arriving at the given answer. 
Give verdict as \"1\" + if useful and \"0\" if not with json output.\n\nThe output should be a well-formatted + JSON instance that conforms to the JSON schema below.\n\nAs an example, for + the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": + \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, + \"required\": [\"foo\"]}\nthe object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted + instance of the schema. The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} + is not well-formatted.\n\nHere is the output JSON schema:\n```\n{\"description\": + \"Answer for the verification task wether the context was useful.\", \"type\": + \"object\", \"properties\": {\"reason\": {\"title\": \"Reason\", \"description\": + \"Reason for verification\", \"type\": \"string\"}, \"verdict\": {\"title\": + \"Verdict\", \"description\": \"Binary (0/1) verdict of verification\", \"type\": + \"integer\"}}, \"required\": [\"reason\", \"verdict\"]}\n```\n\nDo not return + any preamble or explanations, return only a pure JSON string surrounded by triple + backticks (```).\n\nExamples:\n\nquestion: \"What can you tell me about albert + Albert Einstein?\"\ncontext: \"Albert Einstein (14 March 1879 \u2013 18 April + 1955) was a German-born theoretical physicist, widely held to be one of the + greatest and most influential scientists of all time. Best known for developing + the theory of relativity, he also made important contributions to quantum mechanics, + and was thus a central figure in the revolutionary reshaping of the scientific + understanding of nature that modern physics accomplished in the first decades + of the twentieth century. His mass\u2013energy equivalence formula E = mc2, + which arises from relativity theory, has been called \\\"the world''s most famous + equation\\\". 
He received the 1921 Nobel Prize in Physics \\\"for his services + to theoretical physics, and especially for his discovery of the law of the photoelectric + effect\\\", a pivotal step in the development of quantum theory. His work is + also known for its influence on the philosophy of science. In a 1999 poll of + 130 leading physicists worldwide by the British journal Physics World, Einstein + was ranked the greatest physicist of all time. His intellectual achievements + and originality have made Einstein synonymous with genius.\"\nanswer: \"Albert + Einstein born in 14 March 1879 was German-born theoretical physicist, widely + held to be one of the greatest and most influential scientists of all time. + He received the 1921 Nobel Prize in Physics for his services to theoretical + physics. He published 4 papers in 1905. Einstein moved to Switzerland in 1895\"\nverification: + ```{\"reason\": \"The provided context was indeed useful in arriving at the + given answer. The context includes key information about Albert Einstein''s + life and contributions, which are reflected in the answer.\", \"verdict\": 1}```\n\nquestion: + \"who won 2020 icc world cup?\"\ncontext: \"The 2022 ICC Men''s T20 World Cup, + held from October 16 to November 13, 2022, in Australia, was the eighth edition + of the tournament. Originally scheduled for 2020, it was postponed due to the + COVID-19 pandemic. 
England emerged victorious, defeating Pakistan by five wickets + in the final to clinch their second ICC Men''s T20 World Cup title.\"\nanswer: + \"England\"\nverification: ```{\"reason\": \"the context was useful in clarifying + the situation regarding the 2020 ICC World Cup and indicating that England was + the winner of the tournament that was intended to be held in 2020 but actually + took place in 2022.\", \"verdict\": 1}```\n\nquestion: \"What is the tallest + mountain in the world?\"\ncontext: \"The Andes is the longest continental mountain + range in the world, located in South America. It stretches across seven countries + and features many of the highest peaks in the Western Hemisphere. The range + is known for its diverse ecosystems, including the high-altitude Andean Plateau + and the Amazon rainforest.\"\nanswer: \"Mount Everest.\"\nverification: ```{\"reason\": + \"the provided context discusses the Andes mountain range, which, while impressive, + does not include Mount Everest or directly relate to the question about the + world''s tallest mountain.\", \"verdict\": 0}```\n\nYour actual task:\n\nquestion: + \"Is france part of europe?\"\ncontext: \"irrelevant\"\nanswer: \"France is + indeed part of europe\"\nverification: \n", "role": "user"}], "model": "gpt-4o-mini", + "n": 1, "stream": false, "temperature": 1e-08}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4612' + content-type: + - application/json + cookie: + - __cf_bm=2N0lRp5YNIBKY6AUc.tpQsJVlWEga7Ys924AChkX4qk-1733967111-1.0.1.1-IJEARyUXuMN2pbqt5jU4yaj77.QHaVM0uVSztZt49GpbAV1HXoPr6.uIdz2viIUlRExuu5tYN_.v5wUpYjyBSQ; + _cfuvid=TvHcCPz7N_.kfviRP.Y0iD_HMeA.0uxvji5nzbbTR5w-1733967111302-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.52.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + 
x-stainless-package-version: + - 1.52.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.13 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jFNNj5swFLzzK5586QVWZJNtPm49dC+tVKmteikVGPsFnBrbsR9Roij/ + vTLJBlbdSr0g9ObNaGYenBMApiTbABMtJ9E5nX2Q3z5//dTu9WH/vJD57stx79z6JFD+oBVLI8PW + OxT0wnoQtnMaSVlzhYVHThhVZ8v5fP1+OZvNB6CzEnWkNY6yhc06ZVT2mD8usnyZzW7iorVKYGAb + +JkAAJyHZ/RpJB7ZBvL0ZdJhCLxBtrkvATBvdZwwHoIKxA2xdASFNYRmsF5V1S5YU5hzwTzy+Mo2 + ULDvLcKwdiRw3h6URAkqgPIeNR64IeBGgrQYwFgadr2qe0Lg5gTKbK3veGwDPDbcS2UaePbcCHwX + oEHbeO5aJbiGQJz6AMrAx95bhw8FS6FgB/RSCYp28kthqqqaRvC47QOPNZpe69v8cu9E28Z5W4cb + fp9vlVGhLa9JY/5A1rEBvSQAv4bu+1d1Mudt56gk+xtNFFw/PV312HjyEZ0vbiBZ4nrCWq3TN/RK + icSVDpPrMcFFi3KkjqfmvVR2AiST1H+7eUv7mlyZ5n/kR0AIdISydB7jUV4lHtc8xj/iX2v3lgfD + LJwCYVdulWnQO6+u3+PWlXXN52KFy7xmySX5AwAA//8DAEriHaudAwAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8f09f515b8157281-EWR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 12 Dec 2024 01:31:53 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + openai-organization: + - datadog-staging + openai-processing-ms: + - '976' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149998903' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_2c2d33a1a025655db6ab9a62096b644a + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"content": "Given question, answer and context verify if + the context was useful 
in arriving at the given answer. Give verdict as \"1\" + if useful and \"0\" if not with json output.\n\nThe output should be a well-formatted + JSON instance that conforms to the JSON schema below.\n\nAs an example, for + the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": + \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, + \"required\": [\"foo\"]}\nthe object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted + instance of the schema. The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} + is not well-formatted.\n\nHere is the output JSON schema:\n```\n{\"description\": + \"Answer for the verification task wether the context was useful.\", \"type\": + \"object\", \"properties\": {\"reason\": {\"title\": \"Reason\", \"description\": + \"Reason for verification\", \"type\": \"string\"}, \"verdict\": {\"title\": + \"Verdict\", \"description\": \"Binary (0/1) verdict of verification\", \"type\": + \"integer\"}}, \"required\": [\"reason\", \"verdict\"]}\n```\n\nDo not return + any preamble or explanations, return only a pure JSON string surrounded by triple + backticks (```).\n\nExamples:\n\nquestion: \"What can you tell me about albert + Albert Einstein?\"\ncontext: \"Albert Einstein (14 March 1879 \u2013 18 April + 1955) was a German-born theoretical physicist, widely held to be one of the + greatest and most influential scientists of all time. Best known for developing + the theory of relativity, he also made important contributions to quantum mechanics, + and was thus a central figure in the revolutionary reshaping of the scientific + understanding of nature that modern physics accomplished in the first decades + of the twentieth century. His mass\u2013energy equivalence formula E = mc2, + which arises from relativity theory, has been called \\\"the world''s most famous + equation\\\". 
He received the 1921 Nobel Prize in Physics \\\"for his services + to theoretical physics, and especially for his discovery of the law of the photoelectric + effect\\\", a pivotal step in the development of quantum theory. His work is + also known for its influence on the philosophy of science. In a 1999 poll of + 130 leading physicists worldwide by the British journal Physics World, Einstein + was ranked the greatest physicist of all time. His intellectual achievements + and originality have made Einstein synonymous with genius.\"\nanswer: \"Albert + Einstein born in 14 March 1879 was German-born theoretical physicist, widely + held to be one of the greatest and most influential scientists of all time. + He received the 1921 Nobel Prize in Physics for his services to theoretical + physics. He published 4 papers in 1905. Einstein moved to Switzerland in 1895\"\nverification: + ```{\"reason\": \"The provided context was indeed useful in arriving at the + given answer. The context includes key information about Albert Einstein''s + life and contributions, which are reflected in the answer.\", \"verdict\": 1}```\n\nquestion: + \"who won 2020 icc world cup?\"\ncontext: \"The 2022 ICC Men''s T20 World Cup, + held from October 16 to November 13, 2022, in Australia, was the eighth edition + of the tournament. Originally scheduled for 2020, it was postponed due to the + COVID-19 pandemic. 
England emerged victorious, defeating Pakistan by five wickets + in the final to clinch their second ICC Men''s T20 World Cup title.\"\nanswer: + \"England\"\nverification: ```{\"reason\": \"the context was useful in clarifying + the situation regarding the 2020 ICC World Cup and indicating that England was + the winner of the tournament that was intended to be held in 2020 but actually + took place in 2022.\", \"verdict\": 1}```\n\nquestion: \"What is the tallest + mountain in the world?\"\ncontext: \"The Andes is the longest continental mountain + range in the world, located in South America. It stretches across seven countries + and features many of the highest peaks in the Western Hemisphere. The range + is known for its diverse ecosystems, including the high-altitude Andean Plateau + and the Amazon rainforest.\"\nanswer: \"Mount Everest.\"\nverification: ```{\"reason\": + \"the provided context discusses the Andes mountain range, which, while impressive, + does not include Mount Everest or directly relate to the question about the + world''s tallest mountain.\", \"verdict\": 0}```\n\nYour actual task:\n\nquestion: + \"Is france part of europe?\"\ncontext: \"France is part of europe\"\nanswer: + \"France is indeed part of europe\"\nverification: \n", "role": "user"}], "model": + "gpt-4o-mini", "n": 1, "stream": false, "temperature": 1e-08}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4626' + content-type: + - application/json + cookie: + - __cf_bm=2N0lRp5YNIBKY6AUc.tpQsJVlWEga7Ys924AChkX4qk-1733967111-1.0.1.1-IJEARyUXuMN2pbqt5jU4yaj77.QHaVM0uVSztZt49GpbAV1HXoPr6.uIdz2viIUlRExuu5tYN_.v5wUpYjyBSQ; + _cfuvid=TvHcCPz7N_.kfviRP.Y0iD_HMeA.0uxvji5nzbbTR5w-1733967111302-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.52.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + 
x-stainless-package-version: + - 1.52.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.13 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8S4EVu7Wtm4H0ccilSHtpVUg0uZLoSCRLrmKnhv+9 + oOxYCpoCvRDEzs5wZpfHCIApyTJgouEkOtsmG/lw//Bp83tffRH2+7duU9/dfT7gPS53j79YHBhm + u0NBL6wbYTrbIimjz7BwyAmDarqcz9fvl2m6GIDOSGwDrbaULEzSKa2S29ntIpktk3R1YTdGCfQs + gx8RAMBxOINPLfHAMpjFL5UOvec1suzaBMCcaUOFce+VJ66JxSMojCbUg/WyLHfe6Fwfc+aQhyvL + IGdfG4Sh7UAglUNB7TN44oQeqOEEHx3XAkF5sNwRmAo+9M5YjGHfKNFMSL21xlGgIXDt9+jAOvOk + JMqbnMWQsyd0UgkKL6enXJdlOXXrsOo9DxPTfdte6qdr/NbU1pmtv+DXeqW08k1xDhWiejKWDegp + Avg5jLl/NTlmneksFWQeUQfB9bvVWY+N2x3R+fwCkiHeTljrNH5Dr5BIXLV+sigmuGhQjtRxq7yX + ykyAaJL6bzdvaZ+TK13/j/wICIGWUBbWYVjKq8Rjm8Pw+f/Vdp3yYJj5Z0/YFZXSNTrr1PnrVbbY + bvlcrHA527LoFP0BAAD//wMAcOvPiIgDAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8f09f51cdbaf7281-EWR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 12 Dec 2024 01:31:54 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + openai-organization: + - datadog-staging + openai-processing-ms: + - '779' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149998900' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_4b6ae2d675e25726729e2577e662f691 + status: + code: 200 + message: OK +version: 1 \ No newline at end of file diff --git 
a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.test_ragas_context_precision_single_context.yaml b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.test_ragas_context_precision_single_context.yaml new file mode 100644 index 00000000000..ddbd8f5c3d9 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_ragas_evaluators.test_ragas_context_precision_single_context.yaml @@ -0,0 +1,160 @@ +interactions: +- request: + body: '{"messages": [{"content": "Given question, answer and context verify if + the context was useful in arriving at the given answer. Give verdict as \"1\" + if useful and \"0\" if not with json output.\n\nThe output should be a well-formatted + JSON instance that conforms to the JSON schema below.\n\nAs an example, for + the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": + \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, + \"required\": [\"foo\"]}\nthe object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted + instance of the schema. 
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} + is not well-formatted.\n\nHere is the output JSON schema:\n```\n{\"description\": + \"Answer for the verification task wether the context was useful.\", \"type\": + \"object\", \"properties\": {\"reason\": {\"title\": \"Reason\", \"description\": + \"Reason for verification\", \"type\": \"string\"}, \"verdict\": {\"title\": + \"Verdict\", \"description\": \"Binary (0/1) verdict of verification\", \"type\": + \"integer\"}}, \"required\": [\"reason\", \"verdict\"]}\n```\n\nDo not return + any preamble or explanations, return only a pure JSON string surrounded by triple + backticks (```).\n\nExamples:\n\nquestion: \"What can you tell me about albert + Albert Einstein?\"\ncontext: \"Albert Einstein (14 March 1879 \u2013 18 April + 1955) was a German-born theoretical physicist, widely held to be one of the + greatest and most influential scientists of all time. Best known for developing + the theory of relativity, he also made important contributions to quantum mechanics, + and was thus a central figure in the revolutionary reshaping of the scientific + understanding of nature that modern physics accomplished in the first decades + of the twentieth century. His mass\u2013energy equivalence formula E = mc2, + which arises from relativity theory, has been called \\\"the world''s most famous + equation\\\". He received the 1921 Nobel Prize in Physics \\\"for his services + to theoretical physics, and especially for his discovery of the law of the photoelectric + effect\\\", a pivotal step in the development of quantum theory. His work is + also known for its influence on the philosophy of science. In a 1999 poll of + 130 leading physicists worldwide by the British journal Physics World, Einstein + was ranked the greatest physicist of all time. 
His intellectual achievements + and originality have made Einstein synonymous with genius.\"\nanswer: \"Albert + Einstein born in 14 March 1879 was German-born theoretical physicist, widely + held to be one of the greatest and most influential scientists of all time. + He received the 1921 Nobel Prize in Physics for his services to theoretical + physics. He published 4 papers in 1905. Einstein moved to Switzerland in 1895\"\nverification: + ```{\"reason\": \"The provided context was indeed useful in arriving at the + given answer. The context includes key information about Albert Einstein''s + life and contributions, which are reflected in the answer.\", \"verdict\": 1}```\n\nquestion: + \"who won 2020 icc world cup?\"\ncontext: \"The 2022 ICC Men''s T20 World Cup, + held from October 16 to November 13, 2022, in Australia, was the eighth edition + of the tournament. Originally scheduled for 2020, it was postponed due to the + COVID-19 pandemic. England emerged victorious, defeating Pakistan by five wickets + in the final to clinch their second ICC Men''s T20 World Cup title.\"\nanswer: + \"England\"\nverification: ```{\"reason\": \"the context was useful in clarifying + the situation regarding the 2020 ICC World Cup and indicating that England was + the winner of the tournament that was intended to be held in 2020 but actually + took place in 2022.\", \"verdict\": 1}```\n\nquestion: \"What is the tallest + mountain in the world?\"\ncontext: \"The Andes is the longest continental mountain + range in the world, located in South America. It stretches across seven countries + and features many of the highest peaks in the Western Hemisphere. 
The range + is known for its diverse ecosystems, including the high-altitude Andean Plateau + and the Amazon rainforest.\"\nanswer: \"Mount Everest.\"\nverification: ```{\"reason\": + \"the provided context discusses the Andes mountain range, which, while impressive, + does not include Mount Everest or directly relate to the question about the + world''s tallest mountain.\", \"verdict\": 0}```\n\nYour actual task:\n\nquestion: + \"What is the capital of France?\"\ncontext: \"The capital of France is Paris.\"\nanswer: + \"The capital of France is Paris\"\nverification: \n", "role": "user"}], "model": + "gpt-4o-mini", "n": 1, "stream": false, "temperature": 1e-08}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '4637' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.52.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.52.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.13 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jFJBbtswELzrFQuercCyAzvxrZf20qIpEqOHqpAociXRpUiCXMcODP+9 + IO1YNppDL4K0szOYndEhA2BKshUw0XMSg9P5J/n89fuP55/Ft123XTfFk5qpL6Zbv6z3irNJZNhm + g4LeWXfCDk4jKWtOsPDICaNqsZzPHxfLopgmYLASdaR1jvJ7mw/KqHw2nd3n02VePJzZvVUCA1vB + rwwA4JCe0aeRuGcrSFppMmAIvEO2uiwBMG91nDAeggrEDbHJCAprCE2yXtf1JlhTmkPJPPL4ylZQ + spcewXn7qiRKSPt7Aqk8CtJvEIgTBqCeE1CPILhTxDXYFj57bgSCCvDEvQoT2PVK9PEb9zyRPbYa + BaEEZRKbm7BDf1eyCZTsFb1UgqKL4liauq6vnXtst4HH9MxW6/P8eIlC285524Qzfpm3yqjQV6cD + 49mBrGMJPWYAv1Pk25sUmfN2cFSR/YMmCj4uzpGzsekRnS/OIFni+or1+A7c6FUSiSsdrkpjgose + 5UgdG+ZbqewVkF1d/a+bj7RPlyvT/Y/8CAiBjlBWzmMs5ebicc3jJvX58dol5WSYhbdAOFStMh16 + 59XpN2xd1TR8Lh5wOW1Ydsz+AgAA//8DAJLjX/OUAwAA + headers: + 
CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8f09f507cc477281-EWR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 12 Dec 2024 01:31:51 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=2N0lRp5YNIBKY6AUc.tpQsJVlWEga7Ys924AChkX4qk-1733967111-1.0.1.1-IJEARyUXuMN2pbqt5jU4yaj77.QHaVM0uVSztZt49GpbAV1HXoPr6.uIdz2viIUlRExuu5tYN_.v5wUpYjyBSQ; + path=/; expires=Thu, 12-Dec-24 02:01:51 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=TvHcCPz7N_.kfviRP.Y0iD_HMeA.0uxvji5nzbbTR5w-1733967111302-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + openai-organization: + - datadog-staging + openai-processing-ms: + - '564' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149998898' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_db048be5fbcb3bc4136d9c89ace1249a + status: + code: 200 + message: OK +version: 1 \ No newline at end of file diff --git a/tests/llmobs/suitespec.yml b/tests/llmobs/suitespec.yml index 8e99d1ad6cb..29ee63dc0dd 100644 --- a/tests/llmobs/suitespec.yml +++ b/tests/llmobs/suitespec.yml @@ -17,6 +17,9 @@ components: openai: - ddtrace/contrib/openai/* - ddtrace/contrib/internal/openai/* + langgraph: + - ddtrace/contrib/langgraph/* + - ddtrace/contrib/internal/langgraph/* suites: anthropic: parallelism: 3 @@ -92,3 +95,16 @@ suites: - tests/snapshots/tests.contrib.openai.* runner: riot snapshot: true + langgraph: + parallelism: 3 + paths: + - '@bootstrap' + - '@core' + - '@tracing' + - '@contrib' + - '@langgraph' 
+ - '@requests' + - '@llmobs' + - tests/contrib/langgraph/* + runner: riot + snapshot: true diff --git a/tests/llmobs/test_llmobs_evaluator_runner.py b/tests/llmobs/test_llmobs_evaluator_runner.py index 96104bb19be..eaf381367d0 100644 --- a/tests/llmobs/test_llmobs_evaluator_runner.py +++ b/tests/llmobs/test_llmobs_evaluator_runner.py @@ -59,6 +59,29 @@ def test_evaluator_runner_timed_enqueues_eval_metric(llmobs, mock_llmobs_eval_me ) +@pytest.mark.vcr_logs +def test_evaluator_runner_multiple_evaluators(llmobs, mock_llmobs_eval_metric_writer): + evaluator_runner = EvaluatorRunner(interval=0.01, llmobs_service=llmobs) + evaluator_runner.evaluators += [ + DummyEvaluator(llmobs_service=llmobs, label="1"), + DummyEvaluator(llmobs_service=llmobs, label="2"), + DummyEvaluator(llmobs_service=llmobs, label="3"), + ] + evaluator_runner.start() + + evaluator_runner.enqueue({"span_id": "123", "trace_id": "1234"}, DUMMY_SPAN) + + time.sleep(0.1) + + calls = [call[0][0] for call in mock_llmobs_eval_metric_writer.enqueue.call_args_list] + sorted_calls = sorted(calls, key=lambda x: x["label"]) + assert sorted_calls == [ + _dummy_evaluator_eval_metric_event(span_id="123", trace_id="1234", label="1"), + _dummy_evaluator_eval_metric_event(span_id="123", trace_id="1234", label="2"), + _dummy_evaluator_eval_metric_event(span_id="123", trace_id="1234", label="3"), + ] + + def test_evaluator_runner_on_exit(mock_writer_logs, run_python_code_in_subprocess): env = os.environ.copy() pypath = [os.path.dirname(os.path.dirname(os.path.dirname(__file__)))] diff --git a/tests/llmobs/test_llmobs_ragas_evaluators.py b/tests/llmobs/test_llmobs_ragas_evaluators.py index 0e901fe93b4..cc02709baff 100644 --- a/tests/llmobs/test_llmobs_ragas_evaluators.py +++ b/tests/llmobs/test_llmobs_ragas_evaluators.py @@ -3,16 +3,42 @@ import mock import pytest +from ddtrace.llmobs._evaluators.ragas.answer_relevancy import RagasAnswerRelevancyEvaluator +from ddtrace.llmobs._evaluators.ragas.context_precision import 
RagasContextPrecisionEvaluator from ddtrace.llmobs._evaluators.ragas.faithfulness import RagasFaithfulnessEvaluator from ddtrace.span import Span from tests.llmobs._utils import _expected_llmobs_llm_span_event +from tests.llmobs._utils import _expected_ragas_answer_relevancy_spans +from tests.llmobs._utils import _expected_ragas_context_precision_spans from tests.llmobs._utils import _expected_ragas_faithfulness_spans from tests.llmobs._utils import _llm_span_with_expected_ragas_inputs_in_messages from tests.llmobs._utils import _llm_span_with_expected_ragas_inputs_in_prompt +from tests.llmobs._utils import default_ragas_inputs +from tests.llmobs._utils import logs_vcr pytest.importorskip("ragas", reason="Tests require ragas to be available on user env") +ragas_answer_relevancy_cassette = logs_vcr.use_cassette( + "tests.llmobs.test_llmobs_ragas_evaluators.answer_relevancy_inference.yaml" +) + +ragas_context_precision_single_context_cassette = logs_vcr.use_cassette( + "tests.llmobs.test_llmobs_ragas_evaluators.test_ragas_context_precision_single_context.yaml" +) +ragas_context_precision_multiple_context_cassette = logs_vcr.use_cassette( + "tests.llmobs.test_llmobs_ragas_evaluators.test_ragas_context_precision_multiple_context.yaml" +) + + +@pytest.fixture +def reset_ragas_context_precision_llm(): + import ragas + + previous_llm = ragas.metrics.context_precision.llm + yield + ragas.metrics.context_precision.llm = previous_llm + def _llm_span_without_io(): return _expected_llmobs_llm_span_event(Span("dummy")) @@ -238,3 +264,359 @@ def test_llmobs_with_faithfulness_emits_traces_and_evals_on_exit(mock_writer_log assert status == 0, err assert out == b"" assert err == b"" + + +def test_ragas_context_precision_init(ragas, llmobs): + rcp_evaluator = RagasContextPrecisionEvaluator(llmobs) + assert rcp_evaluator.llmobs_service == llmobs + assert rcp_evaluator.ragas_context_precision_instance == ragas.metrics.context_precision + assert 
rcp_evaluator.ragas_context_precision_instance.llm == ragas.llms.llm_factory() + + +def test_ragas_context_precision_throws_if_dependencies_not_present(llmobs, mock_ragas_dependencies_not_present, ragas): + with pytest.raises( + NotImplementedError, match="Failed to load dependencies for `ragas_context_precision` evaluator" + ): + RagasContextPrecisionEvaluator(llmobs) + + +def test_ragas_context_precision_returns_none_if_inputs_extraction_fails(ragas, mock_llmobs_submit_evaluation, llmobs): + rcp_evaluator = RagasContextPrecisionEvaluator(llmobs) + failure_msg, _ = rcp_evaluator.evaluate(_llm_span_without_io()) + assert failure_msg == "fail_extract_context_precision_inputs" + assert rcp_evaluator.llmobs_service.submit_evaluation.call_count == 0 + + +def test_ragas_context_precision_has_modified_context_precision_instance( + ragas, mock_llmobs_submit_evaluation, reset_ragas_context_precision_llm, llmobs +): + """Context precision instance used in ragas evaluator should match the global ragas context precision instance""" + from ragas.llms import BaseRagasLLM + from ragas.metrics import context_precision + + class FirstDummyLLM(BaseRagasLLM): + def __init__(self): + super().__init__() + + def generate_text(self) -> str: + return "dummy llm" + + def agenerate_text(self) -> str: + return "dummy llm" + + context_precision.llm = FirstDummyLLM() + + rcp_evaluator = RagasContextPrecisionEvaluator(llmobs) + + assert rcp_evaluator.ragas_context_precision_instance.llm.generate_text() == "dummy llm" + + class SecondDummyLLM(BaseRagasLLM): + def __init__(self): + super().__init__() + + def generate_text(self) -> str: + return "second dummy llm" + + def agenerate_text(self) -> str: + return "second dummy llm" + + context_precision.llm = SecondDummyLLM() + + rcp_evaluator = RagasContextPrecisionEvaluator(llmobs) + + assert rcp_evaluator.ragas_context_precision_instance.llm.generate_text() == "second dummy llm" + + +def test_ragas_context_precision_submits_evaluation(ragas, 
llmobs, mock_llmobs_submit_evaluation): + """Test that evaluation is submitted for a valid llm span where question is in the prompt variables""" + rcp_evaluator = RagasContextPrecisionEvaluator(llmobs) + llm_span = _llm_span_with_expected_ragas_inputs_in_prompt() + with ragas_context_precision_single_context_cassette: + rcp_evaluator.run_and_submit_evaluation(llm_span) + rcp_evaluator.llmobs_service.submit_evaluation.assert_has_calls( + [ + mock.call( + span_context={ + "span_id": llm_span.get("span_id"), + "trace_id": llm_span.get("trace_id"), + }, + label=RagasContextPrecisionEvaluator.LABEL, + metric_type=RagasContextPrecisionEvaluator.METRIC_TYPE, + value=1.0, + metadata={ + "_dd.evaluation_kind": "context_precision", + "_dd.evaluation_span": {"span_id": mock.ANY, "trace_id": mock.ANY}, + }, + ) + ] + ) + + +def test_ragas_context_precision_submits_evaluation_on_span_with_question_in_messages( + ragas, llmobs, mock_llmobs_submit_evaluation +): + """Test that evaluation is submitted for a valid llm span where the last message content is the question""" + rcp_evaluator = RagasContextPrecisionEvaluator(llmobs) + llm_span = _llm_span_with_expected_ragas_inputs_in_messages() + with ragas_context_precision_single_context_cassette: + rcp_evaluator.run_and_submit_evaluation(llm_span) + rcp_evaluator.llmobs_service.submit_evaluation.assert_has_calls( + [ + mock.call( + span_context={ + "span_id": llm_span.get("span_id"), + "trace_id": llm_span.get("trace_id"), + }, + label=RagasContextPrecisionEvaluator.LABEL, + metric_type=RagasContextPrecisionEvaluator.METRIC_TYPE, + value=1.0, + metadata={ + "_dd.evaluation_kind": "context_precision", + "_dd.evaluation_span": {"span_id": mock.ANY, "trace_id": mock.ANY}, + }, + ) + ] + ) + + +def test_ragas_context_precision_submits_evaluation_on_span_with_custom_keys( + ragas, llmobs, mock_llmobs_submit_evaluation +): + """Test that evaluation is submitted for a valid llm span where the last message content is the question""" + 
rcp_evaluator = RagasContextPrecisionEvaluator(llmobs) + llm_span = _expected_llmobs_llm_span_event( + Span("dummy"), + prompt={ + "variables": { + "user_input": default_ragas_inputs["question"], + "context_2": default_ragas_inputs["context"], + "context_3": default_ragas_inputs["context"], + }, + "_dd_context_variable_keys": ["context_2", "context_3"], + "_dd_query_variable_keys": ["user_input"], + }, + output_messages=[{"content": default_ragas_inputs["answer"]}], + ) + with ragas_context_precision_multiple_context_cassette: + rcp_evaluator.run_and_submit_evaluation(llm_span) + rcp_evaluator.llmobs_service.submit_evaluation.assert_has_calls( + [ + mock.call( + span_context={ + "span_id": llm_span.get("span_id"), + "trace_id": llm_span.get("trace_id"), + }, + label=RagasContextPrecisionEvaluator.LABEL, + metric_type=RagasContextPrecisionEvaluator.METRIC_TYPE, + value=0.5, + metadata={ + "_dd.evaluation_kind": "context_precision", + "_dd.evaluation_span": {"span_id": mock.ANY, "trace_id": mock.ANY}, + }, + ) + ] + ) + + +def test_ragas_context_precision_emits_traces(ragas, llmobs, llmobs_events): + rcp_evaluator = RagasContextPrecisionEvaluator(llmobs) + with ragas_context_precision_single_context_cassette: + rcp_evaluator.evaluate(_llm_span_with_expected_ragas_inputs_in_prompt()) + ragas_spans = [event for event in llmobs_events if event["name"].startswith("dd-ragas.")] + ragas_spans = sorted(ragas_spans, key=lambda d: d["start_ns"]) + assert len(ragas_spans) == 2 + assert ragas_spans == _expected_ragas_context_precision_spans() + + # verify the trace structure + root_span = ragas_spans[0] + root_span_id = root_span["span_id"] + assert root_span["parent_id"] == "undefined" + assert root_span["meta"] is not None + + root_span_trace_id = root_span["trace_id"] + for child_span in ragas_spans[1:]: + assert child_span["trace_id"] == root_span_trace_id + assert child_span["parent_id"] == root_span_id + + +def test_ragas_answer_relevancy_init(ragas, llmobs): + 
rar_evaluator = RagasAnswerRelevancyEvaluator(llmobs) + assert rar_evaluator.llmobs_service == llmobs + assert rar_evaluator.ragas_answer_relevancy_instance == ragas.metrics.answer_relevancy + assert rar_evaluator.ragas_answer_relevancy_instance.llm == ragas.llms.llm_factory() + assert ( + rar_evaluator.ragas_answer_relevancy_instance.embeddings.embeddings + == ragas.embeddings.embedding_factory().embeddings + ) + assert ( + rar_evaluator.ragas_answer_relevancy_instance.embeddings.run_config + == ragas.embeddings.embedding_factory().run_config + ) + + +def test_ragas_answer_relevancy_throws_if_dependencies_not_present(llmobs, mock_ragas_dependencies_not_present, ragas): + with pytest.raises(NotImplementedError, match="Failed to load dependencies for `ragas_answer_relevancy` evaluator"): + RagasAnswerRelevancyEvaluator(llmobs) + + +def test_ragas_answer_relevancy_returns_none_if_inputs_extraction_fails(ragas, mock_llmobs_submit_evaluation, llmobs): + rar_evaluator = RagasAnswerRelevancyEvaluator(llmobs) + failure_msg, _ = rar_evaluator.evaluate(_llm_span_without_io()) + assert failure_msg == "fail_extract_answer_relevancy_inputs" + assert rar_evaluator.llmobs_service.submit_evaluation.call_count == 0 + + +def test_ragas_answer_relevancy_has_modified_answer_relevancy_instance( + ragas, mock_llmobs_submit_evaluation, reset_ragas_answer_relevancy_llm, llmobs +): + """Answer relevancy instance used in ragas evaluator should match the global ragas context precision instance""" + from ragas.llms import BaseRagasLLM + from ragas.metrics import answer_relevancy + + class FirstDummyLLM(BaseRagasLLM): + def __init__(self): + super().__init__() + + def generate_text(self) -> str: + return "dummy llm" + + def agenerate_text(self) -> str: + return "dummy llm" + + answer_relevancy.llm = FirstDummyLLM() + + rar_evaluator = RagasAnswerRelevancyEvaluator(llmobs) + + assert rar_evaluator.ragas_answer_relevancy_instance.llm.generate_text() == "dummy llm" + + class 
SecondDummyLLM(BaseRagasLLM): + def __init__(self): + super().__init__() + + def generate_text(self) -> str: + return "second dummy llm" + + def agenerate_text(self) -> str: + return "second dummy llm" + + answer_relevancy.llm = SecondDummyLLM() + + rar_evaluator = RagasAnswerRelevancyEvaluator(llmobs) + + assert rar_evaluator.ragas_answer_relevancy_instance.llm.generate_text() == "second dummy llm" + + +def test_ragas_answer_relevancy_submits_evaluation( + ragas, llmobs, mock_llmobs_submit_evaluation, mock_ragas_answer_relevancy_calculate_similarity +): + """Test that evaluation is submitted for a valid llm span where question is in the prompt variables""" + rar_evaluator = RagasAnswerRelevancyEvaluator(llmobs) + llm_span = _llm_span_with_expected_ragas_inputs_in_prompt() + with ragas_answer_relevancy_cassette: + rar_evaluator.run_and_submit_evaluation(llm_span) + rar_evaluator.llmobs_service.submit_evaluation.assert_has_calls( + [ + mock.call( + span_context={ + "span_id": llm_span.get("span_id"), + "trace_id": llm_span.get("trace_id"), + }, + label=RagasAnswerRelevancyEvaluator.LABEL, + metric_type=RagasAnswerRelevancyEvaluator.METRIC_TYPE, + value=mock.ANY, + metadata={ + "_dd.evaluation_span": {"span_id": mock.ANY, "trace_id": mock.ANY}, + }, + ) + ] + ) + + +def test_ragas_answer_relevancy_submits_evaluation_on_span_with_question_in_messages( + ragas, llmobs, mock_llmobs_submit_evaluation, mock_ragas_answer_relevancy_calculate_similarity +): + """Test that evaluation is submitted for a valid llm span where the last message content is the question""" + rar_evaluator = RagasAnswerRelevancyEvaluator(llmobs) + llm_span = _llm_span_with_expected_ragas_inputs_in_messages() + with ragas_answer_relevancy_cassette: + rar_evaluator.run_and_submit_evaluation(llm_span) + rar_evaluator.llmobs_service.submit_evaluation.assert_has_calls( + [ + mock.call( + span_context={ + "span_id": llm_span.get("span_id"), + "trace_id": llm_span.get("trace_id"), + }, + 
label=RagasAnswerRelevancyEvaluator.LABEL, + metric_type=RagasAnswerRelevancyEvaluator.METRIC_TYPE, + value=mock.ANY, + metadata={ + "_dd.evaluation_span": {"span_id": mock.ANY, "trace_id": mock.ANY}, + }, + ) + ] + ) + + +def test_ragas_answer_relevancy_submits_evaluation_on_span_with_custom_keys( + ragas, llmobs, mock_llmobs_submit_evaluation, mock_ragas_answer_relevancy_calculate_similarity +): + """Test that evaluation is submitted for a valid llm span where the last message content is the question""" + rar_evaluator = RagasAnswerRelevancyEvaluator(llmobs) + llm_span = _expected_llmobs_llm_span_event( + Span("dummy"), + prompt={ + "variables": { + "user_input": "Is france part of europe?", + "context_2": "irrelevant", + "context_3": "France is part of europe", + }, + "_dd_context_variable_keys": ["context_2", "context_3"], + "_dd_query_variable_keys": ["user_input"], + }, + output_messages=[{"content": "France is indeed part of europe"}], + ) + with ragas_answer_relevancy_cassette: + rar_evaluator.run_and_submit_evaluation(llm_span) + rar_evaluator.llmobs_service.submit_evaluation.assert_has_calls( + [ + mock.call( + span_context={ + "span_id": llm_span.get("span_id"), + "trace_id": llm_span.get("trace_id"), + }, + label=RagasAnswerRelevancyEvaluator.LABEL, + metric_type=RagasAnswerRelevancyEvaluator.METRIC_TYPE, + value=mock.ANY, + metadata={ + "_dd.evaluation_span": {"span_id": mock.ANY, "trace_id": mock.ANY}, + }, + ) + ] + ) + + +def test_ragas_answer_relevancy_emits_traces( + ragas, llmobs, llmobs_events, mock_ragas_answer_relevancy_calculate_similarity +): + rar_evaluator = RagasAnswerRelevancyEvaluator(llmobs) + with ragas_answer_relevancy_cassette: + rar_evaluator.evaluate(_llm_span_with_expected_ragas_inputs_in_prompt()) + + ragas_spans = [event for event in llmobs_events if event["name"].startswith("dd-ragas.")] + ragas_spans = sorted(ragas_spans, key=lambda d: d["start_ns"]) + + assert len(ragas_spans) == 3 + # check name, io, span kinds match + 
assert ragas_spans == _expected_ragas_answer_relevancy_spans() + + # verify the trace structure + root_span = ragas_spans[0] + root_span_id = root_span["span_id"] + assert root_span["parent_id"] == "undefined" + assert root_span["meta"] is not None + + root_span_trace_id = root_span["trace_id"] + for child_span in ragas_spans[1:]: + assert child_span["trace_id"] == root_span_trace_id + assert child_span["parent_id"] == root_span_id diff --git a/tests/opentelemetry/flask_app.py b/tests/opentelemetry/flask_app.py index 16f286416fb..e8ef809ab39 100644 --- a/tests/opentelemetry/flask_app.py +++ b/tests/opentelemetry/flask_app.py @@ -10,7 +10,7 @@ opentelemetry.trace.set_tracer_provider(TracerProvider()) -ddtrace.tracer.configure(settings={"FILTERS": [PingFilter()]}) +ddtrace.tracer._configure(trace_processors=[PingFilter()]) app = flask.Flask(__name__) diff --git a/tests/opentracer/conftest.py b/tests/opentracer/conftest.py index 54f2950f799..769a8a6ae69 100644 --- a/tests/opentracer/conftest.py +++ b/tests/opentracer/conftest.py @@ -6,7 +6,7 @@ """ import pytest -from ddtrace.opentracer import Tracer +from ddtrace.opentracer import Tracer as OTTracer from ddtrace.opentracer import set_global_tracer from tests.utils import DummyTracer from tests.utils import TracerSpanContainer @@ -18,12 +18,12 @@ def ot_tracer_factory(): def make_ot_tracer(service_name="my_svc", config=None, scope_manager=None, context_provider=None): config = config or {} - tracer = Tracer(service_name=service_name, config=config, scope_manager=scope_manager) + tracer = OTTracer(service_name=service_name, config=config, scope_manager=scope_manager) # similar to how we test the ddtracer, use a dummy tracer dd_tracer = DummyTracer() if context_provider: - dd_tracer.configure(context_provider=context_provider) + dd_tracer._configure(context_provider=context_provider) # attach the dummy tracer to the opentracer tracer._dd_tracer = dd_tracer diff --git a/tests/opentracer/test_tracer_asyncio.py 
b/tests/opentracer/test_tracer_asyncio.py index 176f2c61fbe..230ca776986 100644 --- a/tests/opentracer/test_tracer_asyncio.py +++ b/tests/opentracer/test_tracer_asyncio.py @@ -66,7 +66,7 @@ async def f1(): @pytest.mark.asyncio async def test_trace_multiple_calls(ot_tracer, test_spans): - ot_tracer._dd_tracer.configure(context_provider=context_provider) + ot_tracer._dd_tracer._configure(context_provider=context_provider) # create multiple futures so that we expect multiple # traces instead of a single one (helper not used) diff --git a/tests/profiling/test_profiler.py b/tests/profiling/test_profiler.py index 7f98bbf6aa8..b46970f2591 100644 --- a/tests/profiling/test_profiler.py +++ b/tests/profiling/test_profiler.py @@ -232,38 +232,68 @@ def _check_url(prof, url, api_key, endpoint_path="profiling/v1/input"): pytest.fail("Unable to find HTTP exporter") +@pytest.mark.subprocess() def test_tracer_url(): - t = ddtrace.Tracer() - t.configure(hostname="foobar") + import os + + from ddtrace import tracer as t + from ddtrace.profiling import profiler + from tests.profiling.test_profiler import _check_url + + t._configure(hostname="foobar") prof = profiler.Profiler(tracer=t) _check_url(prof, "http://foobar:8126", os.environ.get("DD_API_KEY")) +@pytest.mark.subprocess() def test_tracer_url_https(): - t = ddtrace.Tracer() - t.configure(hostname="foobar", https=True) + import os + + from ddtrace import tracer as t + from ddtrace.profiling import profiler + from tests.profiling.test_profiler import _check_url + + t._configure(hostname="foobar", https=True) prof = profiler.Profiler(tracer=t) _check_url(prof, "https://foobar:8126", os.environ.get("DD_API_KEY")) +@pytest.mark.subprocess() def test_tracer_url_uds_hostname(): - t = ddtrace.Tracer() - t.configure(hostname="foobar", uds_path="/foobar") + import os + + from ddtrace import tracer as t + from ddtrace.profiling import profiler + from tests.profiling.test_profiler import _check_url + + t._configure(hostname="foobar", 
uds_path="/foobar") prof = profiler.Profiler(tracer=t) _check_url(prof, "unix://foobar/foobar", os.environ.get("DD_API_KEY")) +@pytest.mark.subprocess() def test_tracer_url_uds(): - t = ddtrace.Tracer() - t.configure(uds_path="/foobar") + import os + + from ddtrace import tracer as t + from ddtrace.profiling import profiler + from tests.profiling.test_profiler import _check_url + + t._configure(uds_path="/foobar") prof = profiler.Profiler(tracer=t) _check_url(prof, "unix:///foobar", os.environ.get("DD_API_KEY")) +@pytest.mark.subprocess() def test_tracer_url_configure_after(): - t = ddtrace.Tracer() + import os + + from ddtrace import tracer as t + from ddtrace.profiling import profiler + from tests.profiling.test_profiler import _check_url + prof = profiler.Profiler(tracer=t) - t.configure(hostname="foobar") + t._configure(hostname="foobar") _check_url(prof, "http://foobar:8126", os.environ.get("DD_API_KEY")) @@ -276,11 +306,10 @@ def test_env_no_api_key(): def test_env_endpoint_url(): import os - import ddtrace + from ddtrace import tracer as t from ddtrace.profiling import profiler from tests.profiling.test_profiler import _check_url - t = ddtrace.Tracer() prof = profiler.Profiler(tracer=t) _check_url(prof, "http://foobar:123", os.environ.get("DD_API_KEY")) diff --git a/tests/profiling_v2/collector/test_stack.py b/tests/profiling_v2/collector/test_stack.py index 74def22ed50..774e15fb70d 100644 --- a/tests/profiling_v2/collector/test_stack.py +++ b/tests/profiling_v2/collector/test_stack.py @@ -11,7 +11,6 @@ from ddtrace import ext from ddtrace.internal.datadog.profiling import ddup from ddtrace.profiling.collector import stack -from ddtrace.settings.profiling import config from tests.profiling.collector import pprof_utils from tests.profiling.collector import test_collector @@ -171,7 +170,6 @@ def test_push_span_unregister_thread(tmp_path, monkeypatch, tracer): pytest.skip("stack_v2 is not supported on Python 3.7") with 
patch("ddtrace.internal.datadog.profiling.stack_v2.unregister_thread") as unregister_thread: - monkeypatch.setattr(config.stack, "v2_enabled", True) tracer._endpoint_call_counter_span_processor.enable() test_name = "test_push_span_unregister_thread" @@ -220,7 +218,7 @@ def target_fun(): ), ) - unregister_thread.assert_called_once_with(thread_id) + unregister_thread.assert_called_with(thread_id) @pytest.mark.parametrize("stack_v2_enabled", [True, False]) @@ -748,6 +746,7 @@ def test_ignore_profiler(stack_v2_enabled, ignore_profiler, tmp_path): # TODO: support ignore profiler with stack_v2 and update this test @pytest.mark.skipif(not TESTING_GEVENT, reason="Not testing gevent") +@pytest.mark.skip(reason="ignore_profiler is not supported with stack v2") @pytest.mark.subprocess( ddtrace_run=True, env=dict(DD_PROFILING_IGNORE_PROFILER="1", DD_PROFILING_OUTPUT_PPROF="/tmp/test_ignore_profiler_gevent_task"), diff --git a/tests/profiling_v2/gunicorn.conf.py b/tests/profiling_v2/gunicorn.conf.py new file mode 100644 index 00000000000..c45f27ce11c --- /dev/null +++ b/tests/profiling_v2/gunicorn.conf.py @@ -0,0 +1,67 @@ +from datetime import datetime +from datetime import timezone +import logging + + +def post_fork(server, worker): + """Log the startup time of each worker.""" + logging.info("Worker %s started", worker.pid) + + +def post_worker_init(worker): + logging.info("Worker %s initialized", worker.pid) + + +class CustomFormatter(logging.Formatter): + """Custom formatter to include timezone offset in the log message.""" + + def formatTime(self, record, datefmt=None): + dt = datetime.fromtimestamp(record.created, tz=timezone.utc).astimezone() + milliseconds = int(record.msecs) + offset = dt.strftime("%z") # Get timezone offset in the form +0530 + if datefmt: + formatted_time = dt.strftime(datefmt) + else: + formatted_time = dt.strftime("%Y-%m-%d %H:%M:%S") + + # Add milliseconds and timezone offset + offset = dt.strftime("%z") # Timezone offset in the form +0530 + return 
f"{formatted_time}.{milliseconds:03d} {offset}" + + +logconfig_dict = { + "version": 1, + "formatters": { + "default": { + "()": CustomFormatter, # Use the custom formatter + "format": "[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + }, + }, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": "default", + }, + }, + "loggers": { + "": { # root logger + "handlers": ["console"], + "level": "INFO", + }, + "gunicorn.error": { + "handlers": ["console"], + "level": "INFO", + "propagate": False, + }, + "gunicorn.access": { + "handlers": ["console"], + "level": "INFO", + "propagate": False, + }, + }, + "root": { + "level": "INFO", + "handlers": ["console"], + }, +} diff --git a/tests/profiling_v2/test_gunicorn.py b/tests/profiling_v2/test_gunicorn.py index 4d7adbf6c95..90141445d3a 100644 --- a/tests/profiling_v2/test_gunicorn.py +++ b/tests/profiling_v2/test_gunicorn.py @@ -13,7 +13,7 @@ # DEV: gunicorn tests are hard to debug, so keeping these print statements for # future debugging -DEBUG_PRINT = False +DEBUG_PRINT = True def debug_print(*args): @@ -37,6 +37,8 @@ def _run_gunicorn(*args): "127.0.0.1:7644", "--worker-tmp-dir", "/dev/shm", + "-c", + os.path.dirname(__file__) + "/gunicorn.conf.py", "--chdir", os.path.dirname(__file__), ] diff --git a/tests/snapshots/tests.contrib.botocore.test_bedrock.test_invoke_model_using_aws_arn_model_id.json b/tests/snapshots/tests.contrib.botocore.test_bedrock.test_invoke_model_using_aws_arn_model_id.json new file mode 100644 index 00000000000..0da1e335083 --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test_bedrock.test_invoke_model_using_aws_arn_model_id.json @@ -0,0 +1,39 @@ +[[ + { + "name": "bedrock-runtime.command", + "service": "aws.bedrock-runtime", + "resource": "InvokeModel", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.botocore", + "_dd.p.dm": "-0", + 
"_dd.p.tid": "6786dfda00000000", + "bedrock.request.max_tokens": "50", + "bedrock.request.model": "titan-tg1-large", + "bedrock.request.model_provider": "amazon", + "bedrock.request.prompt": "Command: can you explain what Datadog is to someone not in the tech industry?", + "bedrock.request.stop_sequences": "[]", + "bedrock.request.temperature": "0", + "bedrock.request.top_p": "0.9", + "bedrock.response.choices.0.finish_reason": "LENGTH", + "bedrock.response.choices.0.text": "\\n\\nDatadog is a monitoring and analytics platform for IT operations, DevOps, and software development teams. It provides real-t...", + "bedrock.response.duration": "2646", + "bedrock.response.id": "b2d0fd44-c29a-4cd4-a97a-6901a48f6264", + "bedrock.usage.completion_tokens": "50", + "bedrock.usage.prompt_tokens": "18", + "language": "python", + "runtime-id": "cf8ef38d3504475ba71634071f15d00f" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 96028 + }, + "duration": 2318000, + "start": 1736892378210317000 + }]] diff --git a/tests/snapshots/tests.integration.test_integration_snapshots.test_trace_with_wrong_metrics_types_not_sent.json b/tests/snapshots/tests.integration.test_integration_snapshots.test_trace_with_wrong_metrics_types_not_sent.json deleted file mode 100644 index a1a67aeefc8..00000000000 --- a/tests/snapshots/tests.integration.test_integration_snapshots.test_trace_with_wrong_metrics_types_not_sent.json +++ /dev/null @@ -1,25 +0,0 @@ -[[ - { - "name": "parent", - "service": "tests.integration", - "resource": "parent", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65f8a77100000000", - "language": "python", - "runtime-id": "005360373bf04c7fb732555994db4f78" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 5837 - }, - "duration": 1004386709, - "start": 1710794609240060721 - }]] diff 
--git a/tests/snapshots/tests.integration.test_integration_snapshots.test_tracetagsprocessor_only_adds_new_tags.json b/tests/snapshots/tests.integration.test_integration_snapshots.test_tracetagsprocessor_only_adds_new_tags.json index 9298b2342cd..08108bdeff9 100644 --- a/tests/snapshots/tests.integration.test_integration_snapshots.test_tracetagsprocessor_only_adds_new_tags.json +++ b/tests/snapshots/tests.integration.test_integration_snapshots.test_tracetagsprocessor_only_adds_new_tags.json @@ -1,7 +1,7 @@ [[ { "name": "web.request", - "service": "tests.integration", + "service": "ddtrace_subprocess_dir", "resource": "web.request", "trace_id": 0, "span_id": 1, diff --git a/tests/snapshots/tests.integration.test_propagation.test_trace_tags_multispan.json b/tests/snapshots/tests.integration.test_propagation.test_trace_tags_multispan.json new file mode 100644 index 00000000000..000f5f143c7 --- /dev/null +++ b/tests/snapshots/tests.integration.test_propagation.test_trace_tags_multispan.json @@ -0,0 +1,70 @@ +[[ + { + "name": "p", + "service": "tests.integration", + "resource": "p", + "trace_id": 0, + "span_id": 1, + "parent_id": 5678, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-1", + "_dd.p.test": "value", + "language": "python", + "runtime-id": "65e7346cd27a4fcbb1a2ccb98722fed3" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 4531 + }, + "duration": 48000, + "start": 1735013581776627000 + }, + { + "name": "c1", + "service": "tests.integration", + "resource": "c1", + "trace_id": 0, + "span_id": 2, + "parent_id": 1, + "type": "", + "error": 0, + "meta": { + "_dd.p.test": "value" + }, + "duration": 5000, + "start": 1735013581776649000 + }, + { + "name": "c2", + "service": "tests.integration", + "resource": "c2", + "trace_id": 0, + "span_id": 3, + "parent_id": 1, + "type": "", + "error": 0, + "meta": { + "_dd.p.test": "value" + }, + "duration": 7000, + "start": 1735013581776662000 + }, + { + 
"name": "gc", + "service": "tests.integration", + "resource": "gc", + "trace_id": 0, + "span_id": 4, + "parent_id": 3, + "type": "", + "error": 0, + "meta": { + "_dd.p.test": "value" + }, + "duration": 11000, + "start": 1735013581776667000 + }]] diff --git a/tests/telemetry/app.py b/tests/telemetry/app.py index 7390d9b5da6..ae2b9932c9f 100644 --- a/tests/telemetry/app.py +++ b/tests/telemetry/app.py @@ -1,7 +1,7 @@ from flask import Flask from ddtrace.internal.telemetry import telemetry_writer -from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_TRACER +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE app = Flask(__name__) @@ -23,7 +23,7 @@ def starting_app_view(): @app.route("/count_metric") def metrics_view(): telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_TRACER, + TELEMETRY_NAMESPACE.TRACERS, "test_metric", 1.0, ) diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index 558e9961afc..e2aa4552631 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -148,17 +148,13 @@ def test_app_started_error_handled_exception(test_agent_session, run_python_code logging.basicConfig() from ddtrace import tracer -from ddtrace.filters import TraceFilter +from ddtrace.trace import TraceFilter class FailingFilture(TraceFilter): def process_trace(self, trace): raise Exception("Exception raised in trace filter") -tracer.configure( - settings={ - "FILTERS": [FailingFilture()], - } -) +tracer._configure(trace_processors=[FailingFilture()]) # generate and encode span to trigger sampling failure tracer.trace("hello").finish() diff --git a/tests/telemetry/test_telemetry_metrics.py b/tests/telemetry/test_telemetry_metrics.py index a3ea6051b8b..d1061d57770 100644 --- a/tests/telemetry/test_telemetry_metrics.py +++ b/tests/telemetry/test_telemetry_metrics.py @@ -3,8 +3,7 @@ from mock.mock import ANY from ddtrace.internal.telemetry.constants import TELEMETRY_LOG_LEVEL 
-from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_APPSEC -from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE_TAG_TRACER +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.internal.telemetry.constants import TELEMETRY_TYPE_DISTRIBUTION from ddtrace.internal.telemetry.constants import TELEMETRY_TYPE_GENERATE_METRICS from tests.utils import override_global_config @@ -13,7 +12,7 @@ def _assert_metric( test_agent, expected_metrics, - namespace=TELEMETRY_NAMESPACE_TAG_TRACER, + namespace=TELEMETRY_NAMESPACE.TRACERS, type_paypload=TELEMETRY_TYPE_GENERATE_METRICS, ): assert len(expected_metrics) > 0, "expected_metrics should not be empty" @@ -23,7 +22,7 @@ def _assert_metric( metrics = [] for event in metrics_events: - if event["payload"]["namespace"] == namespace: + if event["payload"]["namespace"] == namespace.value: for metric in event["payload"]["series"]: metric["tags"].sort() metrics.append(metric) @@ -49,7 +48,7 @@ def _assert_logs(test_agent, expected_logs): def test_send_metric_flush_and_generate_metrics_series_is_restarted(telemetry_writer, test_agent_session, mock_time): """Check the queue of metrics is empty after run periodic method of PeriodicService""" - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric2", 1, (("a", "b"),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric2", 1, (("a", "b"),)) expected_series = [ { "common": True, @@ -62,7 +61,7 @@ def test_send_metric_flush_and_generate_metrics_series_is_restarted(telemetry_wr _assert_metric(test_agent_session, expected_series) - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric2", 1, (("a", "b"),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric2", 1, (("a", "b"),)) _assert_metric(test_agent_session, expected_series) @@ -75,8 +74,8 @@ def test_send_metric_datapoint_equal_type_and_tags_yields_single_series( But in 
Datadog, a datapoint also includes tags, which declare all the various scopes the datapoint belongs to https://www.datadoghq.com/blog/the-power-of-tagged-metrics/#whats-a-metric-tag """ - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 2, (("a", "b"),)) - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 3, (("a", "b"),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 2, (("a", "b"),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 3, (("a", "b"),)) expected_series = [ { @@ -99,9 +98,9 @@ def test_send_metric_datapoint_equal_type_different_tags_yields_multiple_series( But in Datadog, a datapoint also includes tags, which declare all the various scopes the datapoint belongs to https://www.datadoghq.com/blog/the-power-of-tagged-metrics/#whats-a-metric-tag """ - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 4, (("a", "b"),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 4, (("a", "b"),)) telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_TRACER, + TELEMETRY_NAMESPACE.TRACERS, "test-metric", 5, ( @@ -109,7 +108,7 @@ def test_send_metric_datapoint_equal_type_different_tags_yields_multiple_series( ("c", "True"), ), ) - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 6, tuple()) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 6, tuple()) expected_series = [ { @@ -144,8 +143,8 @@ def test_send_metric_datapoint_with_different_types(telemetry_writer, test_agent But in Datadog, a datapoint also includes tags, which declare all the various scopes the datapoint belongs to https://www.datadoghq.com/blog/the-power-of-tagged-metrics/#whats-a-metric-tag """ - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 1, (("a", "b"),)) - 
telemetry_writer.add_gauge_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 1, (("a", "b"),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 1, (("a", "b"),)) + telemetry_writer.add_gauge_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 1, (("a", "b"),)) expected_series = [ {"common": True, "metric": "test-metric", "points": [[1642544540, 1.0]], "tags": ["a:b"], "type": "count"}, @@ -162,11 +161,11 @@ def test_send_metric_datapoint_with_different_types(telemetry_writer, test_agent def test_send_tracers_count_metric(telemetry_writer, test_agent_session, mock_time): - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 1, (("a", "b"),)) - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 1, (("a", "b"),)) - telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE_TAG_TRACER, "test-metric", 1, tuple()) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 1, (("a", "b"),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 1, (("a", "b"),)) + telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.TRACERS, "test-metric", 1, tuple()) telemetry_writer.add_count_metric( - TELEMETRY_NAMESPACE_TAG_TRACER, + TELEMETRY_NAMESPACE.TRACERS, "test-metric", 1, ( @@ -203,13 +202,13 @@ def test_send_tracers_count_metric(telemetry_writer, test_agent_session, mock_ti def test_send_appsec_rate_metric(telemetry_writer, test_agent_session, mock_time): telemetry_writer.add_rate_metric( - TELEMETRY_NAMESPACE_TAG_APPSEC, + TELEMETRY_NAMESPACE.APPSEC, "test-metric", 6, (("hi", "HELLO"), ("NAME", "CANDY")), ) - telemetry_writer.add_rate_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 6, tuple()) - telemetry_writer.add_rate_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 6, tuple()) + telemetry_writer.add_rate_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 6, tuple()) + telemetry_writer.add_rate_metric(TELEMETRY_NAMESPACE.APPSEC, 
"test-metric", 6, tuple()) expected_series = [ { @@ -230,12 +229,12 @@ def test_send_appsec_rate_metric(telemetry_writer, test_agent_session, mock_time }, ] - _assert_metric(test_agent_session, expected_series, namespace=TELEMETRY_NAMESPACE_TAG_APPSEC) + _assert_metric(test_agent_session, expected_series, namespace=TELEMETRY_NAMESPACE.APPSEC) def test_send_appsec_gauge_metric(telemetry_writer, test_agent_session, mock_time): telemetry_writer.add_gauge_metric( - TELEMETRY_NAMESPACE_TAG_APPSEC, + TELEMETRY_NAMESPACE.APPSEC, "test-metric", 5, ( @@ -243,8 +242,8 @@ def test_send_appsec_gauge_metric(telemetry_writer, test_agent_session, mock_tim ("NAME", "CANDY"), ), ) - telemetry_writer.add_gauge_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 5, (("a", "b"),)) - telemetry_writer.add_gauge_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 6, tuple()) + telemetry_writer.add_gauge_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 5, (("a", "b"),)) + telemetry_writer.add_gauge_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 6, tuple()) expected_series = [ { @@ -272,13 +271,13 @@ def test_send_appsec_gauge_metric(telemetry_writer, test_agent_session, mock_tim "type": "gauge", }, ] - _assert_metric(test_agent_session, expected_series, namespace=TELEMETRY_NAMESPACE_TAG_APPSEC) + _assert_metric(test_agent_session, expected_series, namespace=TELEMETRY_NAMESPACE.APPSEC) def test_send_appsec_distributions_metric(telemetry_writer, test_agent_session, mock_time): - telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 4, tuple()) - telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 5, tuple()) - telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 6, tuple()) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 4, tuple()) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 5, tuple()) + 
telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 6, tuple()) expected_series = [ { @@ -290,16 +289,16 @@ def test_send_appsec_distributions_metric(telemetry_writer, test_agent_session, _assert_metric( test_agent_session, expected_series, - namespace=TELEMETRY_NAMESPACE_TAG_APPSEC, + namespace=TELEMETRY_NAMESPACE.APPSEC, type_paypload=TELEMETRY_TYPE_DISTRIBUTION, ) def test_send_metric_flush_and_distributions_series_is_restarted(telemetry_writer, test_agent_session, mock_time): """Check the queue of metrics is empty after run periodic method of PeriodicService""" - telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 4, tuple()) - telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 5, tuple()) - telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 6, tuple()) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 4, tuple()) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 5, tuple()) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 6, tuple()) expected_series = [ { "metric": "test-metric", @@ -311,7 +310,7 @@ def test_send_metric_flush_and_distributions_series_is_restarted(telemetry_write _assert_metric( test_agent_session, expected_series, - namespace=TELEMETRY_NAMESPACE_TAG_APPSEC, + namespace=TELEMETRY_NAMESPACE.APPSEC, type_paypload=TELEMETRY_TYPE_DISTRIBUTION, ) @@ -323,12 +322,12 @@ def test_send_metric_flush_and_distributions_series_is_restarted(telemetry_write } ] - telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE_TAG_APPSEC, "test-metric", 1, tuple()) + telemetry_writer.add_distribution_metric(TELEMETRY_NAMESPACE.APPSEC, "test-metric", 1, tuple()) _assert_metric( test_agent_session, expected_series, - namespace=TELEMETRY_NAMESPACE_TAG_APPSEC, + namespace=TELEMETRY_NAMESPACE.APPSEC, 
type_paypload=TELEMETRY_TYPE_DISTRIBUTION, ) diff --git a/tests/telemetry/test_writer.py b/tests/telemetry/test_writer.py index 3b5ec7226af..8d4030c84a9 100644 --- a/tests/telemetry/test_writer.py +++ b/tests/telemetry/test_writer.py @@ -268,7 +268,9 @@ def test_app_started_event_configuration_override(test_agent_session, run_python env["DD_SPAN_SAMPLING_RULES_FILE"] = str(file) env["DD_TRACE_PARTIAL_FLUSH_ENABLED"] = "false" env["DD_TRACE_PARTIAL_FLUSH_MIN_SPANS"] = "3" + env["DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT"] = "restart" env["DD_SITE"] = "datadoghq.com" + # By default telemetry collection is enabled after 10 seconds, so we either need to # to sleep for 10 seconds or manually call _app_started() to generate the app started event. # This delay allows us to collect start up errors and dynamic configurations @@ -407,7 +409,7 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_PROFILING_TAGS", "origin": "default", "value": ""}, {"name": "DD_PROFILING_TIMELINE_ENABLED", "origin": "default", "value": False}, {"name": "DD_PROFILING_UPLOAD_INTERVAL", "origin": "env_var", "value": 10.0}, - {"name": "DD_PROFILING__FORCE_LEGACY_EXPORTER", "origin": "env_var", "value": True}, + {"name": "DD_PROFILING__FORCE_LEGACY_EXPORTER", "origin": "default", "value": False}, {"name": "DD_REMOTE_CONFIGURATION_ENABLED", "origin": "env_var", "value": True}, {"name": "DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS", "origin": "env_var", "value": 1.0}, {"name": "DD_RUNTIME_METRICS_ENABLED", "origin": "unknown", "value": False}, @@ -446,6 +448,7 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_TRACE_OTEL_ENABLED", "origin": "env_var", "value": True}, {"name": "DD_TRACE_PARTIAL_FLUSH_ENABLED", "origin": "env_var", "value": False}, {"name": "DD_TRACE_PARTIAL_FLUSH_MIN_SPANS", "origin": "env_var", "value": 3}, + {"name": "DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT", "origin": "env_var", "value": "restart"}, 
{"name": "DD_TRACE_PROPAGATION_EXTRACT_FIRST", "origin": "default", "value": False}, {"name": "DD_TRACE_PROPAGATION_HTTP_BAGGAGE_ENABLED", "origin": "default", "value": False}, {"name": "DD_TRACE_PROPAGATION_STYLE_EXTRACT", "origin": "env_var", "value": "tracecontext"}, diff --git a/tests/tracer/runtime/test_tag_collectors.py b/tests/tracer/runtime/test_tag_collectors.py index 3889b7b7e15..240877b092e 100644 --- a/tests/tracer/runtime/test_tag_collectors.py +++ b/tests/tracer/runtime/test_tag_collectors.py @@ -77,8 +77,8 @@ def test_tracer_tags_config(): def test_tracer_tags_service_from_code(): """Ensure we collect the expected tags for the TracerTagCollector""" import ddtrace - from ddtrace.filters import TraceFilter from ddtrace.internal.runtime import tag_collectors + from ddtrace.trace import TraceFilter from tests.conftest import DEFAULT_DDTRACE_SUBPROCESS_TEST_SERVICE_NAME class DropFilter(TraceFilter): @@ -86,7 +86,7 @@ def process_trace(self, _): return None # Drop all traces so we don't get an error trying to flush - ddtrace.tracer.configure(settings={"FILTERS": [DropFilter()]}) + ddtrace.tracer._configure(trace_processors=[DropFilter()]) ddtrace.config.service = "my-service" diff --git a/tests/tracer/test_filters.py b/tests/tracer/test_filters.py index 73861f8d3a2..d632ceb4998 100644 --- a/tests/tracer/test_filters.py +++ b/tests/tracer/test_filters.py @@ -2,10 +2,10 @@ import pytest +from ddtrace._trace.filters import FilterRequestsOnUrl from ddtrace._trace.span import Span from ddtrace.ext.http import URL -from ddtrace.filters import FilterRequestsOnUrl -from ddtrace.filters import TraceFilter +from ddtrace.trace import TraceFilter class FilterRequestOnUrlTests(TestCase): diff --git a/tests/tracer/test_gitmetadata.py b/tests/tracer/test_gitmetadata.py index 132c577358a..655398e8b0c 100644 --- a/tests/tracer/test_gitmetadata.py +++ b/tests/tracer/test_gitmetadata.py @@ -45,7 +45,7 @@ class GitMetadataTestCase(TracerTestCase): ) def 
test_gitmetadata_from_package(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: pass @@ -60,7 +60,7 @@ def test_gitmetadata_from_package(self): ) def test_gitmetadata_from_DD_TAGS(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: pass @@ -81,7 +81,7 @@ def test_gitmetadata_from_DD_TAGS(self): ) def test_gitmetadata_from_ENV(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: pass @@ -105,7 +105,7 @@ def test_gitmetadata_from_ENV(self): ) def test_gitmetadata_disabled(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: pass @@ -124,7 +124,7 @@ def test_gitmetadata_disabled(self): ) def test_gitmetadata_package_without_metadata(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: pass @@ -144,7 +144,7 @@ def test_gitmetadata_package_without_metadata(self): ) def test_gitmetadata_from_env_filtering_https(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: pass @@ -164,7 +164,7 @@ def test_gitmetadata_from_env_filtering_https(self): ) def test_gitmetadata_from_ddtags_filtering_https(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: pass @@ -185,7 +185,7 @@ def test_gitmetadata_from_ddtags_filtering_https(self): ) def test_gitmetadata_from_env_filtering_ssh(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: 
pass @@ -205,7 +205,7 @@ def test_gitmetadata_from_env_filtering_ssh(self): ) def test_gitmetadata_from_ddtags_filtering_ssh(self): tracer = ddtrace.Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("span") as s: pass diff --git a/tests/tracer/test_instance_config.py b/tests/tracer/test_instance_config.py index fb5235a8d77..457bf53a408 100644 --- a/tests/tracer/test_instance_config.py +++ b/tests/tracer/test_instance_config.py @@ -1,8 +1,8 @@ from unittest import TestCase from ddtrace import config -from ddtrace.pin import Pin from ddtrace.settings import IntegrationConfig +from ddtrace.trace import Pin class InstanceConfigTestCase(TestCase): diff --git a/tests/tracer/test_pin.py b/tests/tracer/test_pin.py index a1b83ca4c37..47712d2f421 100644 --- a/tests/tracer/test_pin.py +++ b/tests/tracer/test_pin.py @@ -2,7 +2,7 @@ import pytest -from ddtrace import Pin +from ddtrace.trace import Pin class PinTestCase(TestCase): diff --git a/tests/tracer/test_processors.py b/tests/tracer/test_processors.py index d06716e1825..f7bf413d916 100644 --- a/tests/tracer/test_processors.py +++ b/tests/tracer/test_processors.py @@ -26,6 +26,7 @@ from ddtrace.internal.processor.endpoint_call_counter import EndpointCallCounterProcessor from ddtrace.internal.sampling import SamplingMechanism from ddtrace.internal.sampling import SpanSamplingRule +from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from tests.utils import DummyTracer from tests.utils import DummyWriter from tests.utils import override_global_config @@ -244,7 +245,7 @@ def test_aggregator_partial_flush_2_spans(): def test_trace_top_level_span_processor_partial_flushing(): """Parent span and child span have the same service name""" tracer = Tracer() - tracer.configure( + tracer._configure( partial_flush_enabled=True, partial_flush_min_spans=2, writer=DummyWriter(), @@ -271,7 +272,7 @@ def test_trace_top_level_span_processor_same_service_name(): 
"""Parent span and child span have the same service name""" tracer = Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("parent", service="top_level_test") as parent: with tracer.trace("child") as child: @@ -285,7 +286,7 @@ def test_trace_top_level_span_processor_different_service_name(): """Parent span and child span have the different service names""" tracer = Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("parent", service="top_level_test_service") as parent: with tracer.trace("child", service="top_level_test_service2") as child: @@ -299,7 +300,7 @@ def test_trace_top_level_span_processor_orphan_span(): """Trace chuck does not contain parent span""" tracer = Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("parent") as parent: pass @@ -353,20 +354,34 @@ def test_span_creation_metrics(): mock_tm.assert_has_calls( [ - mock.call("tracers", "spans_created", 100, tags=(("integration_name", "datadog"),)), - mock.call("tracers", "spans_finished", 100, tags=(("integration_name", "datadog"),)), - mock.call("tracers", "spans_created", 100, tags=(("integration_name", "datadog"),)), - mock.call("tracers", "spans_finished", 100, tags=(("integration_name", "datadog"),)), - mock.call("tracers", "spans_created", 100, tags=(("integration_name", "datadog"),)), - mock.call("tracers", "spans_finished", 100, tags=(("integration_name", "datadog"),)), + mock.call( + TELEMETRY_NAMESPACE.TRACERS, "spans_created", 100, tags=(("integration_name", "datadog"),) + ), + mock.call( + TELEMETRY_NAMESPACE.TRACERS, "spans_finished", 100, tags=(("integration_name", "datadog"),) + ), + mock.call( + TELEMETRY_NAMESPACE.TRACERS, "spans_created", 100, tags=(("integration_name", "datadog"),) + ), + mock.call( + TELEMETRY_NAMESPACE.TRACERS, "spans_finished", 100, tags=(("integration_name", "datadog"),) + ), + mock.call( + 
TELEMETRY_NAMESPACE.TRACERS, "spans_created", 100, tags=(("integration_name", "datadog"),) + ), + mock.call( + TELEMETRY_NAMESPACE.TRACERS, "spans_finished", 100, tags=(("integration_name", "datadog"),) + ), ] ) mock_tm.reset_mock() aggr.shutdown(None) mock_tm.assert_has_calls( [ - mock.call("tracers", "spans_created", 1, tags=(("integration_name", "datadog"),)), - mock.call("tracers", "spans_finished", 1, tags=(("integration_name", "datadog"),)), + mock.call(TELEMETRY_NAMESPACE.TRACERS, "spans_created", 1, tags=(("integration_name", "datadog"),)), + mock.call( + TELEMETRY_NAMESPACE.TRACERS, "spans_finished", 1, tags=(("integration_name", "datadog"),) + ), ] ) @@ -619,7 +634,7 @@ def test_endpoint_call_counter_processor_disabled(): def test_endpoint_call_counter_processor_real_tracer(): tracer = Tracer() tracer._endpoint_call_counter_span_processor.enable() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("parent", service="top_level_test_service", resource="a", span_type=SpanTypes.WEB): with tracer.trace("child", service="top_level_test_service2"): @@ -642,7 +657,7 @@ def test_endpoint_call_counter_processor_real_tracer(): def test_trace_tag_processor_adds_chunk_root_tags(): tracer = Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) with tracer.trace("parent") as parent: with tracer.trace("child") as child: @@ -690,5 +705,5 @@ def test_tracer_reconfigured_with_active_span_does_not_crash(): with ddtrace.tracer.trace("regression1") as exploding_span: # Reconfiguring the tracer clears active traces # Calling .finish() manually bypasses the code that catches the exception - ddtrace.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=1) + ddtrace.tracer._configure(partial_flush_enabled=True, partial_flush_min_spans=1) exploding_span.finish() diff --git a/tests/tracer/test_propagation.py b/tests/tracer/test_propagation.py index 0d4c5d7c01d..533e4974250 100644 
--- a/tests/tracer/test_propagation.py +++ b/tests/tracer/test_propagation.py @@ -16,6 +16,8 @@ from ddtrace.constants import AUTO_REJECT from ddtrace.constants import USER_KEEP from ddtrace.constants import USER_REJECT +from ddtrace.internal.constants import _PROPAGATION_BEHAVIOR_IGNORE +from ddtrace.internal.constants import _PROPAGATION_BEHAVIOR_RESTART from ddtrace.internal.constants import _PROPAGATION_STYLE_BAGGAGE from ddtrace.internal.constants import _PROPAGATION_STYLE_NONE from ddtrace.internal.constants import _PROPAGATION_STYLE_W3C_TRACECONTEXT @@ -332,7 +334,7 @@ def test_asm_standalone_minimum_trace_per_minute_has_no_downstream_propagation( with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + tracer._configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) try: headers = { "x-datadog-trace-id": "1234", @@ -375,7 +377,7 @@ def test_asm_standalone_minimum_trace_per_minute_has_no_downstream_propagation( finally: with override_env({"DD_APPSEC_SCA_ENABLED": "0"}): ddtrace.config._reset() - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + tracer._configure(appsec_enabled=False, appsec_standalone_enabled=False) @pytest.mark.parametrize("sca_enabled", ["true", "false"]) @@ -390,7 +392,7 @@ def test_asm_standalone_missing_propagation_tags_no_appsec_event_trace_dropped( with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + tracer._configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) try: with tracer.trace("local_root_span0"): # First span should be kept, as we keep 1 per min @@ -420,11 +422,11 @@ def 
test_asm_standalone_missing_propagation_tags_no_appsec_event_trace_dropped( finally: with override_env({"DD_APPSEC_SCA_ENABLED": "0"}): ddtrace.config._reset() - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + tracer._configure(appsec_enabled=False, appsec_standalone_enabled=False) def test_asm_standalone_missing_propagation_tags_appsec_event_present_trace_kept(tracer): # noqa: F811 - tracer.configure(appsec_enabled=True, appsec_standalone_enabled=True) + tracer._configure(appsec_enabled=True, appsec_standalone_enabled=True) try: with tracer.trace("local_root_span0"): # First span should be kept, as we keep 1 per min @@ -454,7 +456,7 @@ def test_asm_standalone_missing_propagation_tags_appsec_event_present_trace_kept # Ensure span is user keep assert span._metrics["_sampling_priority_v1"] == USER_KEEP finally: - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + tracer._configure(appsec_enabled=False, appsec_standalone_enabled=False) @pytest.mark.parametrize("sca_enabled", ["true", "false"]) @@ -468,7 +470,7 @@ def test_asm_standalone_missing_appsec_tag_no_appsec_event_propagation_resets( with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + tracer._configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) try: with tracer.trace("local_root_span0"): # First span should be kept, as we keep 1 per min @@ -513,13 +515,13 @@ def test_asm_standalone_missing_appsec_tag_no_appsec_event_propagation_resets( finally: with override_env({"DD_APPSEC_SCA_ENABLED": "false"}): ddtrace.config._reset() - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + tracer._configure(appsec_enabled=False, appsec_standalone_enabled=False) def test_asm_standalone_missing_appsec_tag_appsec_event_present_trace_kept( tracer, # noqa: F811 ): - 
tracer.configure(appsec_enabled=True, appsec_standalone_enabled=True) + tracer._configure(appsec_enabled=True, appsec_standalone_enabled=True) try: with tracer.trace("local_root_span0"): # First span should be kept, as we keep 1 per min @@ -561,7 +563,7 @@ def test_asm_standalone_missing_appsec_tag_appsec_event_present_trace_kept( assert span._metrics["_sampling_priority_v1"] == USER_KEEP finally: - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + tracer._configure(appsec_enabled=False, appsec_standalone_enabled=False) @pytest.mark.parametrize("upstream_priority", ["1", "2"]) @@ -576,7 +578,7 @@ def test_asm_standalone_present_appsec_tag_no_appsec_event_propagation_set_to_us with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + tracer._configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) try: with tracer.trace("local_root_span0"): # First span should be kept, as we keep 1 per min @@ -630,7 +632,7 @@ def test_asm_standalone_present_appsec_tag_no_appsec_event_propagation_set_to_us finally: with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + tracer._configure(appsec_enabled=False, appsec_standalone_enabled=False) @pytest.mark.parametrize("upstream_priority", ["1", "2"]) @@ -645,7 +647,7 @@ def test_asm_standalone_present_appsec_tag_appsec_event_present_propagation_forc with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + tracer._configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) try: with tracer.trace("local_root_span0"): # First span should be kept, as we keep 1 per 
min @@ -699,7 +701,7 @@ def test_asm_standalone_present_appsec_tag_appsec_event_present_propagation_forc finally: with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + tracer._configure(appsec_enabled=False, appsec_standalone_enabled=False) def test_extract_with_baggage_http_propagation(tracer): # noqa: F811 @@ -1529,6 +1531,9 @@ def test_extract_tracecontext(headers, expected_context): HTTP_HEADER_PARENT_ID: "parent_id", HTTP_HEADER_SAMPLING_PRIORITY: "sample", } + +DATADOG_BAGGAGE_HEADERS_VALID = {**DATADOG_HEADERS_VALID, "baggage": "key1=val1,key2=val2"} + B3_HEADERS_VALID = { _HTTP_HEADER_B3_TRACE_ID: "80f198ee56343ba864fe8b2a57d3eff7", _HTTP_HEADER_B3_SPAN_ID: "a2fb4a1d1a96d312", @@ -1582,6 +1587,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_datadog_default", None, + None, DATADOG_HEADERS_VALID, { "trace_id": 13088165645273925489, @@ -1594,6 +1600,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_datadog_default_wsgi", None, + None, {get_wsgi_header(name): value for name, value in DATADOG_HEADERS_VALID.items()}, { "trace_id": 13088165645273925489, @@ -1606,6 +1613,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_datadog_no_priority", None, + None, DATADOG_HEADERS_VALID_NO_PRIORITY, { "trace_id": 13088165645273925489, @@ -1618,12 +1626,14 @@ def test_extract_tracecontext(headers, expected_context): ( "invalid_datadog", [PROPAGATION_STYLE_DATADOG], + None, DATADOG_HEADERS_INVALID, CONTEXT_EMPTY, ), ( "valid_datadog_explicit_style", [PROPAGATION_STYLE_DATADOG], + None, DATADOG_HEADERS_VALID, { "trace_id": 13088165645273925489, @@ -1636,6 +1646,7 @@ def test_extract_tracecontext(headers, expected_context): ( "invalid_datadog_negative_trace_id", [PROPAGATION_STYLE_DATADOG], + None, { HTTP_HEADER_TRACE_ID: "-1", HTTP_HEADER_PARENT_ID: "5678", @@ -1647,6 +1658,7 @@ def 
test_extract_tracecontext(headers, expected_context): ( "valid_datadog_explicit_style_wsgi", [PROPAGATION_STYLE_DATADOG], + None, {get_wsgi_header(name): value for name, value in DATADOG_HEADERS_VALID.items()}, { "trace_id": 13088165645273925489, @@ -1659,6 +1671,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_datadog_all_styles", [PROPAGATION_STYLE_DATADOG, PROPAGATION_STYLE_B3_MULTI, PROPAGATION_STYLE_B3_SINGLE], + None, DATADOG_HEADERS_VALID, { "trace_id": 13088165645273925489, @@ -1671,13 +1684,29 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_datadog_no_datadog_style", [PROPAGATION_STYLE_B3_MULTI], + None, DATADOG_HEADERS_VALID, CONTEXT_EMPTY, ), + ( + "valid_datadog_and_baggage_default", + None, + None, + DATADOG_BAGGAGE_HEADERS_VALID, + { + "trace_id": 13088165645273925489, + "span_id": 5678, + "sampling_priority": 1, + "dd_origin": "synthetics", + "meta": {"_dd.p.dm": "-3"}, + "baggage": {"key1": "val1", "key2": "val2"}, + }, + ), # B3 headers ( "valid_b3_simple", [PROPAGATION_STYLE_B3_MULTI], + None, B3_HEADERS_VALID, { "trace_id": TRACE_ID, @@ -1689,6 +1718,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_wsgi", [PROPAGATION_STYLE_B3_MULTI], + None, {get_wsgi_header(name): value for name, value in B3_HEADERS_VALID.items()}, { "trace_id": TRACE_ID, @@ -1700,6 +1730,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_flags", [PROPAGATION_STYLE_B3_MULTI], + None, { _HTTP_HEADER_B3_TRACE_ID: B3_HEADERS_VALID[_HTTP_HEADER_B3_TRACE_ID], _HTTP_HEADER_B3_SPAN_ID: B3_HEADERS_VALID[_HTTP_HEADER_B3_SPAN_ID], @@ -1715,6 +1746,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_with_parent_id", [PROPAGATION_STYLE_B3_MULTI], + None, { _HTTP_HEADER_B3_TRACE_ID: B3_HEADERS_VALID[_HTTP_HEADER_B3_TRACE_ID], _HTTP_HEADER_B3_SPAN_ID: B3_HEADERS_VALID[_HTTP_HEADER_B3_SPAN_ID], @@ -1731,6 +1763,7 @@ def test_extract_tracecontext(headers, expected_context): ( 
"valid_b3_only_trace_and_span_id", [PROPAGATION_STYLE_B3_MULTI], + None, { _HTTP_HEADER_B3_TRACE_ID: B3_HEADERS_VALID[_HTTP_HEADER_B3_TRACE_ID], _HTTP_HEADER_B3_SPAN_ID: B3_HEADERS_VALID[_HTTP_HEADER_B3_SPAN_ID], @@ -1745,6 +1778,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_only_trace_id", [PROPAGATION_STYLE_B3_MULTI], + None, { _HTTP_HEADER_B3_TRACE_ID: B3_HEADERS_VALID[_HTTP_HEADER_B3_TRACE_ID], }, @@ -1758,24 +1792,28 @@ def test_extract_tracecontext(headers, expected_context): ( "invalid_b3", [PROPAGATION_STYLE_B3_MULTI], + None, B3_HEADERS_INVALID, CONTEXT_EMPTY, ), ( "valid_b3_default_style", None, + None, B3_HEADERS_VALID, CONTEXT_EMPTY, ), ( "valid_b3_no_b3_style", [PROPAGATION_STYLE_B3_SINGLE], + None, B3_HEADERS_VALID, CONTEXT_EMPTY, ), ( "valid_b3_all_styles", [PROPAGATION_STYLE_DATADOG, PROPAGATION_STYLE_B3_MULTI, PROPAGATION_STYLE_B3_SINGLE], + None, B3_HEADERS_VALID, { "trace_id": TRACE_ID, @@ -1788,6 +1826,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_single_header_simple", [PROPAGATION_STYLE_B3_SINGLE], + None, B3_SINGLE_HEADERS_VALID, { "trace_id": TRACE_ID, @@ -1799,6 +1838,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_single_header_simple", [PROPAGATION_STYLE_B3_SINGLE], + None, { get_wsgi_header(_HTTP_HEADER_B3_SINGLE): B3_SINGLE_HEADERS_VALID[_HTTP_HEADER_B3_SINGLE], }, @@ -1812,6 +1852,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_single_header_simple", [PROPAGATION_STYLE_B3_SINGLE], + None, { get_wsgi_header(_HTTP_HEADER_B3_SINGLE): B3_SINGLE_HEADERS_VALID[_HTTP_HEADER_B3_SINGLE], }, @@ -1825,6 +1866,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_single_header_only_sampled", [PROPAGATION_STYLE_B3_SINGLE], + None, { _HTTP_HEADER_B3_SINGLE: "1", }, @@ -1838,6 +1880,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_single_header_only_trace_and_span_id", 
[PROPAGATION_STYLE_B3_SINGLE], + None, { _HTTP_HEADER_B3_SINGLE: "80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1", }, @@ -1851,12 +1894,14 @@ def test_extract_tracecontext(headers, expected_context): ( "invalid_b3_single_header", [PROPAGATION_STYLE_B3_SINGLE], + None, B3_SINGLE_HEADERS_INVALID, CONTEXT_EMPTY, ), ( "valid_b3_single_header_all_styles", [PROPAGATION_STYLE_DATADOG, PROPAGATION_STYLE_B3_MULTI, PROPAGATION_STYLE_B3_SINGLE], + None, B3_SINGLE_HEADERS_VALID, { "trace_id": TRACE_ID, @@ -1868,6 +1913,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_single_header_extra_data", [PROPAGATION_STYLE_B3_SINGLE], + None, {_HTTP_HEADER_B3_SINGLE: B3_SINGLE_HEADERS_VALID[_HTTP_HEADER_B3_SINGLE] + "-05e3ac9a4f6e3b90-extra-data-here"}, { "trace_id": TRACE_ID, @@ -1879,27 +1925,22 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_b3_single_header_default_style", None, + None, B3_SINGLE_HEADERS_VALID, CONTEXT_EMPTY, ), ( "valid_b3_single_header_no_b3_single_header_style", [PROPAGATION_STYLE_B3_MULTI], + None, B3_SINGLE_HEADERS_VALID, CONTEXT_EMPTY, ), - ( - "baggage_case_insensitive", - None, - {"BAgGage": "key1=val1,key2=val2"}, - { - "baggage": {"key1": "val1", "key2": "val2"}, - }, - ), # All valid headers ( "valid_all_headers_default_style", None, + None, ALL_HEADERS, { "trace_id": 13088165645273925489, @@ -1928,6 +1969,7 @@ def test_extract_tracecontext(headers, expected_context): PROPAGATION_STYLE_B3_SINGLE, _PROPAGATION_STYLE_W3C_TRACECONTEXT, ], + None, ALL_HEADERS, { "trace_id": 13088165645273925489, @@ -1968,6 +2010,7 @@ def test_extract_tracecontext(headers, expected_context): PROPAGATION_STYLE_B3_SINGLE, _PROPAGATION_STYLE_W3C_TRACECONTEXT, ], + None, {get_wsgi_header(name): value for name, value in ALL_HEADERS.items()}, { "trace_id": 13088165645273925489, @@ -2003,6 +2046,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_all_headers_datadog_style", [PROPAGATION_STYLE_DATADOG], + None, 
ALL_HEADERS, { "trace_id": 13088165645273925489, @@ -2015,6 +2059,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_all_headers_datadog_style_wsgi", [PROPAGATION_STYLE_DATADOG], + None, {get_wsgi_header(name): value for name, value in ALL_HEADERS.items()}, { "trace_id": 13088165645273925489, @@ -2027,6 +2072,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_all_headers_b3_style", [PROPAGATION_STYLE_B3_MULTI], + None, ALL_HEADERS, { "trace_id": TRACE_ID, @@ -2038,6 +2084,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_all_headers_b3_style_wsgi", [PROPAGATION_STYLE_B3_MULTI], + None, {get_wsgi_header(name): value for name, value in ALL_HEADERS.items()}, { "trace_id": TRACE_ID, @@ -2049,6 +2096,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_all_headers_both_b3_styles", [PROPAGATION_STYLE_B3_MULTI, PROPAGATION_STYLE_B3_SINGLE], + None, ALL_HEADERS, { "trace_id": TRACE_ID, @@ -2060,6 +2108,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_all_headers_b3_single_style", [PROPAGATION_STYLE_B3_SINGLE], + None, ALL_HEADERS, { "trace_id": TRACE_ID, @@ -2072,6 +2121,7 @@ def test_extract_tracecontext(headers, expected_context): # name, styles, headers, expected_context, "none_style", [_PROPAGATION_STYLE_NONE], + None, ALL_HEADERS, { "trace_id": None, @@ -2080,23 +2130,11 @@ def test_extract_tracecontext(headers, expected_context): "dd_origin": None, }, ), - ( - # name, styles, headers, expected_context, - "none_and_other_prop_style_still_extracts", - [PROPAGATION_STYLE_DATADOG, _PROPAGATION_STYLE_NONE], - ALL_HEADERS, - { - "trace_id": 13088165645273925489, - "span_id": 5678, - "sampling_priority": 1, - "dd_origin": "synthetics", - "meta": {"_dd.p.dm": "-3"}, - }, - ), # Testing that order matters ( "order_matters_B3_SINGLE_HEADER_first", [PROPAGATION_STYLE_B3_SINGLE, PROPAGATION_STYLE_B3_MULTI, PROPAGATION_STYLE_DATADOG], + None, B3_SINGLE_HEADERS_VALID, { 
"trace_id": TRACE_ID, @@ -2113,6 +2151,7 @@ def test_extract_tracecontext(headers, expected_context): PROPAGATION_STYLE_DATADOG, _PROPAGATION_STYLE_W3C_TRACECONTEXT, ], + None, B3_HEADERS_VALID, { "trace_id": TRACE_ID, @@ -2124,6 +2163,7 @@ def test_extract_tracecontext(headers, expected_context): ( "order_matters_B3_second_no_Datadog_headers", [PROPAGATION_STYLE_DATADOG, PROPAGATION_STYLE_B3_MULTI], + None, B3_HEADERS_VALID, { "trace_id": TRACE_ID, @@ -2135,6 +2175,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_all_headers_b3_single_style_wsgi", [PROPAGATION_STYLE_B3_SINGLE], + None, {get_wsgi_header(name): value for name, value in ALL_HEADERS.items()}, { "trace_id": TRACE_ID, @@ -2153,6 +2194,7 @@ def test_extract_tracecontext(headers, expected_context): _PROPAGATION_STYLE_W3C_TRACECONTEXT, PROPAGATION_STYLE_B3_SINGLE, ], + None, DATADOG_TRACECONTEXT_MATCHING_TRACE_ID_HEADERS, { "trace_id": _get_64_lowest_order_bits_as_int(TRACE_ID), @@ -2170,6 +2212,7 @@ def test_extract_tracecontext(headers, expected_context): ( "no_additional_tracestate_support_when_present_but_trace_ids_do_not_match", [PROPAGATION_STYLE_DATADOG, _PROPAGATION_STYLE_W3C_TRACECONTEXT], + None, {**DATADOG_HEADERS_VALID, **TRACECONTEXT_HEADERS_VALID_RUM_NO_SAMPLING_DECISION}, { "trace_id": 13088165645273925489, @@ -2191,18 +2234,21 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_all_headers_no_style", [], + None, ALL_HEADERS, CONTEXT_EMPTY, ), ( "valid_all_headers_no_style_wsgi", [], + None, {get_wsgi_header(name): value for name, value in ALL_HEADERS.items()}, CONTEXT_EMPTY, ), ( "datadog_tracecontext_conflicting_span_ids", [PROPAGATION_STYLE_DATADOG, _PROPAGATION_STYLE_W3C_TRACECONTEXT], + None, { HTTP_HEADER_TRACE_ID: "9291375655657946024", HTTP_HEADER_PARENT_ID: "15", @@ -2215,6 +2261,144 @@ def test_extract_tracecontext(headers, expected_context): "meta": {"_dd.p.dm": "-3", LAST_DD_PARENT_ID_KEY: "000000000000000f"}, }, ), + ( + 
"valid_datadog_default_w_restart_behavior", + None, + _PROPAGATION_BEHAVIOR_RESTART, + DATADOG_HEADERS_VALID, + { + "trace_id": None, + "span_id": None, + "sampling_priority": None, + "dd_origin": None, + "span_links": [ + SpanLink( + trace_id=13088165645273925489, + span_id=5678, + tracestate=None, + flags=1, + attributes={"reason": "propagation_behavior_extract", "context_headers": "datadog"}, + ) + ], + }, + ), + ( + "valid_datadog_tracecontext_and_baggage_default_w_restart_behavior", + None, + _PROPAGATION_BEHAVIOR_RESTART, + {**DATADOG_BAGGAGE_HEADERS_VALID, **TRACECONTEXT_HEADERS_VALID}, + { + "trace_id": None, + "span_id": None, + "sampling_priority": None, + "dd_origin": None, + "baggage": {"key1": "val1", "key2": "val2"}, + "span_links": [ + SpanLink( + trace_id=13088165645273925489, + span_id=5678, + tracestate=None, + flags=1, + attributes={"reason": "propagation_behavior_extract", "context_headers": "datadog"}, + ) + ], + }, + ), + # All valid headers + ( + "valid_all_headers_default_style_w_restart_behavior", + None, + _PROPAGATION_BEHAVIOR_RESTART, + ALL_HEADERS, + { + "trace_id": None, + "span_id": None, + "sampling_priority": None, + "dd_origin": None, + "span_links": [ + SpanLink( + trace_id=13088165645273925489, + span_id=5678, + tracestate=None, + flags=1, + attributes={"reason": "propagation_behavior_extract", "context_headers": "datadog"}, + ) + ], + }, + ), + ( + "valid_all_headers_trace_context_datadog_style_w_restart_behavior", + [_PROPAGATION_STYLE_W3C_TRACECONTEXT, PROPAGATION_STYLE_DATADOG], + _PROPAGATION_BEHAVIOR_RESTART, + ALL_HEADERS, + { + "trace_id": None, + "span_id": None, + "sampling_priority": None, + "dd_origin": None, + "span_links": [ + SpanLink( + trace_id=171395628812617415352188477958425669623, + span_id=67667974448284343, + tracestate="dd=s:2;o:rum;t.dm:-4;t.usr.id:baz64,congo=t61rcWkgMzE", + flags=1, + attributes={"reason": "propagation_behavior_extract", "context_headers": "tracecontext"}, + ) + ], + }, + ), + ( + 
"valid_all_headers_all_styles_w_restart_behavior", + [PROPAGATION_STYLE_B3_MULTI, PROPAGATION_STYLE_B3_SINGLE, _PROPAGATION_STYLE_W3C_TRACECONTEXT], + _PROPAGATION_BEHAVIOR_RESTART, + ALL_HEADERS, + { + "trace_id": None, + "span_id": None, + "sampling_priority": None, + "dd_origin": None, + "span_links": [ + SpanLink( + trace_id=171395628812617415352188477958425669623, + span_id=67667974448284343, + tracestate=None, + flags=1, + attributes={"reason": "propagation_behavior_extract", "context_headers": "b3multi"}, + ) + ], + }, + ), + ( + "valid_all_headers_and_baggage_trace_context_datadog_style_w_restart_behavior", + None, + _PROPAGATION_BEHAVIOR_RESTART, + {**ALL_HEADERS, **DATADOG_BAGGAGE_HEADERS_VALID}, + { + "trace_id": None, + "span_id": None, + "sampling_priority": None, + "dd_origin": None, + "baggage": {"key1": "val1", "key2": "val2"}, + "span_links": [ + SpanLink( + trace_id=13088165645273925489, + span_id=5678, + tracestate=None, + flags=1, + attributes={"reason": "propagation_behavior_extract", "context_headers": "datadog"}, + ) + ], + }, + ), + ( + "baggage_case_insensitive", + None, + None, + {"BAgGage": "key1=val1,key2=val2"}, + { + "baggage": {"key1": "val1", "key2": "val2"}, + }, + ), ] # Only add fixtures here if they can't pass both test_propagation_extract_env @@ -2225,6 +2409,7 @@ def test_extract_tracecontext(headers, expected_context): # can't be tested correctly via test_propagation_extract_w_config. 
It is tested separately "valid_tracecontext_simple", [_PROPAGATION_STYLE_W3C_TRACECONTEXT], + None, TRACECONTEXT_HEADERS_VALID_BASIC, { "trace_id": TRACE_ID, @@ -2241,6 +2426,7 @@ def test_extract_tracecontext(headers, expected_context): ( "valid_tracecontext_rum_no_sampling_decision", [_PROPAGATION_STYLE_W3C_TRACECONTEXT], + None, TRACECONTEXT_HEADERS_VALID_RUM_NO_SAMPLING_DECISION, { "trace_id": TRACE_ID, @@ -2252,11 +2438,51 @@ def test_extract_tracecontext(headers, expected_context): }, }, ), + ( + "none_and_other_prop_style_still_extracts", + [PROPAGATION_STYLE_DATADOG, _PROPAGATION_STYLE_NONE], + None, + ALL_HEADERS, + { + "trace_id": 13088165645273925489, + "span_id": 5678, + "sampling_priority": 1, + "dd_origin": "synthetics", + "meta": {"_dd.p.dm": "-3"}, + }, + ), + # Only works for env since config is modified at startup to set + # propagation_style_extract to [None] if DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT is set to ignore + ( + "valid_datadog_default_w_ignore_behavior", + None, + _PROPAGATION_BEHAVIOR_IGNORE, + DATADOG_HEADERS_VALID, + CONTEXT_EMPTY, + ), + ( + # name, styles, headers, expected_context, + "none_and_other_prop_style_still_extracts", + [PROPAGATION_STYLE_DATADOG, _PROPAGATION_STYLE_NONE], + None, + ALL_HEADERS, + { + "trace_id": 13088165645273925489, + "span_id": 5678, + "sampling_priority": 1, + "dd_origin": "synthetics", + "meta": {"_dd.p.dm": "-3"}, + }, + ), ] -@pytest.mark.parametrize("name,styles,headers,expected_context", EXTRACT_FIXTURES + EXTRACT_FIXTURES_ENV_ONLY) -def test_propagation_extract_env(name, styles, headers, expected_context, run_python_code_in_subprocess): +@pytest.mark.parametrize( + "name,styles,extract_behavior,headers,expected_context", EXTRACT_FIXTURES + EXTRACT_FIXTURES_ENV_ONLY +) +def test_propagation_extract_env( + name, styles, extract_behavior, headers, expected_context, run_python_code_in_subprocess +): # Execute the test code in isolation to ensure env variables work as expected code = """ import json 
@@ -2274,18 +2500,24 @@ def test_propagation_extract_env(name, styles, headers, expected_context, run_py env = os.environ.copy() if styles is not None: env["DD_TRACE_PROPAGATION_STYLE"] = ",".join(styles) + if extract_behavior is not None: + env["DD_TRACE_PROPAGATION_BEHAVIOR_EXTRACT"] = extract_behavior stdout, stderr, status, _ = run_python_code_in_subprocess(code=code, env=env) print(stderr, stdout) assert status == 0, (stdout, stderr) -@pytest.mark.parametrize("name,styles,headers,expected_context", EXTRACT_FIXTURES) -def test_propagation_extract_w_config(name, styles, headers, expected_context, run_python_code_in_subprocess): +@pytest.mark.parametrize("name,styles,extract_behavior,headers,expected_context", EXTRACT_FIXTURES) +def test_propagation_extract_w_config( + name, styles, extract_behavior, headers, expected_context, run_python_code_in_subprocess +): # Setting via ddtrace.config works as expected too # DEV: This also helps us get code coverage reporting overrides = {} if styles is not None: overrides["_propagation_style_extract"] = styles + if extract_behavior is not None: + overrides["_propagation_behavior_extract"] = extract_behavior with override_global_config(overrides): context = HTTPPropagator.extract(headers) if not expected_context.get("tracestate"): diff --git a/tests/tracer/test_sampler.py b/tests/tracer/test_sampler.py index 9aefde4c284..54c9c1abef3 100644 --- a/tests/tracer/test_sampler.py +++ b/tests/tracer/test_sampler.py @@ -192,7 +192,7 @@ def test_sample_rate_deviation_64bit_trace_id(): def _test_sample_rate_deviation(): for sample_rate in [0.1, 0.25, 0.5, 1]: tracer = DummyTracer() - tracer.configure(sampler=RateByServiceSampler()) + tracer._configure(sampler=RateByServiceSampler()) tracer._sampler.set_sample_rate(sample_rate) iterations = int(1e4 / sample_rate) @@ -769,7 +769,7 @@ def test_datadog_sampler_init(): @mock.patch("ddtrace._trace.sampler.RateSampler.sample") def test_datadog_sampler_sample_no_rules(mock_sample, 
dummy_tracer): sampler = DatadogSampler() - dummy_tracer.configure(sampler=sampler) + dummy_tracer._configure(sampler=sampler) mock_sample.return_value = True dummy_tracer.trace("test").finish() @@ -935,7 +935,7 @@ def sample(self, span): ], ) def test_datadog_sampler_sample_rules(sampler, sampling_priority, sampling_mechanism, rule, limit, dummy_tracer): - dummy_tracer.configure(sampler=sampler) + dummy_tracer._configure(sampler=sampler) dummy_tracer.trace("span").finish() spans = dummy_tracer.pop() assert len(spans) > 0, "A tracer using DatadogSampler should always emit its spans" @@ -952,7 +952,7 @@ def test_datadog_sampler_sample_rules(sampler, sampling_priority, sampling_mecha def test_datadog_sampler_tracer_child(dummy_tracer): rule = SamplingRule(sample_rate=1.0) sampler = DatadogSampler(rules=[rule]) - dummy_tracer.configure(sampler=sampler) + dummy_tracer._configure(sampler=sampler) with dummy_tracer.trace("parent.span"): dummy_tracer.trace("child.span").finish() @@ -978,7 +978,7 @@ def test_datadog_sampler_tracer_child(dummy_tracer): def test_datadog_sampler_tracer_start_span(dummy_tracer): rule = SamplingRule(sample_rate=1.0) sampler = DatadogSampler(rules=[rule]) - dummy_tracer.configure(sampler=sampler) + dummy_tracer._configure(sampler=sampler) dummy_tracer.start_span("test.span").finish() spans = dummy_tracer.pop() assert len(spans) == 1, "A tracer using a DatadogSampler should emit all of its spans" diff --git a/tests/tracer/test_single_span_sampling_rules.py b/tests/tracer/test_single_span_sampling_rules.py index fa68caa38d9..3ebffed00d5 100644 --- a/tests/tracer/test_single_span_sampling_rules.py +++ b/tests/tracer/test_single_span_sampling_rules.py @@ -130,7 +130,7 @@ def test_env_rules_cause_matching_span_to_be_sampled(): assert sampling_rules[0]._service_matcher.pattern == "test_service" assert sampling_rules[0]._name_matcher.pattern == "test_name" tracer = Tracer() - tracer.configure(writer=DummyWriter()) + 
tracer._configure(writer=DummyWriter()) span = traced_function(sampling_rules[0], tracer=tracer) assert_sampling_decision_tags(span) @@ -142,7 +142,7 @@ def test_env_rules_dont_cause_non_matching_span_to_be_sampled(): assert sampling_rules[0]._service_matcher.pattern == "test_ser" assert sampling_rules[0]._name_matcher.pattern == "test_na" tracer = Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) span = traced_function(sampling_rules[0], tracer=tracer) assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None) @@ -154,7 +154,7 @@ def test_single_span_rules_not_applied_when_span_sampled_by_trace_sampling(): assert sampling_rules[0]._service_matcher.pattern == "test_service" assert sampling_rules[0]._name_matcher.pattern == "test_name" tracer = Tracer() - tracer.configure(writer=DummyWriter()) + tracer._configure(writer=DummyWriter()) span = traced_function(sampling_rules[0], tracer=tracer, trace_sampling=True) assert sampling_rules[0].match(span) is True assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None, trace_sampling=True) diff --git a/tests/tracer/test_trace_utils.py b/tests/tracer/test_trace_utils.py index e9869c13b17..a7604636b62 100644 --- a/tests/tracer/test_trace_utils.py +++ b/tests/tracer/test_trace_utils.py @@ -13,7 +13,6 @@ import mock import pytest -from ddtrace import Pin from ddtrace import Tracer from ddtrace import config from ddtrace._trace.context import Context @@ -29,6 +28,7 @@ from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID from ddtrace.settings import Config from ddtrace.settings import IntegrationConfig +from ddtrace.trace import Pin from tests.appsec.utils import asm_context from tests.utils import override_global_config diff --git a/tests/tracer/test_tracer.py b/tests/tracer/test_tracer.py index cae00259086..451e465b5a9 100644 --- a/tests/tracer/test_tracer.py +++ b/tests/tracer/test_tracer.py @@ -251,7 +251,7 @@ def 
wrapped_function(param, kw_param=None): self.assertEqual(42, kw_param) # set the custom wrap factory after the wrapper has been called - self.tracer.configure(wrap_executor=wrap_executor) + self.tracer._configure(wrap_executor=wrap_executor) # call the function expecting that the custom tracing wrapper is used wrapped_function(42, kw_param=42) @@ -275,7 +275,7 @@ def wrapped_function(param, kw_param=None): self.assertEqual(42, kw_param) # set the custom wrap factory after the wrapper has been called - self.tracer.configure(wrap_executor=wrap_executor) + self.tracer._configure(wrap_executor=wrap_executor) # call the function expecting that the custom tracing wrapper is used with self.trace("wrap.parent", service="webserver"): @@ -487,26 +487,26 @@ def test_adding_mapped_services(self): def test_configure_dogstatsd_url_host_port(self): tracer = Tracer() - tracer.configure(dogstatsd_url="foo:1234") + tracer._configure(dogstatsd_url="foo:1234") assert tracer._writer.dogstatsd.host == "foo" assert tracer._writer.dogstatsd.port == 1234 tracer = Tracer() writer = AgentWriter("http://localhost:8126") - tracer.configure(writer=writer, dogstatsd_url="foo:1234") + tracer._configure(writer=writer, dogstatsd_url="foo:1234") assert tracer._writer.dogstatsd.host == "foo" assert tracer._writer.dogstatsd.port == 1234 def test_configure_dogstatsd_url_socket(self): tracer = Tracer() - tracer.configure(dogstatsd_url="unix:///foo.sock") + tracer._configure(dogstatsd_url="unix:///foo.sock") assert tracer._writer.dogstatsd.host is None assert tracer._writer.dogstatsd.port is None assert tracer._writer.dogstatsd.socket_path == "/foo.sock" tracer = Tracer() writer = AgentWriter("http://localhost:8126") - tracer.configure(writer=writer, dogstatsd_url="unix:///foo.sock") + tracer._configure(writer=writer, dogstatsd_url="unix:///foo.sock") assert tracer._writer.dogstatsd.host is None assert tracer._writer.dogstatsd.port is None assert tracer._writer.dogstatsd.socket_path == "/foo.sock" @@ 
-680,7 +680,7 @@ def test_tracer_configure_writer_stop_unstarted(): orig_writer = t._writer # Stop should be called when replacing the writer. - t.configure(hostname="localhost", port=8126) + t._configure(hostname="localhost", port=8126) assert orig_writer.stop.called @@ -693,7 +693,7 @@ def test_tracer_configure_writer_stop_started(): with t.trace("something"): pass - t.configure(hostname="localhost", port=8126) + t._configure(hostname="localhost", port=8126) orig_writer.stop.assert_called_once_with() @@ -962,7 +962,7 @@ def test_detect_agentless_env_with_lambda(self): assert not has_aws_lambda_agent_extension() tracer = Tracer() assert isinstance(tracer._writer, LogWriter) - tracer.configure(enabled=True) + tracer._configure(enabled=True) assert isinstance(tracer._writer, LogWriter) @run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func")) @@ -979,7 +979,7 @@ def mock_os_path_exists(path): assert isinstance(tracer._writer, AgentWriter) assert tracer._writer._sync_mode - tracer.configure(enabled=False) + tracer._configure(enabled=False) assert isinstance(tracer._writer, AgentWriter) assert tracer._writer._sync_mode @@ -1238,11 +1238,7 @@ class FilterAll(object): def process_trace(self, trace): return None - tracer.configure( - settings={ - "FILTERS": [FilterAll()], - } - ) + tracer._configure(trace_processors=[FilterAll()]) with tracer.trace("root"): with tracer.trace("child"): @@ -1261,11 +1257,7 @@ def process_trace(self, trace): s.set_tag(self.key, self.value) return trace - tracer.configure( - settings={ - "FILTERS": [FilterMutate("boop", "beep")], - } - ) + tracer._configure(trace_processors=[FilterMutate("boop", "beep")]) with tracer.trace("root"): with tracer.trace("child"): @@ -1278,11 +1270,7 @@ def process_trace(self, trace): assert s2.get_tag("boop") == "beep" # Test multiple filters - tracer.configure( - settings={ - "FILTERS": [FilterMutate("boop", "beep"), FilterMutate("mats", "sundin")], - } - ) + 
tracer._configure(trace_processors=[FilterMutate("boop", "beep"), FilterMutate("mats", "sundin")]) with tracer.trace("root"): with tracer.trace("child"): @@ -1298,11 +1286,7 @@ class FilterBroken(object): def process_trace(self, trace): _ = 1 / 0 - tracer.configure( - settings={ - "FILTERS": [FilterBroken()], - } - ) + tracer._configure(trace_processors=[FilterBroken()]) with tracer.trace("root"): with tracer.trace("child"): @@ -1311,11 +1295,7 @@ def process_trace(self, trace): spans = test_spans.pop() assert len(spans) == 2 - tracer.configure( - settings={ - "FILTERS": [FilterMutate("boop", "beep"), FilterBroken()], - } - ) + tracer._configure(trace_processors=[FilterMutate("boop", "beep"), FilterBroken()]) with tracer.trace("root"): with tracer.trace("child"): pass @@ -1397,22 +1377,22 @@ def test_partial_flush_too_few(self): assert [s.name for s in traces[0]] == ["root", "child0", "child1", "child2", "child3", "child4"] def test_partial_flush_configure(self): - self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=5) + self.tracer._configure(partial_flush_enabled=True, partial_flush_min_spans=5) self.test_partial_flush() def test_partial_flush_too_many_configure(self): - self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=1) + self.tracer._configure(partial_flush_enabled=True, partial_flush_min_spans=1) self.test_partial_flush_too_many() def test_partial_flush_too_few_configure(self): - self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=6) + self.tracer._configure(partial_flush_enabled=True, partial_flush_min_spans=6) self.test_partial_flush_too_few() @TracerTestCase.run_in_subprocess( env_overrides=dict(DD_TRACE_PARTIAL_FLUSH_ENABLED="false", DD_TRACE_PARTIAL_FLUSH_MIN_SPANS="6") ) def test_partial_flush_configure_precedence(self): - self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=5) + self.tracer._configure(partial_flush_enabled=True, partial_flush_min_spans=5) 
self.test_partial_flush() def _test_partial_flush(self): @@ -1708,16 +1688,16 @@ def test_configure_url_partial(): import ddtrace tracer = ddtrace.Tracer() - tracer.configure(hostname="abc") + tracer._configure(hostname="abc") assert tracer._writer.agent_url == "http://abc:8126" - tracer.configure(port=123) + tracer._configure(port=123) assert tracer._writer.agent_url == "http://abc:123" tracer = ddtrace.Tracer(url="http://abc") assert tracer._writer.agent_url == "http://abc" - tracer.configure(port=123) + tracer._configure(port=123) assert tracer._writer.agent_url == "http://abc:123" - tracer.configure(port=431) + tracer._configure(port=431) assert tracer._writer.agent_url == "http://abc:431" @@ -1932,7 +1912,7 @@ def test_tracer_api_version(): t = Tracer() assert isinstance(t._writer._encoder, MsgpackEncoderV05) - t.configure(api_version="v0.4") + t._configure(api_version="v0.4") assert isinstance(t._writer._encoder, MsgpackEncoderV04) @@ -1954,7 +1934,7 @@ def process_trace(self, trace): t = Tracer() t.enabled = enabled - t.configure(settings={"FILTERS": [DropAllFilter()]}) + t._configure(trace_processors=[DropAllFilter()]) for _ in range(5): with t.trace("test") as span: @@ -2053,7 +2033,7 @@ def test_asm_standalone_configuration(sca_enabled, appsec_enabled, iast_enabled) with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() tracer = ddtrace.Tracer() - tracer.configure(appsec_enabled=appsec_enabled, iast_enabled=iast_enabled, appsec_standalone_enabled=True) + tracer._configure(appsec_enabled=appsec_enabled, iast_enabled=iast_enabled, appsec_standalone_enabled=True) if sca_enabled == "true": assert bool(ddtrace.config._sca_enabled) is True assert tracer.enabled is False @@ -2067,7 +2047,7 @@ def test_asm_standalone_configuration(sca_enabled, appsec_enabled, iast_enabled) # reset tracer values with override_env({"DD_APPSEC_SCA_ENABLED": "false"}): ddtrace.config._reset() - tracer.configure(appsec_enabled=False, iast_enabled=False, 
appsec_standalone_enabled=False) + tracer._configure(appsec_enabled=False, iast_enabled=False, appsec_standalone_enabled=False) def test_gc_not_used_on_root_spans(): @@ -2088,3 +2068,27 @@ def test_gc_not_used_on_root_spans(): # print("referrers:", [f"object {objects.index(r)}" for r in gc.get_referrers(obj)[:-2]]) # print("referents:", [f"object {objects.index(r)}" if r in objects else r for r in gc.get_referents(obj)]) # print("--------------------") + + +@pytest.mark.subprocess() +def test_multiple_tracer_instances(): + import warnings + + with warnings.catch_warnings(record=True) as warns: + warnings.simplefilter("always") + import ddtrace + + assert ddtrace.tracer is not None + for w in warns: + # Ensure the warning is not about multiple tracer instances is not logged when importing ddtrace + assert "Support for multiple Tracer instances is deprecated" not in str(w.message) + + warns.clear() + t = ddtrace.Tracer() + # TODO: Update this assertion when the deprecation is removed and the tracer becomes a singleton + assert t is not ddtrace.tracer + assert len(warns) == 1 + assert ( + str(warns[0].message) == "Support for multiple Tracer instances is deprecated and will be " + "removed in version '3.0.0'. Use ddtrace.tracer instead." 
+ ) diff --git a/tests/utils.py b/tests/utils.py index de0129f75a3..748731ae459 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -123,6 +123,7 @@ def override_global_config(values): "_health_metrics_enabled", "_propagation_style_extract", "_propagation_style_inject", + "_propagation_behavior_extract", "_x_datadog_tags_max_length", "_128_bit_trace_id_enabled", "_x_datadog_tags_enabled", @@ -159,6 +160,7 @@ def override_global_config(values): "_llmobs_enabled", "_llmobs_sample_rate", "_llmobs_ml_app", + "_extract_ignore_active_span", "_llmobs_agentless_enabled", "_data_streams_enabled", ] @@ -613,7 +615,7 @@ def __init__(self, *args, **kwargs): super(DummyTracer, self).__init__() self._trace_flush_disabled_via_env = not asbool(os.getenv("_DD_TEST_TRACE_FLUSH_ENABLED", True)) self._trace_flush_enabled = True - self.configure(*args, **kwargs) + self._configure(*args, **kwargs) @property def agent_url(self): @@ -645,6 +647,9 @@ def pop_traces(self): return traces def configure(self, *args, **kwargs): + self._configure(*args, **kwargs) + + def _configure(self, *args, **kwargs): assert "writer" not in kwargs or isinstance( kwargs["writer"], DummyWriterMixin ), "cannot configure writer of DummyTracer" @@ -655,7 +660,7 @@ def configure(self, *args, **kwargs): kwargs["writer"] = DummyWriter( trace_flush_enabled=check_test_agent_status() if not self._trace_flush_disabled_via_env else False ) - super(DummyTracer, self).configure(*args, **kwargs) + super(DummyTracer, self)._configure(*args, **kwargs) class TestSpan(Span): @@ -1155,11 +1160,6 @@ def wrapper(wrapped, instance, args, kwargs): else: clsname = "" - if include_tracer: - tracer = Tracer() - else: - tracer = ddtrace.tracer - module = inspect.getmodule(wrapped) # Use the fully qualified function name as a unique test token to @@ -1173,14 +1173,14 @@ def wrapper(wrapped, instance, args, kwargs): with snapshot_context( token, ignores=ignores, - tracer=tracer, + tracer=ddtrace.tracer, async_mode=async_mode, 
variants=variants, wait_for_num_traces=wait_for_num_traces, ): # Run the test. if include_tracer: - kwargs["tracer"] = tracer + kwargs["tracer"] = ddtrace.tracer return wrapped(*args, **kwargs) return wrapper diff --git a/tests/webclient.py b/tests/webclient.py index 7254a0896dd..33e5751baf6 100644 --- a/tests/webclient.py +++ b/tests/webclient.py @@ -3,9 +3,9 @@ import requests from ddtrace._trace.context import Context -from ddtrace.filters import TraceFilter from ddtrace.internal.utils.retry import retry from ddtrace.propagation.http import HTTPPropagator +from ddtrace.trace import TraceFilter class Client(object):