diff --git a/.github/workflows/api-changes.yml b/.github/workflows/api-changes.yml
deleted file mode 100644
index 667b179712b..00000000000
--- a/.github/workflows/api-changes.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-name: API Changes
-
-on:
- pull_request:
- branches:
- - master
- - release-**
-
-jobs:
- godoc:
- uses: TykTechnologies/github-actions/.github/workflows/godoc.yml@main
- secrets:
- ORG_GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }}
- with:
- go-version: 1.22
diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml
index 95acfe3cc6a..d4d1b72eae2 100644
--- a/.github/workflows/ci-tests.yml
+++ b/.github/workflows/ci-tests.yml
@@ -17,55 +17,109 @@ on:
- master
- release-**
-# Only have one runner per PR, and per merged commit.
-#
-# - As a PR gets new commits, any old run jobs get cancelled (PR number)
-# - As a commit gets merged, it doesn't cancel previous running PR's (github.sha)
concurrency:
- group: ${{ github.event.pull_request.number || github.sha }}-ci-tests
- cancel-in-progress: true
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
env:
PYTHON_VERSION: "3.11"
PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: python
jobs:
+ golangci-lint:
+ runs-on: ubuntu-latest
+ if: ${{ !github.event.pull_request.draft }}
+ steps:
+ - name: "Checkout PR"
+ uses: TykTechnologies/github-actions/.github/actions/checkout-pr@main
+ with:
+ token: ${{ secrets.ORG_GH_TOKEN }}
+
+ - name: "Get base ref"
+ run: |
+ if [ -n "${{ github.base_ref }}" ]; then git fetch origin "${{ github.base_ref }}"; fi
+ if [ -n "${{ github.base_ref }}" ]; then git rev-parse "origin/${{ github.base_ref }}"; fi
+
+ - name: Setup Golang
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+ cache-dependency-path: go.sum
+
+ - name: Cache
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/.cache/golangci-lint
+ key: 'golangci-lint-${{ runner.os }}-${{ hashFiles(''**/go.sum'') }}'
+ restore-keys: |
+ golangci-lint-${{ runner.os }}-
+
+ - name: Setup CI Tooling
+ uses: shrink/actions-docker-extract@v3
+ with:
+ image: tykio/ci-tools:latest
+ path: /usr/local/bin/golangci-lint
+ destination: /usr/local/bin
+
+ - run: golangci-lint version && golangci-lint cache status
+
+ - name: golangci-lint
+ if: ${{ github.event_name == 'pull_request' }}
+ run: |
+ golangci-lint run --out-format colored-line-number,checkstyle:golangci-lint-report.json --issues-exit-code=0 --new-from-rev=origin/${{ github.base_ref }} -v ./...
+
+ - name: golangci-lint-on-push
+ if: ${{ github.event_name == 'push' }}
+ run: |
+ golangci-lint run --out-format checkstyle:golangci-lint-report.json --issues-exit-code=0 -v ./...
+
+ - uses: actions/upload-artifact@v4
+ if: ${{ always() }}
+ with:
+ name: golangcilint
+ retention-days: 1
+ path: |
+ golangci-lint-report.json
+
test:
name: Go ${{ matrix.go-version }} Redis ${{ matrix.redis-version }}
- runs-on: ubuntu-latest
+ if: ${{ !github.event.pull_request.draft }}
+ needs: golangci-lint
+ # Runs on is pinned to a version that provides python 3.10.
+ # See: https://github.com/actions/runner-images?tab=readme-ov-file#available-images
+ # Avoid using ubuntu-latest as it would upgrade python unattended.
+ runs-on: ubuntu-22.04
strategy:
- fail-fast: true
- # This workflow isn't designed to be run as a pipeline, several issues:
- #
- # - contains golangci-lint jobs, sonarcloud (would duplicate)
- # - cache config not suitable for multiple pipelines
- # - python tests should be separate job, or no job
- #
- # Keep it to a single job run from the matrix as configured
- # until we get a chance to redesign the pipeline properly.
+ fail-fast: false
matrix:
redis-version: [7]
- python-version: ["3.11"]
go-version: [1.22.x]
env:
REDIS_IMAGE: redis:${{ matrix.redis-version }}
steps:
+ - name: "Reclaim some runner space"
+ run: sudo rm -rf /usr/local/bin/* /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL
+
- name: Checkout Tyk
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
- - name: Setup Golang
- uses: actions/setup-go@v2
- with:
- go-version: ${{ matrix.go-version }}
-
+ # Regardless that the base image provides a python release, we need
+ # setup-python so it properly configures the python3-venv.
- name: Setup Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
- python-version: ${{ matrix.python-version }}
+ python-version: ${{ env.PYTHON_VERSION }}
+
+ - name: Print runtime python version
+ run: python3 -c 'import sys; print("%d.%d" % (sys.version_info[0], sys.version_info[1]))'
+
+ - name: Print runtime pip version
+ run: pip -V && pip3 -V
- name: Setup CI Tooling
uses: shrink/actions-docker-extract@v3
@@ -74,7 +128,13 @@ jobs:
path: /usr/local/bin/.
destination: /usr/local/bin
- - name: Cache
+ - name: Setup Golang
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+ cache-dependency-path: go.sum
+
+ - name: Restore Golang Cache
uses: actions/cache@v4
with:
path: |
@@ -113,15 +173,12 @@ jobs:
exit 1
fi
- - name: Fetch base branch
- if: ${{ github.event_name == 'pull_request' }}
- run: git fetch origin ${{ github.base_ref }}
-
- - name: Print CPU info
- run: grep '^model name' /proc/cpuinfo
+ - name: Bring up test services
+ run: task services:up
- - name: Print Go env
- run: go env
+ - name: Preflight Python tests
+ if: runner.debug == '1'
+ run: TYK_LOGLEVEL=debug go test -p 1 -parallel 1 -race -v ./dlpython ./coprocess/...
- name: Run Gateway Tests
id: ci-tests
@@ -129,17 +186,50 @@ jobs:
task test:e2e-combined args="-race -timeout=15m"
task test:coverage
- # golangci-lint actions *require* issues-exit-code=0 to pass data along to sonarcloud
- # rather than erroring out on github issues directly with out-format github.
- - name: golangci-lint
- if: ${{ github.event_name == 'pull_request' }}
- run: |
- golangci-lint run --out-format checkstyle --issues-exit-code=0 --new-from-rev=origin/${{ github.base_ref }} ./... > golanglint.xml
+ - uses: actions/upload-artifact@v4
+ if: ${{ always() }}
+ with:
+ name: coverage
+ retention-days: 1
+ path: coverage/gateway-all.cov
- - name: golangci-lint-on-push
- if: ${{ github.event_name == 'push' }}
- run: |
- golangci-lint run --out-format checkstyle --issues-exit-code=0 ./... > golanglint.xml
+ - uses: actions/upload-artifact@v4
+ if: ${{ always() }}
+ with:
+ name: testjson
+ retention-days: 1
+ path: coverage/gateway-all.json
+
+ sonar-cloud-analysis:
+ runs-on: ubuntu-latest
+ if: ${{ !github.event.pull_request.draft }}
+ needs: [test, golangci-lint]
+ steps:
+ - name: "Checkout repository"
+ uses: TykTechnologies/github-actions/.github/actions/checkout-pr@main
+ with:
+ token: ${{ secrets.ORG_GH_TOKEN }}
+
+ - name: Download coverage artifacts
+ uses: actions/download-artifact@v4
+ # download into coverage/ so coverage/gateway-all.cov exists for the existence check and Sonar
+ with: { name: coverage, path: coverage }
+
+ - name: Download golangcilint artifacts
+ uses: actions/download-artifact@v4
+ with:
+ name: golangcilint
+
+ - name: Check reports existence
+ id: check_files
+ uses: andstor/file-existence-action@v1
+ with:
+ files: 'coverage/gateway-all.cov, golangci-lint-report.json'
+
+ - name: Install Dependencies
+ env:
+ TOKEN: '${{ secrets.ORG_GH_TOKEN }}'
+ run: git config --global url."https://${TOKEN}@github.com".insteadOf "https://github.com"
- name: SonarCloud Scan
if: always()
@@ -149,12 +239,12 @@ jobs:
-Dsonar.organization=tyktechnologies
-Dsonar.projectKey=TykTechnologies_tyk
-Dsonar.sources=.
- -Dsonar.exclusions=**/testdata/*,test/**,coprocess/**/*,ci/**,smoke-tests/**,apidef/oas/schema/schema.gen.go
+ -Dsonar.exclusions=**/testdata/*,test/**,coprocess/**/*,ci/**,smoke-tests/**,apidef/oas/schema/schema.gen.go,templates/**
-Dsonar.coverage.exclusions=**/*_test.go,**/mock/*
-Dsonar.test.inclusions=**/*_test.go
-Dsonar.tests=.
-Dsonar.go.coverage.reportPaths=coverage/gateway-all.cov
- -Dsonar.go.golangci-lint.reportPaths=golanglint.xml
+ -Dsonar.go.golangci-lint.reportPaths=golangci-lint-report.json
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 14bccb15d6f..62fac2e3a27 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -11,9 +11,14 @@ on:
schedule:
- cron: '0 18 * * 4'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
jobs:
analyze:
name: Analyze
+ if: ${{ !github.event.pull_request.draft }}
runs-on: ubuntu-latest
strategy:
@@ -27,7 +32,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
@@ -39,7 +44,7 @@ jobs:
if: ${{ github.event_name == 'pull_request' }}
- name: Install Go
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version-file: go.mod
diff --git a/.github/workflows/jira-lint.yml b/.github/workflows/jira-lint.yml
deleted file mode 100644
index 23d95f04b16..00000000000
--- a/.github/workflows/jira-lint.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: JIRA lint
-
-on:
- pull_request:
- branches:
- - master
- - release-**
- types:
- - opened
- - reopened
- - synchronize
- - ready_for_review
-
-jobs:
- jira-lint:
- if: ${{ !github.event.pull_request.draft }}
- uses: TykTechnologies/github-actions/.github/workflows/jira-lint.yaml@main
- secrets:
- JIRA_TOKEN: ${{ secrets.JIRA_TOKEN }}
- ORG_GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }}
diff --git a/.github/workflows/lint-swagger.yml b/.github/workflows/lint-swagger.yml
index efdacc3d415..638f25f6476 100644
--- a/.github/workflows/lint-swagger.yml
+++ b/.github/workflows/lint-swagger.yml
@@ -7,6 +7,10 @@ on:
paths:
- 'swagger.yml'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
jobs:
redocly_validator:
runs-on: ubuntu-latest
@@ -41,7 +45,7 @@ jobs:
- name: Setup Golang
uses: actions/setup-go@v5
with:
- go-version: '1.22.x'
+ go-version: stable
- name: Install dyff binary
run: |
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 00000000000..19226e8b323
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,29 @@
+# yamllint disable rule:truthy
+---
+name: CI lint
+
+# API Changes (godoc) and JIRA validation only apply to PRs.
+# Branches are not important, supports chaining PRs.
+
+on:
+ pull_request:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
+jobs:
+ godoc:
+ if: ${{ !github.event.pull_request.draft }}
+ uses: TykTechnologies/github-actions/.github/workflows/godoc.yml@main
+ secrets:
+ ORG_GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }}
+ with:
+ go-version: stable
+
+ jira:
+ if: ${{ !github.event.pull_request.draft }}
+ uses: TykTechnologies/github-actions/.github/workflows/jira-lint.yaml@main
+ secrets:
+ JIRA_TOKEN: ${{ secrets.JIRA_TOKEN }}
+ ORG_GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }}
diff --git a/.github/workflows/plugin-compiler-build.yml b/.github/workflows/plugin-compiler-build.yml
index 53984e0b88a..aa4939fd603 100644
--- a/.github/workflows/plugin-compiler-build.yml
+++ b/.github/workflows/plugin-compiler-build.yml
@@ -8,19 +8,29 @@ on:
- master
- release-**
tags:
- - 'v*'
+ - "v*"
env:
GOLANG_CROSS: 1.22-bullseye
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
jobs:
docker-build:
runs-on: ubuntu-latest
+ if: ${{ !github.event.pull_request.draft }}
permissions:
id-token: write
steps:
+ - name: "Reclaim some runner space"
+ run: sudo rm -rf /usr/local/bin/* /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL
+
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 1
- name: Configure AWS Credentials
id: configure-aws
@@ -70,3 +80,35 @@ jobs:
BASE-IMAGE=tykio/golang-cross:${{ env.GOLANG_CROSS }}
GITHUB_SHA=${{ github.sha }}
GITHUB_TAG=${{ github.ref_name }}
+
+ - name: Set docker metadata EE
+ id: set-metadata-ee
+ uses: docker/metadata-action@v4
+ with:
+ images: |
+ tykio/tyk-plugin-compiler-ee,enable=${{ startsWith(github.ref, 'refs/tags') }}
+ ${{ steps.login-ecr.outputs.registry }}/tyk-plugin-compiler-ee
+ labels: |
+ org.opencontainers.image.title=tyk-plugin-compiler-ee
+ org.opencontainers.image.description=Plugin compiler for the Tyk API Gateway Enterprise Edition
+ tags: |
+ type=ref,event=pr
+ type=semver,pattern=v{{version}}
+ type=semver,pattern=v{{major}}.{{minor}}
+ type=semver,pattern={{raw}}
+ type=sha,format=long
+
+ - name: Build and push to dockerhub/ECR EE
+ uses: docker/build-push-action@v4
+ with:
+ context: .
+ file: ci/images/plugin-compiler/Dockerfile
+ platforms: linux/amd64
+ push: true
+ labels: ${{ steps.set-metadata-ee.outputs.labels }}
+ tags: ${{ steps.set-metadata-ee.outputs.tags }}
+ build-args: |
+ BASE-IMAGE=tykio/golang-cross:${{ env.GOLANG_CROSS }}
+ GITHUB_SHA=${{ github.sha }}
+ GITHUB_TAG=${{ github.ref_name }}
+ BUILD_TAG=ee
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 87b1bcafc33..e611642b870 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,3 +1,5 @@
+# yamllint disable rule:line-length rule:truthy
+name: Release
# Generated by: gromit policy
# Distribution channels covered by this workflow
@@ -6,8 +8,9 @@
# - docker hub
# - devenv ECR
# - Cloudsmith
-
-name: Release
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
# Trigger release every monday at midnight for master CI images
schedule:
@@ -21,13 +24,14 @@ on:
- 'v*'
env:
GOPRIVATE: github.com/TykTechnologies
- VARIATION: prod
+ VARIATION: inverted
DOCKER_BUILD_SUMMARY: false
DOCKER_BUILD_RECORD_UPLOAD: false
# startsWith covers pull_request_target too
BASE_REF: ${{startsWith(github.event_name, 'pull_request') && github.base_ref || github.ref_name}}
jobs:
goreleaser:
+ if: github.event.pull_request.draft == false
name: '${{ matrix.golang_cross }}'
runs-on: ubuntu-latest-m
permissions:
@@ -45,7 +49,7 @@ jobs:
rpmvers: 'el/7 el/8 el/9 amazon/2 amazon/2023'
debvers: 'ubuntu/xenial ubuntu/bionic ubuntu/focal ubuntu/jammy debian/jessie debian/buster debian/bullseye debian/bookworm'
outputs:
- tags: ${{ steps.ci_metadata.outputs.tags }}
+ tags: ${{ steps.ci_metadata_ee.outputs.tags }}
commit_author: ${{ steps.set_outputs.outputs.commit_author}}
steps:
- name: Checkout of tyk
@@ -80,9 +84,9 @@ jobs:
path: |
~/.cache/go-build
~/go/pkg/mod
- key: ${{ runner.os }}-${{ matrix.golang_cross }}-go-${{ hashFiles('**/go.sum') }}
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
- ${{ runner.os }}-${{ matrix.golang_cross }}-go-${{ hashFiles('**/go.sum') }}
+ ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- name: Build
env:
NFPM_PASSPHRASE: ${{ secrets.SIGNING_KEY_PASSPHRASE }}
@@ -127,7 +131,7 @@ jobs:
with:
mask-password: 'true'
- name: Docker metadata for CI
- id: ci_metadata
+ id: ci_metadata_
if: ${{ matrix.golang_cross == '1.22-bullseye' }}
uses: docker/metadata-action@v5
with:
@@ -153,10 +157,43 @@ jobs:
push: true
cache-from: type=gha
cache-to: type=gha,mode=max
- tags: ${{ steps.ci_metadata.outputs.tags }}
+ tags: ${{ steps.ci_metadata_.outputs.tags }}
+ labels: ${{ steps.tag_metadata_.outputs.labels }}
+ build-args: |
+ EDITION=
+ - name: Docker metadata for CI ee
+ id: ci_metadata_ee
+ if: ${{ matrix.golang_cross == '1.22-bullseye' }}
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ steps.ecr.outputs.registry }}/tyk-ee
+ flavor: |
+ latest=false
+ tags: |
+ type=ref,event=branch
+ type=ref,event=pr
+ type=sha,format=long
+ type=semver,pattern={{major}},prefix=v
+ type=semver,pattern={{major}}.{{minor}},prefix=v
+ type=semver,pattern={{version}},prefix=v
+ - name: push image to CI ee
+ if: ${{ matrix.golang_cross == '1.22-bullseye' }}
+ uses: docker/build-push-action@v6
+ with:
+ context: "dist"
+ platforms: linux/amd64,linux/arm64
+ file: ci/Dockerfile.distroless
+ provenance: mode=max
+ sbom: true
+ push: true
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+ tags: ${{ steps.ci_metadata_ee.outputs.tags }}
labels: ${{ steps.tag_metadata.outputs.labels }}
+ build-args: |
+ EDITION=-ee
- name: Docker metadata for tag push
- id: tag_metadata
+ id: tag_metadata_
uses: docker/metadata-action@v5
with:
images: |
@@ -166,13 +203,12 @@ jobs:
latest=false
prefix=v
tags: |
- type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{version}}
labels: "org.opencontainers.image.title=tyk-gateway (distroless) \norg.opencontainers.image.description=Tyk Open Source API Gateway written in Go, supporting REST, GraphQL, TCP and gRPC protocols\norg.opencontainers.image.vendor=tyk.io\norg.opencontainers.image.version=${{ github.ref_name }}\n"
- - name: build multiarch image
+ - name: push image to prod
if: ${{ matrix.golang_cross == '1.22-bullseye' }}
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@v6
with:
context: "dist"
platforms: linux/amd64,linux/arm64
@@ -182,16 +218,52 @@ jobs:
cache-from: type=gha
cache-to: type=gha,mode=max
push: ${{ startsWith(github.ref, 'refs/tags') }}
- tags: ${{ steps.tag_metadata.outputs.tags }}
- labels: ${{ steps.tag_metadata.outputs.labels }}
- - uses: actions/upload-artifact@v4
+ tags: ${{ steps.tag_metadata_.outputs.tags }}
+ labels: ${{ steps.tag_metadata_.outputs.labels }}
+ build-args: |
+ EDITION=
+ - name: Docker metadata for tag push ee
+ id: tag_metadata_ee
+ uses: docker/metadata-action@v5
+ with:
+ images: |
+ tykio/tyk-gateway-ee
+ flavor: |
+ latest=false
+ prefix=v
+ tags: |
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{version}}
+ labels: "org.opencontainers.image.title=tyk-gateway Enterprise Edition (distroless) \norg.opencontainers.image.description=Tyk API Gateway Enterprise Edition written in Go, supporting REST, GraphQL, TCP and gRPC protocols\norg.opencontainers.image.vendor=tyk.io\norg.opencontainers.image.version=${{ github.ref_name }}\n"
+ - name: push image to prod ee
+ if: ${{ matrix.golang_cross == '1.22-bullseye' }}
+ uses: docker/build-push-action@v6
+ with:
+ context: "dist"
+ platforms: linux/amd64,linux/arm64
+ file: ci/Dockerfile.distroless
+ provenance: mode=max
+ sbom: true
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+ push: ${{ startsWith(github.ref, 'refs/tags') }}
+ tags: ${{ steps.tag_metadata_ee.outputs.tags }}
+ labels: ${{ steps.tag_metadata_ee.outputs.labels }}
+ build-args: |
+ EDITION=-ee
+ - name: save deb
+ uses: actions/upload-artifact@v4
+ if: ${{ matrix.golang_cross == '1.22-bullseye' }}
with:
name: deb
retention-days: 1
path: |
dist/*.deb
!dist/*PAYG*.deb
- - uses: actions/upload-artifact@v4
+ !dist/*fips*.deb
+ - name: save rpm
+ uses: actions/upload-artifact@v4
+ if: ${{ matrix.golang_cross == '1.22-bullseye' }}
with:
name: rpm
retention-days: 1
@@ -200,6 +272,7 @@ jobs:
!dist/*PAYG*.rpm
!dist/*fips*.rpm
test-controller-api:
+ if: github.event.pull_request.draft == false
needs:
- goreleaser
runs-on: ubuntu-latest
@@ -261,7 +334,7 @@ jobs:
env:
GH_TOKEN: ${{ github.token }}
run: |
- gh release download --repo github.com/tyklabs/tyk-pro --archive tar.gz -O env.tgz
+ gh release download --repo github.com/TykTechnologies/tyk-pro --archive tar.gz -O env.tgz
mkdir auto && tar --strip-components=1 -C auto -xzvf env.tgz
- name: env up
shell: bash
@@ -295,21 +368,15 @@ jobs:
./dash-bootstrap.sh http://localhost:3000
docker compose -p auto -f pro-ha.yml -f deps_pro-ha.yml -f ${{ matrix.envfiles.db }}.yml -f ${{ matrix.envfiles.cache }}.yml --env-file versions.env --profile slave-datacenter up --quiet-pull -d
echo "$(cat pytest.env | grep USER_API_SECRET)" >> $GITHUB_OUTPUT
+ echo "ts=$(date +%s%N)" >> $GITHUB_OUTPUT
- uses: actions/checkout@v4
with:
repository: TykTechnologies/tyk-analytics
path: tyk-analytics
token: ${{ secrets.ORG_GH_TOKEN }}
fetch-depth: 1
+ ref: ${{ env.BASE_REF }}
sparse-checkout: tests/api
- - name: Branch for test code
- id: timestamp
- working-directory: tyk-analytics/tests/api
- run: |
- # Generate report id
- git fetch --no-tags --depth 1 origin "refs/heads/${BASE_REF}:refs/remotes/origin/${BASE_REF}"
- git switch $BASE_REF
- echo "ts=$(date +%s%N)" >> $GITHUB_OUTPUT
- uses: actions/setup-python@v5
with:
cache: 'pip'
@@ -338,11 +405,25 @@ jobs:
USER_API_SECRET=${{ steps.env_up.outputs.USER_API_SECRET }}
EOF
env $(cat pytest.env | xargs) $pytest -m "${{ matrix.envfiles.apimarkers }}"
+ - name: Upload Playwright Test Report to S3
+ if: failure() && steps.test_execution.outcome != 'success' && steps.env_up.outcome == 'success'
+ run: npm run upload_report_to_s3
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.UI_AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.UI_AWS_SECRET_ACCESS_KEY }}
+ RUN_ID: 'tyk-analytics/${{ github.run_id }}'
+ working-directory: tyk-analytics/tests/ui
+ - name: Share S3 report link into summary
+ if: failure() && steps.test_execution.outcome != 'success' && steps.env_up.outcome == 'success'
+ run: |
+ echo "# :clipboard: S3 UI Test REPORT: ${{ matrix.envfiles.db }}-${{ matrix.envfiles.conf }}" >> $GITHUB_STEP_SUMMARY
+ echo "- Status: ${{ steps.test_execution.outcome == 'success' && ':white_check_mark:' || ':no_entry_sign:' }}" >> $GITHUB_STEP_SUMMARY
+ echo "- [Link to report](https://tyk-qa-reports.s3.eu-central-1.amazonaws.com/tyk-analytics/${{ github.run_id }}/index.html)" >> $GITHUB_STEP_SUMMARY
- name: Generate metadata and upload test reports
id: metadata_report
if: always() && (steps.test_execution.conclusion != 'skipped')
env:
- REPORT_NAME: ${{ github.repository }}_${{ github.run_id }}_${{ github.run_attempt }}-${{steps.timestamp.outputs.ts}}
+ REPORT_NAME: ${{ github.repository }}_${{ github.run_id }}_${{ github.run_attempt }}-${{steps.env_up.outputs.ts}}
METADATA_REPORT_PATH: metadata.toml
run: |
# Generate metadata report
@@ -385,6 +466,7 @@ jobs:
retention-days: 3
overwrite: true
test-controller-distros:
+ if: github.event.pull_request.draft == false
needs:
- goreleaser
runs-on: ubuntu-latest
diff --git a/.github/workflows/update-oas-docs.yml b/.github/workflows/update-oas-docs.yml
deleted file mode 100644
index 03c9a552cca..00000000000
--- a/.github/workflows/update-oas-docs.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-# Keep the docs on tyk-docs in sync with this branch's docs
-
-name: Update OAS docs
-
-on:
- push:
- branches:
- - temporarily-suspended
- paths:
- - apidef/oas/schema/x-tyk-gateway.md
-
-jobs:
- sync:
- name: tyk-oas-docs
- runs-on: ubuntu-latest
- steps:
- - uses: peter-evans/repository-dispatch@v1
- with:
- token: ${{ secrets.ORG_GH_TOKEN }}
- repository: TykTechnologies/tyk-docs
- event-type: tyk-oas-docs
- client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
diff --git a/.gitignore b/.gitignore
index c3d3051bdea..4be204743e6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -61,3 +61,5 @@ tyk_linux_*
*.test
main
+
+/coprocess/*.pb.go-e
diff --git a/.golangci.yml b/.golangci.yml
index cc0a2030073..c7612a18f50 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,48 +1,12 @@
# Options for analysis running.
run:
- # The default concurrency value is the number of available CPU.
- concurrency: 4
-
- # Timeout for analysis, e.g. 30s, 5m.
- # Default: 1m
- timeout: 5m
-
- # Include test files or not.
- # Default: true
+ timeout: 20m
tests: true
# Be as complete as possible when reporting issues
max-issues-per-linter: 0
max-same-issues: 0
- # List of build tags, all linters use it.
- # Default: [].
- build-tags:
- - goplugin
-
- # Which dirs to skip: issues from them won't be reported.
- # Can use regexp here: `generated.*`, regexp is applied on full path.
- # Default value is empty list,
- # but default dirs are skipped independently of this option's value (see skip-dirs-use-default).
- # "/" will be replaced by current OS file path separator to properly work on Windows.
- skip-dirs:
- - ci
- - smoke-tests
-
- # Enables skipping of directories:
- # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
- # Default: true
- skip-dirs-use-default: true
-
- # Which files to skip: they will be analyzed, but issues from them won't be reported.
- # Default value is empty list,
- # but there is no need to include all autogenerated files,
- # we confidently recognize autogenerated files.
- # If it's not please let us know.
- # "/" will be replaced by current OS file path separator to properly work on Windows.
- skip-files:
- - ".*\\.pb\\.go$"
-
# If set we pass it to "go list -mod={option}". From "go help modules":
# If invoked with -mod=readonly, the go command is disallowed from the implicit
# automatic updating of go.mod described above. Instead, it fails when any changes
@@ -60,11 +24,6 @@ run:
# If false (default) - golangci-lint acquires file lock on start.
allow-parallel-runners: false
- # Define the Go version limit.
- # Mainly related to generics support since go1.18.
- # Default: use Go version from the go.mod file, fallback on the env var `GOVERSION`, fallback on 1.18
- go: '1.22'
-
linters:
disable:
- varnamelen
@@ -111,7 +70,6 @@ linters-settings:
disabled: false
govet:
- check-shadowing: true
enable-all: true
disable:
- fieldalignment
@@ -130,7 +88,16 @@ linters-settings:
issues:
max-issues-per-linter: 0
max-same-issues: 0
- exclude-use-default: false
+ exclude-generated: strict
+ exclude-files:
+ - ".*\\.pb\\.go$"
+ - ".*/mock/.+\\.go$"
+ - ".*/bindata.go$"
+ exclude-dirs:
+ - ci
+ - bin
+ - webclient
+ - portal
exclude-rules:
- path: ^cli/
linters:
diff --git a/LICENSE.md b/LICENSE.md
index 771dfbcd592..810075a41b7 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -1,3 +1,12 @@
+The code in the root directory and all subdirectories, except for the 'ee' folder,
+is licensed under the Mozilla Public License Version 2.0 (the "MPL"), as detailed below.
+
+The code in the 'ee' folder is subject to a separate commercial license.
+See the [LICENSE-EE](ee/LICENSE-EE.md) file in the 'ee' folder for details on the Enterprise Edition license.
+
+For the open source components:
+-------------------------------
+
# Mozilla Public License Version 2.0
## 1. Definitions
@@ -181,4 +190,4 @@ You may add additional accurate notices of copyright ownership.
## Exhibit B - “Incompatible With Secondary Licenses” Notice
-> This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0.
\ No newline at end of file
+> This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0.
diff --git a/README.md b/README.md
index 1ef30ddbf62..bab78264f8d 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ Tyk runs natively on _Kubernetes_, if you prefer, thanks to the _[Tyk Kubernetes
- The Enterprise API Management platform SaaS: Management Control Plane, Dashboard GUI & Developer Portal.
+ The Enterprise API Management platform SaaS: Management Control Plane, Dashboard GUI & Developer Portal.
Deploy Tyk Cloud
@@ -82,7 +82,7 @@ Your Tyk Gateway is now configured and ready to use. Confirm this by checking ag
```console
curl localhost:8080/hello
```
-Output:
+Output:
```json
{"status": "pass", "version": "v3.2.1", "description": "Tyk GW"}
```
@@ -166,11 +166,13 @@ All the documentation for Tyk Gateway and other OSS-related topics can be found
* [Newsletters ](https://pages.tyk.io/newsletter)- Subscribe to our GraphQL & API newsletters
* If you are using Tyk give us a star ⭐️
-## Open Source License
+## Licensing
-Tyk is released under the MPL v2.0; please see [LICENSE.md](https://github.com/TykTechnologies/tyk/blob/master/LICENSE.md) for a full version of the license.
+Tyk is dual-licensed:
-![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FTykTechnologies%2Ftyk.svg?type=large)
+1. Open Source License: The code in the root directory and all subdirectories except the 'ee' folder is released under the MPL v2.0. Please see [LICENSE.md](https://github.com/TykTechnologies/tyk/blob/master/LICENSE.md) for the full version of the open source license.
+
+2. Commercial License: The code in the 'ee' folder is subject to a commercial license. For more information about obtaining a commercial license, please contact our sales team at sales@tyk.io.
## Compiling Tyk Gateway
diff --git a/apidef/api_definitions.go b/apidef/api_definitions.go
index d1c17a4200b..f2202ed1342 100644
--- a/apidef/api_definitions.go
+++ b/apidef/api_definitions.go
@@ -19,6 +19,8 @@ import (
"github.com/TykTechnologies/tyk/internal/reflect"
+ "golang.org/x/oauth2"
+
"github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource"
"github.com/TykTechnologies/gojsonschema"
@@ -762,6 +764,124 @@ type APIDefinition struct {
VersionName string `bson:"-" json:"-"`
DetailedTracing bool `bson:"detailed_tracing" json:"detailed_tracing"`
+
+ // UpstreamAuth stores information about authenticating against upstream.
+ UpstreamAuth UpstreamAuth `bson:"upstream_auth" json:"upstream_auth"`
+}
+
+// UpstreamAuth holds the configurations related to upstream API authentication.
+type UpstreamAuth struct {
+ // Enabled enables upstream API authentication.
+ Enabled bool `bson:"enabled" json:"enabled"`
+ // BasicAuth holds the basic authentication configuration for upstream API authentication.
+ BasicAuth UpstreamBasicAuth `bson:"basic_auth" json:"basic_auth"`
+ // OAuth holds the OAuth2 configuration for upstream API authentication (client credentials and password flows).
+ OAuth UpstreamOAuth `bson:"oauth" json:"oauth"`
+}
+
+// IsEnabled checks if UpstreamAuthentication is enabled for the API.
+func (u *UpstreamAuth) IsEnabled() bool {
+ return u.Enabled && (u.BasicAuth.Enabled || u.OAuth.Enabled)
+}
+
+// IsEnabled checks if UpstreamOAuth is enabled for the API.
+func (u UpstreamOAuth) IsEnabled() bool {
+ return u.Enabled
+}
+
+// UpstreamBasicAuth holds upstream basic authentication configuration.
+type UpstreamBasicAuth struct {
+ // Enabled enables upstream basic authentication.
+ Enabled bool `bson:"enabled" json:"enabled,omitempty"`
+ // Username is the username to be used for upstream basic authentication.
+ Username string `bson:"username" json:"username"`
+ // Password is the password to be used for upstream basic authentication.
+ Password string `bson:"password" json:"password"`
+ // Header holds the configuration for custom header name to be used for upstream basic authentication.
+ // Defaults to `Authorization`.
+ Header AuthSource `bson:"header" json:"header"`
+}
+
+// UpstreamOAuth holds upstream OAuth2 authentication configuration.
+type UpstreamOAuth struct {
+ // Enabled enables upstream OAuth2 authentication.
+ Enabled bool `bson:"enabled" json:"enabled"`
+ // AllowedAuthorizeTypes specifies the allowed authorization types for upstream OAuth2 authentication.
+ AllowedAuthorizeTypes []string `bson:"allowed_authorize_types" json:"allowed_authorize_types"`
+ // ClientCredentials holds the client credentials for upstream OAuth2 authentication.
+ ClientCredentials ClientCredentials `bson:"client_credentials" json:"client_credentials"`
+ // PasswordAuthentication holds the configuration for the upstream OAuth password authentication flow.
+ PasswordAuthentication PasswordAuthentication `bson:"password_authentication,omitempty" json:"passwordAuthentication,omitempty"`
+}
+
+// PasswordAuthentication holds the configuration for upstream OAuth2 password authentication flow.
+type PasswordAuthentication struct {
+ ClientAuthData
+ // Header holds the configuration for the custom header to be used for OAuth authentication.
+ Header AuthSource `bson:"header" json:"header"`
+ // Username is the username to be used for upstream OAuth2 password authentication.
+ Username string `bson:"username" json:"username"`
+ // Password is the password to be used for upstream OAuth2 password authentication.
+ Password string `bson:"password" json:"password"`
+ // TokenURL is the resource server's token endpoint
+ // URL. This is a constant specific to each server.
+ TokenURL string `bson:"token_url" json:"token_url"`
+ // Scopes specifies optional requested permissions.
+ Scopes []string `bson:"scopes" json:"scopes,omitempty"`
+ // ExtraMetadata holds the keys that we want to extract from the token and pass to the upstream.
+ ExtraMetadata []string `bson:"extra_metadata" json:"extra_metadata,omitempty"`
+
+ // Token is the OAuth2 password authentication flow token, for internal use.
+ Token *oauth2.Token `bson:"-" json:"-"`
+}
+
+// ClientAuthData holds the client ID and secret for upstream OAuth2 authentication.
+type ClientAuthData struct {
+ // ClientID is the application's ID.
+ ClientID string `bson:"client_id" json:"client_id"`
+ // ClientSecret is the application's secret.
+ ClientSecret string `bson:"client_secret" json:"client_secret"`
+}
+
+// ClientCredentials holds the client credentials for upstream OAuth2 authentication.
+type ClientCredentials struct {
+ ClientAuthData
+ // Header holds the configuration for the custom header to be used for OAuth authentication.
+ Header AuthSource `bson:"header" json:"header"`
+ // Enabled activates upstream OAuth2 client credentials authentication.
+ Enabled bool `bson:"enabled" json:"enabled"`
+ // TokenURL is the resource server's token endpoint
+ // URL. This is a constant specific to each server.
+ TokenURL string `bson:"token_url" json:"token_url"`
+ // Scopes specifies optional requested permissions.
+ Scopes []string `bson:"scopes" json:"scopes,omitempty"`
+ // ExtraMetadata holds the keys that we want to extract from the token and pass to the upstream.
+ ExtraMetadata []string `bson:"extra_metadata" json:"extra_metadata,omitempty"`
+
+ // TokenProvider is the OAuth2 token provider for internal use.
+ TokenProvider oauth2.TokenSource `bson:"-" json:"-"`
+}
+
+// AuthSource is a common type to be used for auth configurations.
+type AuthSource struct {
+ // Enabled enables the auth source.
+ Enabled bool `bson:"enabled" json:"enabled"`
+ // Name specifies the key to be used in the auth source.
+ Name string `bson:"name" json:"name"`
+}
+
+// IsEnabled returns the enabled status of the auth source.
+func (a AuthSource) IsEnabled() bool {
+ return a.Enabled
+}
+
+// AuthKeyName returns the key name to be used for the auth source.
+func (a AuthSource) AuthKeyName() string {
+ if !a.IsEnabled() {
+ return ""
+ }
+
+ return a.Name
}
type AnalyticsPluginConfig struct {
@@ -1453,6 +1573,9 @@ var Template = template.New("").Funcs(map[string]interface{}{
},
})
+// ExternalOAuth support will be deprecated starting from 5.7.0.
+// To avoid any disruptions, we recommend that you use JSON Web Token (JWT) instead,
+// as explained in https://tyk.io/docs/basic-config-and-security/security/authentication-authorization/ext-oauth-middleware/.
type ExternalOAuth struct {
Enabled bool `bson:"enabled" json:"enabled"`
Providers []Provider `bson:"providers" json:"providers"`
diff --git a/apidef/oas/default.go b/apidef/oas/default.go
index 244f7b128af..ad899b8f472 100644
--- a/apidef/oas/default.go
+++ b/apidef/oas/default.go
@@ -59,6 +59,9 @@ type TykExtensionConfigParams struct {
ValidateRequest *bool
// MockResponse is true if a mocked response is configured.
MockResponse *bool
+
+ // pathItemHasParameters is set to true when parameters are defined at the same level as the operations within a path item.
+ pathItemHasParameters bool
}
// BuildDefaultTykExtension builds a default tyk extension in *OAS based on function arguments.
@@ -218,6 +221,7 @@ func (s *OAS) importMiddlewares(overRideValues TykExtensionConfigParams) {
}
for path, pathItem := range s.Paths {
+ overRideValues.pathItemHasParameters = len(pathItem.Parameters) > 0
for _, method := range allowedMethods {
if operation := pathItem.GetOperation(method); operation != nil {
tykOperation := s.getTykOperation(method, path)
diff --git a/apidef/oas/default_test.go b/apidef/oas/default_test.go
index 97c820354d4..514f298d7f1 100644
--- a/apidef/oas/default_test.go
+++ b/apidef/oas/default_test.go
@@ -1013,6 +1013,81 @@ func TestOAS_BuildDefaultTykExtension(t *testing.T) {
Operations: Operations{},
}, oasDef.GetTykExtension().Middleware)
})
+
+ t.Run("configure validateRequest when OAS request parameters are configured on path level",
+ func(t *testing.T) {
+ oasDef := getOASDef(true, false)
+ petsPathItem := oasDef.Paths.Find("/pets")
+ petsPathItem.Parameters = openapi3.Parameters{
+ {
+ Value: &openapi3.Parameter{
+ Name: "auth",
+ In: header,
+ Schema: &openapi3.SchemaRef{
+ Value: &openapi3.Schema{
+ Type: "string",
+ },
+ },
+ },
+ },
+ }
+
+ t.Run("import=true,validateRequest=enabled", func(t *testing.T) {
+ tykExtensionConfigParams := TykExtensionConfigParams{
+ ValidateRequest: &trueVal,
+ }
+
+ err := oasDef.BuildDefaultTykExtension(tykExtensionConfigParams, true)
+
+ assert.NoError(t, err)
+
+ expectedOperations := getExpectedOperations(true, true, middlewareValidateRequest)
+ expectedOperations[oasGetOperationID] = expectedOperations[oasPostOperationID]
+ assert.Equal(t, expectedOperations, oasDef.GetTykExtension().Middleware.Operations)
+ })
+
+ t.Run("import=true,validateRequest=disabled", func(t *testing.T) {
+ tykExtensionConfigParams := TykExtensionConfigParams{
+ ValidateRequest: &falseVal,
+ }
+
+ err := oasDef.BuildDefaultTykExtension(tykExtensionConfigParams, true)
+
+ assert.NoError(t, err)
+
+ expectedOperations := getExpectedOperations(false, true, middlewareValidateRequest)
+ expectedOperations[oasGetOperationID] = expectedOperations[oasPostOperationID]
+ assert.Equal(t, expectedOperations, oasDef.GetTykExtension().Middleware.Operations)
+ })
+
+ t.Run("import=false,validateRequest=enabled", func(t *testing.T) {
+ tykExtensionConfigParams := TykExtensionConfigParams{
+ ValidateRequest: &trueVal,
+ }
+
+ err := oasDef.BuildDefaultTykExtension(tykExtensionConfigParams, false)
+
+ assert.NoError(t, err)
+
+ expectedOperations := getExpectedOperations(true, true, middlewareValidateRequest)
+ expectedOperations[oasGetOperationID] = expectedOperations[oasPostOperationID]
+ assert.Equal(t, expectedOperations, oasDef.GetTykExtension().Middleware.Operations)
+ })
+
+ t.Run("import=false,validateRequest=disabled", func(t *testing.T) {
+ tykExtensionConfigParams := TykExtensionConfigParams{
+ ValidateRequest: &falseVal,
+ }
+
+ err := oasDef.BuildDefaultTykExtension(tykExtensionConfigParams, false)
+
+ assert.NoError(t, err)
+
+ expectedOperations := getExpectedOperations(false, true, middlewareValidateRequest)
+ expectedOperations[oasGetOperationID] = expectedOperations[oasPostOperationID]
+ assert.Equal(t, expectedOperations, oasDef.GetTykExtension().Middleware.Operations)
+ })
+ })
})
t.Run("mockResponse", func(t *testing.T) {
diff --git a/apidef/oas/linter_test.go b/apidef/oas/linter_test.go
index 307c8ae40c7..bdef6695d19 100644
--- a/apidef/oas/linter_test.go
+++ b/apidef/oas/linter_test.go
@@ -83,6 +83,12 @@ func TestXTykGateway_Lint(t *testing.T) {
}
settings.Upstream.RateLimit.Per = ReadableDuration(10 * time.Second)
+
+ settings.Upstream.Authentication = &UpstreamAuth{
+ Enabled: false,
+ BasicAuth: nil,
+ OAuth: nil,
+ }
}
// Encode data to json
diff --git a/apidef/oas/oas_test.go b/apidef/oas/oas_test.go
index 4b22374ac99..27989e57374 100644
--- a/apidef/oas/oas_test.go
+++ b/apidef/oas/oas_test.go
@@ -312,6 +312,9 @@ func TestOAS_ExtractTo_ResetAPIDefinition(t *testing.T) {
"APIDefinition.AnalyticsPlugin.Enabled",
"APIDefinition.AnalyticsPlugin.PluginPath",
"APIDefinition.AnalyticsPlugin.FuncName",
+ "APIDefinition.UpstreamAuth.OAuth.ClientCredentials.Enabled",
+ "APIDefinition.UpstreamAuth.OAuth.PasswordAuthentication.Header.Enabled",
+ "APIDefinition.UpstreamAuth.OAuth.PasswordAuthentication.Header.Name",
}
assert.Equal(t, expectedFields, noOASSupportFields)
diff --git a/apidef/oas/operation.go b/apidef/oas/operation.go
index 775c2a03cd4..b8a2d9d547b 100644
--- a/apidef/oas/operation.go
+++ b/apidef/oas/operation.go
@@ -117,7 +117,7 @@ func (o *Operation) Import(oasOperation *openapi3.Operation, overRideValues TykE
validate = &ValidateRequest{}
}
- if ok := validate.shouldImport(oasOperation); ok {
+ if ok := validate.shouldImport(oasOperation); ok || overRideValues.pathItemHasParameters {
validate.Import(*overRideValues.ValidateRequest)
o.ValidateRequest = validate
}
diff --git a/apidef/oas/schema/x-tyk-api-gateway.json b/apidef/oas/schema/x-tyk-api-gateway.json
index 8a88d02000c..1c90f314ab8 100644
--- a/apidef/oas/schema/x-tyk-api-gateway.json
+++ b/apidef/oas/schema/x-tyk-api-gateway.json
@@ -15,7 +15,6 @@
"$ref": "#/definitions/X-Tyk-Middleware"
}
},
- "additionalProperties": false,
"required": [
"info",
"upstream",
@@ -24,7 +23,6 @@
"definitions": {
"X-Tyk-ServiceDiscovery": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -67,7 +65,6 @@
},
"X-Tyk-ServiceDiscoveryCache": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -83,7 +80,6 @@
},
"X-Tyk-DetailedActivityLogs": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -95,7 +91,6 @@
},
"X-Tyk-AuthSource": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -110,7 +105,6 @@
},
"X-Tyk-MutualTLS": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -131,7 +125,6 @@
},
"X-Tyk-DomainToCertificate": {
"type": "object",
- "additionalProperties": false,
"properties": {
"domain": {
"$ref": "#/definitions/X-Tyk-DomainDef"
@@ -147,7 +140,6 @@
},
"X-Tyk-CertificatePinning": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -168,7 +160,6 @@
},
"X-Tyk-PinnedPublicKeys": {
"type": "object",
- "additionalProperties": false,
"properties": {
"domain": {
"$ref": "#/definitions/X-Tyk-DomainDef"
@@ -185,7 +176,6 @@
},
"X-Tyk-ClientToPolicy": {
"type": "object",
- "additionalProperties": false,
"properties": {
"clientId": {
"type": "string"
@@ -197,7 +187,6 @@
},
"X-Tyk-Provider": {
"type": "object",
- "additionalProperties": false,
"properties": {
"issuer": {
"type": "string"
@@ -214,7 +203,6 @@
},
"X-Tyk-ScopeToPolicy": {
"type": "object",
- "additionalProperties": false,
"properties": {
"scope": {
"type": "string"
@@ -226,7 +214,6 @@
},
"X-Tyk-Scopes": {
"type": "object",
- "additionalProperties": false,
"properties": {
"claimName": {
"type": "string"
@@ -243,7 +230,6 @@
},
"X-Tyk-ClientCertificates": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -260,7 +246,6 @@
},
"X-Tyk-PluginConfig": {
"type": "object",
- "additionalProperties": false,
"properties": {
"driver": {
"type": "string",
@@ -282,7 +267,6 @@
},
"X-Tyk-PluginConfigData": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -298,7 +282,6 @@
},
"X-Tyk-CustomPluginConfig": {
"type": "object",
- "additionalProperties": false,
"properties": {
"plugins": {
"$ref": "#/definitions/X-Tyk-CustomPlugins"
@@ -315,7 +298,6 @@
"minItems": 1
},
"X-Tyk-CustomPluginDefinition": {
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -340,7 +322,6 @@
},
"X-Tyk-IDExtractorConfig": {
"type": "object",
- "additionalProperties": false,
"properties": {
"headerName": {
"type": "string"
@@ -361,7 +342,6 @@
},
"X-Tyk-IDExtractor": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -395,7 +375,6 @@
},
"X-Tyk-AuthenticationPlugin": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -420,7 +399,6 @@
},
"X-Tyk-PluginBundle": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -437,7 +415,6 @@
},
"X-Tyk-CORS": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -493,7 +470,6 @@
},
"X-Tyk-Cache": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -531,7 +507,6 @@
},
"X-Tyk-Global": {
"type": "object",
- "additionalProperties": false,
"properties": {
"pluginConfig": {
"$ref": "#/definitions/X-Tyk-PluginConfig"
@@ -582,7 +557,6 @@
},
"X-Tyk-Allowance": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -597,7 +571,6 @@
},
"X-Tyk-TransformRequestMethod": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -613,7 +586,6 @@
},
"X-Tyk-TransformBody": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -652,7 +624,6 @@
},
"X-Tyk-TransformHeaders": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -680,7 +651,6 @@
},
"X-Tyk-CachePlugin": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -708,7 +678,6 @@
},
"X-Tyk-EnforceTimeout": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -725,7 +694,6 @@
},
"X-Tyk-ValidateRequest": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -740,7 +708,6 @@
},
"X-Tyk-MockResponse": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -785,7 +752,6 @@
},
"X-Tyk-RequestSizeLimit": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -802,7 +768,6 @@
},
"X-Tyk-VirtualEndpoint": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -833,7 +798,6 @@
},
"X-Tyk-URLRewrite": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -861,7 +825,6 @@
},
"X-Tyk-TrackEndpoint": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -873,7 +836,6 @@
},
"X-Tyk-URLRewriteTrigger": {
"type": "object",
- "additionalProperties": false,
"properties": {
"condition": {
"enum": [
@@ -905,7 +867,6 @@
},
"X-Tyk-URLRewriteRuleForRequestBody": {
"type": "object",
- "additionalProperties": false,
"properties": {
"in": {
"enum": [
@@ -929,7 +890,6 @@
},
"X-Tyk-URLRewriteRule": {
"type": "object",
- "additionalProperties": false,
"properties": {
"in": {
"enum": [
@@ -958,7 +918,6 @@
},
"X-Tyk-EndpointPostPlugin": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -981,7 +940,6 @@
},
"X-Tyk-Internal": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -993,7 +951,6 @@
},
"X-Tyk-Operation": {
"type": "object",
- "additionalProperties": false,
"properties": {
"allow": {
"$ref": "#/definitions/X-Tyk-Allowance"
@@ -1071,12 +1028,10 @@
"\\S+": {
"$ref": "#/definitions/X-Tyk-Operation"
}
- },
- "additionalProperties": false
+ }
},
"X-Tyk-Middleware": {
"type": "object",
- "additionalProperties": false,
"properties": {
"global": {
"$ref": "#/definitions/X-Tyk-Global"
@@ -1088,7 +1043,6 @@
},
"X-Tyk-ListenPath": {
"type": "object",
- "additionalProperties": false,
"properties": {
"value": {
"type": "string",
@@ -1104,7 +1058,6 @@
},
"X-Tyk-HMAC": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1137,7 +1090,6 @@
},
"X-Tyk-OIDC": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1172,7 +1124,6 @@
},
"X-Tyk-CustomPluginAuthentication": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1196,7 +1147,6 @@
},
"X-Tyk-Authentication": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1257,7 +1207,6 @@
},
"X-Tyk-Server": {
"type": "object",
- "additionalProperties": false,
"properties": {
"listenPath": {
"$ref": "#/definitions/X-Tyk-ListenPath"
@@ -1302,7 +1251,6 @@
},
"X-Tyk-GatewayTags": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1324,7 +1272,6 @@
},
"X-Tyk-Upstream": {
"type": "object",
- "additionalProperties": false,
"properties": {
"url": {
"type": "string",
@@ -1349,6 +1296,9 @@
},
"rateLimit": {
"$ref": "#/definitions/X-Tyk-RateLimit"
+ },
+ "authentication": {
+ "$ref": "#/definitions/X-Tyk-UpstreamAuthentication"
}
},
"required": [
@@ -1357,7 +1307,6 @@
},
"X-Tyk-State": {
"type": "object",
- "additionalProperties": false,
"properties": {
"active": {
"type": "boolean"
@@ -1372,7 +1321,6 @@
},
"X-Tyk-Versioning": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1419,7 +1367,6 @@
},
"X-Tyk-VersionToID": {
"type": "object",
- "additionalProperties": false,
"properties": {
"name": {
"type": "string",
@@ -1437,7 +1384,6 @@
},
"X-Tyk-Info": {
"type": "object",
- "additionalProperties": false,
"properties": {
"id": {
"type": "string"
@@ -1469,7 +1415,6 @@
},
"X-Tyk-ExtractCredentialsFromBody": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1487,7 +1432,6 @@
},
"X-Tyk-Notifications": {
"type": "object",
- "additionalProperties": false,
"properties": {
"sharedSecret": {
"type": "string"
@@ -1499,7 +1443,6 @@
},
"X-Tyk-Signature": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1530,7 +1473,6 @@
},
"X-Tyk-Token": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1557,7 +1499,6 @@
},
"X-Tyk-JWT": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1625,7 +1566,6 @@
},
"X-Tyk-Basic": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1655,7 +1595,6 @@
},
"X-Tyk-OAuth": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1695,7 +1634,7 @@
},
"X-Tyk-ExternalOAuth": {
"type": "object",
- "additionalProperties": false,
+ "description": "Support for external OAuth Middleware will be deprecated starting from 5.7.0. To avoid any disruptions, we recommend that you use JSON Web Token (JWT) instead, as explained in https://tyk.io/docs/basic-config-and-security/security/authentication-authorization/ext-oauth-middleware/",
"properties": {
"enabled": {
"type": "boolean"
@@ -1724,7 +1663,6 @@
},
"X-Tyk-OAuthProvider": {
"type": "object",
- "additionalProperties": false,
"properties": {
"jwt": {
"$ref": "#/definitions/X-Tyk-JWTValidation"
@@ -1736,7 +1674,6 @@
},
"X-Tyk-JWTValidation": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1768,7 +1705,6 @@
},
"X-Tyk-Introspection": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1808,7 +1744,6 @@
},
"X-Tyk-CustomDomain": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1832,7 +1767,6 @@
},
"X-Tyk-Header": {
"type": "object",
- "additionalProperties": false,
"properties": {
"name": {
"type": "string"
@@ -1900,7 +1834,6 @@
},
"X-Tyk-DetailedTracing": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -1953,7 +1886,6 @@
},
"X-Tyk-Webhook-Without-ID": {
"type": "object",
- "additionalProperties": false,
"properties": {
"enabled": {
"type": "boolean"
@@ -2065,6 +1997,132 @@
"X-Tyk-DomainDef": {
"type": "string",
"pattern": "^([*a-zA-Z0-9-]+(\\.[*a-zA-Z0-9-]+)*)(:\\d+)?$"
+ },
+ "X-Tyk-UpstreamAuthentication": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "basicAuth": {
+ "$ref": "#/definitions/X-Tyk-UpstreamBasicAuthentication"
+ },
+ "oauth": {
+ "$ref": "#/definitions/X-Tyk-UpstreamOAuth"
+ }
+ },
+ "required": [
+ "enabled"
+ ]
+ },
+ "X-Tyk-UpstreamBasicAuthentication": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "headerName": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "enabled"
+ ]
+ },
+ "X-Tyk-UpstreamOAuth": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "allowedAuthorizeTypes": {
+ "type": ["array", "null"],
+ "items": {
+ "type": "string",
+ "enum": [
+ "clientCredentials",
+ "password"
+ ]
+ },
+ "minItems": 0
+ },
+ "clientCredentials": {
+ "type": "object",
+ "properties": {
+ "clientId": {
+ "type": "string"
+ },
+ "clientSecret": {
+ "type": "string"
+ },
+ "tokenUrl": {
+ "type": "string"
+ },
+ "scopes": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "header": {
+ "$ref": "#/definitions/X-Tyk-AuthSource"
+ },
+ "extra_metadata": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "clientId",
+ "clientSecret",
+ "tokenUrl"
+ ]
+ },
+ "passwordAuthentication": {
+ "type": "object",
+ "properties": {
+ "clientId": {
+ "type": "string"
+ },
+ "clientSecret": {
+ "type": "string"
+ },
+ "tokenUrl": {
+ "type": "string"
+ },
+ "scopes": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "username": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "header": {
+ "$ref": "#/definitions/X-Tyk-AuthSource"
+ },
+ "extra_metadata": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "clientId",
+ "clientSecret",
+ "tokenUrl",
+ "username",
+ "password"
+ ]
+ }
+ }
}
}
-}
+}
diff --git a/apidef/oas/security.go b/apidef/oas/security.go
index acdc4480373..a10a2497f5a 100644
--- a/apidef/oas/security.go
+++ b/apidef/oas/security.go
@@ -582,6 +582,9 @@ func (c *IntrospectionCache) ExtractTo(cache *apidef.IntrospectionCache) {
}
// ExternalOAuth holds configuration for an external OAuth provider.
+// ExternalOAuth support will be deprecated starting from 5.7.0.
+// To avoid any disruptions, we recommend that you use JSON Web Token (JWT) instead,
+// as explained in https://tyk.io/docs/basic-config-and-security/security/authentication-authorization/ext-oauth-middleware/.
type ExternalOAuth struct {
// Enabled activates external oauth functionality.
Enabled bool `bson:"enabled" json:"enabled"` // required
diff --git a/apidef/oas/upstream.go b/apidef/oas/upstream.go
index 0077bdd3961..8d8ed9b5ba7 100644
--- a/apidef/oas/upstream.go
+++ b/apidef/oas/upstream.go
@@ -29,6 +29,9 @@ type Upstream struct {
// RateLimit contains the configuration related to API level rate limit.
RateLimit *RateLimit `bson:"rateLimit,omitempty" json:"rateLimit,omitempty"`
+
+ // Authentication contains the configuration related to upstream authentication.
+ Authentication *UpstreamAuth `bson:"authentication,omitempty" json:"authentication,omitempty"`
}
// Fill fills *Upstream from apidef.APIDefinition.
@@ -79,6 +82,15 @@ func (u *Upstream) Fill(api apidef.APIDefinition) {
if ShouldOmit(u.RateLimit) {
u.RateLimit = nil
}
+
+ if u.Authentication == nil {
+ u.Authentication = &UpstreamAuth{}
+ }
+
+ u.Authentication.Fill(api.UpstreamAuth)
+ if ShouldOmit(u.Authentication) {
+ u.Authentication = nil
+ }
}
// ExtractTo extracts *Upstream into *apidef.APIDefinition.
@@ -129,6 +141,15 @@ func (u *Upstream) ExtractTo(api *apidef.APIDefinition) {
}
u.RateLimit.ExtractTo(api)
+
+ if u.Authentication == nil {
+ u.Authentication = &UpstreamAuth{}
+ defer func() {
+ u.Authentication = nil
+ }()
+ }
+
+ u.Authentication.ExtractTo(&api.UpstreamAuth)
}
// ServiceDiscovery holds configuration required for service discovery.
@@ -529,3 +550,253 @@ func (r *RateLimitEndpoint) ExtractTo(meta *apidef.RateLimitMeta) {
meta.Rate = float64(r.Rate)
meta.Per = r.Per.Seconds()
}
+
+// UpstreamAuth holds the configurations related to upstream API authentication.
+type UpstreamAuth struct {
+ // Enabled enables upstream API authentication.
+ Enabled bool `bson:"enabled" json:"enabled"`
+ // BasicAuth holds the basic authentication configuration for upstream API authentication.
+ BasicAuth *UpstreamBasicAuth `bson:"basicAuth,omitempty" json:"basicAuth,omitempty"`
+ // OAuth contains the configuration for OAuth2 Client Credentials flow.
+ OAuth *UpstreamOAuth `bson:"oauth,omitempty" json:"oauth,omitempty"`
+}
+
+// Fill fills *UpstreamAuth from apidef.UpstreamAuth.
+func (u *UpstreamAuth) Fill(api apidef.UpstreamAuth) {
+ u.Enabled = api.Enabled
+
+ if u.BasicAuth == nil {
+ u.BasicAuth = &UpstreamBasicAuth{}
+ }
+ u.BasicAuth.Fill(api.BasicAuth)
+ if ShouldOmit(u.BasicAuth) {
+ u.BasicAuth = nil
+ }
+
+ if u.OAuth == nil {
+ u.OAuth = &UpstreamOAuth{}
+ }
+ u.OAuth.Fill(api.OAuth)
+ if ShouldOmit(u.OAuth) {
+ u.OAuth = nil
+ }
+}
+
+// ExtractTo extracts *UpstreamAuth into *apidef.UpstreamAuth.
+func (u *UpstreamAuth) ExtractTo(api *apidef.UpstreamAuth) {
+ api.Enabled = u.Enabled
+
+ if u.BasicAuth == nil {
+ u.BasicAuth = &UpstreamBasicAuth{}
+ defer func() {
+ u.BasicAuth = nil
+ }()
+ }
+ u.BasicAuth.ExtractTo(&api.BasicAuth)
+
+ if u.OAuth == nil {
+ u.OAuth = &UpstreamOAuth{}
+ defer func() {
+ u.OAuth = nil
+ }()
+ }
+ u.OAuth.ExtractTo(&api.OAuth)
+}
+
+// UpstreamBasicAuth holds upstream basic authentication configuration.
+type UpstreamBasicAuth struct {
+ // Enabled enables upstream basic authentication.
+ Enabled bool `bson:"enabled" json:"enabled"`
+ // Header contains configurations for the header value.
+ Header *AuthSource `bson:"header,omitempty" json:"header,omitempty"`
+ // Username is the username to be used for upstream basic authentication.
+ Username string `bson:"username" json:"username"`
+ // Password is the password to be used for upstream basic authentication.
+ Password string `bson:"password" json:"password"`
+}
+
+// Fill fills *UpstreamBasicAuth from apidef.UpstreamBasicAuth.
+func (u *UpstreamBasicAuth) Fill(api apidef.UpstreamBasicAuth) {
+ u.Enabled = api.Enabled
+ u.Username = api.Username
+ u.Password = api.Password
+
+ if u.Header == nil {
+ u.Header = &AuthSource{}
+ }
+ u.Header.Fill(api.Header.Enabled, api.Header.Name)
+ if ShouldOmit(u.Header) {
+ u.Header = nil
+ }
+}
+
+// ExtractTo extracts *UpstreamBasicAuth into *apidef.UpstreamBasicAuth.
+func (u *UpstreamBasicAuth) ExtractTo(api *apidef.UpstreamBasicAuth) {
+ api.Enabled = u.Enabled
+
+ api.Username = u.Username
+ api.Password = u.Password
+
+ if u.Header == nil {
+ u.Header = &AuthSource{}
+ defer func() {
+ u.Header = nil
+ }()
+ }
+ u.Header.ExtractTo(&api.Header.Enabled, &api.Header.Name)
+}
+
+// UpstreamOAuth holds the configuration for OAuth2 Client Credentials flow.
+type UpstreamOAuth struct {
+ // Enabled activates upstream OAuth2 authentication.
+ Enabled bool `bson:"enabled" json:"enabled"`
+ // AllowedAuthorizeTypes specifies the allowed authorization types for upstream OAuth2 authentication.
+ AllowedAuthorizeTypes []string `bson:"allowedAuthorizeTypes" json:"allowedAuthorizeTypes"`
+ // ClientCredentials holds the configuration for OAuth2 Client Credentials flow.
+ ClientCredentials *ClientCredentials `bson:"clientCredentials,omitempty" json:"clientCredentials,omitempty"`
+ // PasswordAuthentication holds the configuration for upstream OAuth password authentication flow.
+ PasswordAuthentication *PasswordAuthentication `bson:"passwordAuthentication,omitempty" json:"passwordAuthentication,omitempty"`
+}
+
+// PasswordAuthentication holds the configuration for upstream OAuth2 password authentication flow.
+type PasswordAuthentication struct {
+ ClientAuthData
+ // Header holds the configuration for the custom header to be used for OAuth authentication.
+ Header *AuthSource `bson:"header" json:"header"`
+ // Username is the username to be used for upstream OAuth2 password authentication.
+ Username string `bson:"username" json:"username"`
+ // Password is the password to be used for upstream OAuth2 password authentication.
+ Password string `bson:"password" json:"password"`
+ // TokenURL is the resource server's token endpoint
+ // URL. This is a constant specific to each server.
+ TokenURL string `bson:"tokenUrl" json:"tokenUrl"`
+ // Scopes specifies optional requested permissions.
+ Scopes []string `bson:"scopes" json:"scopes,omitempty"`
+ // ExtraMetadata holds the keys that we want to extract from the token and pass to the upstream.
+ ExtraMetadata []string `bson:"extraMetadata" json:"extraMetadata,omitempty"`
+}
+
+// ClientAuthData holds the client ID and secret for OAuth2 authentication.
+type ClientAuthData struct {
+ // ClientID is the application's ID.
+ ClientID string `bson:"clientId" json:"clientId"`
+ // ClientSecret is the application's secret.
+ ClientSecret string `bson:"clientSecret" json:"clientSecret"`
+}
+
+// ClientCredentials holds the configuration for OAuth2 Client Credentials flow.
+type ClientCredentials struct {
+ ClientAuthData
+ // Header holds the configuration for the custom header to be used for OAuth authentication.
+ Header *AuthSource `bson:"header" json:"header"`
+ // TokenURL is the resource server's token endpoint
+ // URL. This is a constant specific to each server.
+ TokenURL string `bson:"tokenUrl" json:"tokenUrl"`
+ // Scopes specifies optional requested permissions.
+ Scopes []string `bson:"scopes,omitempty" json:"scopes,omitempty"`
+ // HeaderName is the custom header name to be used for OAuth client credential flow authentication.
+ // Defaults to `Authorization`.
+ HeaderName string `bson:"headerName" json:"headerName"`
+ // ExtraMetadata holds the keys that we want to extract from the token and pass to the upstream.
+ ExtraMetadata []string `bson:"extraMetadata" json:"extraMetadata,omitempty"`
+}
+
+func (c *ClientCredentials) Fill(api apidef.ClientCredentials) {
+ c.ClientID = api.ClientID
+ c.ClientSecret = api.ClientSecret
+ c.TokenURL = api.TokenURL
+ c.Scopes = api.Scopes
+ c.ExtraMetadata = api.ExtraMetadata
+
+ if c.Header == nil {
+ c.Header = &AuthSource{}
+ }
+ c.Header.Fill(api.Header.Enabled, api.Header.Name)
+ if ShouldOmit(c.Header) {
+ c.Header = nil
+ }
+}
+
+func (p *PasswordAuthentication) Fill(api apidef.PasswordAuthentication) {
+ p.ClientID = api.ClientID
+ p.ClientSecret = api.ClientSecret
+ p.Username = api.Username
+ p.Password = api.Password
+ p.TokenURL = api.TokenURL
+ p.Scopes = api.Scopes
+ p.ExtraMetadata = api.ExtraMetadata
+ if p.Header == nil {
+ p.Header = &AuthSource{}
+ }
+ p.Header.Fill(api.Header.Enabled, api.Header.Name)
+ if ShouldOmit(p.Header) {
+ p.Header = nil
+ }
+}
+
+func (u *UpstreamOAuth) Fill(api apidef.UpstreamOAuth) {
+ u.Enabled = api.Enabled
+ u.AllowedAuthorizeTypes = api.AllowedAuthorizeTypes
+
+ if u.ClientCredentials == nil {
+ u.ClientCredentials = &ClientCredentials{}
+ }
+ u.ClientCredentials.Fill(api.ClientCredentials)
+ if ShouldOmit(u.ClientCredentials) {
+ u.ClientCredentials = nil
+ }
+
+ if u.PasswordAuthentication == nil {
+ u.PasswordAuthentication = &PasswordAuthentication{}
+ }
+ u.PasswordAuthentication.Fill(api.PasswordAuthentication)
+ if ShouldOmit(u.PasswordAuthentication) {
+ u.PasswordAuthentication = nil
+ }
+}
+
+func (c *ClientCredentials) ExtractTo(api *apidef.ClientCredentials) {
+ api.ClientID = c.ClientID
+ api.ClientSecret = c.ClientSecret
+ api.TokenURL = c.TokenURL
+ api.Scopes = c.Scopes
+ api.ExtraMetadata = c.ExtraMetadata
+
+ if c.Header == nil {
+ c.Header = &AuthSource{}
+ defer func() {
+ c.Header = nil
+ }()
+ }
+ c.Header.ExtractTo(&api.Header.Enabled, &api.Header.Name)
+}
+
+func (p *PasswordAuthentication) ExtractTo(api *apidef.PasswordAuthentication) {
+ api.ClientID = p.ClientID
+ api.ClientSecret = p.ClientSecret
+ api.Username = p.Username
+ api.Password = p.Password
+ api.TokenURL = p.TokenURL
+ api.Scopes = p.Scopes
+ api.ExtraMetadata = p.ExtraMetadata
+}
+
+func (u *UpstreamOAuth) ExtractTo(api *apidef.UpstreamOAuth) {
+ api.Enabled = u.Enabled
+ api.AllowedAuthorizeTypes = u.AllowedAuthorizeTypes
+ if u.ClientCredentials == nil {
+ u.ClientCredentials = &ClientCredentials{}
+ defer func() {
+ u.ClientCredentials = nil
+ }()
+ }
+ u.ClientCredentials.ExtractTo(&api.ClientCredentials)
+
+ if u.PasswordAuthentication == nil {
+ u.PasswordAuthentication = &PasswordAuthentication{}
+ defer func() {
+ u.PasswordAuthentication = nil
+ }()
+ }
+ u.PasswordAuthentication.ExtractTo(&api.PasswordAuthentication)
+}
diff --git a/apidef/rpc.go b/apidef/rpc.go
index fe53207b0d7..76904c51b1e 100644
--- a/apidef/rpc.go
+++ b/apidef/rpc.go
@@ -1,9 +1,5 @@
package apidef
-import (
- internalmodel "github.com/TykTechnologies/tyk/internal/model"
-)
-
type InboundData struct {
KeyName string
Value string
@@ -26,6 +22,14 @@ type GroupLoginRequest struct {
Node []byte
}
+// HostDetails contains information about a host machine,
+// including its hostname, process ID (PID), and IP address.
+type HostDetails struct {
+ Hostname string
+ PID int
+ Address string
+}
+
type NodeData struct {
NodeID string `json:"node_id"`
APIKey string `json:"api_key"`
@@ -36,7 +40,7 @@ type NodeData struct {
Tags []string `json:"tags"`
Health map[string]HealthCheckItem `json:"health"`
Stats GWStats `json:"stats"`
- HostDetails internalmodel.HostDetails `json:"host_details"`
+ HostDetails HostDetails `json:"host_details"`
}
type GWStats struct {
diff --git a/apidef/schema.go b/apidef/schema.go
index 04c7eb0682f..3764f49b5c0 100644
--- a/apidef/schema.go
+++ b/apidef/schema.go
@@ -1,771 +1,1127 @@
package apidef
const Schema = `{
- "type": ["object", "null"],
- "$schema": "http://json-schema.org/draft-04/schema",
- "id": "http://jsonschema.net",
- "additionalProperties": false,
- "properties": {
- "is_site": {
- "type": "boolean"
- },
- "uptime_tests": {
- "type": ["object", "null"]
- },
- "expire_analytics_after": {
- "type": "number"
- },
- "id": {
- "type": "string"
- },
- "org_id": {
- "type": "string"
- },
- "api_id": {
- "type": "string"
- },
- "expiration": {
- "type": "string"
- },
- "tags_disabled": {
- "type": "boolean"
- },
- "enable_ip_whitelisting": {
- "type": "boolean"
- },
- "enable_ip_blacklisting": {
- "type": "boolean"
- },
- "enable_context_vars": {
- "type": "boolean"
- },
- "strip_auth_data": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "$schema": "http://json-schema.org/draft-04/schema",
+ "id": "http://jsonschema.net",
+ "additionalProperties": false,
+ "properties": {
+ "is_site": {
+ "type": "boolean"
+ },
+ "uptime_tests": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "expire_analytics_after": {
+ "type": "number"
+ },
+ "id": {
+ "type": "string"
+ },
+ "org_id": {
+ "type": "string"
+ },
+ "api_id": {
+ "type": "string"
+ },
+ "expiration": {
+ "type": "string"
+ },
+ "tags_disabled": {
+ "type": "boolean"
+ },
+ "enable_ip_whitelisting": {
+ "type": "boolean"
+ },
+ "enable_ip_blacklisting": {
+ "type": "boolean"
+ },
+ "enable_context_vars": {
+ "type": "boolean"
+ },
+ "strip_auth_data": {
+ "type": "boolean"
+ },
+ "do_not_track": {
+ "type": "boolean"
+ },
+ "enable_jwt": {
+ "type": "boolean"
+ },
+ "use_openid": {
+ "type": "boolean"
+ },
+ "openid_options": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "use_standard_auth": {
+ "type": "boolean"
+ },
+ "use_go_plugin_auth": {
+ "type": "boolean"
+ },
+ "enable_coprocess_auth": {
+ "type": "boolean"
+ },
+ "custom_plugin_auth_enabled": {
+ "type": "boolean"
+ },
+ "jwt_skip_kid": {
+ "type": "boolean"
+ },
+ "base_identity_provided_by": {
+ "type": "string"
+ },
+ "disable_rate_limit": {
+ "type": "boolean"
+ },
+ "disable_quota": {
+ "type": "boolean"
+ },
+ "custom_middleware_bundle": {
+ "type": "string"
+ },
+ "custom_middleware_bundle_disabled": {
+ "type": "boolean"
+ },
+ "jwt_policy_field_name": {
+ "type": "string"
+ },
+ "jwt_default_policies": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "jwt_signing_method": {
+ "type": "string"
+ },
+ "jwt_source": {
+ "type": "string"
+ },
+ "jwt_identity_base_field": {
+ "type": "string"
+ },
+ "jwt_client_base_field": {
+ "type": "string"
+ },
+ "jwt_disable_issued_at_validation": {
+ "type": "boolean"
+ },
+ "jwt_disable_expires_at_validation": {
+ "type": "boolean"
+ },
+ "jwt_disable_not_before_validation": {
+ "type": "boolean"
+ },
+ "jwt_issued_at_validation_skew": {
+ "type": "number"
+ },
+ "jwt_expires_at_validation_skew": {
+ "type": "number"
+ },
+ "jwt_not_before_validation_skew": {
+ "type": "number"
+ },
+ "jwt_scope_to_policy_mapping": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "jwt_scope_claim_name": {
+ "type": "string"
+ },
+ "scopes": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "jwt": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "scope_claim_name": {
+ "type": "string"
+ },
+ "scope_to_policy": {
+ "type": [
+ "object",
+ "null"
+ ]
+ }
+ }
+ },
+ "oidc": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "scope_claim_name": {
+ "type": "string"
+ },
+ "scope_to_policy": {
+ "type": [
+ "object",
+ "null"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "idp_client_id_mapping_disabled": {
+ "type": "boolean"
+ },
+ "use_keyless": {
+ "type": "boolean"
+ },
+ "use_basic_auth": {
+ "type": "boolean"
+ },
+ "use_mutual_tls_auth": {
+ "type": "boolean"
+ },
+ "client_certificates": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "upstream_certificates": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "upstream_certificates_disabled": {
+ "type": "boolean"
+ },
+ "pinned_public_keys": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "certificate_pinning_disabled": {
+ "type": "boolean"
+ },
+ "allowed_ips": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "blacklisted_ips": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "enable_batch_request_support": {
+ "type": "boolean"
+ },
+ "event_handlers": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "notifications": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "use_oauth2": {
+ "type": "boolean"
+ },
+ "oauth_meta": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "external_oauth": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "cache_options": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "tags": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "tag_headers": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "basic_auth": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "CORS": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "response_processors": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "auth_provider": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "type": "string",
+ "enum": [
+ ""
+ ]
+ },
+ "storage_engine": {
+ "type": "string",
+ "enum": [
+ ""
+ ]
+ }
+ }
+ },
+ "session_provider": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "type": "string",
+ "enum": [
+ ""
+ ]
+ },
+ "storage_engine": {
+ "type": "string",
+ "enum": [
+ ""
+ ]
+ }
+ }
+ },
+ "hmac_allowed_clock_skew": {
+ "type": "number"
+ },
+ "hmac_allowed_algorithms": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "dont_set_quota_on_create": {
+ "type": "boolean"
+ },
+ "custom_middleware": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "pre": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "post": {
+ "type": [
+ "array",
+ "null"
+ ]
+ }
+ }
+ },
+ "session_lifetime_respects_key_expiration": {
+ "type": "boolean"
+ },
+ "session_lifetime": {
+ "type": "number"
+ },
+ "enable_detailed_recording": {
+ "type": "boolean"
+ },
+ "enable_signature_checking": {
+ "type": "boolean"
+ },
+ "active": {
+ "type": "boolean"
+ },
+ "internal": {
+ "type": "boolean"
+ },
+ "auth": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "id": "http://jsonschema.net/auth",
+ "properties": {
+ "auth_header_name": {
+ "type": "string",
+ "id": "http://jsonschema.net/auth/auth_header_name"
+ },
+ "use_certificate": {
"type": "boolean"
+ }
+ }
+ },
+ "auth_configs": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "definition": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "id": "http://jsonschema.net/definition",
+ "properties": {
+ "key": {
+ "type": "string",
+ "id": "http://jsonschema.net/definition/key"
+ },
+ "location": {
+ "type": "string",
+ "id": "http://jsonschema.net/definition/location"
+ },
+ "strip_path": {
+ "type": "boolean",
+ "id": "http://jsonschema.net/definition/location"
+ }
+ },
+ "required": [
+ "key",
+ "location"
+ ]
+ },
+ "name": {
+ "type": "string",
+ "id": "http://jsonschema.net/name"
+ },
+ "slug": {
+ "type": "string",
+ "pattern": "[a-zA-Z0-9]*",
+ "id": "http://jsonschema.net/name"
+ },
+ "domain": {
+ "type": "string"
+ },
+ "domain_disabled": {
+ "type": "boolean"
+ },
+ "listen_port": {
+ "type": "number"
+ },
+ "protocol": {
+ "type": "string"
+ },
+ "enable_proxy_protocol": {
+ "type": "boolean"
+ },
+ "certificates": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "check_host_against_uptime_tests": {
+ "type": "boolean"
+ },
+ "proxy": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "id": "http://jsonschema.net/proxy",
+ "properties": {
+ "target_url": {
+ "type": "string",
+ "id": "http://jsonschema.net/proxy/target_url"
},
- "do_not_track": {
- "type": "boolean"
- },
- "enable_jwt": {
- "type": "boolean"
- },
- "use_openid": {
- "type": "boolean"
- },
- "openid_options": {
- "type": ["object", "null"]
- },
- "use_standard_auth": {
- "type": "boolean"
- },
- "use_go_plugin_auth": {
- "type": "boolean"
- },
- "enable_coprocess_auth": {
- "type": "boolean"
- },
- "custom_plugin_auth_enabled": {
- "type": "boolean"
- },
- "jwt_skip_kid": {
- "type": "boolean"
- },
- "base_identity_provided_by": {
- "type": "string"
- },
- "disable_rate_limit": {
- "type": "boolean"
- },
- "disable_quota": {
- "type": "boolean"
- },
- "custom_middleware_bundle": {
- "type": "string"
- },
- "custom_middleware_bundle_disabled": {
- "type": "boolean"
- },
- "jwt_policy_field_name": {
- "type": "string"
- },
- "jwt_default_policies": {
- "type": ["array", "null"]
- },
- "jwt_signing_method": {
- "type": "string"
- },
- "jwt_source": {
- "type": "string"
- },
- "jwt_identity_base_field": {
- "type": "string"
- },
- "jwt_client_base_field": {
- "type": "string"
- },
- "jwt_disable_issued_at_validation": {
- "type": "boolean"
- },
- "jwt_disable_expires_at_validation": {
- "type": "boolean"
- },
- "jwt_disable_not_before_validation": {
- "type": "boolean"
- },
- "jwt_issued_at_validation_skew": {
- "type": "number"
- },
- "jwt_expires_at_validation_skew": {
- "type": "number"
- },
- "jwt_not_before_validation_skew": {
- "type": "number"
- },
- "jwt_scope_to_policy_mapping": {
- "type": ["object", "null"]
- },
- "jwt_scope_claim_name": {
- "type": "string"
- },
- "scopes" : {
- "type":["object", "null"],
- "properties": {
- "jwt": {
- "type":["object", "null"],
- "properties" : {
- "scope_claim_name": {
- "type": "string"
- },
- "scope_to_policy": {
- "type":["object", "null"]
- }
- }
- },
- "oidc": {
- "type":["object", "null"],
- "properties" : {
- "scope_claim_name": {
- "type": "string"
- },
- "scope_to_policy": {
- "type":["object", "null"]
- }
- }
- }
- }
- },
- "idp_client_id_mapping_disabled": {
- "type": "boolean"
- },
- "use_keyless": {
- "type": "boolean"
- },
- "use_basic_auth": {
- "type": "boolean"
- },
- "use_mutual_tls_auth": {
- "type": "boolean"
- },
- "client_certificates": {
- "type": ["array", "null"]
- },
- "upstream_certificates": {
- "type": ["object", "null"]
- },
- "upstream_certificates_disabled": {
- "type": "boolean"
- },
- "pinned_public_keys": {
- "type": ["object", "null"]
- },
- "certificate_pinning_disabled": {
- "type": "boolean"
- },
- "allowed_ips": {
- "type": ["array", "null"]
- },
- "blacklisted_ips": {
- "type": ["array", "null"]
- },
- "enable_batch_request_support": {
- "type": "boolean"
- },
- "event_handlers": {
- "type":["object", "null"]
- },
- "notifications": {
- "type":["object", "null"]
- },
- "use_oauth2": {
- "type": "boolean"
- },
- "oauth_meta": {
- "type":["object", "null"]
- },
- "external_oauth": {
- "type":["object", "null"]
- },
- "cache_options": {
- "type":["object", "null"]
- },
- "tags": {
- "type": ["array", "null"]
- },
- "tag_headers": {
- "type": ["array", "null"]
- },
- "basic_auth": {
- "type": ["object", "null"]
- },
- "CORS": {
- "type":["object", "null"]
+ "check_host_against_uptime_tests": {
+ "type": "boolean"
},
- "response_processors": {
- "type": ["array", "null"]
+ "preserve_host_header": {
+ "type": "boolean"
},
- "auth_provider": {
- "type":["object", "null"],
- "properties": {
- "name": {
- "type": "string",
- "enum": [""]
- },
- "storage_engine": {
- "type": "string",
- "enum": [""]
- }
+ "transport": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "ssl_ciphers": {
+ "type": [
+ "array",
+ "null"
+ ]
+ },
+ "ssl_min_version": {
+ "type": "number"
+ },
+ "ssl_max_version": {
+ "type": "number"
+ },
+ "proxy_url": {
+ "type": "string"
+ },
+ "ssl_force_common_name_check": {
+ "type": "boolean"
}
- },
- "session_provider": {
- "type":["object", "null"],
- "properties": {
+ }
+ }
+ },
+ "required": [
+ "target_url"
+ ]
+ },
+ "hook_references": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "version_data": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "id": "http://jsonschema.net/version_data",
+ "properties": {
+ "not_versioned": {
+ "type": "boolean",
+ "id": "http://jsonschema.net/version_data/not_versioned"
+ },
+ "default_version": {
+ "type": "string",
+ "id": "http://jsonschema.net/version_data/default_version"
+ },
+ "versions": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "id": "http://jsonschema.net/version_data/versions",
+ "patternProperties": {
+ "^[a-zA-Z0-9]+$": {
+ "title": "versionInfoProperty",
+ "type": [
+ "object",
+ "null"
+ ],
+ "id": "http://jsonschema.net/access_rights/versionInfoProperty",
+ "properties": {
+ "expires": {
+ "type": "string",
+ "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/expires"
+ },
"name": {
- "type": "string",
- "enum": [""]
+ "type": "string",
+ "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/name"
},
- "storage_engine": {
- "type": "string",
- "enum": [""]
+ "paths": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths",
+ "properties": {
+ "black_list": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/black_list"
+ },
+ "ignored": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/ignored"
+ },
+ "white_list": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/white_list"
+ }
+ }
}
+ },
+ "required": [
+ "name"
+ ]
}
+ }
+ }
+ },
+ "required": [
+ "not_versioned",
+ "versions"
+ ]
+ },
+ "config_data": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "config_data_disabled": {
+ "type": "boolean"
+ },
+ "global_rate_limit": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "rate": {
+ "type": "number"
+ },
+ "per": {
+ "type": "number"
+ }
+ }
+ },
+ "request_signing": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "is_enabled": {
+ "type": "boolean"
},
- "hmac_allowed_clock_skew": {
- "type": "number"
+ "secret": {
+ "type": "string"
},
- "hmac_allowed_algorithms": {
- "type": ["array", "null"]
+ "key_id": {
+ "type": "string"
},
- "dont_set_quota_on_create": {
- "type": "boolean"
+ "algorithm": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "is_enabled"
+ ]
+ },
+ "graphql": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "version": {
+ "type": "string"
+ },
+ "execution_mode": {
+ "type": "string",
+ "enum": [
+ "proxyOnly",
+ "executionEngine",
+ "subgraph",
+ "supergraph",
+ ""
+ ]
+ },
+ "schema": {
+ "type": "string"
+ },
+ "last_schema_update": {
+ "type": "string",
+ "format": "date-time"
+ },
+ "type_field_configurations": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "properties": {
+ "type_name": {
+ "type": "string"
+ },
+ "field_name": {
+ "type": "string"
},
- "custom_middleware": {
- "type":["object", "null"],
- "properties": {
- "pre": {
- "type": ["array", "null"]
+ "mapping": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "disabled": {
+ "type": "boolean"
},
- "post": {
- "type": ["array", "null"]
+ "path": {
+ "type": "string"
}
- }
- },
- "session_lifetime_respects_key_expiration": {
- "type": "boolean"
- },
- "session_lifetime": {
- "type": "number"
- },
- "enable_detailed_recording": {
- "type": "boolean"
- },
- "enable_signature_checking": {
- "type": "boolean"
- },
- "active": {
- "type": "boolean"
- },
- "internal": {
- "type": "boolean"
- },
- "auth": {
- "type": ["object", "null"],
- "id": "http://jsonschema.net/auth",
- "properties": {
- "auth_header_name": {
- "type": "string",
- "id": "http://jsonschema.net/auth/auth_header_name"
+ },
+ "required": [
+ "disabled"
+ ]
+ },
+ "data_source": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "kind": {
+ "type": "boolean"
},
- "use_certificate": {
- "type": "boolean"
+ "data_source_config": {
+ "type": [
+ "object",
+ "null"
+ ]
}
+ },
+ "required": [
+ "kind"
+ ]
}
- },
- "auth_configs":{
- "type": ["object", "null"]
- },
- "definition": {
- "type": ["object", "null"],
- "id": "http://jsonschema.net/definition",
- "properties": {
- "key": {
- "type": "string",
- "id": "http://jsonschema.net/definition/key"
+ },
+ "required": [
+ "type_name",
+ "field_name"
+ ]
+ },
+ "engine": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "field_configs": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "properties": {
+ "type_name": {
+ "type": "string"
+ },
+ "field_name": {
+ "type": "string"
},
- "location": {
- "type": "string",
- "id": "http://jsonschema.net/definition/location"
+ "disable_default_mapping": {
+ "type": "boolean"
},
- "strip_path": {
- "type": "boolean",
- "id": "http://jsonschema.net/definition/location"
+ "path": {
+ "type": [
+ "array",
+ "null"
+ ]
}
+ }
},
- "required": [
- "key",
- "location"
- ]
- },
- "name": {
- "type": "string",
- "id": "http://jsonschema.net/name"
- },
- "slug": {
- "type": "string",
- "pattern": "[a-zA-Z0-9]*",
- "id": "http://jsonschema.net/name"
- },
- "domain": {
- "type": "string"
- },
- "domain_disabled": {
- "type": "boolean"
- },
- "listen_port": {
- "type": "number"
- },
- "protocol": {
- "type": "string"
- },
- "enable_proxy_protocol": {
- "type": "boolean"
- },
- "certificates": {
- "type": ["array", "null"]
- },
- "check_host_against_uptime_tests": {
- "type": "boolean"
- },
- "proxy": {
- "type": ["object", "null"],
- "id": "http://jsonschema.net/proxy",
- "properties": {
- "target_url": {
- "type": "string",
- "id": "http://jsonschema.net/proxy/target_url"
+ "data_sources": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "properties": {
+ "kind": {
+ "type": "string",
+ "enum": [
+ "REST",
+ "GraphQL",
+ ""
+ ]
},
- "check_host_against_uptime_tests": {
- "type": "boolean"
+ "name": {
+ "type": "string"
},
- "preserve_host_header": {
- "type": "boolean"
+ "internal": {
+ "type": "boolean"
},
- "transport": {
- "type": ["object", "null"],
- "properties": {
- "ssl_ciphers": {
- "type": ["array", "null"]
- },
- "ssl_min_version": {
- "type": "number"
- },
- "ssl_max_version": {
- "type": "number"
- },
- "proxy_url": {
- "type": "string"
- },
- "ssl_force_common_name_check": {
- "type": "boolean"
- }
+ "root_fields": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "fields": {
+ "type": [
+ "array",
+ "null"
+ ]
}
- }
- },
- "required": [
- "target_url"
- ]
- },
- "hook_references": {
- "type": ["object", "null"]
- },
- "version_data": {
- "type": ["object", "null"],
- "id": "http://jsonschema.net/version_data",
- "properties": {
- "not_versioned": {
- "type": "boolean",
- "id": "http://jsonschema.net/version_data/not_versioned"
- },
- "default_version":{
- "type": "string",
- "id": "http://jsonschema.net/version_data/default_version"
+ }
},
- "versions": {
- "type": ["object", "null"],
- "id": "http://jsonschema.net/version_data/versions",
- "patternProperties": {
- "^[a-zA-Z0-9]+$": {
- "title": "versionInfoProperty",
- "type": ["object", "null"],
- "id": "http://jsonschema.net/access_rights/versionInfoProperty",
- "properties": {
- "expires": {
- "type": "string",
- "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/expires"
- },
- "name": {
- "type": "string",
- "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/name"
- },
- "paths": {
- "type": ["object", "null"],
- "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths",
- "properties": {
- "black_list": {
- "type": ["array", "null"],
- "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/black_list"
- },
- "ignored": {
- "type": ["array", "null"],
- "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/ignored"
- },
- "white_list": {
- "type": ["array", "null"],
- "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/white_list"
- }
- }
- }
- },
- "required": [
- "name"
- ]
- }
- }
+ "config": {
+ "type": [
+ "object",
+ "null"
+ ]
}
+ },
+ "required": [
+ "kind"
+ ]
},
- "required": [
- "not_versioned",
- "versions"
- ]
- },
- "config_data": {
- "type": ["object", "null"]
- },
- "config_data_disabled": {
- "type": "boolean"
- },
- "global_rate_limit": {
- "type": ["object", "null"],
- "properties": {
- "rate": {
- "type": "number"
+ "global_headers": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "properties": {
+ "key": {
+ "type": "string"
},
- "per": {
- "type": "number"
+ "value": {
+ "type": "string"
}
+ },
+ "required": [
+ "key",
+ "value"
+ ]
}
+ }
},
- "request_signing": {
- "type": ["object", "null"],
- "properties": {
- "is_enabled": {
+ "proxy": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "features": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "use_immutable_headers": {
+ "type": "boolean"
+ }
+ }
+ },
+ "auth_headers": {
+ "type": [
+ "object",
+ "null"
+ ]
+ },
+ "request_headers_rewrite": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "object",
+ "properties": {
+ "value": {
+ "type": "string"
+ },
+ "remove": {
"type": "boolean"
+ }
},
- "secret": {
- "type": "string"
+ "required": [
+ "value",
+ "remove"
+ ]
+ }
+ }
+ }
+ },
+ "subgraph": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "sdl": {
+ "type": "string"
+ }
+ }
+ },
+ "supergraph": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "updated_at": {
+ "type": "string",
+ "format": "date-time"
+ },
+ "disable_query_batching": {
+ "type": "boolean"
+ },
+ "subgraphs": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "properties": {
+ "api_id": {
+ "type": "string"
},
- "key_id": {
- "type": "string"
+ "name": {
+ "type": "string"
},
- "algorithm": {
- "type": "string"
+ "url": {
+ "type": "string"
+ },
+ "sdl": {
+ "type": "string"
+ },
+ "headers": {
+ "type": [
+ "object",
+ "null"
+ ]
}
+ }
+ },
+ "global_headers": {
+ "type": [
+ "object",
+ "null"
+ ]
},
- "required": [
- "is_enabled"
- ]
+ "merged_sdl": {
+ "type": "string"
+ }
+ }
+ },
+ "introspection": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "disabled": {
+ "type": "boolean"
+ }
+ }
+ },
+ "playground": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "path": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "enabled"
+ ]
+ }
+ },
+ "required": [
+ "enabled"
+ ]
+ },
+ "analytics_plugin": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "enabled": {
+ "type": "boolean"
},
- "graphql": {
- "type": ["object", "null"],
- "properties": {
- "enabled": {
- "type": "boolean"
- },
- "version": {
- "type": "string"
+ "plugin_path": {
+ "type": "string"
+ },
+ "func_name": {
+ "type": "string"
+ }
+ }
+ },
+ "is_oas": {
+ "type": "boolean"
+ },
+ "detailed_tracing": {
+ "type": "boolean"
+ },
+ "upstream_auth": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "basic_auth": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "username": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "header_name": {
+ "type": "string"
+ }
+ }
+ },
+ "oauth": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "client_credentials": {
+ "type": "object",
+ "properties": {
+ "client_id": {
+ "type": "string"
},
- "execution_mode": {
- "type": "string",
- "enum": [
- "proxyOnly",
- "executionEngine",
- "subgraph",
- "supergraph",
- ""
- ]
+ "client_secret": {
+ "type": "string"
},
- "schema": {
- "type": "string"
+ "token_url": {
+ "type": "string"
},
- "last_schema_update": {
- "type": "string",
- "format": "date-time"
+ "scopes": {
+ "type": [
+ "array",
+ "null"
+ ]
},
- "type_field_configurations": {
- "type": ["array", "null"],
- "properties": {
- "type_name": {
- "type": "string"
- },
- "field_name": {
- "type": "string"
- },
- "mapping": {
- "type": ["object", "null"],
- "properties": {
- "disabled": {
- "type": "boolean"
- },
- "path": {
- "type": "string"
- }
- },
- "required": [
- "disabled"
- ]
- },
- "data_source": {
- "type": ["object", "null"],
- "properties": {
- "kind": {
- "type": "boolean"
- },
- "data_source_config": {
- "type": ["object", "null"]
- }
- },
- "required": [
- "kind"
- ]
- }
+ "header": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
},
- "required": [
- "type_name",
- "field_name"
- ]
- },
- "engine": {
- "type": ["object", "null"],
- "properties": {
- "field_configs": {
- "type": ["array", "null"],
- "properties": {
- "type_name": {
- "type": "string"
- },
- "field_name": {
- "type": "string"
- },
- "disable_default_mapping": {
- "type": "boolean"
- },
- "path": {
- "type": ["array", "null"]
- }
- }
- },
- "data_sources": {
- "type": ["array", "null"],
- "properties": {
- "kind": {
- "type": "string",
- "enum": [
- "REST",
- "GraphQL",
- ""
- ]
- },
- "name": {
- "type": "string"
- },
- "internal": {
- "type": "boolean"
- },
- "root_fields": {
- "type": ["array", "null"],
- "properties": {
- "type": {
- "type": "string"
- },
- "fields": {
- "type": ["array", "null"]
- }
- }
- },
- "config": {
- "type": ["object", "null"]
- }
- },
- "required": [
- "kind"
- ]
- },
- "global_headers": {
- "type": [
- "array",
- "null"
- ],
- "properties": {
- "key": {
- "type": "string"
- },
- "value": {
- "type": "string"
- }
- },
- "required": [
- "key",
- "value"
- ]
- }
+ "name": {
+ "type": "string"
}
+ },
+ "required": [
+ "enabled"
+ ]
},
- "proxy": {
- "type": ["object", "null"],
- "properties": {
- "features": {
- "type": ["object", "null"],
- "properties": {
- "use_immutable_headers": {
- "type": "boolean"
- }
- }
- },
- "auth_headers": {
- "type": ["object", "null"]
- },
- "request_headers_rewrite": {
- "type": ["object", "null"],
- "additionalProperties": {
- "type": "object",
- "properties": {
- "value": {
- "type": "string"
- },
- "remove": {
- "type": "boolean"
- }
- },
- "required": ["value", "remove"]
- }
- }
- }
+ "extra_metadata": {
+ "type": ["array", "null"]
+ }
+ },
+ "required": [
+ "client_id",
+ "client_secret",
+ "token_url"
+ ]
+ },
+ "password_authentication": {
+ "type": "object",
+ "properties": {
+ "client_id": {
+ "type": "string"
},
- "subgraph": {
- "type": ["object", "null"],
- "properties": {
- "sdl": {
- "type": "string"
- }
- }
+ "client_secret": {
+ "type": "string"
},
- "supergraph": {
- "type": ["object", "null"],
- "properties": {
- "updated_at": {
- "type": "string",
- "format": "date-time"
- },
- "disable_query_batching": {
- "type": "boolean"
- },
- "subgraphs": {
- "type": ["array", "null"],
- "properties": {
- "api_id": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "sdl": {
- "type": "string"
- },
- "headers": {
- "type": ["object", "null"]
- }
- }
- },
- "global_headers": {
- "type": ["object", "null"]
- },
- "merged_sdl": {
- "type": "string"
- }
- }
+ "username": {
+ "type": "string"
},
- "introspection": {
- "type": ["object", "null"],
- "properties": {
- "disabled": {
- "type": "boolean"
- }
- }
+ "password": {
+ "type": "string"
},
- "playground": {
- "type": ["object", "null"],
- "properties": {
- "enabled": {
- "type": "boolean"
- },
- "path": {
- "type": "string"
- }
- },
- "required": [
- "enabled"
- ]
- }
- },
- "required": [
- "enabled"
- ]
- },
- "analytics_plugin": {
- "type": ["object", "null"],
- "properties": {
- "enabled": {
- "type": "boolean"
+ "token_url": {
+ "type": "string"
},
- "plugin_path": {
- "type": "string"
+ "scopes": {
+ "type": [
+ "array",
+ "null"
+ ]
},
- "func_name": {
- "type": "string"
- }
+ "header": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "enabled"
+ ]
+ },
+ "extra_metadata": {
+ "type": ["array", "null"]
+ }
+ },
+ "required": [
+ "client_id",
+ "client_secret",
+ "token_url",
+ "username",
+ "password"
+ ]
}
- },
- "is_oas": {
- "type": "boolean"
- },
- "detailed_tracing": {
- "type": "boolean"
+ }
}
- },
- "required": [
- "name",
- "proxy",
- "version_data"
- ]
-}`
+ }
+ }
+ },
+ "required": [
+ "name",
+ "proxy",
+ "version_data"
+ ]
+}
+`
diff --git a/ci/Dockerfile.distroless b/ci/Dockerfile.distroless
index 45570f602f9..d823708f0d1 100644
--- a/ci/Dockerfile.distroless
+++ b/ci/Dockerfile.distroless
@@ -2,11 +2,12 @@
FROM debian:bookworm-slim as DEB
ARG TARGETARCH
+ARG EDITION
ENV DEBIAN_FRONTEND=noninteractive
COPY *${TARGETARCH}.deb /
-RUN rm -f /*fips*.deb && dpkg -i /tyk-gateway*${TARGETARCH}.deb && rm /*.deb
+RUN rm -f /*fips*.deb && dpkg -i /tyk-gateway${EDITION}_*${TARGETARCH}.deb && rm /*.deb
FROM gcr.io/distroless/base-debian12:latest
diff --git a/ci/goreleaser/goreleaser.yml b/ci/goreleaser/goreleaser.yml
index 1a52115a178..8296607b48f 100644
--- a/ci/goreleaser/goreleaser.yml
+++ b/ci/goreleaser/goreleaser.yml
@@ -21,12 +21,45 @@ builds:
goarch:
- amd64
binary: tyk
- - id: fips
+ - id: std-arm64
flags:
- -tags=ignore
- -trimpath
- -tags=goplugin
- - -tags=fips,boringcrypto
+ ldflags:
+ - -X github.com/TykTechnologies/tyk/internal/build.Version={{.Version}}
+ - -X github.com/TykTechnologies/tyk/internal/build.Commit={{.FullCommit}}
+ - -X github.com/TykTechnologies/tyk/internal/build.BuildDate={{.Date}}
+ - -X github.com/TykTechnologies/tyk/internal/build.BuiltBy=goreleaser
+ env:
+ - CC=aarch64-linux-gnu-gcc
+ goos:
+ - linux
+ goarch:
+ - arm64
+ binary: tyk
+ - id: std-s390x
+ flags:
+ - -tags=ignore
+ - -trimpath
+ - -tags=goplugin
+ ldflags:
+ - -X github.com/TykTechnologies/tyk/internal/build.Version={{.Version}}
+ - -X github.com/TykTechnologies/tyk/internal/build.Commit={{.FullCommit}}
+ - -X github.com/TykTechnologies/tyk/internal/build.BuildDate={{.Date}}
+ - -X github.com/TykTechnologies/tyk/internal/build.BuiltBy=goreleaser
+ env:
+ - CC=s390x-linux-gnu-gcc
+ goos:
+ - linux
+ goarch:
+ - s390x
+ binary: tyk
+ - id: fips
+ flags:
+ - -tags=ignore
+ - -trimpath
+ - -tags=goplugin,fips,boringcrypto,ee
env:
- GOEXPERIMENT=boringcrypto
ldflags:
@@ -39,11 +72,26 @@ builds:
goarch:
- amd64
binary: tyk
- - id: std-arm64
+ - id: ee
flags:
- -tags=ignore
- -trimpath
- - -tags=goplugin
+ - -tags=goplugin,ee
+ ldflags:
+ - -X github.com/TykTechnologies/tyk/internal/build.Version={{.Version}}
+ - -X github.com/TykTechnologies/tyk/internal/build.Commit={{.FullCommit}}
+ - -X github.com/TykTechnologies/tyk/internal/build.BuildDate={{.Date}}
+ - -X github.com/TykTechnologies/tyk/internal/build.BuiltBy=goreleaser
+ goos:
+ - linux
+ goarch:
+ - amd64
+ binary: tyk
+ - id: ee-arm64
+ flags:
+ - -tags=ignore
+ - -trimpath
+ - -tags=goplugin,ee
ldflags:
- -X github.com/TykTechnologies/tyk/internal/build.Version={{.Version}}
- -X github.com/TykTechnologies/tyk/internal/build.Commit={{.FullCommit}}
@@ -56,11 +104,11 @@ builds:
goarch:
- arm64
binary: tyk
- - id: std-s390x
+ - id: ee-s390x
flags:
- -tags=ignore
- -trimpath
- - -tags=goplugin
+ - -tags=goplugin,ee
ldflags:
- -X github.com/TykTechnologies/tyk/internal/build.Version={{.Version}}
- -X github.com/TykTechnologies/tyk/internal/build.Commit={{.FullCommit}}
@@ -190,6 +238,65 @@ nfpms:
signature:
key_file: tyk.io.signing.key
type: origin
+ - id: ee
+ vendor: "Tyk Technologies Ltd"
+ homepage: "https://tyk.io"
+ maintainer: "Tyk "
+ description: Tyk Open Source API Gateway written in Go, supporting REST, GraphQL, TCP and gRPC protocols
+ package_name: tyk-gateway-ee
+ file_name_template: "{{ .ConventionalFileName }}"
+ builds:
+ - ee
+ - ee-arm64
+ - ee-s390x
+ formats:
+ - deb
+ - rpm
+ contents:
+ - src: "README.md"
+ dst: "/opt/share/docs/tyk-gateway/README.md"
+ - src: "ci/install/*"
+ dst: "/opt/tyk-gateway/install"
+ - src: ci/install/inits/systemd/system/tyk-gateway.service
+ dst: /lib/systemd/system/tyk-gateway.service
+ - src: ci/install/inits/sysv/init.d/tyk-gateway
+ dst: /etc/init.d/tyk-gateway
+ - src: /opt/tyk-gateway
+ dst: /opt/tyk
+ type: "symlink"
+ - src: "LICENSE.md"
+ dst: "/opt/share/docs/tyk-gateway/LICENSE.md"
+ - src: "apps/app_sample.*"
+ dst: "/opt/tyk-gateway/apps"
+ - src: "templates/*.json"
+ dst: "/opt/tyk-gateway/templates"
+ - src: "templates/playground/*"
+ dst: "/opt/tyk-gateway/templates/playground"
+ - src: "middleware/*.js"
+ dst: "/opt/tyk-gateway/middleware"
+ - src: "event_handlers/sample/*.js"
+ dst: "/opt/tyk-gateway/event_handlers/sample"
+ - src: "policies/*.json"
+ dst: "/opt/tyk-gateway/policies"
+ - src: "coprocess/*"
+ dst: "/opt/tyk-gateway/coprocess"
+ - src: tyk.conf.example
+ dst: /opt/tyk-gateway/tyk.conf
+ type: "config|noreplace"
+ scripts:
+ preinstall: "ci/install/before_install.sh"
+ postinstall: "ci/install/post_install.sh"
+ postremove: "ci/install/post_remove.sh"
+ bindir: "/opt/tyk-gateway"
+ rpm:
+ scripts:
+ posttrans: ci/install/post_trans.sh
+ signature:
+ key_file: tyk.io.signing.key
+ deb:
+ signature:
+ key_file: tyk.io.signing.key
+ type: origin
publishers:
- name: tyk-gateway-unstable
env:
diff --git a/ci/images/plugin-compiler/Dockerfile b/ci/images/plugin-compiler/Dockerfile
index 1127fa4e0ed..f045ce5a784 100644
--- a/ci/images/plugin-compiler/Dockerfile
+++ b/ci/images/plugin-compiler/Dockerfile
@@ -20,17 +20,24 @@ RUN apt-get purge -y --allow-remove-essential --auto-remove mercurial wget curl
ADD go.mod go.sum $TYK_GW_PATH
WORKDIR $TYK_GW_PATH
-RUN --mount=type=cache,mode=0755,target=/go/pkg/mod go mod download
+RUN --mount=type=cache,mode=0755,target=/go/pkg/mod \
+ --mount=type=cache,mode=0755,target=/root/.cache/go-build \
+ go mod download
ADD . $TYK_GW_PATH
# Provide a gateway test binary for testing plugin loading.
-RUN --mount=type=cache,mode=0755,target=/go/pkg/mod GOBIN=/usr/local/bin go install -tags=goplugin -trimpath .
+RUN --mount=type=cache,mode=0755,target=/go/pkg/mod \
+ --mount=type=cache,mode=0755,target=/root/.cache/go-build \
+ GOBIN=/usr/local/bin go install -tags=goplugin -trimpath .
ARG GITHUB_SHA
ARG GITHUB_TAG
-ENV GITHUB_SHA ${GITHUB_SHA}
-ENV GITHUB_TAG ${GITHUB_TAG}
+ENV GITHUB_SHA=${GITHUB_SHA}
+ENV GITHUB_TAG=${GITHUB_TAG}
+
+ARG BUILD_TAG
+ENV BUILD_TAG=${BUILD_TAG}
COPY ci/images/plugin-compiler/data/build.sh /build.sh
RUN chmod +x /build.sh
diff --git a/ci/images/plugin-compiler/Taskfile.yml b/ci/images/plugin-compiler/Taskfile.yml
index 669ade027b6..732c889cfd1 100644
--- a/ci/images/plugin-compiler/Taskfile.yml
+++ b/ci/images/plugin-compiler/Taskfile.yml
@@ -14,7 +14,13 @@ tasks:
desc: "Build plugin compiler"
dir: '{{.root}}'
cmds:
- - docker build --no-cache --progress=plain --build-arg GO_VERSION=1.22 --build-arg GITHUB_TAG={{.tag}} --build-arg GITHUB_SHA={{.sha}} --platform=linux/amd64 --rm -t {{.image}} -f ci/images/plugin-compiler/Dockerfile .
+ - docker build --build-arg GITHUB_TAG={{.tag}} --build-arg GITHUB_SHA={{.sha}} --platform=linux/amd64 --rm -t {{.image}} -f ci/images/plugin-compiler/Dockerfile .
+
+ build-nocache:
+ desc: "Build plugin compiler without docker cache"
+ dir: '{{.root}}'
+ cmds:
+ - docker build --no-cache --progress=plain --build-arg GITHUB_TAG={{.tag}} --build-arg GITHUB_SHA={{.sha}} --platform=linux/amd64 --rm -t {{.image}} -f ci/images/plugin-compiler/Dockerfile .
test:
desc: "Run test docker image"
diff --git a/ci/images/plugin-compiler/data/build.sh b/ci/images/plugin-compiler/data/build.sh
index 885af399802..c5006653e46 100755
--- a/ci/images/plugin-compiler/data/build.sh
+++ b/ci/images/plugin-compiler/data/build.sh
@@ -16,7 +16,7 @@ GATEWAY_VERSION=$(echo $GITHUB_TAG | perl -n -e'/v(\d+).(\d+).(\d+)/'' && print
#
# If GOOS and GOARCH are not set, it will build `{plugin_name}`.
#
-# Example command: ./build.sh
+# Example command: ./build.sh
# Example output: tyk-extras_5.0.0_linux_amd64.so
plugin_name=$1
@@ -145,7 +145,11 @@ if [[ "$DEBUG" == "1" ]] ; then
git diff --cached
fi
-CC=$CC CGO_ENABLED=1 GOOS=$GOOS GOARCH=$GOARCH go build -buildmode=plugin -trimpath -o $plugin_name
+if [ -n "$BUILD_TAG" ]; then
+ CC=$CC CGO_ENABLED=1 GOOS=$GOOS GOARCH=$GOARCH go build -buildmode=plugin -trimpath -tags=$BUILD_TAG -o $plugin_name
+else
+ CC=$CC CGO_ENABLED=1 GOOS=$GOOS GOARCH=$GOARCH go build -buildmode=plugin -trimpath -o $plugin_name
+fi
set +x
diff --git a/ci/tests/tracing/apps/test-graphql-tracing-invalid.json b/ci/tests/tracing/apps/test-graphql-tracing-invalid.json
index eb71dc6b3c5..52a4da4b600 100644
--- a/ci/tests/tracing/apps/test-graphql-tracing-invalid.json
+++ b/ci/tests/tracing/apps/test-graphql-tracing-invalid.json
@@ -303,7 +303,7 @@
"proxy": {
"preserve_host_header": false,
"listen_path": "/test-graphql-tracing-invalid/",
- "target_url": "https://httpbin.com",
+ "target_url": "http://httpbin:80/status/404",
"disable_strip_slash": true,
"strip_listen_path": true,
"enable_load_balancing": false,
diff --git a/ci/tests/tracing/scenarios/tyk_test-graphql-tracing-invalid_404.yml b/ci/tests/tracing/scenarios/tyk_test-graphql-tracing-invalid_404.yml
index 54bf5600f1e..819ce641083 100644
--- a/ci/tests/tracing/scenarios/tyk_test-graphql-tracing-invalid_404.yml
+++ b/ci/tests/tracing/scenarios/tyk_test-graphql-tracing-invalid_404.yml
@@ -17,7 +17,7 @@ spec:
name: Should return 404 for upstream
assertions:
- attr:http.status_code = 404
- - attr:http.url = "https://httpbin.com"
+ - attr:http.url = "http://httpbin:80/status/404"
- selector: span[tracetest.span.type = "general" name = "GraphqlMiddleware Validation"] span[tracetest.span.type="general" name="GraphqlEngine"]
name: Make sure Graphql Engine is a child of GraphqlMiddleware Validation
assertions:
diff --git a/ci/tests/tracing/scenarios/tyk_test_200.yml b/ci/tests/tracing/scenarios/tyk_test_200.yml
index e0c807d5082..3b84ffe5e41 100644
--- a/ci/tests/tracing/scenarios/tyk_test_200.yml
+++ b/ci/tests/tracing/scenarios/tyk_test_200.yml
@@ -10,6 +10,8 @@ spec:
headers:
- key: Content-Type
value: application/json
+ - key: User-Agent
+ value: Go-http-client/1.1
specs:
- selector: span[tracetest.span.type="http" name="GET /test/ip" http.method="GET"]
name: Test main span attributes
diff --git a/config/config.go b/config/config.go
index fd9e810d9a3..2b504564033 100644
--- a/config/config.go
+++ b/config/config.go
@@ -414,40 +414,40 @@ type HttpServerOptionsConfig struct {
// EnablePathPrefixMatching changes how the gateway matches incoming URL paths against routes (patterns) defined in the API definition.
// By default, the gateway uses wildcard matching. When EnablePathPrefixMatching is enabled, it switches to prefix matching. For example, a defined path such as `/json` will only match request URLs that begin with `/json`, rather than matching any URL containing `/json`.
-
+ //
// The gateway checks the request URL against several variations depending on whether path versioning is enabled:
// - Full path (listen path + version + endpoint): `/listen-path/v4/json`
// - Non-versioned full path (listen path + endpoint): `/listen-path/json`
// - Path without version (endpoint only): `/json`
-
+ //
// For patterns that start with `/`, the gateway prepends `^` before performing the check, ensuring a true prefix match.
// For patterns that start with `^`, the gateway will already perform prefix matching so EnablePathPrefixMatching will have no impact.
// This option allows for more specific and controlled routing of API requests, potentially reducing unintended matches. Note that you may need to adjust existing route definitions when enabling this option.
-
+ //
// Example:
-
+ //
// With wildcard matching, `/json` might match `/api/v1/data/json`.
// With prefix matching, `/json` would not match `/api/v1/data/json`, but would match `/json/data`.
-
+ //
// Combining EnablePathPrefixMatching with EnablePathSuffixMatching will result in exact URL matching, with `/json` being evaluated as `^/json$`.
EnablePathPrefixMatching bool `json:"enable_path_prefix_matching"`
// EnablePathSuffixMatching changes how the gateway matches incoming URL paths against routes (patterns) defined in the API definition.
// By default, the gateway uses wildcard matching. When EnablePathSuffixMatching is enabled, it switches to suffix matching. For example, a defined path such as `/json` will only match request URLs that end with `/json`, rather than matching any URL containing `/json`.
-
+ //
// The gateway checks the request URL against several variations depending on whether path versioning is enabled:
// - Full path (listen path + version + endpoint): `/listen-path/v4/json`
// - Non-versioned full path (listen path + endpoint): `/listen-path/json`
// - Path without version (endpoint only): `/json`
-
+ //
// For patterns that already end with `$`, the gateway will already perform suffix matching so EnablePathSuffixMatching will have no impact. For all other patterns, the gateway appends `$` before performing the check, ensuring a true suffix match.
// This option allows for more specific and controlled routing of API requests, potentially reducing unintended matches. Note that you may need to adjust existing route definitions when enabling this option.
-
+ //
// Example:
-
+ //
// With wildcard matching, `/json` might match `/api/v1/json/data`.
// With suffix matching, `/json` would not match `/api/v1/json/data`, but would match `/api/v1/json`.
-
+ //
// Combining EnablePathSuffixMatching with EnablePathPrefixMatching will result in exact URL matching, with `/json` being evaluated as `^/json$`.
EnablePathSuffixMatching bool `json:"enable_path_suffix_matching"`
diff --git a/ctx/ctx.go b/ctx/ctx.go
index 13b7fd76aeb..43a31a2d838 100644
--- a/ctx/ctx.go
+++ b/ctx/ctx.go
@@ -5,6 +5,8 @@ import (
"encoding/json"
"net/http"
+ "github.com/TykTechnologies/tyk/internal/httputil"
+
"github.com/TykTechnologies/tyk/apidef/oas"
"github.com/TykTechnologies/tyk/config"
@@ -53,11 +55,6 @@ const (
OASDefinition
)
-func setContext(r *http.Request, ctx context.Context) {
- r2 := r.WithContext(ctx)
- *r = *r2
-}
-
func ctxSetSession(r *http.Request, s *user.SessionState, scheduleUpdate bool, hashKey bool) {
if s == nil {
@@ -81,7 +78,7 @@ func ctxSetSession(r *http.Request, s *user.SessionState, scheduleUpdate bool, h
s.Touch()
}
- setContext(r, ctx)
+ httputil.SetContext(r, ctx)
}
func GetAuthToken(r *http.Request) string {
@@ -119,7 +116,7 @@ func SetSession(r *http.Request, s *user.SessionState, scheduleUpdate bool, hash
func SetDefinition(r *http.Request, s *apidef.APIDefinition) {
ctx := r.Context()
ctx = context.WithValue(ctx, Definition, s)
- setContext(r, ctx)
+ httputil.SetContext(r, ctx)
}
func GetDefinition(r *http.Request) *apidef.APIDefinition {
diff --git a/dlpython/main.go b/dlpython/main.go
index 23d3614616a..7b3e50a74e4 100644
--- a/dlpython/main.go
+++ b/dlpython/main.go
@@ -17,8 +17,6 @@ import (
"path/filepath"
"regexp"
"runtime"
- "sort"
- "strconv"
"strings"
"unsafe"
@@ -51,7 +49,7 @@ func FindPythonConfig(customVersion string) (selectedVersion string, err error)
}
// Scan python-config binaries:
- pythonConfigBinaries := map[float64]string{}
+ pythonConfigBinaries := map[string]string{}
for _, p := range strings.Split(paths, ":") {
if !strings.HasSuffix(p, "/bin") {
@@ -72,16 +70,15 @@ func FindPythonConfig(customVersion string) (selectedVersion string, err error)
minorVersion := matches[0][3]
pyMallocBuild := matches[0][4]
isConfig := matches[0][5]
- versionStr := "3"
+ version := "3"
if minorVersion != "" {
- versionStr += "." + minorVersion
+ version += "." + minorVersion
}
if pyMallocBuild != "" {
- versionStr += pyMallocBuild
+ version += pyMallocBuild
}
- version, err := strconv.ParseFloat(versionStr, 64)
- if err != nil || isConfig == "" {
+ if isConfig == "" {
continue
}
@@ -96,25 +93,22 @@ func FindPythonConfig(customVersion string) (selectedVersion string, err error)
}
for ver, binPath := range pythonConfigBinaries {
- logger.Debugf("Found python-config binary: %.1f (%s)", ver, binPath)
+ logger.Debugf("Found python-config binary: %s (%s)", ver, binPath)
}
if customVersion == "" {
- var availableVersions []float64
- for v := range pythonConfigBinaries {
- availableVersions = append(availableVersions, v)
+ var availableVersions []string
+ for k := range pythonConfigBinaries {
+ availableVersions = append(availableVersions, k)
}
- sort.Float64s(availableVersions)
- lastVersion := availableVersions[len(availableVersions)-1]
+ lastVersion := selectLatestVersion(availableVersions)
+
pythonConfigPath = pythonConfigBinaries[lastVersion]
- selectedVersion = strconv.FormatFloat(lastVersion, 'f', -1, 64)
- logger.Debug("Using latest Python version")
+ selectedVersion = lastVersion
+
+ logger.Debug("Using Python version", selectedVersion)
} else {
- prefixF, err := strconv.ParseFloat(customVersion, 64)
- if err != nil {
- return selectedVersion, errors.New("Couldn't parse Python version")
- }
- cfgPath, ok := pythonConfigBinaries[prefixF]
+ cfgPath, ok := pythonConfigBinaries[customVersion]
if !ok {
return selectedVersion, errors.New("No python-config was found for the specified version")
}
diff --git a/dlpython/main_test.go b/dlpython/main_test.go
index 3550025e90e..fe90efe3270 100644
--- a/dlpython/main_test.go
+++ b/dlpython/main_test.go
@@ -20,7 +20,8 @@ func TestFindPythonConfig(t *testing.T) {
if err == nil {
t.Fatal("Should fail when loading a nonexistent Python version")
}
- _, err = FindPythonConfig(testVersion)
+ pythonVersion, err := FindPythonConfig(testVersion)
+ t.Logf("Version is %s", pythonVersion)
t.Logf("Library path is %s", pythonLibraryPath)
if err != nil {
t.Fatalf("Couldn't find Python %s", testVersion)
diff --git a/dlpython/version.go b/dlpython/version.go
new file mode 100644
index 00000000000..0d5a6a89189
--- /dev/null
+++ b/dlpython/version.go
@@ -0,0 +1,32 @@
+package python
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+func selectLatestVersion(versions []string) string {
+ // Sort the versions based on a custom comparison function
+ sort.Slice(versions, func(i, j int) bool {
+ // Split the version numbers into components (e.g., "3.5" -> ["3", "5"])
+ versionI := strings.Split(versions[i], ".")
+ versionJ := strings.Split(versions[j], ".")
+
+ // Compare each component (major, minor, etc.) as integers
+ for x := 0; x < len(versionI) && x < len(versionJ); x++ {
+ // Convert the components to integers for comparison
+ numI, _ := strconv.Atoi(versionI[x])
+ numJ, _ := strconv.Atoi(versionJ[x])
+
+ if numI != numJ {
+ return numI < numJ
+ }
+ }
+ // If all compared components are the same, the shorter version is considered smaller
+ return len(versionI) < len(versionJ)
+ })
+
+ // The latest version will be the last element after sorting
+ return versions[len(versions)-1]
+}
diff --git a/dlpython/version_test.go b/dlpython/version_test.go
new file mode 100644
index 00000000000..7b549b86a60
--- /dev/null
+++ b/dlpython/version_test.go
@@ -0,0 +1,15 @@
+package python
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestVersionSelection(t *testing.T) {
+ assert.Equal(t, "3.5", selectLatestVersion([]string{"2.0", "3.5"}))
+ assert.Equal(t, "3.8", selectLatestVersion([]string{"3.5", "3.8"}))
+ assert.Equal(t, "3.10", selectLatestVersion([]string{"3.9", "3.10"}))
+ assert.Equal(t, "3.11", selectLatestVersion([]string{"3.9", "3.11"}))
+ assert.Equal(t, "3.12", selectLatestVersion([]string{"3.11", "3.12"}))
+}
diff --git a/ee/EULA.pdf b/ee/EULA.pdf
new file mode 100644
index 00000000000..53c2c895c0a
Binary files /dev/null and b/ee/EULA.pdf differ
diff --git a/ee/LICENSE-EE.md b/ee/LICENSE-EE.md
new file mode 100644
index 00000000000..de7aeeab9d7
--- /dev/null
+++ b/ee/LICENSE-EE.md
@@ -0,0 +1,7 @@
+Commercial License
+
+The code in this 'ee' folder is subject to the commercial license terms outlined in the accompanying [EULA.pdf](EULA.pdf) file.
+
+For the full End User License Agreement, please refer to the [EULA.pdf](EULA.pdf) file in this directory.
+
+Copyright 2024 Tyk Technologies. All rights reserved.
diff --git a/ee/middleware/streams/manager.go b/ee/middleware/streams/manager.go
new file mode 100644
index 00000000000..42b2ce78f36
--- /dev/null
+++ b/ee/middleware/streams/manager.go
@@ -0,0 +1,115 @@
+package streams
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/gorilla/mux"
+)
+
+// Manager is responsible for creating a single stream.
+type Manager struct {
+ streams sync.Map
+ routeLock sync.Mutex
+ muxer *mux.Router
+ mw *Middleware
+ dryRun bool
+ listenPaths []string
+ activityCounter atomic.Int32 // Counts active subscriptions, requests.
+}
+
+func (sm *Manager) initStreams(r *http.Request, config *StreamsConfig) {
+ // Clear existing routes for this consumer group
+ sm.muxer = mux.NewRouter()
+
+ for streamID, streamConfig := range config.Streams {
+ sm.setUpOrDryRunStream(streamConfig, streamID)
+ }
+
+ // If it is default stream manager, init muxer
+ if r == nil {
+ for _, path := range sm.listenPaths {
+ sm.muxer.HandleFunc(path, func(_ http.ResponseWriter, _ *http.Request) {
+ // Dummy handler
+ })
+ }
+ }
+}
+
+func (sm *Manager) setUpOrDryRunStream(streamConfig any, streamID string) {
+ if streamMap, ok := streamConfig.(map[string]interface{}); ok {
+ httpPaths := GetHTTPPaths(streamMap)
+
+ if sm.dryRun {
+ if len(httpPaths) == 0 {
+ err := sm.createStream(streamID, streamMap)
+ if err != nil {
+ sm.mw.Logger().WithError(err).Errorf("Error creating stream %s", streamID)
+ }
+ }
+ } else {
+ err := sm.createStream(streamID, streamMap)
+ if err != nil {
+ sm.mw.Logger().WithError(err).Errorf("Error creating stream %s", streamID)
+ }
+ }
+ sm.listenPaths = append(sm.listenPaths, httpPaths...)
+ }
+}
+
+// removeStream removes a stream
+func (sm *Manager) removeStream(streamID string) error {
+ streamFullID := fmt.Sprintf("%s_%s", sm.mw.Spec.APIID, streamID)
+
+ if streamValue, exists := sm.streams.Load(streamFullID); exists {
+ stream, ok := streamValue.(*Stream)
+ if !ok {
+ return fmt.Errorf("stream %s is not a valid stream", streamID)
+ }
+ err := stream.Stop()
+ if err != nil {
+ return err
+ }
+ sm.streams.Delete(streamFullID)
+ } else {
+ return fmt.Errorf("stream %s does not exist", streamID)
+ }
+ return nil
+}
+
+// createStream creates a new stream
+func (sm *Manager) createStream(streamID string, config map[string]interface{}) error {
+ streamFullID := fmt.Sprintf("%s_%s", sm.mw.Spec.APIID, streamID)
+ sm.mw.Logger().Debugf("Creating stream: %s", streamFullID)
+
+ stream := NewStream(sm.mw.allowedUnsafe)
+ err := stream.Start(config, &handleFuncAdapter{
+ mw: sm.mw,
+ streamID: streamFullID,
+ muxer: sm.muxer,
+ sm: sm,
+ // child logger is necessary to prevent race condition
+ logger: sm.mw.Logger().WithField("stream", streamFullID),
+ })
+ if err != nil {
+ sm.mw.Logger().Errorf("Failed to start stream %s: %v", streamFullID, err)
+ return err
+ }
+
+ sm.streams.Store(streamFullID, stream)
+ sm.mw.Logger().Infof("Successfully created stream: %s", streamFullID)
+
+ return nil
+}
+
+func (sm *Manager) hasPath(path string) bool {
+ for _, p := range sm.listenPaths {
+ if strings.TrimPrefix(path, "/") == strings.TrimPrefix(p, "/") {
+ return true
+ }
+ }
+ return false
+}
diff --git a/ee/middleware/streams/middleware.go b/ee/middleware/streams/middleware.go
new file mode 100644
index 00000000000..5f96a8a0e3f
--- /dev/null
+++ b/ee/middleware/streams/middleware.go
@@ -0,0 +1,281 @@
+package streams
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/gorilla/mux"
+ "github.com/sirupsen/logrus"
+
+ "github.com/TykTechnologies/tyk/internal/middleware"
+ "github.com/TykTechnologies/tyk/internal/model"
+)
+
+// Middleware implements a streaming middleware.
+type Middleware struct {
+ Spec *APISpec
+ Gw Gateway
+
+ base BaseMiddleware
+
+ createStreamManagerLock sync.Mutex
+ StreamManagerCache sync.Map // Map of payload hash to Manager
+
+ ctx context.Context
+ cancel context.CancelFunc
+ allowedUnsafe []string
+ defaultManager *Manager
+}
+
+// Middleware implements model.Middleware.
+var _ model.Middleware = &Middleware{}
+
+// NewMiddleware returns a new instance of Middleware.
+func NewMiddleware(gw Gateway, mw BaseMiddleware, spec *APISpec) *Middleware {
+ return &Middleware{
+ base: mw,
+ Gw: gw,
+ Spec: spec,
+ }
+}
+
+// Logger returns a logger with middleware filled out.
+func (s *Middleware) Logger() *logrus.Entry {
+ return s.base.Logger().WithField("mw", s.Name())
+}
+
+// Name returns the name for the middleware.
+func (s *Middleware) Name() string {
+ return "StreamingMiddleware"
+}
+
+// EnabledForSpec checks if streaming is enabled on the config.
+func (s *Middleware) EnabledForSpec() bool {
+ s.Logger().Debug("Checking if streaming is enabled")
+
+ streamingConfig := s.Gw.GetConfig().Streaming
+ s.Logger().Debugf("Streaming config: %+v", streamingConfig)
+
+ if streamingConfig.Enabled {
+ s.Logger().Debug("Streaming is enabled in the config")
+ s.allowedUnsafe = streamingConfig.AllowUnsafe
+ s.Logger().Debugf("Allowed unsafe components: %v", s.allowedUnsafe)
+
+ config := s.getStreamsConfig(nil)
+ GlobalStreamCounter.Add(int64(len(config.Streams)))
+
+ s.Logger().Debug("Total streams count: ", len(config.Streams))
+
+ return len(config.Streams) != 0
+ }
+
+ s.Logger().Debug("Streaming is not enabled in the config")
+ return false
+}
+
+// Init initializes the middleware
+func (s *Middleware) Init() {
+ s.Logger().Debug("Initializing Middleware")
+ s.ctx, s.cancel = context.WithCancel(context.Background())
+
+ s.Logger().Debug("Initializing default stream manager")
+ s.defaultManager = s.CreateStreamManager(nil)
+
+ // Start garbage collection routine
+ go func() {
+ ticker := time.NewTicker(StreamGCInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ s.GC()
+ case <-s.ctx.Done():
+ return
+ }
+ }
+ }()
+}
+
+// CreateStreamManager creates or retrieves a stream manager based on the request.
+func (s *Middleware) CreateStreamManager(r *http.Request) *Manager {
+ streamsConfig := s.getStreamsConfig(r)
+ configJSON, _ := json.Marshal(streamsConfig)
+ cacheKey := fmt.Sprintf("%x", sha256.Sum256(configJSON))
+
+ s.createStreamManagerLock.Lock()
+ defer s.createStreamManagerLock.Unlock()
+
+ s.Logger().Debug("Attempting to load stream manager from cache")
+ s.Logger().Debugf("Cache key: %s", cacheKey)
+ if cachedManager, found := s.StreamManagerCache.Load(cacheKey); found {
+ s.Logger().Debug("Found cached stream manager")
+ return cachedManager.(*Manager)
+ }
+
+ newManager := &Manager{
+ muxer: mux.NewRouter(),
+ mw: s,
+ dryRun: r == nil,
+ activityCounter: atomic.Int32{},
+ }
+ newManager.initStreams(r, streamsConfig)
+
+ if r != nil {
+ s.StreamManagerCache.Store(cacheKey, newManager)
+ }
+ return newManager
+}
+
+// GC removes inactive stream managers.
+func (s *Middleware) GC() {
+ s.Logger().Debug("Starting garbage collection for inactive stream managers")
+
+ s.StreamManagerCache.Range(func(key, value interface{}) bool {
+ manager := value.(*Manager)
+ if manager == s.defaultManager {
+ return true
+ }
+
+ if manager.activityCounter.Load() <= 0 {
+ s.Logger().Infof("Removing inactive stream manager: %v", key)
+ manager.streams.Range(func(streamKey, streamValue interface{}) bool {
+ streamID := streamKey.(string)
+ err := manager.removeStream(streamID)
+ if err != nil {
+ s.Logger().WithError(err).Errorf("Error removing stream %s", streamID)
+ }
+ return true
+ })
+ s.StreamManagerCache.Delete(key)
+ }
+
+ return true
+ })
+}
+
+func (s *Middleware) getStreamsConfig(r *http.Request) *StreamsConfig {
+ config := &StreamsConfig{Streams: make(map[string]any)}
+ if !s.Spec.IsOAS {
+ return config
+ }
+
+ extension, ok := s.Spec.OAS.T.Extensions[ExtensionTykStreaming]
+ if !ok {
+ return config
+ }
+
+ if streamsMap, ok := extension.(map[string]any); ok {
+ if streams, ok := streamsMap["streams"].(map[string]any); ok {
+ s.processStreamsConfig(r, streams, config)
+ }
+ }
+
+ return config
+}
+
+func (s *Middleware) processStreamsConfig(r *http.Request, streams map[string]any, config *StreamsConfig) {
+ for streamID, stream := range streams {
+ if r == nil {
+ s.Logger().Debugf("No request available to replace variables in stream config for %s", streamID)
+ } else {
+ s.Logger().Debugf("Stream config for %s: %v", streamID, stream)
+ marshaledStream, err := json.Marshal(stream)
+ if err != nil {
+ s.Logger().Errorf("Failed to marshal stream config: %v", err)
+ continue
+ }
+ replacedStream := s.Gw.ReplaceTykVariables(r, string(marshaledStream), true)
+
+ if replacedStream != string(marshaledStream) {
+ s.Logger().Debugf("Stream config changed for %s: %s", streamID, replacedStream)
+ } else {
+ s.Logger().Debugf("Stream config has not changed for %s: %s", streamID, replacedStream)
+ }
+
+ var unmarshaledStream map[string]interface{}
+ err = json.Unmarshal([]byte(replacedStream), &unmarshaledStream)
+ if err != nil {
+ s.Logger().Errorf("Failed to unmarshal replaced stream config: %v", err)
+ continue
+ }
+ stream = unmarshaledStream
+ }
+ config.Streams[streamID] = stream
+ }
+}
+
+// ProcessRequest will handle the streaming functionality.
+func (s *Middleware) ProcessRequest(w http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
+ strippedPath := s.Spec.StripListenPath(r.URL.Path)
+ if !s.defaultManager.hasPath(strippedPath) {
+ return nil, http.StatusOK
+ }
+
+ s.Logger().Debugf("Processing request: %s, %s", r.URL.Path, strippedPath)
+
+ newRequest := &http.Request{
+ Method: r.Method,
+ URL: &url.URL{Scheme: r.URL.Scheme, Host: r.URL.Host, Path: strippedPath},
+ }
+
+ if !s.defaultManager.muxer.Match(newRequest, &mux.RouteMatch{}) {
+ return nil, http.StatusOK
+ }
+
+ var match mux.RouteMatch
+ streamManager := s.CreateStreamManager(r)
+ streamManager.routeLock.Lock()
+ streamManager.muxer.Match(newRequest, &match)
+ streamManager.routeLock.Unlock()
+
+ // direct Bento handler
+ handler, ok := match.Handler.(http.HandlerFunc)
+ if !ok {
+ return errors.New("invalid route handler"), http.StatusInternalServerError
+ }
+
+ streamManager.activityCounter.Add(1)
+ defer streamManager.activityCounter.Add(-1)
+
+ handler.ServeHTTP(w, r)
+
+ return nil, middleware.StatusRespond
+}
+
+// Unload closes and remove active streams.
+func (s *Middleware) Unload() {
+ s.Logger().Debugf("Unloading streaming middleware %s", s.Spec.Name)
+
+ totalStreams := 0
+ s.cancel()
+
+ s.StreamManagerCache.Range(func(_, value interface{}) bool {
+ manager, ok := value.(*Manager)
+ if !ok {
+ return true
+ }
+ manager.streams.Range(func(_, streamValue interface{}) bool {
+ totalStreams++
+ if stream, ok := streamValue.(*Stream); ok {
+ if err := stream.Reset(); err != nil {
+ s.Logger().WithError(err).Error("Failed to reset stream")
+ }
+ }
+ return true
+ })
+ return true
+ })
+
+ GlobalStreamCounter.Add(-int64(totalStreams))
+ s.StreamManagerCache = sync.Map{}
+ s.Logger().Info("All streams successfully removed")
+}
diff --git a/ee/middleware/streams/model.go b/ee/middleware/streams/model.go
new file mode 100644
index 00000000000..784cfe215e7
--- /dev/null
+++ b/ee/middleware/streams/model.go
@@ -0,0 +1,60 @@
+package streams
+
+import (
+ "sync/atomic"
+ "time"
+
+ "github.com/TykTechnologies/tyk/apidef/oas"
+ "github.com/TykTechnologies/tyk/internal/model"
+)
+
+const (
+ // ExtensionTykStreaming is the OAS extension for Tyk streaming.
+ ExtensionTykStreaming = "x-tyk-streaming"
+ StreamGCInterval = 1 * time.Minute
+)
+
+// BaseMiddleware is the subset of BaseMiddleware APIs that the middleware uses.
+type BaseMiddleware interface {
+ model.LoggerProvider
+}
+
+// Gateway is the subset of Gateway APIs that the middleware uses.
+type Gateway interface {
+ model.ConfigProvider
+ model.ReplaceTykVariables
+}
+
+// APISpec is a subset of gateway.APISpec for the values the middleware consumes.
+type APISpec struct {
+ APIID string
+ Name string
+ IsOAS bool
+ OAS oas.OAS
+
+ StripListenPath model.StripListenPathFunc
+}
+
+// NewAPISpec creates a new APISpec object based on the required inputs.
+// The resulting object is a subset of `*gateway.APISpec`.
+func NewAPISpec(id string, name string, isOasDef bool, oasDef oas.OAS, stripListenPath model.StripListenPathFunc) *APISpec {
+ return &APISpec{
+ APIID: id,
+ Name: name,
+ IsOAS: isOasDef,
+ OAS: oasDef,
+ StripListenPath: stripListenPath,
+ }
+}
+
+// StreamsConfig represents a stream configuration.
+type StreamsConfig struct {
+ Info struct {
+ Version string `json:"version"`
+ } `json:"info"`
+
+ Streams map[string]any `json:"streams"`
+}
+
+// GlobalStreamCounter is used for testing.
+var GlobalStreamCounter atomic.Int64
diff --git a/internal/streaming/manager.go b/ee/middleware/streams/stream.go
similarity index 94%
rename from internal/streaming/manager.go
rename to ee/middleware/streams/stream.go
index 65c95542ec4..cc5554e2361 100644
--- a/internal/streaming/manager.go
+++ b/ee/middleware/streams/stream.go
@@ -1,4 +1,4 @@
-package streaming
+package streams
import (
"context"
@@ -17,7 +17,7 @@ import (
_ "github.com/TykTechnologies/tyk/internal/portal"
)
-// Stream is a wrapper around benthos stream
+// Stream is a wrapper around stream
type Stream struct {
allowedUnsafe []string
streamConfig string
@@ -51,7 +51,7 @@ func (s *Stream) SetLogger(logger *logrus.Logger) {
}
}
-// Start loads up the configuration and starts the benthos stream. Non blocking
+// Start loads up the configuration and starts the stream. Non blocking
func (s *Stream) Start(config map[string]interface{}, mux service.HTTPMultiplexer) error {
s.log.Debugf("Starting stream")
@@ -107,7 +107,7 @@ func (s *Stream) Start(config map[string]interface{}, mux service.HTTPMultiplexe
return nil
}
-// Stop cleans up the benthos stream
+// Stop cleans up the stream
func (s *Stream) Stop() error {
s.log.Printf("Stopping stream")
@@ -141,7 +141,7 @@ func (s *Stream) Stop() error {
return nil
}
-// GetConfig returns the benthos configuration of the stream
+// GetConfig returns the configuration of the stream
func (s *Stream) GetConfig() string {
return s.streamConfig
}
diff --git a/internal/streaming/manager_test.go b/ee/middleware/streams/stream_test.go
similarity index 99%
rename from internal/streaming/manager_test.go
rename to ee/middleware/streams/stream_test.go
index 5f565130a39..7c2a0b8ee19 100644
--- a/internal/streaming/manager_test.go
+++ b/ee/middleware/streams/stream_test.go
@@ -1,4 +1,4 @@
-package streaming
+package streams
import (
"strings"
diff --git a/ee/middleware/streams/util.go b/ee/middleware/streams/util.go
new file mode 100644
index 00000000000..a3218a12b1f
--- /dev/null
+++ b/ee/middleware/streams/util.go
@@ -0,0 +1,98 @@
+package streams
+
+import (
+ "net/http"
+
+ "github.com/gorilla/mux"
+ "github.com/sirupsen/logrus"
+)
+
+type handleFuncAdapter struct {
+ streamID string
+ sm *Manager
+ mw *Middleware
+ muxer *mux.Router
+ logger *logrus.Entry
+}
+
+func (h *handleFuncAdapter) HandleFunc(path string, f func(http.ResponseWriter, *http.Request)) {
+ h.logger.Debugf("Registering streaming handleFunc for path: %s", path)
+
+ if h.mw == nil || h.muxer == nil {
+ h.logger.Error("Middleware or muxer is nil")
+ return
+ }
+
+ h.sm.routeLock.Lock()
+ h.muxer.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+ h.sm.activityCounter.Add(1)
+ defer h.sm.activityCounter.Add(-1)
+ f(w, r)
+ })
+ h.sm.routeLock.Unlock()
+ h.logger.Debugf("Registered handler for path: %s", path)
+}
+
+// Helper function to extract paths from an http_server configuration
+func extractPaths(httpConfig map[string]interface{}) []string {
+ var paths []string
+ defaultPaths := map[string]string{
+ "path": "/post",
+ "ws_path": "/post/ws",
+ "stream_path": "/get/stream",
+ }
+ for key, defaultValue := range defaultPaths {
+ if val, ok := httpConfig[key].(string); ok {
+ paths = append(paths, val)
+ } else {
+ paths = append(paths, defaultValue)
+ }
+ }
+ return paths
+}
+
+// extractHTTPServerPaths is a helper function to extract HTTP server paths from a given configuration.
+func extractHTTPServerPaths(config map[string]interface{}) []string {
+ if httpServerConfig, ok := config["http_server"].(map[string]interface{}); ok {
+ return extractPaths(httpServerConfig)
+ }
+ return nil
+}
+
+// handleBroker is a helper function to handle broker configurations.
+func handleBroker(brokerConfig map[string]interface{}) []string {
+ var paths []string
+ for _, ioKey := range []string{"inputs", "outputs"} {
+ if ioList, ok := brokerConfig[ioKey].([]interface{}); ok {
+ for _, ioItem := range ioList {
+ if ioItemMap, ok := ioItem.(map[string]interface{}); ok {
+ paths = append(paths, extractHTTPServerPaths(ioItemMap)...)
+ }
+ }
+ }
+ }
+ return paths
+}
+
+// GetHTTPPaths is the main function to get HTTP paths from the stream configuration.
+func GetHTTPPaths(streamConfig map[string]interface{}) []string {
+ var paths []string
+ for _, component := range []string{"input", "output"} {
+ if componentMap, ok := streamConfig[component].(map[string]interface{}); ok {
+ paths = append(paths, extractHTTPServerPaths(componentMap)...)
+ if brokerConfig, ok := componentMap["broker"].(map[string]interface{}); ok {
+ paths = append(paths, handleBroker(brokerConfig)...)
+ }
+ }
+ }
+ // remove duplicates
+ var deduplicated []string
+ exists := map[string]struct{}{}
+ for _, item := range paths {
+ if _, ok := exists[item]; !ok {
+ deduplicated = append(deduplicated, item)
+ exists[item] = struct{}{}
+ }
+ }
+ return deduplicated
+}
diff --git a/ee/middleware/upstreambasicauth/middleware.go b/ee/middleware/upstreambasicauth/middleware.go
new file mode 100644
index 00000000000..918b1502cda
--- /dev/null
+++ b/ee/middleware/upstreambasicauth/middleware.go
@@ -0,0 +1,78 @@
+package upstreambasicauth
+
+import (
+ "net/http"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/TykTechnologies/tyk/header"
+ "github.com/TykTechnologies/tyk/internal/httputil"
+ "github.com/TykTechnologies/tyk/internal/model"
+)
+
+// Middleware implements upstream basic auth middleware.
+type Middleware struct {
+ Spec *APISpec
+ Gw Gateway
+
+ base BaseMiddleware
+}
+
+// Middleware implements model.Middleware.
+var _ model.Middleware = &Middleware{}
+
+// NewMiddleware returns a new instance of Middleware.
+func NewMiddleware(gw Gateway, mw BaseMiddleware, spec *APISpec) *Middleware {
+ return &Middleware{
+ base: mw,
+ Gw: gw,
+ Spec: spec,
+ }
+}
+
+// Logger returns a logger with middleware filled out.
+func (m *Middleware) Logger() *logrus.Entry {
+ return m.base.Logger().WithField("mw", m.Name())
+}
+
+// Name returns the name for the middleware.
+func (m *Middleware) Name() string {
+ return "UpstreamBasicAuthMiddleware"
+}
+
+// EnabledForSpec checks if upstream basic auth is enabled for the API spec.
+func (m *Middleware) EnabledForSpec() bool {
+ if !m.Spec.UpstreamAuth.IsEnabled() {
+ return false
+ }
+
+ if !m.Spec.UpstreamAuth.BasicAuth.Enabled {
+ return false
+ }
+
+ return true
+}
+
+// Init initializes the middleware.
+func (m *Middleware) Init() {
+ m.Logger().Debug("Initializing Upstream basic auth Middleware")
+}
+
+// ProcessRequest will handle upstream basic auth.
+func (m *Middleware) ProcessRequest(_ http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
+ basicAuthConfig := m.Spec.UpstreamAuth.BasicAuth
+
+ upstreamBasicAuthProvider := Provider{
+ Logger: m.Logger(),
+ HeaderName: header.Authorization,
+ }
+
+ if basicAuthConfig.Header.AuthKeyName() != "" {
+ upstreamBasicAuthProvider.HeaderName = basicAuthConfig.Header.AuthKeyName()
+ }
+
+ upstreamBasicAuthProvider.AuthValue = httputil.AuthHeader(basicAuthConfig.Username, basicAuthConfig.Password)
+
+ httputil.SetUpstreamAuth(r, upstreamBasicAuthProvider)
+ return nil, http.StatusOK
+}
diff --git a/ee/middleware/upstreambasicauth/model.go b/ee/middleware/upstreambasicauth/model.go
new file mode 100644
index 00000000000..93394cf6b51
--- /dev/null
+++ b/ee/middleware/upstreambasicauth/model.go
@@ -0,0 +1,49 @@
+package upstreambasicauth
+
+import (
+ "time"
+
+ "github.com/TykTechnologies/tyk/apidef"
+
+ "github.com/TykTechnologies/tyk/apidef/oas"
+ "github.com/TykTechnologies/tyk/internal/model"
+)
+
+const (
+ // ExtensionTykStreaming is the OAS extension for Tyk streaming.
+ ExtensionTykStreaming = "x-tyk-streaming"
+ StreamGCInterval = 1 * time.Minute
+)
+
+// BaseMiddleware is the subset of BaseMiddleware APIs that the middleware uses.
+type BaseMiddleware interface {
+ model.LoggerProvider
+}
+
+// Gateway is the subset of Gateway APIs that the middleware uses.
+type Gateway interface {
+ model.ConfigProvider
+ model.ReplaceTykVariables
+}
+
+// APISpec is a subset of gateway.APISpec for the values the middleware consumes.
+type APISpec struct {
+ APIID string
+ Name string
+ IsOAS bool
+ OAS oas.OAS
+
+ UpstreamAuth apidef.UpstreamAuth
+}
+
+// NewAPISpec creates a new APISpec object based on the required inputs.
+// The resulting object is a subset of `*gateway.APISpec`.
+func NewAPISpec(id string, name string, isOasDef bool, oasDef oas.OAS, upstreamAuth apidef.UpstreamAuth) *APISpec {
+ return &APISpec{
+ APIID: id,
+ Name: name,
+ IsOAS: isOasDef,
+ OAS: oasDef,
+ UpstreamAuth: upstreamAuth,
+ }
+}
diff --git a/ee/middleware/upstreambasicauth/provider.go b/ee/middleware/upstreambasicauth/provider.go
new file mode 100644
index 00000000000..8a5086a7517
--- /dev/null
+++ b/ee/middleware/upstreambasicauth/provider.go
@@ -0,0 +1,27 @@
+package upstreambasicauth
+
+import (
+ "net/http"
+
+ "github.com/sirupsen/logrus"
+)
+
+// Provider implements upstream auth provider.
+type Provider struct {
+ // Logger is the logger to be used.
+ Logger *logrus.Entry
+ // HeaderName is the header name to be used to fill upstream auth with.
+ HeaderName string
+ // AuthValue is the value of auth header.
+ AuthValue string
+}
+
+// Fill sets the request's HeaderName header to AuthValue, logging a conflict if the client already set that header.
+func (u Provider) Fill(r *http.Request) {
+ if r.Header.Get(u.HeaderName) != "" {
+ u.Logger.WithFields(logrus.Fields{
+ "header": u.HeaderName,
+ }).Info("Authorization header conflict detected: Client header overwritten by Gateway upstream authentication header.")
+ }
+ r.Header.Set(u.HeaderName, u.AuthValue)
+}
diff --git a/gateway/api_definition.go b/gateway/api_definition.go
index 9e16f1770b8..2ee68d1b1c0 100644
--- a/gateway/api_definition.go
+++ b/gateway/api_definition.go
@@ -47,6 +47,7 @@ import (
"github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/config"
"github.com/TykTechnologies/tyk/header"
+ "github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/regexp"
"github.com/TykTechnologies/tyk/rpc"
"github.com/TykTechnologies/tyk/storage"
@@ -321,7 +322,7 @@ type APIDefinitionLoader struct {
// MakeSpec will generate a flattened URLSpec from and APIDefinitions' VersionInfo data. paths are
// keyed to the Api version name, which is determined during routing to speed up lookups
-func (a APIDefinitionLoader) MakeSpec(def *nestedApiDefinition, logger *logrus.Entry) (*APISpec, error) {
+func (a APIDefinitionLoader) MakeSpec(def *model.MergedAPI, logger *logrus.Entry) (*APISpec, error) {
spec := &APISpec{}
apiString, err := json.Marshal(def)
if err != nil {
@@ -454,52 +455,6 @@ func (a APIDefinitionLoader) MakeSpec(def *nestedApiDefinition, logger *logrus.E
return spec, nil
}
-// nestedApiDefinitionList is the response body for FromDashboardService
-type nestedApiDefinitionList struct {
- Message []nestedApiDefinition
- Nonce string
-}
-
-type nestedApiDefinition struct {
- *apidef.APIDefinition `json:"api_definition,inline"`
- OAS *oas.OAS `json:"oas"`
-}
-
-func (f *nestedApiDefinitionList) set(defs []*apidef.APIDefinition) {
- for _, def := range defs {
- f.Message = append(f.Message, nestedApiDefinition{APIDefinition: def})
- }
-}
-
-func (f *nestedApiDefinitionList) filter(enabled bool, tags ...string) []nestedApiDefinition {
- if !enabled {
- return f.Message
- }
-
- if len(tags) == 0 {
- return nil
- }
-
- tagMap := map[string]bool{}
- for _, tag := range tags {
- tagMap[tag] = true
- }
-
- result := make([]nestedApiDefinition, 0, len(f.Message))
- for _, v := range f.Message {
- if v.TagsDisabled {
- continue
- }
- for _, tag := range v.Tags {
- if ok := tagMap[tag]; ok {
- result = append(result, nestedApiDefinition{v.APIDefinition, v.OAS})
- break
- }
- }
- }
- return result
-}
-
// FromDashboardService will connect and download ApiDefintions from a Tyk Dashboard instance.
func (a APIDefinitionLoader) FromDashboardService(endpoint string) ([]*APISpec, error) {
// Get the definitions
@@ -539,7 +494,7 @@ func (a APIDefinitionLoader) FromDashboardService(endpoint string) ([]*APISpec,
}
// Extract tagged APIs#
- list := &nestedApiDefinitionList{}
+ list := model.NewMergedAPIList()
inBytes, err := io.ReadAll(resp.Body)
if err != nil {
log.Error("Couldn't read api definition list")
@@ -555,7 +510,7 @@ func (a APIDefinitionLoader) FromDashboardService(endpoint string) ([]*APISpec,
}
// Extract tagged entries only
- apiDefs := list.filter(gwConfig.DBAppConfOptions.NodeIsSegmented, gwConfig.DBAppConfOptions.Tags...)
+ apiDefs := list.Filter(gwConfig.DBAppConfOptions.NodeIsSegmented, gwConfig.DBAppConfOptions.Tags...)
// Process
specs := a.prepareSpecs(apiDefs, gwConfig, false)
@@ -697,27 +652,24 @@ func (a APIDefinitionLoader) FromRPC(store RPCDataLoader, orgId string, gw *Gate
}
func (a APIDefinitionLoader) processRPCDefinitions(apiCollection string, gw *Gateway) ([]*APISpec, error) {
-
- var payload []nestedApiDefinition
+ var payload []model.MergedAPI
if err := json.Unmarshal([]byte(apiCollection), &payload); err != nil {
return nil, err
}
- list := &nestedApiDefinitionList{
- Message: payload,
- }
+ list := model.NewMergedAPIList(payload...)
gwConfig := a.Gw.GetConfig()
// Extract tagged entries only
- apiDefs := list.filter(gwConfig.DBAppConfOptions.NodeIsSegmented, gwConfig.DBAppConfOptions.Tags...)
+ apiDefs := list.Filter(gwConfig.DBAppConfOptions.NodeIsSegmented, gwConfig.DBAppConfOptions.Tags...)
specs := a.prepareSpecs(apiDefs, gwConfig, true)
return specs, nil
}
-func (a APIDefinitionLoader) prepareSpecs(apiDefs []nestedApiDefinition, gwConfig config.Config, fromRPC bool) []*APISpec {
+func (a APIDefinitionLoader) prepareSpecs(apiDefs []model.MergedAPI, gwConfig config.Config, fromRPC bool) []*APISpec {
var specs []*APISpec
for _, def := range apiDefs {
@@ -805,7 +757,7 @@ func (a APIDefinitionLoader) loadDefFromFilePath(filePath string) (*APISpec, err
return nil, err
}
- nestDef := nestedApiDefinition{APIDefinition: &def}
+ nestDef := model.MergedAPI{APIDefinition: &def}
if def.IsOAS {
loader := openapi3.NewLoader()
// use openapi3.ReadFromFile as ReadFromURIFunc since the default implementation cache spec based on file path.
diff --git a/gateway/api_definition_test.go b/gateway/api_definition_test.go
index cece0631fb9..9915b61dd76 100644
--- a/gateway/api_definition_test.go
+++ b/gateway/api_definition_test.go
@@ -21,6 +21,8 @@ import (
"github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/apidef/oas"
"github.com/TykTechnologies/tyk/config"
+ "github.com/TykTechnologies/tyk/internal/model"
+ "github.com/TykTechnologies/tyk/internal/policy"
"github.com/TykTechnologies/tyk/rpc"
"github.com/TykTechnologies/tyk/test"
"github.com/TykTechnologies/tyk/user"
@@ -235,8 +237,8 @@ func TestGatewayTagsFilter(t *testing.T) {
}
}
- data := &nestedApiDefinitionList{}
- data.set([]*apidef.APIDefinition{
+ data := &model.MergedAPIList{}
+ data.SetClassic([]*apidef.APIDefinition{
newApiWithTags(false, []string{}),
newApiWithTags(true, []string{}),
newApiWithTags(true, []string{"a", "b", "c"}),
@@ -249,27 +251,27 @@ func TestGatewayTagsFilter(t *testing.T) {
// Test NodeIsSegmented=false
{
enabled := false
- assert.Len(t, data.filter(enabled), 5)
- assert.Len(t, data.filter(enabled, "a"), 5)
- assert.Len(t, data.filter(enabled, "b"), 5)
- assert.Len(t, data.filter(enabled, "c"), 5)
+ assert.Len(t, data.Filter(enabled), 5)
+ assert.Len(t, data.Filter(enabled, "a"), 5)
+ assert.Len(t, data.Filter(enabled, "b"), 5)
+ assert.Len(t, data.Filter(enabled, "c"), 5)
}
// Test NodeIsSegmented=true
{
enabled := true
- assert.Len(t, data.filter(enabled), 0)
- assert.Len(t, data.filter(enabled, "a"), 3)
- assert.Len(t, data.filter(enabled, "b"), 2)
- assert.Len(t, data.filter(enabled, "c"), 1)
+ assert.Len(t, data.Filter(enabled), 0)
+ assert.Len(t, data.Filter(enabled, "a"), 3)
+ assert.Len(t, data.Filter(enabled, "b"), 2)
+ assert.Len(t, data.Filter(enabled, "c"), 1)
}
// Test NodeIsSegmented=true, multiple gw tags
{
enabled := true
- assert.Len(t, data.filter(enabled), 0)
- assert.Len(t, data.filter(enabled, "a", "b"), 3)
- assert.Len(t, data.filter(enabled, "b", "c"), 2)
+ assert.Len(t, data.Filter(enabled), 0)
+ assert.Len(t, data.Filter(enabled, "a", "b"), 3)
+ assert.Len(t, data.Filter(enabled, "b", "c"), 2)
}
}
@@ -1448,9 +1450,9 @@ func Test_LoadAPIsFromRPC(t *testing.T) {
loader := APIDefinitionLoader{Gw: ts.Gw}
t.Run("load APIs from RPC - success", func(t *testing.T) {
- mockedStorage := &RPCDataLoaderMock{
+ mockedStorage := &policy.RPCDataLoaderMock{
ShouldConnect: true,
- Apis: []nestedApiDefinition{
+ Apis: []model.MergedAPI{
{APIDefinition: &apidef.APIDefinition{Id: objectID, OrgID: "org1", APIID: "api1"}},
},
}
@@ -1462,9 +1464,9 @@ func Test_LoadAPIsFromRPC(t *testing.T) {
})
t.Run("load APIs from RPC - success - then fail", func(t *testing.T) {
- mockedStorage := &RPCDataLoaderMock{
+ mockedStorage := &policy.RPCDataLoaderMock{
ShouldConnect: true,
- Apis: []nestedApiDefinition{
+ Apis: []model.MergedAPI{
{APIDefinition: &apidef.APIDefinition{Id: objectID, OrgID: "org1", APIID: "api1"}},
},
}
diff --git a/gateway/api_loader.go b/gateway/api_loader.go
index 01ad37e940e..1bd3a96373b 100644
--- a/gateway/api_loader.go
+++ b/gateway/api_loader.go
@@ -15,8 +15,6 @@ import (
"sync"
texttemplate "text/template"
- "github.com/TykTechnologies/tyk/rpc"
-
"github.com/gorilla/mux"
"github.com/justinas/alice"
"github.com/rs/cors"
@@ -24,9 +22,11 @@ import (
"github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/coprocess"
- "github.com/TykTechnologies/tyk/internal/otel"
+ "github.com/TykTechnologies/tyk/rpc"
"github.com/TykTechnologies/tyk/storage"
"github.com/TykTechnologies/tyk/trace"
+
+ "github.com/TykTechnologies/tyk/internal/otel"
)
const (
@@ -426,13 +426,22 @@ func (gw *Gateway) processSpec(spec *APISpec, apisByListen map[string]int,
gw.mwAppendEnabled(&chainArray, &RateLimitForAPI{BaseMiddleware: baseMid})
gw.mwAppendEnabled(&chainArray, &GraphQLMiddleware{BaseMiddleware: baseMid})
- gw.mwAppendEnabled(&chainArray, &StreamingMiddleware{BaseMiddleware: baseMid})
+
+ if streamMw := getStreamingMiddleware(baseMid); streamMw != nil {
+ gw.mwAppendEnabled(&chainArray, streamMw)
+ }
if !spec.UseKeylessAccess {
gw.mwAppendEnabled(&chainArray, &GraphQLComplexityMiddleware{BaseMiddleware: baseMid})
gw.mwAppendEnabled(&chainArray, &GraphQLGranularAccessMiddleware{BaseMiddleware: baseMid})
}
+ if upstreamBasicAuthMw := getUpstreamBasicAuthMw(baseMid); upstreamBasicAuthMw != nil {
+ gw.mwAppendEnabled(&chainArray, upstreamBasicAuthMw)
+ }
+
+ gw.mwAppendEnabled(&chainArray, &UpstreamOAuth{BaseMiddleware: baseMid})
+
gw.mwAppendEnabled(&chainArray, &ValidateJSON{BaseMiddleware: baseMid})
gw.mwAppendEnabled(&chainArray, &ValidateRequest{BaseMiddleware: baseMid})
gw.mwAppendEnabled(&chainArray, &PersistGraphQLOperationMiddleware{BaseMiddleware: baseMid})
@@ -467,7 +476,6 @@ func (gw *Gateway) processSpec(spec *APISpec, apisByListen map[string]int,
chainArray = append(chainArray, gw.createDynamicMiddleware(obj.Name, false, obj.RequireSession, baseMid))
}
}
-
chain = alice.New(chainArray...).Then(&DummyProxyHandler{SH: SuccessHandler{baseMid}, Gw: gw})
if !spec.UseKeylessAccess {
diff --git a/gateway/coprocess_id_extractor_test.go b/gateway/coprocess_id_extractor_test.go
index 49de2e50632..e1a328a850d 100644
--- a/gateway/coprocess_id_extractor_test.go
+++ b/gateway/coprocess_id_extractor_test.go
@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/TykTechnologies/tyk/apidef"
+ "github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/storage"
)
@@ -31,7 +32,7 @@ const (
func (ts *Test) createSpecTestFrom(tb testing.TB, def *apidef.APIDefinition) *APISpec {
tb.Helper()
loader := APIDefinitionLoader{Gw: ts.Gw}
- spec, _ := loader.MakeSpec(&nestedApiDefinition{APIDefinition: def}, nil)
+ spec, _ := loader.MakeSpec(&model.MergedAPI{APIDefinition: def}, nil)
tname := tb.Name()
redisStore := &storage.RedisCluster{KeyPrefix: tname + "-apikey.", ConnectionHandler: ts.Gw.StorageConnectionHandler}
healthStore := &storage.RedisCluster{KeyPrefix: tname + "-apihealth.", ConnectionHandler: ts.Gw.StorageConnectionHandler}
diff --git a/gateway/event_system.go b/gateway/event_system.go
index 4c1cb42488a..4cf80d04bec 100644
--- a/gateway/event_system.go
+++ b/gateway/event_system.go
@@ -71,6 +71,12 @@ type EventHostStatusMeta struct {
HostInfo HostHealthReport
}
+// EventUpstreamOAuthMeta is the metadata structure for an upstream OAuth event
+type EventUpstreamOAuthMeta struct {
+ EventMetaDefault
+ APIID string
+}
+
// EventKeyFailureMeta is the metadata structure for any failure related
// to a key, such as quota or auth failures.
type EventKeyFailureMeta struct {
diff --git a/gateway/gateway.go b/gateway/gateway.go
index 90cbcf65458..cef70e752e3 100644
--- a/gateway/gateway.go
+++ b/gateway/gateway.go
@@ -1,17 +1,12 @@
package gateway
import (
- "github.com/TykTechnologies/tyk/internal/policy"
+ "github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/user"
)
-// Repository is a description of our Gateway API promises.
-type Repository interface {
- policy.Repository
-}
-
// Gateway implements the Repository interface.
-var _ Repository = &Gateway{}
+var _ model.Gateway = &Gateway{}
// PolicyIDs returns a list of IDs for each policy loaded in the gateway.
func (gw *Gateway) PolicyIDs() []string {
@@ -41,3 +36,22 @@ func (gw *Gateway) PolicyCount() int {
return len(gw.policiesByID)
}
+
+// SetPolicies updates the internal policy map with a new policy map.
+func (gw *Gateway) SetPolicies(pols map[string]user.Policy) {
+ gw.policiesMu.Lock()
+ defer gw.policiesMu.Unlock()
+
+ gw.policiesByID = pols
+}
+
+// SetPoliciesByID will update the internal policiesByID map with new policies.
+// The key used will be the policy ID.
+func (gw *Gateway) SetPoliciesByID(pols ...user.Policy) {
+ gw.policiesMu.Lock()
+ defer gw.policiesMu.Unlock()
+
+ for _, pol := range pols {
+ gw.policiesByID[pol.ID] = pol
+ }
+}
diff --git a/gateway/handler_success.go b/gateway/handler_success.go
index 0f24aadc3f9..f61281a2b7f 100644
--- a/gateway/handler_success.go
+++ b/gateway/handler_success.go
@@ -343,19 +343,16 @@ func recordDetail(r *http.Request, spec *APISpec) bool {
}
}
- // Are we even checking?
- if !spec.GlobalConfig.EnforceOrgDataDetailLogging {
- return spec.GlobalConfig.AnalyticsConfig.EnableDetailedRecording
- }
-
- // We are, so get session data
- session, ok := r.Context().Value(ctx.OrgSessionContext).(*user.SessionState)
- if ok && session != nil {
- return session.EnableDetailedRecording || session.EnableDetailRecording // nolint:staticcheck // Deprecated DetailRecording
+ // decide based on org session.
+ if spec.GlobalConfig.EnforceOrgDataDetailLogging {
+ session, ok := r.Context().Value(ctx.OrgSessionContext).(*user.SessionState)
+ if ok && session != nil {
+ return session.EnableDetailedRecording || session.EnableDetailRecording // nolint:staticcheck // Deprecated DetailRecording
+ }
}
- // no session found, use global config
- return spec.GlobalConfig.AnalyticsConfig.EnableDetailedRecording
+ // no org session found, use global config
+ return spec.GraphQL.Enabled || spec.GlobalConfig.AnalyticsConfig.EnableDetailedRecording
}
// ServeHTTP will store the request details in the analytics store if necessary and proxy the request to it's
diff --git a/gateway/handler_success_test.go b/gateway/handler_success_test.go
index 9f1c4a1d9d3..e96cf99eecd 100644
--- a/gateway/handler_success_test.go
+++ b/gateway/handler_success_test.go
@@ -102,6 +102,13 @@ func TestRecordDetail(t *testing.T) {
},
expect: true,
},
+ {
+ title: "graphql request",
+ spec: testAPISpec(func(spec *APISpec) {
+ spec.GraphQL.Enabled = true
+ }),
+ expect: true,
+ },
}
for _, tc := range testcases {
diff --git a/gateway/health_check.go b/gateway/health_check.go
index 086071ecd8b..1a3c57d7d43 100644
--- a/gateway/health_check.go
+++ b/gateway/health_check.go
@@ -8,29 +8,42 @@ import (
"sync"
"time"
- "github.com/TykTechnologies/tyk/rpc"
-
"github.com/sirupsen/logrus"
- "github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/header"
+ "github.com/TykTechnologies/tyk/internal/model"
+ "github.com/TykTechnologies/tyk/rpc"
"github.com/TykTechnologies/tyk/storage"
)
-func (gw *Gateway) setCurrentHealthCheckInfo(h map[string]apidef.HealthCheckItem) {
+type (
+ HealthCheckItem = model.HealthCheckItem
+ HealthCheckStatus = model.HealthCheckStatus
+ HealthCheckResponse = model.HealthCheckResponse
+)
+
+const (
+ Pass = model.Pass
+ Fail = model.Fail
+ Warn = model.Warn
+ Datastore = model.Datastore
+ System = model.System
+)
+
+func (gw *Gateway) setCurrentHealthCheckInfo(h map[string]model.HealthCheckItem) {
gw.healthCheckInfo.Store(h)
}
-func (gw *Gateway) getHealthCheckInfo() map[string]apidef.HealthCheckItem {
- ret, ok := gw.healthCheckInfo.Load().(map[string]apidef.HealthCheckItem)
+func (gw *Gateway) getHealthCheckInfo() map[string]HealthCheckItem {
+ ret, ok := gw.healthCheckInfo.Load().(map[string]HealthCheckItem)
if !ok {
- return make(map[string]apidef.HealthCheckItem, 0)
+ return make(map[string]HealthCheckItem, 0)
}
return ret
}
func (gw *Gateway) initHealthCheck(ctx context.Context) {
- gw.setCurrentHealthCheckInfo(make(map[string]apidef.HealthCheckItem, 3))
+ gw.setCurrentHealthCheckInfo(make(map[string]HealthCheckItem, 3))
go func(ctx context.Context) {
var n = gw.GetConfig().LivenessCheck.CheckDuration
@@ -58,12 +71,12 @@ func (gw *Gateway) initHealthCheck(ctx context.Context) {
}
type SafeHealthCheck struct {
- info map[string]apidef.HealthCheckItem
+ info map[string]HealthCheckItem
mux sync.Mutex
}
func (gw *Gateway) gatherHealthChecks() {
- allInfos := SafeHealthCheck{info: make(map[string]apidef.HealthCheckItem, 3)}
+ allInfos := SafeHealthCheck{info: make(map[string]HealthCheckItem, 3)}
redisStore := storage.RedisCluster{KeyPrefix: "livenesscheck-", ConnectionHandler: gw.StorageConnectionHandler}
@@ -75,9 +88,9 @@ func (gw *Gateway) gatherHealthChecks() {
go func() {
defer wg.Done()
- var checkItem = apidef.HealthCheckItem{
- Status: apidef.Pass,
- ComponentType: apidef.Datastore,
+ var checkItem = HealthCheckItem{
+ Status: Pass,
+ ComponentType: Datastore,
Time: time.Now().Format(time.RFC3339),
}
@@ -85,7 +98,7 @@ func (gw *Gateway) gatherHealthChecks() {
if err != nil {
mainLog.WithField("liveness-check", true).WithError(err).Error("Redis health check failed")
checkItem.Output = err.Error()
- checkItem.Status = apidef.Fail
+ checkItem.Status = Fail
}
allInfos.mux.Lock()
@@ -99,9 +112,9 @@ func (gw *Gateway) gatherHealthChecks() {
go func() {
defer wg.Done()
- var checkItem = apidef.HealthCheckItem{
- Status: apidef.Pass,
- ComponentType: apidef.Datastore,
+ var checkItem = HealthCheckItem{
+ Status: Pass,
+ ComponentType: Datastore,
Time: time.Now().Format(time.RFC3339),
}
@@ -109,14 +122,14 @@ func (gw *Gateway) gatherHealthChecks() {
err := errors.New("Dashboard service not initialized")
mainLog.WithField("liveness-check", true).Error(err)
checkItem.Output = err.Error()
- checkItem.Status = apidef.Fail
+ checkItem.Status = Fail
} else if err := gw.DashService.Ping(); err != nil {
mainLog.WithField("liveness-check", true).Error(err)
checkItem.Output = err.Error()
- checkItem.Status = apidef.Fail
+ checkItem.Status = Fail
}
- checkItem.ComponentType = apidef.System
+ checkItem.ComponentType = System
allInfos.mux.Lock()
allInfos.info["dashboard"] = checkItem
@@ -131,18 +144,18 @@ func (gw *Gateway) gatherHealthChecks() {
go func() {
defer wg.Done()
- var checkItem = apidef.HealthCheckItem{
- Status: apidef.Pass,
- ComponentType: apidef.Datastore,
+ var checkItem = HealthCheckItem{
+ Status: Pass,
+ ComponentType: Datastore,
Time: time.Now().Format(time.RFC3339),
}
if !rpc.Login() {
checkItem.Output = "Could not connect to RPC"
- checkItem.Status = apidef.Fail
+ checkItem.Status = Fail
}
- checkItem.ComponentType = apidef.System
+ checkItem.ComponentType = System
allInfos.mux.Lock()
allInfos.info["rpc"] = checkItem
@@ -165,8 +178,8 @@ func (gw *Gateway) liveCheckHandler(w http.ResponseWriter, r *http.Request) {
checks := gw.getHealthCheckInfo()
- res := apidef.HealthCheckResponse{
- Status: apidef.Pass,
+ res := HealthCheckResponse{
+ Status: Pass,
Version: VERSION,
Description: "Tyk GW",
Details: checks,
@@ -175,22 +188,22 @@ func (gw *Gateway) liveCheckHandler(w http.ResponseWriter, r *http.Request) {
var failCount int
for _, v := range checks {
- if v.Status == apidef.Fail {
+ if v.Status == Fail {
failCount++
}
}
- var status apidef.HealthCheckStatus
+ var status HealthCheckStatus
switch failCount {
case 0:
- status = apidef.Pass
+ status = Pass
case len(checks):
- status = apidef.Fail
+ status = Fail
default:
- status = apidef.Warn
+ status = Warn
}
res.Status = status
diff --git a/gateway/middleware.go b/gateway/middleware.go
index e06baca0927..55a68ff67fb 100644
--- a/gateway/middleware.go
+++ b/gateway/middleware.go
@@ -12,14 +12,6 @@ import (
"strconv"
"time"
- "github.com/TykTechnologies/tyk/internal/cache"
- "github.com/TykTechnologies/tyk/internal/event"
- "github.com/TykTechnologies/tyk/internal/otel"
- "github.com/TykTechnologies/tyk/internal/policy"
- "github.com/TykTechnologies/tyk/rpc"
-
- "github.com/TykTechnologies/tyk/header"
-
"github.com/gocraft/health"
"github.com/justinas/alice"
newrelic "github.com/newrelic/go-agent"
@@ -28,14 +20,21 @@ import (
"golang.org/x/sync/singleflight"
"github.com/TykTechnologies/tyk/apidef"
+ "github.com/TykTechnologies/tyk/header"
+ "github.com/TykTechnologies/tyk/internal/cache"
+ "github.com/TykTechnologies/tyk/internal/event"
+ "github.com/TykTechnologies/tyk/internal/middleware"
+ "github.com/TykTechnologies/tyk/internal/otel"
+ "github.com/TykTechnologies/tyk/internal/policy"
"github.com/TykTechnologies/tyk/request"
+ "github.com/TykTechnologies/tyk/rpc"
"github.com/TykTechnologies/tyk/storage"
"github.com/TykTechnologies/tyk/trace"
"github.com/TykTechnologies/tyk/user"
)
const (
- mwStatusRespond = 666
+ mwStatusRespond = middleware.StatusRespond
DEFAULT_ORG_SESSION_EXPIRATION = int64(604800)
)
@@ -45,9 +44,10 @@ var (
)
type TykMiddleware interface {
- Init()
Base() *BaseMiddleware
+ GetSpec() *APISpec
+ Init()
SetName(string)
SetRequestLogger(*http.Request)
Logger() *logrus.Entry
@@ -56,8 +56,6 @@ type TykMiddleware interface {
EnabledForSpec() bool
Name() string
- GetSpec() *APISpec
-
Unload()
}
@@ -533,6 +531,21 @@ func (t *BaseMiddleware) emitRateLimitEvent(r *http.Request, e event.Event, mess
})
}
+// emitUpstreamOAuthEvent emits an upstream OAuth event with an optional custom message.
+func (t *BaseMiddleware) emitUpstreamOAuthEvent(r *http.Request, e event.Event, message string, apiId string) {
+ if message == "" {
+ message = event.String(e)
+ }
+
+ t.FireEvent(e, EventUpstreamOAuthMeta{
+ EventMetaDefault: EventMetaDefault{
+ Message: message,
+ OriginatingRequest: EncodeRequestToEvent(r),
+ },
+ APIID: apiId,
+ })
+}
+
// handleRateLimitFailure handles the actions to be taken when a rate limit failure occurs.
func (t *BaseMiddleware) handleRateLimitFailure(r *http.Request, e event.Event, message string, rateLimitKey string) (error, int) {
t.emitRateLimitEvent(r, e, message, rateLimitKey)
diff --git a/gateway/middleware_wrap.go b/gateway/middleware_wrap.go
new file mode 100644
index 00000000000..0cd985d3ef1
--- /dev/null
+++ b/gateway/middleware_wrap.go
@@ -0,0 +1,54 @@
+package gateway
+
+import (
+ "net/http"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/TykTechnologies/tyk/internal/model"
+)
+
+type wrapMiddleware struct {
+ *BaseMiddleware
+ mw model.Middleware
+}
+
+var _ TykMiddleware = &wrapMiddleware{}
+
+// WrapMiddleware returns a new TykMiddleware with the provided base middleware,
+// and the smaller model.Middleware interface. It allows to implement model.Middleware,
+// and use it as a TykMiddleware.
+func WrapMiddleware(base *BaseMiddleware, in model.Middleware) TykMiddleware {
+ return &wrapMiddleware{
+ BaseMiddleware: base,
+ mw: in,
+ }
+}
+
+func (w *wrapMiddleware) Base() *BaseMiddleware {
+ return w.BaseMiddleware
+}
+
+func (w *wrapMiddleware) Config() (interface{}, error) {
+ return w.BaseMiddleware.Config()
+}
+
+func (w *wrapMiddleware) Init() {
+ w.mw.Init()
+}
+
+func (w *wrapMiddleware) Name() string {
+ return w.mw.Name()
+}
+
+func (s *wrapMiddleware) Logger() *logrus.Entry {
+ return s.mw.Logger()
+}
+
+func (w *wrapMiddleware) EnabledForSpec() bool {
+ return w.mw.EnabledForSpec()
+}
+
+func (w *wrapMiddleware) ProcessRequest(rw http.ResponseWriter, r *http.Request, data interface{}) (error, int) {
+ return w.mw.ProcessRequest(rw, r, data)
+}
diff --git a/gateway/mw_api_rate_limit.go b/gateway/mw_api_rate_limit.go
index 6471845ce4b..619ae7d64de 100644
--- a/gateway/mw_api_rate_limit.go
+++ b/gateway/mw_api_rate_limit.go
@@ -1,6 +1,7 @@
package gateway
import (
+ "fmt"
"net/http"
"strconv"
@@ -54,7 +55,7 @@ func (k *RateLimitForAPI) getSession(r *http.Request) *user.SessionState {
if ok {
if limits := spec.RateLimit; limits.Valid() {
// track per-endpoint with a hash of the path
- keyname := k.keyName + "-" + storage.HashStr(limits.Path)
+ keyname := k.keyName + "-" + storage.HashStr(fmt.Sprintf("%s:%s", limits.Method, limits.Path))
session := &user.SessionState{
Rate: limits.Rate,
diff --git a/gateway/mw_auth_key.go b/gateway/mw_auth_key.go
index aaf136ef778..25dd659468c 100644
--- a/gateway/mw_auth_key.go
+++ b/gateway/mw_auth_key.go
@@ -242,7 +242,7 @@ func (k *AuthKey) validateSignature(r *http.Request, key string) (error, int) {
return errors.New(errorMessage), errorCode
}
- secret := k.Gw.replaceTykVariables(r, authConfig.Signature.Secret, false)
+ secret := k.Gw.ReplaceTykVariables(r, authConfig.Signature.Secret, false)
if secret == "" {
logger.Info("Request signature secret not found or empty")
diff --git a/gateway/mw_external_oauth.go b/gateway/mw_external_oauth.go
index 24654134d42..39730badb4d 100644
--- a/gateway/mw_external_oauth.go
+++ b/gateway/mw_external_oauth.go
@@ -38,6 +38,10 @@ func (k *ExternalOAuthMiddleware) Name() string {
}
func (k *ExternalOAuthMiddleware) EnabledForSpec() bool {
+ if k.Spec.ExternalOAuth.Enabled {
+ log.Warn("Support for external OAuth Middleware will be deprecated starting from 5.7.0. To avoid any disruptions, we recommend that you use JSON Web Token (JWT) instead, as explained in https://tyk.io/docs/basic-config-and-security/security/authentication-authorization/ext-oauth-middleware/")
+ }
+
return k.Spec.ExternalOAuth.Enabled
}
diff --git a/gateway/mw_graphql.go b/gateway/mw_graphql.go
index f2b83b8a17c..5f3e017ba3c 100644
--- a/gateway/mw_graphql.go
+++ b/gateway/mw_graphql.go
@@ -116,7 +116,7 @@ func (m *GraphQLMiddleware) Init() {
}
return body, nil
},
- TykVariableReplacer: m.Gw.replaceTykVariables,
+ TykVariableReplacer: m.Gw.ReplaceTykVariables,
},
})
} else if m.Spec.GraphQL.Version == apidef.GraphQLConfigVersion3Preview {
@@ -153,7 +153,7 @@ func (m *GraphQLMiddleware) Init() {
}
return body, nil
},
- TykVariableReplacer: m.Gw.replaceTykVariables,
+ TykVariableReplacer: m.Gw.ReplaceTykVariables,
},
})
if err != nil {
diff --git a/gateway/mw_modify_headers.go b/gateway/mw_modify_headers.go
index cc18f93f459..1d8a178f9e9 100644
--- a/gateway/mw_modify_headers.go
+++ b/gateway/mw_modify_headers.go
@@ -44,7 +44,7 @@ func (t *TransformHeaders) ProcessRequest(w http.ResponseWriter, r *http.Request
// Add
for nKey, nVal := range vInfo.GlobalHeaders {
t.Logger().Debug("Adding: ", nKey)
- setCustomHeader(r.Header, nKey, t.Gw.replaceTykVariables(r, nVal, false), ignoreCanonical)
+ setCustomHeader(r.Header, nKey, t.Gw.ReplaceTykVariables(r, nVal, false), ignoreCanonical)
}
}
@@ -56,7 +56,7 @@ func (t *TransformHeaders) ProcessRequest(w http.ResponseWriter, r *http.Request
r.Header.Del(dKey)
}
for nKey, nVal := range hmeta.AddHeaders {
- setCustomHeader(r.Header, nKey, t.Gw.replaceTykVariables(r, nVal, false), ignoreCanonical)
+ setCustomHeader(r.Header, nKey, t.Gw.ReplaceTykVariables(r, nVal, false), ignoreCanonical)
}
}
diff --git a/gateway/mw_oauth2_auth.go b/gateway/mw_oauth2_auth.go
new file mode 100644
index 00000000000..8f7208e1b6e
--- /dev/null
+++ b/gateway/mw_oauth2_auth.go
@@ -0,0 +1,397 @@
+package gateway
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+
+ "github.com/sirupsen/logrus"
+ oauth2clientcredentials "golang.org/x/oauth2/clientcredentials"
+
+ "github.com/TykTechnologies/tyk/apidef"
+ "github.com/TykTechnologies/tyk/header"
+ "github.com/TykTechnologies/tyk/internal/httputil"
+ "github.com/TykTechnologies/tyk/storage"
+)
+
+const (
+ UpstreamOAuthErrorEventName = "UpstreamOAuthError"
+ UpstreamOAuthMiddlewareName = "UpstreamOAuth"
+ ClientCredentialsAuthorizeType = "clientCredentials"
+ PasswordAuthorizeType = "password"
+)
+
+type OAuthHeaderProvider interface {
+ // getOAuthToken returns the OAuth token for the request.
+ getOAuthToken(r *http.Request, OAuthSpec *UpstreamOAuth) (string, error)
+ getHeaderName(OAuthSpec *UpstreamOAuth) string
+ headerEnabled(OAuthSpec *UpstreamOAuth) bool
+}
+
+type ClientCredentialsOAuthProvider struct{}
+
+type PerAPIClientCredentialsOAuthProvider struct{}
+
+type PasswordOAuthProvider struct{}
+
+func newUpstreamOAuthClientCredentialsCache(connectionHandler *storage.ConnectionHandler) UpstreamOAuthCache {
+ return &upstreamOAuthClientCredentialsCache{RedisCluster: storage.RedisCluster{KeyPrefix: "upstreamOAuthCC-", ConnectionHandler: connectionHandler}}
+}
+
+func newUpstreamOAuthPasswordCache(connectionHandler *storage.ConnectionHandler) UpstreamOAuthCache {
+ return &upstreamOAuthPasswordCache{RedisCluster: storage.RedisCluster{KeyPrefix: "upstreamOAuthPW-", ConnectionHandler: connectionHandler}}
+}
+
+type upstreamOAuthClientCredentialsCache struct {
+ storage.RedisCluster
+}
+
+type upstreamOAuthPasswordCache struct {
+ storage.RedisCluster
+}
+
+func (cache *upstreamOAuthPasswordCache) getToken(r *http.Request, OAuthSpec *UpstreamOAuth) (string, error) {
+ cacheKey := generatePasswordOAuthCacheKey(OAuthSpec.Spec.UpstreamAuth.OAuth, OAuthSpec.Spec.APIID)
+
+ tokenString, err := retryGetKeyAndLock(cacheKey, &cache.RedisCluster)
+ if err != nil {
+ return "", err
+ }
+
+ if tokenString != "" {
+ decryptedToken := decrypt(getPaddedSecret(OAuthSpec.Gw.GetConfig().Secret), tokenString)
+ return decryptedToken, nil
+ }
+
+ token, err := cache.obtainToken(r.Context(), OAuthSpec)
+ if err != nil {
+ return "", err
+ }
+
+ encryptedToken := encrypt(getPaddedSecret(OAuthSpec.Gw.GetConfig().Secret), token.AccessToken)
+ setExtraMetadata(r, OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.ExtraMetadata, token)
+
+ ttl := time.Until(token.Expiry)
+ if err := setTokenInCache(cacheKey, encryptedToken, ttl, &cache.RedisCluster); err != nil {
+ return "", err
+ }
+
+ return token.AccessToken, nil
+}
+
+func (cache *upstreamOAuthPasswordCache) obtainToken(ctx context.Context, OAuthSpec *UpstreamOAuth) (*oauth2.Token, error) {
+ cfg := newOAuth2PasswordConfig(OAuthSpec)
+
+ token, err := cfg.PasswordCredentialsToken(ctx, OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.Username, OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.Password)
+ if err != nil {
+ return &oauth2.Token{}, err
+ }
+
+ return token, nil
+}
+
+type UpstreamOAuthCache interface {
+ // getToken returns the token from cache or issues a request to obtain it from the OAuth provider.
+ getToken(r *http.Request, OAuthSpec *UpstreamOAuth) (string, error)
+ // obtainToken issues a request to obtain the token from the OAuth provider.
+ obtainToken(ctx context.Context, OAuthSpec *UpstreamOAuth) (*oauth2.Token, error)
+}
+
+// UpstreamOAuth is a middleware that performs OAuth2 authentication (client credentials or password grant) for upstream connections.
+// UpstreamOAuth middleware is only supported in Tyk OAS API definitions.
+type UpstreamOAuth struct {
+	*BaseMiddleware
+}
+
+// Name returns the name of middleware.
+func (OAuthSpec *UpstreamOAuth) Name() string {
+ return UpstreamOAuthMiddlewareName
+}
+
+// EnabledForSpec returns true if the middleware is enabled based on API Spec.
+func (OAuthSpec *UpstreamOAuth) EnabledForSpec() bool {
+ if !OAuthSpec.Spec.UpstreamAuth.Enabled {
+ return false
+ }
+
+ if !OAuthSpec.Spec.UpstreamAuth.OAuth.Enabled {
+ return false
+ }
+
+ return true
+}
+
+// ProcessRequest resolves an upstream OAuth token and injects it into the request context so that it can be applied during reverse proxy.
+func (OAuthSpec *UpstreamOAuth) ProcessRequest(_ http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
+	oauthConfig := OAuthSpec.Spec.UpstreamAuth.OAuth
+
+	upstreamOAuthProvider := UpstreamOAuthProvider{
+		HeaderName: header.Authorization, // default when no custom header name is configured
+	}
+
+	provider, err := getOAuthHeaderProvider(oauthConfig)
+	if err != nil {
+		return fmt.Errorf("failed to get OAuth header provider: %w", err), http.StatusInternalServerError
+	}
+
+	payload, err := provider.getOAuthToken(r, OAuthSpec)
+	if err != nil {
+		return fmt.Errorf("failed to get OAuth token: %w", err), http.StatusInternalServerError
+	}
+
+	upstreamOAuthProvider.AuthValue = payload
+	headerName := provider.getHeaderName(OAuthSpec) // NOTE(review): this unconditional block duplicates the headerEnabled-gated block below and applies a custom header name even when the header source is disabled — confirm intent
+	if headerName != "" {
+		upstreamOAuthProvider.HeaderName = headerName
+	}
+
+	if provider.headerEnabled(OAuthSpec) { // redundant as written: the block above has already applied the same name
+		headerName := provider.getHeaderName(OAuthSpec)
+		if headerName != "" {
+			upstreamOAuthProvider.HeaderName = headerName
+		}
+	}
+
+	httputil.SetUpstreamAuth(r, upstreamOAuthProvider)
+	return nil, http.StatusOK
+}
+
+func getOAuthHeaderProvider(oauthConfig apidef.UpstreamOAuth) (OAuthHeaderProvider, error) {
+ if !oauthConfig.IsEnabled() {
+ return nil, fmt.Errorf("upstream OAuth is not enabled")
+ }
+
+ switch {
+ case len(oauthConfig.AllowedAuthorizeTypes) == 0:
+ return nil, fmt.Errorf("no OAuth configuration selected")
+ case len(oauthConfig.AllowedAuthorizeTypes) > 1:
+ return nil, fmt.Errorf("both client credentials and password authentication are provided")
+ case oauthConfig.AllowedAuthorizeTypes[0] == ClientCredentialsAuthorizeType:
+ return &ClientCredentialsOAuthProvider{}, nil
+ case oauthConfig.AllowedAuthorizeTypes[0] == PasswordAuthorizeType:
+ return &PasswordOAuthProvider{}, nil
+ default:
+ return nil, fmt.Errorf("no valid OAuth configuration provided")
+ }
+}
+
+func (p *PerAPIClientCredentialsOAuthProvider) getOAuthHeaderValue(r *http.Request, OAuthSpec *UpstreamOAuth) (string, error) {
+ oauthConfig := OAuthSpec.Spec.UpstreamAuth.OAuth
+
+ if oauthConfig.ClientCredentials.TokenProvider == nil {
+ cfg := newOAuth2ClientCredentialsConfig(OAuthSpec)
+ tokenSource := cfg.TokenSource(r.Context())
+
+ oauthConfig.ClientCredentials.TokenProvider = tokenSource
+ }
+
+ oauthToken, err := oauthConfig.ClientCredentials.TokenProvider.Token()
+ if err != nil {
+ return handleOAuthError(r, OAuthSpec, err)
+ }
+
+ payload := fmt.Sprintf("Bearer %s", oauthToken.AccessToken)
+ return payload, nil
+}
+
+func handleOAuthError(r *http.Request, OAuthSpec *UpstreamOAuth, err error) (string, error) {
+ OAuthSpec.emitUpstreamOAuthEvent(r, UpstreamOAuthErrorEventName, err.Error(), OAuthSpec.Spec.APIID)
+ return "", err
+}
+
+func (p *ClientCredentialsOAuthProvider) getOAuthToken(r *http.Request, OAuthSpec *UpstreamOAuth) (string, error) {
+ if OAuthSpec.Gw.UpstreamOAuthCache == nil {
+ OAuthSpec.Gw.UpstreamOAuthCache = newUpstreamOAuthClientCredentialsCache(OAuthSpec.Gw.StorageConnectionHandler)
+ }
+
+ token, err := OAuthSpec.Gw.UpstreamOAuthCache.getToken(r, OAuthSpec)
+ if err != nil {
+ return handleOAuthError(r, OAuthSpec, err)
+ }
+
+ return fmt.Sprintf("Bearer %s", token), nil
+}
+
+func (p *ClientCredentialsOAuthProvider) headerEnabled(OAuthSpec *UpstreamOAuth) bool {
+ return OAuthSpec.Spec.UpstreamAuth.OAuth.ClientCredentials.Header.Enabled
+}
+
+func (p *ClientCredentialsOAuthProvider) getHeaderName(OAuthSpec *UpstreamOAuth) string {
+ return OAuthSpec.Spec.UpstreamAuth.OAuth.ClientCredentials.Header.Name
+}
+
+func (p *PasswordOAuthProvider) getOAuthToken(r *http.Request, OAuthSpec *UpstreamOAuth) (string, error) {
+ if OAuthSpec.Gw.UpstreamOAuthCache == nil {
+ OAuthSpec.Gw.UpstreamOAuthCache = newUpstreamOAuthPasswordCache(OAuthSpec.Gw.StorageConnectionHandler)
+ }
+
+ token, err := OAuthSpec.Gw.UpstreamOAuthCache.getToken(r, OAuthSpec)
+ if err != nil {
+ return handleOAuthError(r, OAuthSpec, err)
+ }
+
+ return fmt.Sprintf("Bearer %s", token), nil
+}
+
+func (p *PasswordOAuthProvider) getHeaderName(OAuthSpec *UpstreamOAuth) string {
+ return OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.Header.Name
+}
+
+func (p *PasswordOAuthProvider) headerEnabled(OAuthSpec *UpstreamOAuth) bool {
+ return OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.Header.Enabled
+}
+
+func generatePasswordOAuthCacheKey(config apidef.UpstreamOAuth, apiId string) string {
+ key := fmt.Sprintf(
+ "%s|%s|%s|%s",
+ apiId,
+ config.PasswordAuthentication.ClientID,
+ config.PasswordAuthentication.ClientSecret,
+ strings.Join(config.PasswordAuthentication.Scopes, ","))
+
+ hash := sha256.New()
+ hash.Write([]byte(key))
+ return hex.EncodeToString(hash.Sum(nil))
+}
+
+func generateClientCredentialsCacheKey(config apidef.UpstreamOAuth, apiId string) string {
+ key := fmt.Sprintf(
+ "%s|%s|%s|%s",
+ apiId,
+ config.ClientCredentials.ClientID,
+ config.ClientCredentials.TokenURL,
+ strings.Join(config.ClientCredentials.Scopes, ","))
+
+ hash := sha256.New()
+ hash.Write([]byte(key))
+ return hex.EncodeToString(hash.Sum(nil))
+}
+
+func (cache *upstreamOAuthClientCredentialsCache) getToken(r *http.Request, OAuthSpec *UpstreamOAuth) (string, error) {
+ cacheKey := generateClientCredentialsCacheKey(OAuthSpec.Spec.UpstreamAuth.OAuth, OAuthSpec.Spec.APIID)
+
+ tokenString, err := retryGetKeyAndLock(cacheKey, &cache.RedisCluster)
+ if err != nil {
+ return "", err
+ }
+
+ if tokenString != "" {
+ decryptedToken := decrypt(getPaddedSecret(OAuthSpec.Gw.GetConfig().Secret), tokenString)
+ return decryptedToken, nil
+ }
+
+ token, err := cache.obtainToken(r.Context(), OAuthSpec)
+ if err != nil {
+ return "", err
+ }
+
+ encryptedToken := encrypt(getPaddedSecret(OAuthSpec.Gw.GetConfig().Secret), token.AccessToken)
+ setExtraMetadata(r, OAuthSpec.Spec.UpstreamAuth.OAuth.ClientCredentials.ExtraMetadata, token)
+
+ ttl := time.Until(token.Expiry)
+ if err := setTokenInCache(cacheKey, encryptedToken, ttl, &cache.RedisCluster); err != nil {
+ return "", err
+ }
+
+ return token.AccessToken, nil
+}
+
+func setExtraMetadata(r *http.Request, keyList []string, token *oauth2.Token) { // copy the listed extra token-response fields into the request's context data
+	contextDataObject := ctxGetData(r)
+	if contextDataObject == nil {
+		contextDataObject = make(map[string]interface{})
+	}
+	for _, key := range keyList {
+		val := token.Extra(key) // Token.Extra returns nil when the field is absent from the token response
+		if val != nil { // was 'val != ""': a nil interface is != "", so absent fields were stored as nil and legitimate empty-string values were dropped
+			contextDataObject[key] = val
+		}
+	}
+	ctxSetData(r, contextDataObject)
+}
+
+func retryGetKeyAndLock(cacheKey string, cache *storage.RedisCluster) (string, error) { // poll for a cached token; on miss, try to win the refresh lock
+	const maxRetries = 10
+	const retryDelay = 100 * time.Millisecond
+
+	var token string
+	var err error
+
+	for i := 0; i < maxRetries; i++ {
+		token, err = cache.GetKey(cacheKey)
+		if err == nil {
+			return token, nil // cache hit
+		}
+
+		lockKey := cacheKey + ":lock"
+		ok, err := cache.Lock(lockKey, time.Second*5) // NOTE(review): ":=" shadows the outer err, so Lock errors are never surfaced — confirm intent
+		if err == nil && ok {
+			return "", nil // lock acquired: empty token tells the caller to obtain a fresh one
+		}
+
+		time.Sleep(retryDelay)
+	}
+
+	return "", fmt.Errorf("failed to acquire lock after retries: %v", err) // err is the last GetKey error, never a Lock error (see shadowing above)
+}
+
+func newOAuth2ClientCredentialsConfig(OAuthSpec *UpstreamOAuth) oauth2clientcredentials.Config {
+ return oauth2clientcredentials.Config{
+ ClientID: OAuthSpec.Spec.UpstreamAuth.OAuth.ClientCredentials.ClientID,
+ ClientSecret: OAuthSpec.Spec.UpstreamAuth.OAuth.ClientCredentials.ClientSecret,
+ TokenURL: OAuthSpec.Spec.UpstreamAuth.OAuth.ClientCredentials.TokenURL,
+ Scopes: OAuthSpec.Spec.UpstreamAuth.OAuth.ClientCredentials.Scopes,
+ }
+}
+
+func newOAuth2PasswordConfig(OAuthSpec *UpstreamOAuth) oauth2.Config {
+ return oauth2.Config{
+ ClientID: OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.ClientID,
+ ClientSecret: OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.ClientSecret,
+ Endpoint: oauth2.Endpoint{
+ TokenURL: OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.TokenURL,
+ },
+ Scopes: OAuthSpec.Spec.UpstreamAuth.OAuth.PasswordAuthentication.Scopes,
+ }
+}
+
+func (cache *upstreamOAuthClientCredentialsCache) obtainToken(ctx context.Context, OAuthSpec *UpstreamOAuth) (*oauth2.Token, error) {
+ cfg := newOAuth2ClientCredentialsConfig(OAuthSpec)
+
+ tokenSource := cfg.TokenSource(ctx)
+ oauthToken, err := tokenSource.Token()
+ if err != nil {
+ return &oauth2.Token{}, err
+ }
+
+ return oauthToken, nil
+}
+
+// setTokenInCache stores token under cacheKey with the given TTL (truncated to whole seconds).
+func setTokenInCache(cacheKey string, token string, ttl time.Duration, cache *storage.RedisCluster) error {
+	return cache.SetKey(cacheKey, token, int64(ttl.Seconds())) // was Now().Add(ttl).Sub(Now()): two clock reads to recompute a value that is simply ttl
+}
+
+// UpstreamOAuthProvider implements upstream auth provider.
+type UpstreamOAuthProvider struct {
+ // HeaderName is the header name to be used to fill upstream auth with.
+ HeaderName string
+ // AuthValue is the value of auth header.
+ AuthValue string
+}
+
+// Fill sets the request's HeaderName with AuthValue
+func (u UpstreamOAuthProvider) Fill(r *http.Request) {
+ if r.Header.Get(u.HeaderName) != "" {
+ log.WithFields(logrus.Fields{
+ "header": u.HeaderName,
+ }).Info("Authorization header conflict detected: Client header overwritten by Gateway upstream authentication header.")
+ }
+ r.Header.Set(u.HeaderName, u.AuthValue)
+}
diff --git a/gateway/mw_oauth2_auth_test.go b/gateway/mw_oauth2_auth_test.go
new file mode 100644
index 00000000000..bc510988767
--- /dev/null
+++ b/gateway/mw_oauth2_auth_test.go
@@ -0,0 +1,179 @@
+package gateway
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/TykTechnologies/tyk/apidef"
+ "github.com/TykTechnologies/tyk/header"
+ "github.com/TykTechnologies/tyk/test"
+)
+
+func TestUpstreamOauth2(t *testing.T) {
+
+ tst := StartTest(nil)
+ t.Cleanup(tst.Close)
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ assert.Fail(t, "authenticate client request URL = %q; want %q", r.URL, "/token")
+ }
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+ assert.Fail(t, "Unexpected authorization header, %v is found.", headerAuth)
+ }
+ if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want {
+ assert.Fail(t, "Content-Type header = %q; want %q", got, want)
+ }
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ r.Body.Close()
+ }
+ if err != nil {
+ assert.Fail(t, "failed reading request body: %s.", err)
+ }
+ if string(body) != "grant_type=client_credentials&scope=scope1+scope2" {
+ assert.Fail(t, "payload = %q; want %q", string(body), "grant_type=client_credentials&scope=scope1+scope2")
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer&instance_url=https://tykxample.com"))
+ }))
+ defer t.Cleanup(func() { ts.Close() })
+
+ cfg := apidef.ClientCredentials{
+ Enabled: true,
+ ClientAuthData: apidef.ClientAuthData{
+ ClientID: "CLIENT_ID",
+ ClientSecret: "CLIENT_SECRET",
+ },
+ TokenURL: ts.URL + "/token",
+ Scopes: []string{"scope1", "scope2"},
+ Header: apidef.AuthSource{Enabled: true, Name: "Authorization"},
+ ExtraMetadata: []string{"instance_url"},
+ }
+
+ tst.Gw.BuildAndLoadAPI(
+ func(spec *APISpec) {
+ spec.Proxy.ListenPath = "/upstream-oauth-distributed/"
+ spec.UseKeylessAccess = true
+ spec.UpstreamAuth = apidef.UpstreamAuth{
+ Enabled: true,
+ OAuth: apidef.UpstreamOAuth{
+ Enabled: true,
+ ClientCredentials: cfg,
+ AllowedAuthorizeTypes: []string{ClientCredentialsAuthorizeType},
+ },
+ }
+ spec.Proxy.StripListenPath = true
+ },
+ )
+
+ _, _ = tst.Run(t, test.TestCases{
+ {
+ Path: "/upstream-oauth-distributed/",
+ Code: http.StatusOK,
+ BodyMatchFunc: func(body []byte) bool {
+ resp := struct {
+ Headers map[string]string `json:"headers"`
+ }{}
+ err := json.Unmarshal(body, &resp)
+ assert.NoError(t, err)
+
+ assert.Contains(t, resp.Headers, header.Authorization)
+ assert.NotEmpty(t, resp.Headers[header.Authorization])
+ assert.Equal(t, "Bearer 90d64460d14870c08c81352a05dedd3465940a7c", resp.Headers[header.Authorization])
+
+ return true
+ },
+ },
+ }...)
+
+}
+
+func TestPasswordCredentialsTokenRequest(t *testing.T) {
+ tst := StartTest(nil)
+ t.Cleanup(tst.Close)
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer r.Body.Close()
+ expected := "/token"
+ if r.URL.String() != expected {
+ assert.Fail(t, "URL = %q; want %q", r.URL, expected)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
+ if headerAuth != expected {
+ assert.Fail(t, "Authorization header = %q; want %q", headerAuth, expected)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ expected = "application/x-www-form-urlencoded"
+ if headerContentType != expected {
+ assert.Fail(t, "Content-Type header = %q; want %q", headerContentType, expected)
+ }
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ assert.Fail(t, "Failed reading request body: %s.", err)
+ }
+ expected = "grant_type=password&password=password1&scope=scope1+scope2&username=user1"
+ if string(body) != expected {
+ assert.Fail(t, "payload = %q; want %q", string(body), expected)
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer&instance_url=https://tykxample.com"))
+ }))
+ defer t.Cleanup(func() { ts.Close() })
+
+ cfg := apidef.PasswordAuthentication{
+ ClientAuthData: apidef.ClientAuthData{
+ ClientID: "CLIENT_ID",
+ ClientSecret: "CLIENT_SECRET",
+ },
+ Username: "user1",
+ Password: "password1",
+ TokenURL: ts.URL + "/token",
+ Scopes: []string{"scope1", "scope2"},
+ Header: apidef.AuthSource{Enabled: true, Name: "Authorization"},
+ ExtraMetadata: []string{"instance_url"},
+ }
+
+ tst.Gw.BuildAndLoadAPI(
+ func(spec *APISpec) {
+ spec.Proxy.ListenPath = "/upstream-oauth-password/"
+ spec.UseKeylessAccess = true
+ spec.UpstreamAuth = apidef.UpstreamAuth{
+ Enabled: true,
+ OAuth: apidef.UpstreamOAuth{
+ Enabled: true,
+ PasswordAuthentication: cfg,
+ AllowedAuthorizeTypes: []string{PasswordAuthorizeType},
+ },
+ }
+ spec.Proxy.StripListenPath = true
+ },
+ )
+
+ _, _ = tst.Run(t, test.TestCases{
+ {
+ Path: "/upstream-oauth-password/",
+ Code: http.StatusOK,
+ BodyMatchFunc: func(body []byte) bool {
+ resp := struct {
+ Headers map[string]string `json:"headers"`
+ }{}
+ err := json.Unmarshal(body, &resp)
+ assert.NoError(t, err)
+
+ assert.Contains(t, resp.Headers, header.Authorization)
+ assert.NotEmpty(t, resp.Headers[header.Authorization])
+ assert.Equal(t, "Bearer 90d64460d14870c08c81352a05dedd3465940a7c", resp.Headers[header.Authorization])
+
+ return true
+ },
+ },
+ }...)
+}
diff --git a/gateway/mw_persist_graphql_operation.go b/gateway/mw_persist_graphql_operation.go
index 79f73ac86cf..6cd2aa03c28 100644
--- a/gateway/mw_persist_graphql_operation.go
+++ b/gateway/mw_persist_graphql_operation.go
@@ -74,7 +74,7 @@ func (i *PersistGraphQLOperationMiddleware) ProcessRequest(w http.ResponseWriter
return ProxyingRequestFailedErr, http.StatusInternalServerError
}
- variablesStr := i.Gw.replaceTykVariables(r, string(varBytes), false)
+ variablesStr := i.Gw.ReplaceTykVariables(r, string(varBytes), false)
requestPathParts := strings.Split(r.RequestURI, "/")
for replacer, pathIndex := range replacers {
diff --git a/gateway/mw_rate_limiting.go b/gateway/mw_rate_limiting.go
index dd9ab045540..7b38971d05a 100644
--- a/gateway/mw_rate_limiting.go
+++ b/gateway/mw_rate_limiting.go
@@ -59,7 +59,7 @@ func (k *RateLimitAndQuotaCheck) ProcessRequest(w http.ResponseWriter, r *http.R
if pattern, found := session.MetaData["rate_limit_pattern"]; found {
if patternString, ok := pattern.(string); ok && patternString != "" {
- if customKeyValue := k.Gw.replaceTykVariables(r, patternString, false); customKeyValue != "" {
+ if customKeyValue := k.Gw.ReplaceTykVariables(r, patternString, false); customKeyValue != "" {
rateLimitKey = customKeyValue
quotaKey = customKeyValue
}
diff --git a/gateway/mw_streaming.go b/gateway/mw_streaming.go
index 4d0235ede71..3131ce1b582 100644
--- a/gateway/mw_streaming.go
+++ b/gateway/mw_streaming.go
@@ -1,425 +1,34 @@
+//go:build !ee && !dev
+
+// Provides getStreamingMiddleware
package gateway
import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
"net/http"
- "net/url"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/sirupsen/logrus"
-
- "github.com/gorilla/mux"
-
- "github.com/TykTechnologies/tyk/internal/streaming"
)
-const (
- // ExtensionTykStreaming is the oas extension for tyk streaming
- ExtensionTykStreaming = "x-tyk-streaming"
-)
-
-// StreamsConfig represents a stream configuration
-type StreamsConfig struct {
- Info struct {
- Version string `json:"version"`
- } `json:"info"`
- Streams map[string]any `json:"streams"`
+func getStreamingMiddleware(base *BaseMiddleware) TykMiddleware {
+ return &dummyStreamingMiddleware{base}
}
-// Used for testing
-var globalStreamCounter atomic.Int64
-
-// StreamingMiddleware is a middleware that handles streaming functionality
-type StreamingMiddleware struct {
+type dummyStreamingMiddleware struct {
*BaseMiddleware
- streamManagers sync.Map // Map of consumer group IDs to StreamManager
- ctx context.Context
- cancel context.CancelFunc
- allowedUnsafe []string
- defaultStreamManager *StreamManager
}
-// StreamManager is responsible for creating a single stream
-type StreamManager struct {
- streams sync.Map
- routeLock sync.Mutex
- muxer *mux.Router
- mw *StreamingMiddleware
- dryRun bool
- listenPaths []string
-}
-
-func (sm *StreamManager) initStreams(r *http.Request, config *StreamsConfig) {
- // Clear existing routes for this consumer group
- sm.muxer = mux.NewRouter()
-
- for streamID, streamConfig := range config.Streams {
- sm.setUpOrDryRunStream(streamConfig, streamID)
- }
-
- // If it is default stream manager, init muxer
- if r == nil {
- for _, path := range sm.listenPaths {
- sm.muxer.HandleFunc(path, func(_ http.ResponseWriter, _ *http.Request) {
- // Dummy handler
- })
- }
- }
+func (d *dummyStreamingMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
+ return nil, http.StatusOK
}
-func (sm *StreamManager) setUpOrDryRunStream(streamConfig any, streamID string) {
- if streamMap, ok := streamConfig.(map[string]interface{}); ok {
- httpPaths := GetHTTPPaths(streamMap)
-
- if sm.dryRun {
- if len(httpPaths) == 0 {
- err := sm.createStream(streamID, streamMap)
- if err != nil {
- sm.mw.Logger().WithError(err).Errorf("Error creating stream %s", streamID)
- }
- }
- } else {
- err := sm.createStream(streamID, streamMap)
- if err != nil {
- sm.mw.Logger().WithError(err).Errorf("Error creating stream %s", streamID)
- }
- }
- sm.listenPaths = append(sm.listenPaths, httpPaths...)
- }
-}
-
-// removeStream removes a stream
-func (sm *StreamManager) removeStream(streamID string) error {
- streamFullID := fmt.Sprintf("%s_%s", sm.mw.Spec.APIID, streamID)
-
- if streamValue, exists := sm.streams.Load(streamFullID); exists {
- stream, ok := streamValue.(*streaming.Stream)
- if !ok {
- return fmt.Errorf("stream %s is not a valid stream", streamID)
- }
- err := stream.Stop()
- if err != nil {
- return err
- }
- sm.streams.Delete(streamFullID)
- } else {
- return fmt.Errorf("stream %s does not exist", streamID)
- }
- return nil
-}
-
-// Name is StreamingMiddleware
-func (s *StreamingMiddleware) Name() string {
- return "StreamingMiddleware"
-}
-
-// EnabledForSpec checks if streaming is enabled on the config
-func (s *StreamingMiddleware) EnabledForSpec() bool {
- s.Logger().Debug("Checking if streaming is enabled")
-
- streamingConfig := s.Gw.GetConfig().Streaming
- s.Logger().Debugf("Streaming config: %+v", streamingConfig)
+func (d *dummyStreamingMiddleware) EnabledForSpec() bool {
+ streamingConfig := d.Gw.GetConfig().Streaming
if streamingConfig.Enabled {
- s.Logger().Debug("Streaming is enabled in the config")
- s.allowedUnsafe = streamingConfig.AllowUnsafe
- s.Logger().Debugf("Allowed unsafe components: %v", s.allowedUnsafe)
-
- config := s.getStreamsConfig(nil)
- globalStreamCounter.Add(int64(len(config.Streams)))
-
- s.Logger().Debug("Total streams count: ", len(config.Streams))
-
- return len(config.Streams) != 0
- }
-
- s.Logger().Debug("Streaming is not enabled in the config")
- return false
-}
-
-// Init initializes the middleware
-func (s *StreamingMiddleware) Init() {
- s.Logger().Debug("Initializing StreamingMiddleware")
- s.ctx, s.cancel = context.WithCancel(context.Background())
-
- s.Logger().Debug("Initializing default stream manager")
- s.defaultStreamManager = s.createStreamManager(nil)
-}
-
-func (s *StreamingMiddleware) createStreamManager(r *http.Request) *StreamManager {
- newStreamManager := &StreamManager{
- muxer: mux.NewRouter(),
- mw: s,
- dryRun: r == nil,
- }
- streamID := fmt.Sprintf("_%d", time.Now().UnixNano())
- s.streamManagers.Store(streamID, newStreamManager)
-
- // Call initStreams for the new StreamManager
- newStreamManager.initStreams(r, s.getStreamsConfig(r))
-
- return newStreamManager
-}
-
-// Helper function to extract paths from an http_server configuration
-func extractPaths(httpConfig map[string]interface{}) []string {
- var paths []string
- defaultPaths := map[string]string{
- "path": "/post",
- "ws_path": "/post/ws",
- "stream_path": "/get/stream",
- }
- for key, defaultValue := range defaultPaths {
- if val, ok := httpConfig[key].(string); ok {
- paths = append(paths, val)
- } else {
- paths = append(paths, defaultValue)
- }
- }
- return paths
-}
-
-// Helper function to extract HTTP server paths from a given configuration
-func extractHTTPServerPaths(config map[string]interface{}) []string {
- if httpServerConfig, ok := config["http_server"].(map[string]interface{}); ok {
- return extractPaths(httpServerConfig)
- }
- return nil
-}
-
-// Helper function to handle broker configurations
-func handleBroker(brokerConfig map[string]interface{}) []string {
- var paths []string
- for _, ioKey := range []string{"inputs", "outputs"} {
- if ioList, ok := brokerConfig[ioKey].([]interface{}); ok {
- for _, ioItem := range ioList {
- if ioItemMap, ok := ioItem.(map[string]interface{}); ok {
- paths = append(paths, extractHTTPServerPaths(ioItemMap)...)
- }
- }
- }
- }
- return paths
-}
-
-// GetHTTPPaths is the ain function to get HTTP paths from the stream configuration
-func GetHTTPPaths(streamConfig map[string]interface{}) []string {
- var paths []string
- for _, component := range []string{"input", "output"} {
- if componentMap, ok := streamConfig[component].(map[string]interface{}); ok {
- paths = append(paths, extractHTTPServerPaths(componentMap)...)
- if brokerConfig, ok := componentMap["broker"].(map[string]interface{}); ok {
- paths = append(paths, handleBroker(brokerConfig)...)
- }
- }
- }
- // remove duplicates
- var deduplicated []string
- exists := map[string]struct{}{}
- for _, item := range paths {
- if _, ok := exists[item]; !ok {
- deduplicated = append(deduplicated, item)
- exists[item] = struct{}{}
- }
- }
- return deduplicated
-}
-
-func (s *StreamingMiddleware) getStreamsConfig(r *http.Request) *StreamsConfig {
- config := &StreamsConfig{Streams: make(map[string]any)}
- if !s.Spec.IsOAS {
- return config
- }
-
- extension, ok := s.Spec.OAS.T.Extensions[ExtensionTykStreaming]
- if !ok {
- return config
+ d.Logger().Error("Error: Streaming is supported only in Tyk Enterprise Edition")
}
- if streamsMap, ok := extension.(map[string]any); ok {
- if streams, ok := streamsMap["streams"].(map[string]any); ok {
- s.processStreamsConfig(r, streams, config)
- }
- }
-
- return config
-}
-
-func (s *StreamingMiddleware) processStreamsConfig(r *http.Request, streams map[string]any, config *StreamsConfig) {
- for streamID, stream := range streams {
- if r == nil {
- s.Logger().Debugf("No request available to replace variables in stream config for %s", streamID)
- } else {
- s.Logger().Debugf("Stream config for %s: %v", streamID, stream)
- marshaledStream, err := json.Marshal(stream)
- if err != nil {
- s.Logger().Errorf("Failed to marshal stream config: %v", err)
- continue
- }
- replacedStream := s.Gw.replaceTykVariables(r, string(marshaledStream), true)
-
- if replacedStream != string(marshaledStream) {
- s.Logger().Debugf("Stream config changed for %s: %s", streamID, replacedStream)
- } else {
- s.Logger().Debugf("Stream config has not changed for %s: %s", streamID, replacedStream)
- }
-
- var unmarshaledStream map[string]interface{}
- err = json.Unmarshal([]byte(replacedStream), &unmarshaledStream)
- if err != nil {
- s.Logger().Errorf("Failed to unmarshal replaced stream config: %v", err)
- continue
- }
- stream = unmarshaledStream
- }
- config.Streams[streamID] = stream
- }
-}
-
-// createStream creates a new stream
-func (sm *StreamManager) createStream(streamID string, config map[string]interface{}) error {
- streamFullID := fmt.Sprintf("%s_%s", sm.mw.Spec.APIID, streamID)
- sm.mw.Logger().Debugf("Creating stream: %s", streamFullID)
-
- stream := streaming.NewStream(sm.mw.allowedUnsafe)
- err := stream.Start(config, &handleFuncAdapter{
- mw: sm.mw,
- streamID: streamFullID,
- muxer: sm.muxer,
- sm: sm,
- // child logger is necessary to prevent race condition
- logger: sm.mw.Logger().WithField("stream", streamFullID),
- })
- if err != nil {
- sm.mw.Logger().Errorf("Failed to start stream %s: %v", streamFullID, err)
- return err
- }
-
- sm.streams.Store(streamFullID, stream)
- sm.mw.Logger().Infof("Successfully created stream: %s", streamFullID)
-
- return nil
-}
-
-func (sm *StreamManager) hasPath(path string) bool {
- for _, p := range sm.listenPaths {
- if strings.TrimPrefix(path, "/") == strings.TrimPrefix(p, "/") {
- return true
- }
- }
return false
}
-// ProcessRequest will handle the streaming functionality
-func (s *StreamingMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
- strippedPath := s.Spec.StripListenPath(r.URL.Path)
- if !s.defaultStreamManager.hasPath(strippedPath) {
- return nil, http.StatusOK
- }
-
- s.Logger().Debugf("Processing request: %s, %s", r.URL.Path, strippedPath)
-
- newRequest := &http.Request{
- Method: r.Method,
- URL: &url.URL{Scheme: r.URL.Scheme, Host: r.URL.Host, Path: strippedPath},
- }
-
- if !s.defaultStreamManager.muxer.Match(newRequest, &mux.RouteMatch{}) {
- return nil, http.StatusOK
- }
-
- var match mux.RouteMatch
- streamManager := s.createStreamManager(r)
- streamManager.routeLock.Lock()
- streamManager.muxer.Match(newRequest, &match)
- streamManager.routeLock.Unlock()
-
- // direct Bento handler
- handler, ok := match.Handler.(http.HandlerFunc)
- if !ok {
- return errors.New("invalid route handler"), http.StatusInternalServerError
- }
-
- handler.ServeHTTP(w, r)
-
- return nil, mwStatusRespond
-}
-
-// Unload closes and remove active streams
-func (s *StreamingMiddleware) Unload() {
- s.Logger().Debugf("Unloading streaming middleware %s", s.Spec.Name)
-
- totalStreams := 0
- s.streamManagers.Range(func(_, value interface{}) bool {
- manager, ok := value.(*StreamManager)
- if !ok {
- return true
- }
- manager.streams.Range(func(_, _ interface{}) bool {
- totalStreams++
- return true
- })
- return true
- })
- globalStreamCounter.Add(-int64(totalStreams))
-
- s.cancel()
-
- s.Logger().Debug("Closing active streams")
- s.streamManagers.Range(func(_, value interface{}) bool {
- manager, ok := value.(*StreamManager)
- if !ok {
- return true
- }
- manager.streams.Range(func(_, streamValue interface{}) bool {
- if stream, ok := streamValue.(*streaming.Stream); ok {
- if err := stream.Reset(); err != nil {
- return true
- }
- }
- return true
- })
- return true
- })
-
- s.streamManagers = sync.Map{}
-
- s.Logger().Info("All streams successfully removed")
-}
-
-type handleFuncAdapter struct {
- streamID string
- sm *StreamManager
- mw *StreamingMiddleware
- muxer *mux.Router
- logger *logrus.Entry
-}
-
-func (h *handleFuncAdapter) HandleFunc(path string, f func(http.ResponseWriter, *http.Request)) {
- h.logger.Debugf("Registering streaming handleFunc for path: %s", path)
-
- if h.mw == nil || h.muxer == nil {
- h.logger.Error("StreamingMiddleware or muxer is nil")
- return
- }
-
- h.sm.routeLock.Lock()
- h.muxer.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
- defer func() {
- // Stop the stream when the HTTP request finishes
- if err := h.sm.removeStream(h.streamID); err != nil {
- h.logger.Errorf("Failed to stop stream %s: %v", h.streamID, err)
- }
- }()
-
- f(w, r)
- })
- h.sm.routeLock.Unlock()
- h.logger.Debugf("Registered handler for path: %s", path)
+func (d *dummyStreamingMiddleware) Name() string {
+ return "StreamingMiddleware"
}
diff --git a/gateway/mw_streaming_ee.go b/gateway/mw_streaming_ee.go
new file mode 100644
index 00000000000..64ee4ffedb7
--- /dev/null
+++ b/gateway/mw_streaming_ee.go
@@ -0,0 +1,15 @@
+//go:build ee || dev
+
+// Provides StreamingMiddleware
+package gateway
+
+import (
+ "github.com/TykTechnologies/tyk/ee/middleware/streams"
+)
+
+func getStreamingMiddleware(baseMid *BaseMiddleware) TykMiddleware {
+ spec := baseMid.Spec
+ streamSpec := streams.NewAPISpec(spec.APIID, spec.Name, spec.IsOAS, spec.OAS, spec.StripListenPath)
+ streamMw := streams.NewMiddleware(baseMid.Gw, baseMid, streamSpec)
+ return WrapMiddleware(baseMid, streamMw)
+}
diff --git a/gateway/mw_streaming_test.go b/gateway/mw_streaming_test.go
index c55ef7641dc..02377eaf9a2 100644
--- a/gateway/mw_streaming_test.go
+++ b/gateway/mw_streaming_test.go
@@ -1,6 +1,9 @@
+//go:build ee || dev
+
package gateway
import (
+ "bytes"
"context"
"crypto/tls"
"encoding/json"
@@ -12,6 +15,7 @@ import (
"time"
"github.com/nats-io/nats.go"
+ "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
@@ -27,6 +31,8 @@ import (
"github.com/TykTechnologies/tyk/apidef/oas"
"github.com/TykTechnologies/tyk/config"
+ "github.com/TykTechnologies/tyk/ee/middleware/streams"
+ "github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/test"
)
@@ -87,7 +93,7 @@ output:
t.Run(tc.name, func(t *testing.T) {
config, err := yamlConfigToMap(tc.configYaml)
require.NoError(t, err)
- httpPaths := GetHTTPPaths(config)
+ httpPaths := streams.GetHTTPPaths(config)
assert.ElementsMatch(t, tc.expected, httpPaths)
})
}
@@ -144,8 +150,17 @@ streams:
level: DEBUG
format: logfmt
add_timestamp: false
- static_fields:
- '@service': benthos
+`
+const bentoHTTPServerTemplate = `
+streams:
+ test:
+ input:
+ http_server:
+ path: /post
+ timeout: 1s
+ output:
+ http_server:
+ ws_path: /subscribe
`
func TestStreamingAPISingleClient(t *testing.T) {
@@ -188,6 +203,9 @@ func TestStreamingAPISingleClient(t *testing.T) {
}
wsURL := strings.Replace(ts.URL, "http", "ws", 1) + fmt.Sprintf("/%s/get/ws", apiName)
+
+ println("wsURL:", wsURL)
+
wsConn, _, err := dialer.Dial(wsURL, nil)
require.NoError(t, err, "failed to connect to ws server")
t.Cleanup(func() {
@@ -273,23 +291,39 @@ func TestStreamingAPIMultipleClients(t *testing.T) {
t.Cleanup(func() {
nc.Close()
})
+
subject := "test"
+ messages := make(map[string]struct{})
for i := 0; i < totalMessages; i++ {
- require.NoError(t, nc.Publish(subject, []byte(fmt.Sprintf("Hello %d", i))), "failed to publish message to subject")
- }
-
- // Read messages from all clients
- for clientID, wsConn := range wsConns {
- err = wsConn.SetReadDeadline(time.Now().Add(5000 * time.Millisecond))
- require.NoError(t, err, fmt.Sprintf("error setting read deadline for client %d", clientID))
+ message := fmt.Sprintf("Hello %d", i)
+ messages[message] = struct{}{}
+ require.NoError(t, nc.Publish(subject, []byte(message)), "failed to publish message to subject")
+ }
+
+ // Read messages from all subscribers
+ // Messages are distributed in a round-robin fashion, count the number of messages and check the messages individually.
+ var readMessages int
+ for readMessages < totalMessages {
+ for clientID, wsConn := range wsConns {
+ // We need to stop waiting for a message if the subscriber is consumed all of its received messages.
+ err = wsConn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
+ require.NoError(t, err, fmt.Sprintf("error setting read deadline for client %d", clientID))
+
+ _, data, err := wsConn.ReadMessage()
+ if os.IsTimeout(err) {
+ continue
+ }
+ require.NoError(t, err, fmt.Sprintf("error reading message for client %d", clientID))
- for i := 0; i < totalMessages; i++ {
- _, p, err := wsConn.ReadMessage()
- require.NoError(t, err, fmt.Sprintf("error reading message for client %d, message %d", clientID, i))
- assert.Equal(t, fmt.Sprintf("Hello %d", i), string(p), fmt.Sprintf("message not equal for client %d", clientID))
+ message := string(data)
+ _, ok := messages[message]
+ require.True(t, ok, fmt.Sprintf("message is unknown or consumed before %s", message))
+ delete(messages, message)
+ readMessages++
}
}
-
+ // Consumed all messages
+ require.Empty(t, messages)
}
func setUpStreamAPI(ts *Test, apiName string, streamConfig string) error {
@@ -326,7 +360,7 @@ func setupOASForStreamAPI(streamingConfig string) (oas.OAS, error) {
}
oasAPI.Extensions = map[string]interface{}{
- ExtensionTykStreaming: parsedStreamingConfig,
+ streams.ExtensionTykStreaming: parsedStreamingConfig,
}
return oasAPI, nil
@@ -349,11 +383,7 @@ func TestAsyncAPI(t *testing.T) {
t.SkipNow()
ts := StartTest(func(globalConf *config.Config) {
- globalConf.Labs = map[string]interface{}{
- "streaming": map[string]interface{}{
- "enabled": true,
- },
- }
+ globalConf.Streaming.Enabled = true
})
ts.Gw.BuildAndLoadAPI(func(spec *APISpec) {
@@ -418,7 +448,7 @@ streams:
}
oasAPI.Extensions = map[string]interface{}{
- ExtensionTykStreaming: parsedStreamingConfig,
+ streams.ExtensionTykStreaming: parsedStreamingConfig,
// oas.ExtensionTykAPIGateway: tykExtension,
}
@@ -440,8 +470,8 @@ streams:
// Check that standard API still works
_, _ = ts.Run(t, test.TestCase{Code: http.StatusOK, Method: http.MethodGet, Path: "/test"})
- if globalStreamCounter.Load() != 1 {
- t.Fatalf("Expected 1 stream, got %d", globalStreamCounter.Load())
+ if streams.GlobalStreamCounter.Load() != 1 {
+ t.Fatalf("Expected 1 stream, got %d", streams.GlobalStreamCounter.Load())
}
time.Sleep(500 * time.Millisecond)
@@ -564,7 +594,7 @@ streams:
}
oasAPI.Extensions = map[string]interface{}{
- ExtensionTykStreaming: parsedStreamingConfig,
+ streams.ExtensionTykStreaming: parsedStreamingConfig,
}
return oasAPI
@@ -576,7 +606,7 @@ func testAsyncAPIHttp(t *testing.T, ts *Test, isDynamic bool, tenantID string, a
const numMessages = 2
const numClients = 2
- streamCount := globalStreamCounter.Load()
+ streamCount := streams.GlobalStreamCounter.Load()
t.Logf("Stream count for tenant %s: %d", tenantID, streamCount)
// Create WebSocket clients
@@ -727,11 +757,7 @@ func TestWebSocketConnectionClosedOnAPIReload(t *testing.T) {
}
ts := StartTest(func(globalConf *config.Config) {
- globalConf.Labs = map[string]interface{}{
- "streaming": map[string]interface{}{
- "enabled": true,
- },
- }
+ globalConf.Streaming.Enabled = true
})
defer ts.Close()
@@ -781,3 +807,192 @@ func TestWebSocketConnectionClosedOnAPIReload(t *testing.T) {
t.Log("WebSocket connection was successfully closed on API reload")
}
+
+func TestStreamingAPISingleClient_Input_HTTPServer(t *testing.T) {
+ ts := StartTest(func(globalConf *config.Config) {
+ globalConf.Streaming.Enabled = true
+ })
+ t.Cleanup(func() {
+ ts.Close()
+ })
+
+ apiName := "test-api"
+ if err := setUpStreamAPI(ts, apiName, bentoHTTPServerTemplate); err != nil {
+ t.Fatal(err)
+ }
+
+ const totalMessages = 3
+
+ dialer := websocket.Dialer{
+ HandshakeTimeout: 1 * time.Second,
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+
+ wsURL := strings.Replace(ts.URL, "http", "ws", 1) + fmt.Sprintf("/%s/subscribe", apiName)
+ wsConn, _, err := dialer.Dial(wsURL, nil)
+ require.NoError(t, err, "failed to connect to ws server")
+ t.Cleanup(func() {
+ if err = wsConn.Close(); err != nil {
+ t.Logf("failed to close ws connection: %v", err)
+ }
+ })
+
+ publishURL := fmt.Sprintf("%s/%s/post", ts.URL, apiName)
+ for i := 0; i < totalMessages; i++ {
+ data := []byte(fmt.Sprintf("{\"test\": \"message %d\"}", i))
+ resp, err := http.Post(publishURL, "application/json", bytes.NewReader(data))
+ require.NoError(t, err)
+ _ = resp.Body.Close()
+ }
+
+ err = wsConn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
+ require.NoError(t, err, "error setting read deadline")
+
+ for i := 0; i < totalMessages; i++ {
+ println("reading message", i)
+ _, p, err := wsConn.ReadMessage()
+ require.NoError(t, err, "error reading message")
+ assert.Equal(t, fmt.Sprintf("{\"test\": \"message %d\"}", i), string(p), "message not equal")
+ }
+}
+
+func TestStreamingAPIMultipleClients_Input_HTTPServer(t *testing.T) {
+ // Testing input http -> output http (3 output instances and 10 messages)
+ // Messages are distributed in a round-robin fashion.
+
+ ts := StartTest(func(globalConf *config.Config) {
+ globalConf.Streaming.Enabled = true
+ })
+ t.Cleanup(func() {
+ ts.Close()
+ })
+
+ apiName := "test-api"
+ if err := setUpStreamAPI(ts, apiName, bentoHTTPServerTemplate); err != nil {
+ t.Fatal(err)
+ }
+
+ const (
+ totalSubscribers = 3
+ totalMessages = 10
+ )
+ dialer := websocket.Dialer{
+ HandshakeTimeout: 1 * time.Second,
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+
+ wsURL := strings.Replace(ts.URL, "http", "ws", 1) + fmt.Sprintf("/%s/subscribe", apiName)
+
+ // Create multiple WebSocket connections
+ var wsConns []*websocket.Conn
+ for i := 0; i < totalSubscribers; i++ {
+ wsConn, _, err := dialer.Dial(wsURL, nil)
+ require.NoError(t, err, fmt.Sprintf("failed to connect to ws server for client %d", i))
+ wsConns = append(wsConns, wsConn)
+ t.Cleanup(func() {
+ if err := wsConn.Close(); err != nil {
+ t.Logf("failed to close ws connection: %v", err)
+ }
+ })
+ }
+
+ // Publish 10 messages
+ messages := make(map[string]struct{})
+ publishURL := fmt.Sprintf("%s/%s/post", ts.URL, apiName)
+ for i := 0; i < totalMessages; i++ {
+ message := fmt.Sprintf("{\"test\": \"message %d\"}", i)
+ messages[message] = struct{}{}
+
+ data := []byte(message)
+ resp, err := http.Post(publishURL, "application/json", bytes.NewReader(data))
+ require.NoError(t, err)
+ _ = resp.Body.Close()
+ }
+
+ // Read messages from all subscribers
+ // Messages are distributed in a round-robin fashion, count the number of messages and check the messages individually.
+ var readMessages int
+ for readMessages < totalMessages {
+ for clientID, wsConn := range wsConns {
+ // We need to stop waiting for a message if the subscriber is consumed all of its received messages.
+ err := wsConn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
+ require.NoError(t, err, fmt.Sprintf("error while setting read deadline for client %d", clientID))
+
+ _, data, err := wsConn.ReadMessage()
+ if os.IsTimeout(err) {
+ continue
+ }
+ require.NoError(t, err, fmt.Sprintf("error while reading message %d", clientID))
+
+ message := string(data)
+ _, ok := messages[message]
+ require.True(t, ok, fmt.Sprintf("message is unknown or consumed before %s", message))
+ delete(messages, message)
+ readMessages++
+ }
+ }
+ require.Empty(t, messages)
+}
+
+type DummyBase struct {
+ model.LoggerProvider
+}
+
+func (d *DummyBase) Logger() *logrus.Entry {
+ return logrus.NewEntry(logrus.New())
+}
+
+func TestStreamingAPIGarbageCollection(t *testing.T) {
+ ts := StartTest(func(globalConf *config.Config) {
+ globalConf.Streaming.Enabled = true
+ })
+ t.Cleanup(func() {
+ ts.Close()
+ })
+
+ oasAPI, err := setupOASForStreamAPI(bentoHTTPServerTemplate)
+ require.NoError(t, err)
+
+ apiName := "test-api"
+
+ specs := ts.Gw.BuildAndLoadAPI(func(spec *APISpec) {
+ spec.Proxy.ListenPath = fmt.Sprintf("/%s", apiName)
+ spec.UseKeylessAccess = true
+ spec.IsOAS = true
+ spec.OAS = oasAPI
+ spec.OAS.Fill(*spec.APIDefinition)
+ })
+
+ apiSpec := streams.NewAPISpec(specs[0].APIID, specs[0].Name, specs[0].IsOAS, specs[0].OAS, specs[0].StripListenPath)
+
+ s := streams.NewMiddleware(ts.Gw, &DummyBase{}, apiSpec)
+
+ if err := setUpStreamAPI(ts, apiName, bentoHTTPServerTemplate); err != nil {
+ t.Fatal(err)
+ }
+
+ // Dummy request to create a stream manager
+ publishURL := fmt.Sprintf("%s/%s/post", ts.URL, apiName)
+ r, err := http.NewRequest("POST", publishURL, nil)
+ require.NoError(t, err)
+
+ s.CreateStreamManager(r)
+
+ // We should have a Stream manager in the cache.
+ var streamManagersBeforeGC int
+ s.StreamManagerCache.Range(func(k, v interface{}) bool {
+ streamManagersBeforeGC++
+ return true
+ })
+ require.Equal(t, 1, streamManagersBeforeGC)
+
+ s.GC()
+
+ // Garbage collection removed the stream manager because the activity counter is zero.
+ var streamManagersAfterGC int
+ s.StreamManagerCache.Range(func(k, v interface{}) bool {
+ streamManagersAfterGC++
+ return true
+ })
+ require.Equal(t, 0, streamManagersAfterGC)
+}
diff --git a/gateway/mw_transform.go b/gateway/mw_transform.go
index 341401746ed..e803f8421dc 100644
--- a/gateway/mw_transform.go
+++ b/gateway/mw_transform.go
@@ -107,7 +107,7 @@ func transformBody(r *http.Request, tmeta *TransformSpec, t *TransformMiddleware
return fmt.Errorf("failed to apply template to request: %w", err)
}
- s := t.Gw.replaceTykVariables(r, bodyBuffer.String(), true)
+ s := t.Gw.ReplaceTykVariables(r, bodyBuffer.String(), true)
newBuf := bytes.NewBufferString(s)
diff --git a/gateway/mw_upstream_basic_auth.go b/gateway/mw_upstream_basic_auth.go
new file mode 100644
index 00000000000..db495751b69
--- /dev/null
+++ b/gateway/mw_upstream_basic_auth.go
@@ -0,0 +1,34 @@
+//go:build !ee && !dev
+
+package gateway
+
+import (
+ "net/http"
+)
+
+func getUpstreamBasicAuthMw(base *BaseMiddleware) TykMiddleware {
+ return &noopUpstreamBasicAuth{base}
+}
+
+type noopUpstreamBasicAuth struct {
+ *BaseMiddleware
+}
+
+// ProcessRequest is noop implementation for upstream basic auth mw.
+func (d *noopUpstreamBasicAuth) ProcessRequest(_ http.ResponseWriter, r *http.Request, _ interface{}) (error, int) {
+ return nil, http.StatusOK
+}
+
+// EnabledForSpec will always return false for noopUpstreamBasicAuth.
+func (d *noopUpstreamBasicAuth) EnabledForSpec() bool {
+ if d.Spec.UpstreamAuth.BasicAuth.Enabled {
+ d.Logger().Error("Upstream basic auth is supported only in Tyk Enterprise Edition")
+ }
+
+ return false
+}
+
+// Name returns the name of the mw.
+func (d *noopUpstreamBasicAuth) Name() string {
+ return "NooPUpstreamBasicAuth"
+}
diff --git a/gateway/mw_upstream_basic_auth_ee.go b/gateway/mw_upstream_basic_auth_ee.go
new file mode 100644
index 00000000000..accac6dc4e3
--- /dev/null
+++ b/gateway/mw_upstream_basic_auth_ee.go
@@ -0,0 +1,14 @@
+//go:build ee || dev
+
+package gateway
+
+import (
+ "github.com/TykTechnologies/tyk/ee/middleware/upstreambasicauth"
+)
+
+func getUpstreamBasicAuthMw(base *BaseMiddleware) TykMiddleware {
+ spec := base.Spec
+ mwSpec := upstreambasicauth.NewAPISpec(spec.APIID, spec.Name, spec.IsOAS, spec.OAS, spec.UpstreamAuth)
+ upstreamBasicAuthMw := upstreambasicauth.NewMiddleware(base.Gw, base, mwSpec)
+ return WrapMiddleware(base, upstreamBasicAuthMw)
+}
diff --git a/gateway/mw_upstream_basic_auth_test.go b/gateway/mw_upstream_basic_auth_test.go
new file mode 100644
index 00000000000..a344f355fc4
--- /dev/null
+++ b/gateway/mw_upstream_basic_auth_test.go
@@ -0,0 +1,148 @@
+//go:build ee || dev
+
+package gateway
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/TykTechnologies/tyk/apidef"
+ "github.com/TykTechnologies/tyk/header"
+ "github.com/TykTechnologies/tyk/test"
+)
+
+func TestUpstreamBasicAuthentication(t *testing.T) {
+ ts := StartTest(nil)
+ t.Cleanup(func() {
+ ts.Close()
+ })
+
+ userName, password, customAuthHeader := "user", "password", "Custom-Auth"
+ expectedAuth := fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(userName+":"+password)))
+
+ ts.Gw.BuildAndLoadAPI(
+ func(spec *APISpec) {
+ spec.Proxy.ListenPath = "/upstream-basic-auth-enabled/"
+ spec.UseKeylessAccess = true
+ spec.UpstreamAuth = apidef.UpstreamAuth{
+ Enabled: true,
+ BasicAuth: apidef.UpstreamBasicAuth{
+ Enabled: true,
+ Username: userName,
+ Password: password,
+ },
+ }
+ spec.Proxy.StripListenPath = true
+ }, func(spec *APISpec) {
+ spec.Proxy.ListenPath = "/upstream-basic-auth-custom-header/"
+ spec.UseKeylessAccess = true
+ spec.UpstreamAuth = apidef.UpstreamAuth{
+ Enabled: true,
+ BasicAuth: apidef.UpstreamBasicAuth{
+ Enabled: true,
+ Username: userName,
+ Password: password,
+ Header: apidef.AuthSource{
+ Enabled: true,
+ Name: customAuthHeader,
+ },
+ },
+ }
+ spec.Proxy.StripListenPath = true
+ },
+ func(spec *APISpec) {
+ spec.Proxy.ListenPath = "/upstream-basic-auth-disabled/"
+ spec.UseKeylessAccess = true
+ spec.UpstreamAuth = apidef.UpstreamAuth{
+ Enabled: true,
+ BasicAuth: apidef.UpstreamBasicAuth{
+ Enabled: false,
+ Username: userName,
+ Password: password,
+ },
+ }
+ spec.Proxy.StripListenPath = true
+ },
+ func(spec *APISpec) {
+ spec.Proxy.ListenPath = "/upstream-auth-disabled/"
+ spec.UseKeylessAccess = true
+ spec.UpstreamAuth = apidef.UpstreamAuth{
+ Enabled: false,
+ }
+ spec.Proxy.StripListenPath = true
+ },
+ )
+
+ ts.Run(t, test.TestCases{
+ {
+ Path: "/upstream-basic-auth-enabled/",
+ Code: http.StatusOK,
+ BodyMatchFunc: func(body []byte) bool {
+ resp := struct {
+ Headers map[string]string `json:"headers"`
+ }{}
+ err := json.Unmarshal(body, &resp)
+ assert.NoError(t, err)
+
+ assert.Contains(t, resp.Headers, header.Authorization)
+ assert.NotEmpty(t, resp.Headers[header.Authorization])
+ assert.Equal(t, expectedAuth, resp.Headers[header.Authorization])
+
+ return true
+ },
+ },
+ {
+ Path: "/upstream-basic-auth-custom-header/",
+ Code: http.StatusOK,
+ BodyMatchFunc: func(body []byte) bool {
+ resp := struct {
+ Headers map[string]string `json:"headers"`
+ }{}
+ err := json.Unmarshal(body, &resp)
+ assert.NoError(t, err)
+
+ assert.Contains(t, resp.Headers, customAuthHeader)
+ assert.NotEmpty(t, resp.Headers[customAuthHeader])
+ assert.Equal(t, expectedAuth, resp.Headers[customAuthHeader])
+
+ return true
+ },
+ },
+ {
+ Path: "/upstream-basic-auth-disabled/",
+ Code: http.StatusOK,
+ BodyMatchFunc: func(body []byte) bool {
+ resp := struct {
+ Headers map[string]string `json:"headers"`
+ }{}
+ err := json.Unmarshal(body, &resp)
+ assert.NoError(t, err)
+
+ assert.NotContains(t, resp.Headers, header.Authorization)
+
+ return true
+ },
+ },
+ {
+ Path: "/upstream-auth-disabled/",
+ Code: http.StatusOK,
+ BodyMatchFunc: func(body []byte) bool {
+ resp := struct {
+ Headers map[string]string `json:"headers"`
+ }{}
+ err := json.Unmarshal(body, &resp)
+ assert.NoError(t, err)
+
+ assert.NotContains(t, resp.Headers, header.Authorization)
+
+ return true
+ },
+ },
+ }...)
+
+}
diff --git a/gateway/mw_url_rewrite.go b/gateway/mw_url_rewrite.go
index b3e75f37505..019ce7a1346 100644
--- a/gateway/mw_url_rewrite.go
+++ b/gateway/mw_url_rewrite.go
@@ -205,12 +205,16 @@ func (gw *Gateway) urlRewrite(meta *apidef.URLRewriteMeta, r *http.Request) (str
ctxSetUrlRewritePath(r, meta.Path)
}
- newpath = gw.replaceTykVariables(r, newpath, true)
+ newpath = gw.ReplaceTykVariables(r, newpath, true)
return newpath, nil
}
-func (gw *Gateway) replaceTykVariables(r *http.Request, in string, escape bool) string {
+// ReplaceTykVariables implements a variable replacement hook. It will replace
+// the template `in`. If `escape` is true, the values get escaped as a query
+// parameter for a HTTP request would. If no replacement has been made, `in`
+// is returned without modification.
+func (gw *Gateway) ReplaceTykVariables(r *http.Request, in string, escape bool) string {
if strings.Contains(in, secretsConfLabel) {
contextData := ctxGetData(r)
diff --git a/gateway/policy_test.go b/gateway/policy_test.go
index 84184ee97bf..7acfd270cb4 100644
--- a/gateway/policy_test.go
+++ b/gateway/policy_test.go
@@ -1,14 +1,11 @@
package gateway
import (
- "embed"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"reflect"
- "slices"
- "sort"
"strconv"
"testing"
"time"
@@ -17,20 +14,14 @@ import (
"github.com/stretchr/testify/assert"
- "github.com/TykTechnologies/graphql-go-tools/pkg/graphql"
- "github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/config"
"github.com/TykTechnologies/tyk/header"
- "github.com/TykTechnologies/tyk/rpc"
"github.com/TykTechnologies/tyk/test"
"github.com/TykTechnologies/tyk/user"
"github.com/TykTechnologies/tyk/internal/uuid"
)
-//go:embed testdata/*.json
-var testDataFS embed.FS
-
func TestLoadPoliciesFromDashboardReLogin(t *testing.T) {
// Test Dashboard
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -51,1090 +42,6 @@ func TestLoadPoliciesFromDashboardReLogin(t *testing.T) {
assert.Empty(t, policyMap)
}
-type testApplyPoliciesData struct {
- name string
- policies []string
- errMatch string // substring
- sessMatch func(*testing.T, *user.SessionState) // ignored if nil
- session *user.SessionState
- // reverseOrder executes the tests in reversed order of policies,
- // in addition to the order specified in policies
- reverseOrder bool
-}
-
-func (s *Test) testPrepareApplyPolicies(tb testing.TB) (*BaseMiddleware, []testApplyPoliciesData) {
- tb.Helper()
-
- f, err := testDataFS.ReadFile("testdata/policies.json")
- assert.NoError(tb, err)
-
- var policies = make(map[string]user.Policy)
- err = json.Unmarshal(f, &policies)
- assert.NoError(tb, err)
-
- s.Gw.policiesMu.RLock()
- s.Gw.policiesByID = policies
- s.Gw.policiesMu.RUnlock()
-
- bmid := &BaseMiddleware{
- Spec: &APISpec{
- APIDefinition: &apidef.APIDefinition{},
- },
- Gw: s.Gw,
- }
- // splitting tests for readability
- var tests []testApplyPoliciesData
-
- nilSessionTCs := []testApplyPoliciesData{
- {
- "Empty", nil,
- "", nil, nil, false,
- },
- {
- "Single", []string{"nonpart1"},
- "", nil, nil, false,
- },
- {
- "Missing", []string{"nonexistent"},
- "not found", nil, nil, false,
- },
- {
- "DiffOrg", []string{"difforg"},
- "different org", nil, nil, false,
- },
- }
- tests = append(tests, nilSessionTCs...)
-
- nonPartitionedTCs := []testApplyPoliciesData{
- {
- name: "MultiNonPart",
- policies: []string{"nonpart1", "nonpart2", "nonexistent"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- want := map[string]user.AccessDefinition{
- "a": {
- Limit: user.APILimit{},
- AllowanceScope: "p1",
- },
- "b": {
- Limit: user.APILimit{},
- AllowanceScope: "p2",
- },
- }
-
- assert.Equal(t, want, s.AccessRights)
- },
- },
- {
- name: "MultiACLPolicy",
- policies: []string{"nonpart3"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- want := map[string]user.AccessDefinition{
- "a": {
- Limit: user.APILimit{},
- },
- "b": {
- Limit: user.APILimit{},
- },
- }
-
- assert.Equal(t, want, s.AccessRights)
- },
- },
- }
- tests = append(tests, nonPartitionedTCs...)
-
- quotaPartitionTCs := []testApplyPoliciesData{
- {
- "QuotaPart with unlimited", []string{"unlimited-quota"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.QuotaMax != -1 {
- t.Fatalf("want unlimited quota to be -1")
- }
- }, nil, false,
- },
- {
- "QuotaPart", []string{"quota1"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.QuotaMax != 2 {
- t.Fatalf("want QuotaMax to be 2")
- }
- }, nil, false,
- },
- {
- "QuotaParts", []string{"quota1", "quota2"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.QuotaMax != 3 {
- t.Fatalf("Should pick bigger value")
- }
- }, nil, false,
- },
- {
- "QuotaParts with acl", []string{"quota5", "quota4"},
- "", func(t *testing.T, s *user.SessionState) {
- assert.Equal(t, int64(4), s.QuotaMax)
- }, nil, false,
- },
- {
- "QuotaPart with access rights", []string{"quota3"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.QuotaMax != 3 {
- t.Fatalf("quota should be the same as policy quota")
- }
- }, nil, false,
- },
- {
- "QuotaPart with access rights in multi-policy", []string{"quota4", "nonpart1"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.QuotaMax != 3 {
- t.Fatalf("quota should be the same as policy quota")
- }
-
- // Don't apply api 'b' coming from quota4 policy
- want := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{}}}
- assert.Equal(t, want, s.AccessRights)
- }, nil, false,
- },
- }
- tests = append(tests, quotaPartitionTCs...)
-
- rateLimitPartitionTCs := []testApplyPoliciesData{
- {
- "RatePart with unlimited", []string{"unlimited-rate"},
- "", func(t *testing.T, s *user.SessionState) {
- assert.True(t, s.Rate <= 0, "want unlimited rate to be <= 0")
- }, nil, false,
- },
- {
- "RatePart", []string{"rate1"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.Rate != 3 {
- t.Fatalf("want Rate to be 3")
- }
- }, nil, false,
- },
- {
- "RateParts", []string{"rate1", "rate2"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.Rate != 4 {
- t.Fatalf("Should pick bigger value")
- }
- }, nil, false,
- },
- {
- "RateParts with acl", []string{"rate5", "rate4"},
- "", func(t *testing.T, s *user.SessionState) {
- assert.Equal(t, float64(10), s.Rate)
- }, nil, false,
- },
- {
- "RateParts with acl respected by session", []string{"rate4", "rate5"},
- "", func(t *testing.T, s *user.SessionState) {
- assert.Equal(t, float64(10), s.Rate)
- }, &user.SessionState{Rate: 20}, false,
- },
- {
- "Rate with no partition respected by session", []string{"rate-no-partition"},
- "", func(t *testing.T, s *user.SessionState) {
- assert.Equal(t, float64(12), s.Rate)
- }, &user.SessionState{Rate: 20}, false,
- },
- }
- tests = append(tests, rateLimitPartitionTCs...)
-
- complexityPartitionTCs := []testApplyPoliciesData{
- {
- "ComplexityPart with unlimited", []string{"unlimitedComplexity"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.MaxQueryDepth != -1 {
- t.Fatalf("unlimitied query depth should be -1")
- }
- }, nil, false,
- },
- {
- "ComplexityPart", []string{"complexity1"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.MaxQueryDepth != 2 {
- t.Fatalf("want MaxQueryDepth to be 2")
- }
- }, nil, false,
- },
- {
- "ComplexityParts", []string{"complexity1", "complexity2"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.MaxQueryDepth != 3 {
- t.Fatalf("Should pick bigger value")
- }
- }, nil, false,
- },
- }
- tests = append(tests, complexityPartitionTCs...)
-
- aclPartitionTCs := []testApplyPoliciesData{
- {
- "AclPart", []string{"acl1"},
- "", func(t *testing.T, s *user.SessionState) {
- want := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{}}}
-
- assert.Equal(t, want, s.AccessRights)
- }, nil, false,
- },
- {
- "AclPart", []string{"acl1", "acl2"},
- "", func(t *testing.T, s *user.SessionState) {
- want := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{}}, "b": {Limit: user.APILimit{}}}
- assert.Equal(t, want, s.AccessRights)
- }, nil, false,
- },
- {
- "Acl for a and rate for a,b", []string{"acl1", "rate-for-a-b"},
- "", func(t *testing.T, s *user.SessionState) {
- want := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{RateLimit: user.RateLimit{Rate: 4, Per: 1}}}}
- assert.Equal(t, want, s.AccessRights)
- }, nil, false,
- },
- {
- "Acl for a,b and individual rate for a,b", []string{"acl-for-a-b", "rate-for-a", "rate-for-b"},
- "", func(t *testing.T, s *user.SessionState) {
- want := map[string]user.AccessDefinition{
- "a": {Limit: user.APILimit{RateLimit: user.RateLimit{Rate: 4, Per: 1}}},
- "b": {Limit: user.APILimit{RateLimit: user.RateLimit{Rate: 2, Per: 1}}},
- }
- assert.Equal(t, want, s.AccessRights)
- }, nil, false,
- },
- {
- "RightsUpdate", []string{"acl3"},
- "", func(t *testing.T, ses *user.SessionState) {
- newPolicy := user.Policy{
- AccessRights: map[string]user.AccessDefinition{"a": {Limit: user.APILimit{}}, "b": {Limit: user.APILimit{}}, "c": {Limit: user.APILimit{}}},
- }
-
- s.Gw.policiesMu.Lock()
- s.Gw.policiesByID["acl3"] = newPolicy
- s.Gw.policiesMu.Unlock()
- err := bmid.ApplyPolicies(ses)
- if err != nil {
- t.Fatalf("couldn't apply policy: %s", err.Error())
- }
- assert.Equal(t, newPolicy.AccessRights, ses.AccessRights)
- }, nil, false,
- },
- }
- tests = append(tests, aclPartitionTCs...)
-
- inactiveTCs := []testApplyPoliciesData{
- {
- "InactiveMergeOne", []string{"tags1", "inactive1"},
- "", func(t *testing.T, s *user.SessionState) {
- if !s.IsInactive {
- t.Fatalf("want IsInactive to be true")
- }
- }, nil, false,
- },
- {
- "InactiveMergeAll", []string{"inactive1", "inactive2"},
- "", func(t *testing.T, s *user.SessionState) {
- if !s.IsInactive {
- t.Fatalf("want IsInactive to be true")
- }
- }, nil, false,
- },
- {
- "InactiveWithSession", []string{"tags1", "tags2"},
- "", func(t *testing.T, s *user.SessionState) {
- if !s.IsInactive {
- t.Fatalf("want IsInactive to be true")
- }
- }, &user.SessionState{
- IsInactive: true,
- }, false,
- },
- }
- tests = append(tests, inactiveTCs...)
-
- perAPITCs := []testApplyPoliciesData{
- {
- name: "Per API is set with other partitions to true",
- policies: []string{"per_api_and_partitions"},
- errMatch: "cannot apply policy per_api_and_partitions which has per_api and any of partitions set",
- },
- {
- name: "Per API is set to true with some partitions set to true",
- policies: []string{"per_api_and_some_partitions"},
- errMatch: "cannot apply policy per_api_and_some_partitions which has per_api and any of partitions set",
- },
- {
- name: "Per API is set to true with no other partitions set to true",
- policies: []string{"per_api_and_no_other_partitions"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- want := map[string]user.AccessDefinition{
- "c": {
- Limit: user.APILimit{
- RateLimit: user.RateLimit{
- Rate: 2000,
- Per: 60,
- },
- QuotaMax: -1,
- },
- AllowanceScope: "c",
- },
- "d": {
- Limit: user.APILimit{
- RateLimit: user.RateLimit{
- Rate: 20,
- Per: 1,
- },
- QuotaMax: 1000,
- QuotaRenewalRate: 3600,
- },
- AllowanceScope: "d",
- },
- }
- assert.Equal(t, want, s.AccessRights)
- },
- },
- {
- name: "several policies with Per API set to true specifying limit for the same API",
- policies: []string{"per_api_and_no_other_partitions", "per_api_with_api_d"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- want := map[string]user.AccessDefinition{
- "c": {
- Limit: user.APILimit{
- RateLimit: user.RateLimit{
- Rate: 2000,
- Per: 60,
- },
- QuotaMax: -1,
- },
- AllowanceScope: "c",
- },
- "d": {
- Limit: user.APILimit{
- RateLimit: user.RateLimit{
- Rate: 200,
- Per: 10,
- },
- QuotaMax: 5000,
- QuotaRenewalRate: 3600,
- },
- AllowanceScope: "d",
- },
- }
- assert.Equal(t, want, s.AccessRights)
- },
- },
- {
- name: "several policies with Per API set to true specifying limit for the same APIs",
- policies: []string{"per_api_and_no_other_partitions", "per_api_with_api_d", "per_api_with_api_c"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- want := map[string]user.AccessDefinition{
- "c": {
- Limit: user.APILimit{
- RateLimit: user.RateLimit{
- Rate: 3000,
- Per: 10,
- },
- QuotaMax: -1,
- },
- AllowanceScope: "c",
- },
- "d": {
- Limit: user.APILimit{
- RateLimit: user.RateLimit{
- Rate: 200,
- Per: 10,
- },
- QuotaMax: 5000,
- QuotaRenewalRate: 3600,
- },
- AllowanceScope: "d",
- },
- }
- assert.Equal(t, want, s.AccessRights)
- },
- },
- {
- name: "several policies, mixed the one which has Per API set to true and partitioned ones",
- policies: []string{"per_api_with_api_d", "quota1"},
- errMatch: "cannot apply multiple policies when some have per_api set and some are partitioned",
- },
- {
- name: "several policies, mixed the one which has Per API set to true and partitioned ones (different order)",
- policies: []string{"rate1", "per_api_with_api_d"},
- errMatch: "cannot apply multiple policies when some have per_api set and some are partitioned",
- },
- {
- name: "Per API is set to true and some API gets limit set from policy's fields",
- policies: []string{"per_api_with_limit_set_from_policy"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- want := map[string]user.AccessDefinition{
- "e": {
- Limit: user.APILimit{
- QuotaMax: -1,
- RateLimit: user.RateLimit{
- Rate: 300,
- Per: 1,
- },
- },
- AllowanceScope: "per_api_with_limit_set_from_policy",
- },
- "d": {
- Limit: user.APILimit{
- QuotaMax: 5000,
- QuotaRenewalRate: 3600,
- RateLimit: user.RateLimit{
- Rate: 200,
- Per: 10,
- },
- },
- AllowanceScope: "d",
- },
- }
- assert.Equal(t, want, s.AccessRights)
- },
- },
- {
- name: "Per API with limits override",
- policies: []string{
- "per_api_with_limit_set_from_policy",
- "per_api_with_api_d",
- "per_api_with_higher_rate_on_api_d",
- },
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- want := map[string]user.AccessDefinition{
- "e": {
- Limit: user.APILimit{
- QuotaMax: -1,
- RateLimit: user.RateLimit{
- Rate: 300,
- Per: 1,
- },
- },
- AllowanceScope: "per_api_with_limit_set_from_policy",
- },
- "d": {
- Limit: user.APILimit{
- QuotaMax: 5000,
- QuotaRenewalRate: 3600,
- RateLimit: user.RateLimit{
- Rate: 200,
- Per: 10,
- },
- },
- AllowanceScope: "d",
- },
- }
- assert.Equal(t, want, s.AccessRights)
- },
- },
- }
- tests = append(tests, perAPITCs...)
-
- graphQLTCs := []testApplyPoliciesData{
- {
- name: "Merge per path rules for the same API",
- policies: []string{"per-path2", "per-path1"},
- sessMatch: func(t *testing.T, sess *user.SessionState) {
- want := map[string]user.AccessDefinition{
- "a": {
- AllowedURLs: []user.AccessSpec{
- {URL: "/user", Methods: []string{"GET", "POST"}},
- {URL: "/companies", Methods: []string{"GET", "POST"}},
- },
- Limit: user.APILimit{},
- },
- "b": {
- AllowedURLs: []user.AccessSpec{
- {URL: "/", Methods: []string{"PUT"}},
- },
- Limit: user.APILimit{},
- },
- }
-
- gotPolicy, ok := s.Gw.PolicyByID("per-path2")
-
- assert.True(t, ok)
- assert.Equal(t, user.AccessSpec{
- URL: "/user", Methods: []string{"GET"},
- }, gotPolicy.AccessRights["a"].AllowedURLs[0])
-
- assert.Equal(t, want, sess.AccessRights)
- },
- },
- {
- name: "Merge restricted fields for the same GraphQL API",
- policies: []string{"restricted-types1", "restricted-types2"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- want := map[string]user.AccessDefinition{
- "a": { // It should get intersection of restricted types.
- RestrictedTypes: []graphql.Type{
- {Name: "Country", Fields: []string{"code"}},
- {Name: "Person", Fields: []string{"name"}},
- },
- Limit: user.APILimit{},
- },
- }
-
- assert.Equal(t, want, s.AccessRights)
- },
- },
- {
- name: "Merge allowed fields for the same GraphQL API",
- policies: []string{"allowed-types1", "allowed-types2"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- want := map[string]user.AccessDefinition{
- "a": { // It should get intersection of restricted types.
- AllowedTypes: []graphql.Type{
- {Name: "Country", Fields: []string{"code"}},
- {Name: "Person", Fields: []string{"name"}},
- },
- Limit: user.APILimit{},
- },
- }
-
- assert.Equal(t, want, s.AccessRights)
- },
- },
- {
- name: "If GQL introspection is disabled, it remains disabled after merging",
- policies: []string{"introspection-disabled", "introspection-enabled"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- want := map[string]user.AccessDefinition{
- "a": {
- DisableIntrospection: true, // If GQL introspection is disabled, it remains disabled after merging.
- Limit: user.APILimit{},
- },
- }
-
- assert.Equal(t, want, s.AccessRights)
- },
- },
- {
- name: "Merge field level depth limit for the same GraphQL API",
- policies: []string{"field-level-depth-limit1", "field-level-depth-limit2"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- want := map[string]user.AccessDefinition{
- "graphql-api": {
- Limit: user.APILimit{},
- FieldAccessRights: []user.FieldAccessDefinition{
- {TypeName: "Query", FieldName: "people", Limits: user.FieldLimits{MaxQueryDepth: 4}},
- {TypeName: "Mutation", FieldName: "putPerson", Limits: user.FieldLimits{MaxQueryDepth: -1}},
- {TypeName: "Query", FieldName: "countries", Limits: user.FieldLimits{MaxQueryDepth: 3}},
- {TypeName: "Query", FieldName: "continents", Limits: user.FieldLimits{MaxQueryDepth: 4}},
- },
- },
- }
-
- assert.Equal(t, want, s.AccessRights)
- },
- },
- }
- tests = append(tests, graphQLTCs...)
-
- throttleTCs := []testApplyPoliciesData{
- {
- "Throttle interval from policy", []string{"throttle1"},
- "", func(t *testing.T, s *user.SessionState) {
- if s.ThrottleInterval != 9 {
- t.Fatalf("Throttle interval should be 9 inherited from policy")
- }
- }, nil, false,
- },
- {
- name: "Throttle retry limit from policy",
- policies: []string{"throttle1"},
- errMatch: "",
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- if s.ThrottleRetryLimit != 99 {
- t.Fatalf("Throttle interval should be 9 inherited from policy")
- }
- },
- session: nil,
- },
- }
- tests = append(tests, throttleTCs...)
-
- tagsTCs := []testApplyPoliciesData{
- {
- "TagMerge", []string{"tags1", "tags2"},
- "", func(t *testing.T, s *user.SessionState) {
- want := []string{"key-tag", "tagA", "tagX", "tagY"}
- sort.Strings(s.Tags)
-
- assert.Equal(t, want, s.Tags)
- }, &user.SessionState{
- Tags: []string{"key-tag"},
- }, false,
- },
- }
- tests = append(tests, tagsTCs...)
-
- partitionTCs := []testApplyPoliciesData{
- {
- "NonpartAndPart", []string{"nonpart1", "quota1"},
- "", nil, nil, false,
- },
- {
- name: "inherit quota and rate from partitioned policies",
- policies: []string{"quota1", "rate3"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- if s.QuotaMax != 2 {
- t.Fatalf("quota should be the same as quota policy")
- }
- if s.Rate != 4 {
- t.Fatalf("rate should be the same as rate policy")
- }
- if s.Per != 4 {
- t.Fatalf("Rate per seconds should be the same as rate policy")
- }
- },
- },
- {
- name: "inherit quota and rate from partitioned policies applied in different order",
- policies: []string{"rate3", "quota1"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
-
- if s.QuotaMax != 2 {
- t.Fatalf("quota should be the same as quota policy")
- }
- if s.Rate != 4 {
- t.Fatalf("rate should be the same as rate policy")
- }
- if s.Per != 4 {
- t.Fatalf("Rate per seconds should be the same as rate policy")
- }
- },
- },
- }
- tests = append(tests, partitionTCs...)
-
- endpointRLTCs := []testApplyPoliciesData{
- {
- name: "Per API and per endpoint policies",
- policies: []string{"per_api_with_limit_set_from_policy", "per_api_with_endpoint_limits_on_d_and_e"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- endpointsConfig := user.Endpoints{
- {
- Path: "/get",
- Methods: user.EndpointMethods{
- {
- Name: "GET",
- Limit: user.RateLimit{
- Rate: -1,
- },
- },
- },
- },
- {
- Path: "/post",
- Methods: user.EndpointMethods{
- {
- Name: "POST",
- Limit: user.RateLimit{
- Rate: 300,
- Per: 10,
- },
- },
- },
- },
- }
- want := map[string]user.AccessDefinition{
- "e": {
- Limit: user.APILimit{
- QuotaMax: -1,
- RateLimit: user.RateLimit{
- Rate: 500,
- Per: 1,
- },
- },
- AllowanceScope: "per_api_with_endpoint_limits_on_d_and_e",
- Endpoints: endpointsConfig,
- },
- "d": {
- Limit: user.APILimit{
- QuotaMax: 5000,
- QuotaRenewalRate: 3600,
- RateLimit: user.RateLimit{
- Rate: 200,
- Per: 10,
- },
- },
- AllowanceScope: "d",
- Endpoints: endpointsConfig,
- },
- }
- assert.Equal(t, want, s.AccessRights)
- },
- reverseOrder: true,
- },
- {
- name: "Endpoint level limits overlapping",
- policies: []string{
- "per_api_with_limit_set_from_policy",
- "per_api_with_endpoint_limits_on_d_and_e",
- "per_endpoint_limits_different_on_api_d",
- },
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- apiEEndpoints := user.Endpoints{
- {
- Path: "/get",
- Methods: user.EndpointMethods{
- {
- Name: "GET",
- Limit: user.RateLimit{
- Rate: -1,
- },
- },
- },
- },
- {
- Path: "/post",
- Methods: user.EndpointMethods{
- {
- Name: "POST",
- Limit: user.RateLimit{
- Rate: 300,
- Per: 10,
- },
- },
- },
- },
- }
-
- assert.ElementsMatch(t, apiEEndpoints, s.AccessRights["e"].Endpoints)
-
- apiDEndpoints := user.Endpoints{
- {
- Path: "/get",
- Methods: user.EndpointMethods{
- {
- Name: "GET",
- Limit: user.RateLimit{
- Rate: -1,
- },
- },
- },
- },
- {
- Path: "/post",
- Methods: user.EndpointMethods{
- {
- Name: "POST",
- Limit: user.RateLimit{
- Rate: 400,
- Per: 11,
- },
- },
- },
- },
- {
- Path: "/anything",
- Methods: user.EndpointMethods{
- {
- Name: "PUT",
- Limit: user.RateLimit{
- Rate: 500,
- Per: 10,
- },
- },
- },
- },
- }
-
- assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
-
- apiELimits := user.APILimit{
- QuotaMax: -1,
- RateLimit: user.RateLimit{
- Rate: 500,
- Per: 1,
- },
- }
- assert.Equal(t, apiELimits, s.AccessRights["e"].Limit)
-
- apiDLimits := user.APILimit{
- QuotaMax: 5000,
- QuotaRenewalRate: 3600,
- RateLimit: user.RateLimit{
- Rate: 200,
- Per: 10,
- },
- }
- assert.Equal(t, apiDLimits, s.AccessRights["d"].Limit)
- },
- reverseOrder: true,
- },
- {
- name: "endpoint_rate_limits_on_acl_partition_only",
- policies: []string{"endpoint_rate_limits_on_acl_partition_only"},
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- assert.NotEmpty(t, s.AccessRights)
- assert.Empty(t, s.AccessRights["d"].Endpoints)
- },
- },
- {
- name: "endpoint_rate_limits_when_acl_and_quota_partitions_combined",
- policies: []string{
- "endpoint_rate_limits_on_acl_partition_only",
- "endpoint_rate_limits_on_quota_partition_only",
- },
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- assert.NotEmpty(t, s.AccessRights)
- assert.Empty(t, s.AccessRights["d"].Endpoints)
- },
- reverseOrder: true,
- },
- }
-
- tests = append(tests, endpointRLTCs...)
-
- combinedEndpointRLTCs := []testApplyPoliciesData{
- {
- name: "combine_non_partitioned_policies_with_endpoint_rate_limits_configured_on_api_d",
- policies: []string{
- "api_d_get_endpoint_rl_1_configure_on_non_partitioned_policy",
- "api_d_get_endpoint_rl_2_configure_on_non_partitioned_policy",
- },
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- assert.NotEmpty(t, s.AccessRights)
- apiDEndpoints := user.Endpoints{
- {
- Path: "/get",
- Methods: user.EndpointMethods{
- {
- Name: "GET",
- Limit: user.RateLimit{
- Rate: 20,
- Per: 60,
- },
- },
- },
- },
- }
-
- assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
- },
- reverseOrder: true,
- },
- {
- name: "combine_non_partitioned_policies_with_endpoint_rate_limits_no_bound_configured_on_api_d",
- policies: []string{
- "api_d_get_endpoint_rl_1_configure_on_non_partitioned_policy",
- "api_d_get_endpoint_rl_2_configure_on_non_partitioned_policy",
- "api_d_get_endpoint_rl_3_configure_on_non_partitioned_policy",
- },
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- assert.NotEmpty(t, s.AccessRights)
- apiDEndpoints := user.Endpoints{
- {
- Path: "/get",
- Methods: user.EndpointMethods{
- {
- Name: "GET",
- Limit: user.RateLimit{
- Rate: -1,
- },
- },
- },
- },
- }
-
- assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
- },
- reverseOrder: true,
- },
- {
- name: "combine_non_partitioned_policies_with_multiple_endpoint_rate_limits_configured_on_api_d",
- policies: []string{
- "api_d_get_endpoint_rl_1_configure_on_non_partitioned_policy",
- "api_d_get_endpoint_rl_2_configure_on_non_partitioned_policy",
- "api_d_get_endpoint_rl_3_configure_on_non_partitioned_policy",
- "api_d_post_endpoint_rl_1_configure_on_non_partitioned_policy",
- },
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- assert.NotEmpty(t, s.AccessRights)
- apiDEndpoints := user.Endpoints{
- {
- Path: "/get",
- Methods: user.EndpointMethods{
- {
- Name: "GET",
- Limit: user.RateLimit{
- Rate: -1,
- },
- },
- },
- },
- {
- Path: "/post",
- Methods: user.EndpointMethods{
- {
- Name: "POST",
- Limit: user.RateLimit{
- Rate: 20,
- Per: 60,
- },
- },
- },
- },
- }
-
- assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
- },
- reverseOrder: true,
- },
- {
- name: "combine_non_partitioned_policies_with_endpoint_rate_limits_configured_on_api_d_and_e",
- policies: []string{
- "api_d_get_endpoint_rl_1_configure_on_non_partitioned_policy",
- "api_d_get_endpoint_rl_2_configure_on_non_partitioned_policy",
- "api_d_get_endpoint_rl_3_configure_on_non_partitioned_policy",
- "api_d_post_endpoint_rl_1_configure_on_non_partitioned_policy",
- "api_e_get_endpoint_rl_1_configure_on_non_partitioned_policy",
- },
- sessMatch: func(t *testing.T, s *user.SessionState) {
- t.Helper()
- assert.NotEmpty(t, s.AccessRights)
- apiDEndpoints := user.Endpoints{
- {
- Path: "/get",
- Methods: user.EndpointMethods{
- {
- Name: "GET",
- Limit: user.RateLimit{
- Rate: -1,
- },
- },
- },
- },
- {
- Path: "/post",
- Methods: user.EndpointMethods{
- {
- Name: "POST",
- Limit: user.RateLimit{
- Rate: 20,
- Per: 60,
- },
- },
- },
- },
- }
-
- assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
-
- apiEEndpoints := user.Endpoints{
- {
- Path: "/get",
- Methods: user.EndpointMethods{
- {
- Name: "GET",
- Limit: user.RateLimit{
- Rate: 100,
- Per: 60,
- },
- },
- },
- },
- }
-
- assert.ElementsMatch(t, apiEEndpoints, s.AccessRights["e"].Endpoints)
- },
- reverseOrder: true,
- },
- }
-
- tests = combinedEndpointRLTCs
-
- return bmid, tests
-}
-
-func TestApplyPolicies(t *testing.T) {
- ts := StartTest(nil)
- defer ts.Close()
-
- bmid, tests := ts.testPrepareApplyPolicies(t)
-
- for _, tc := range tests {
- pols := [][]string{tc.policies}
- if tc.reverseOrder {
- var copyPols = make([]string, len(tc.policies))
- copy(copyPols, tc.policies)
- slices.Reverse(copyPols)
- pols = append(pols, copyPols)
- }
-
- for i, policies := range pols {
- name := tc.name
- if i == 1 {
- name = fmt.Sprintf("%s, reversed=%t", name, tc.reverseOrder)
- }
-
- t.Run(name, func(t *testing.T) {
- sess := tc.session
- if sess == nil {
- sess = &user.SessionState{}
- }
- sess.SetPolicies(policies...)
- if err := bmid.ApplyPolicies(sess); err != nil {
- assert.ErrorContains(t, err, tc.errMatch)
- return
- }
-
- if tc.sessMatch != nil {
- tc.sessMatch(t, sess)
- }
- })
- }
- }
-}
-
-func BenchmarkApplyPolicies(b *testing.B) {
- b.ReportAllocs()
- ts := StartTest(nil)
- defer ts.Close()
-
- bmid, tests := ts.testPrepareApplyPolicies(b)
-
- for i := 0; i < b.N; i++ {
- for _, tc := range tests {
- sess := &user.SessionState{}
- sess.SetPolicies(tc.policies...)
- bmid.ApplyPolicies(sess)
- }
- }
-}
-
func TestApplyPoliciesQuotaAPILimit(t *testing.T) {
ts := StartTest(nil)
defer ts.Close()
@@ -1812,73 +719,3 @@ func TestParsePoliciesFromRPC(t *testing.T) {
}
}
-
-type RPCDataLoaderMock struct {
- ShouldConnect bool
- Policies []user.Policy
- Apis []nestedApiDefinition
-}
-
-func (s *RPCDataLoaderMock) Connect() bool {
- return s.ShouldConnect
-}
-
-func (s *RPCDataLoaderMock) GetApiDefinitions(orgId string, tags []string) string {
- apiList, err := json.Marshal(s.Apis)
- if err != nil {
- return ""
- }
- return string(apiList)
-}
-func (s *RPCDataLoaderMock) GetPolicies(orgId string) string {
- policyList, err := json.Marshal(s.Policies)
- if err != nil {
- return ""
- }
- return string(policyList)
-}
-
-func Test_LoadPoliciesFromRPC(t *testing.T) {
- ts := StartTest(nil)
- defer ts.Close()
- objectID := persistentmodel.NewObjectID()
-
- t.Run("load policies from RPC - success", func(t *testing.T) {
- mockedStorage := &RPCDataLoaderMock{
- ShouldConnect: true,
- Policies: []user.Policy{
- {MID: objectID, ID: "", OrgID: "org1"},
- },
- }
-
- polMap, err := ts.Gw.LoadPoliciesFromRPC(mockedStorage, "org1", true)
-
- assert.NoError(t, err, "error loading policies from RPC:", err)
- assert.Equal(t, 1, len(polMap), "expected 0 policies to be loaded from RPC")
- })
-
- t.Run("load policies from RPC - success - then fail", func(t *testing.T) {
- mockedStorage := &RPCDataLoaderMock{
- ShouldConnect: true,
- Policies: []user.Policy{
- {MID: objectID, ID: "", OrgID: "org1"},
- },
- }
- // we load the Policies from RPC successfully - it should store the Policies in the backup
- polMap, err := ts.Gw.LoadPoliciesFromRPC(mockedStorage, "org1", true)
-
- assert.NoError(t, err, "error loading policies from RPC:", err)
- assert.Equal(t, 1, len(polMap), "expected 0 policies to be loaded from RPC")
-
- // we now simulate a failure to connect to RPC
- mockedStorage.ShouldConnect = false
- rpc.SetEmergencyMode(t, true)
- defer rpc.ResetEmergencyMode()
-
- // we now try to load the Policies again, and expect it to load the Policies from the backup
- polMap, err = ts.Gw.LoadPoliciesFromRPC(mockedStorage, "org1", true)
-
- assert.NoError(t, err, "error loading policies from RPC:", err)
- assert.Equal(t, 1, len(polMap), "expected 0 policies to be loaded from RPC")
- })
-}
diff --git a/gateway/res_handler_header_injector.go b/gateway/res_handler_header_injector.go
index 4587e941f56..9b27089c74f 100644
--- a/gateway/res_handler_header_injector.go
+++ b/gateway/res_handler_header_injector.go
@@ -62,7 +62,7 @@ func (h *HeaderInjector) HandleResponse(rw http.ResponseWriter, res *http.Respon
res.Header.Del(dKey)
}
for nKey, nVal := range hmeta.AddHeaders {
- setCustomHeader(res.Header, nKey, h.Gw.replaceTykVariables(req, nVal, false), ignoreCanonical)
+ setCustomHeader(res.Header, nKey, h.Gw.ReplaceTykVariables(req, nVal, false), ignoreCanonical)
}
}
@@ -75,7 +75,7 @@ func (h *HeaderInjector) HandleResponse(rw http.ResponseWriter, res *http.Respon
for key, val := range vInfo.GlobalResponseHeaders {
log.Debug("Adding: ", key)
- setCustomHeader(res.Header, key, h.Gw.replaceTykVariables(req, val, false), ignoreCanonical)
+ setCustomHeader(res.Header, key, h.Gw.ReplaceTykVariables(req, val, false), ignoreCanonical)
}
// Manage global response header options with response_processors
@@ -83,7 +83,7 @@ func (h *HeaderInjector) HandleResponse(rw http.ResponseWriter, res *http.Respon
res.Header.Del(n)
}
for header, v := range h.config.AddHeaders {
- setCustomHeader(res.Header, header, h.Gw.replaceTykVariables(req, v, false), ignoreCanonical)
+ setCustomHeader(res.Header, header, h.Gw.ReplaceTykVariables(req, v, false), ignoreCanonical)
}
}
diff --git a/gateway/reverse_proxy.go b/gateway/reverse_proxy.go
index a7755c63f5a..6d2731b1004 100644
--- a/gateway/reverse_proxy.go
+++ b/gateway/reverse_proxy.go
@@ -515,7 +515,7 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) Prox
startTime := time.Now()
p.logger.WithField("ts", startTime.UnixNano()).Debug("Started")
- resp := p.WrappedServeHTTP(rw, req, true)
+ resp := p.WrappedServeHTTP(rw, req, recordDetail(req, p.TykAPISpec))
finishTime := time.Since(startTime)
p.logger.WithField("ns", finishTime.Nanoseconds()).Debug("Finished")
@@ -1219,6 +1219,8 @@ func (p *ReverseProxy) WrappedServeHTTP(rw http.ResponseWriter, req *http.Reques
}
+ p.addAuthInfo(outreq, req)
+
// do request round trip
var (
res *http.Response
@@ -1845,3 +1847,13 @@ func (p *ReverseProxy) IsUpgrade(req *http.Request) (string, bool) {
return httputil.IsUpgrade(req)
}
+
+func (p *ReverseProxy) addAuthInfo(outReq, req *http.Request) {
+ if !p.TykAPISpec.UpstreamAuth.IsEnabled() {
+ return
+ }
+
+ if authProvider := httputil.GetUpstreamAuth(req); authProvider != nil {
+ authProvider.Fill(outReq)
+ }
+}
diff --git a/gateway/reverse_proxy_test.go b/gateway/reverse_proxy_test.go
index bfab9ab339c..65a6d0b48c1 100644
--- a/gateway/reverse_proxy_test.go
+++ b/gateway/reverse_proxy_test.go
@@ -14,6 +14,7 @@ import (
"net/http/httptest"
"net/url"
"reflect"
+ "strconv"
"strings"
"sync"
"testing"
@@ -2023,3 +2024,40 @@ func TestQuotaResponseHeaders(t *testing.T) {
})
}
+
+func BenchmarkLargeResponsePayload(b *testing.B) {
+ ts := StartTest(func(_ *config.Config) {})
+ b.Cleanup(ts.Close)
+
+ // Create a 500 MB payload of zeros
+ payloadSize := 500 * 1024 * 1024 // 500 MB in bytes
+ largePayload := bytes.Repeat([]byte("x"), payloadSize)
+
+ largePayloadHandler := func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", strconv.Itoa(payloadSize))
+ w.WriteHeader(http.StatusOK)
+ _, err := w.Write(largePayload)
+ assert.NoError(b, err)
+ }
+
+ // Create a test server with the largePayloadHandler
+ testServer := httptest.NewServer(http.HandlerFunc(largePayloadHandler))
+ b.Cleanup(testServer.Close)
+
+ ts.Gw.BuildAndLoadAPI(func(spec *APISpec) {
+ spec.UseKeylessAccess = true
+ spec.Proxy.ListenPath = "/"
+ spec.Proxy.TargetURL = testServer.URL
+ })
+
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ ts.Run(b, test.TestCase{
+ Method: http.MethodGet,
+ Path: "/",
+ Code: http.StatusOK,
+ })
+ }
+}
diff --git a/gateway/rpc_backup_handlers.go b/gateway/rpc_backup_handlers.go
index 2a59a92d74d..3d682c81407 100644
--- a/gateway/rpc_backup_handlers.go
+++ b/gateway/rpc_backup_handlers.go
@@ -113,6 +113,10 @@ func (gw *Gateway) LoadPoliciesFromRPCBackup() (map[string]user.Policy, error) {
}
}
+func getPaddedSecret(secret string) []byte {
+ return []byte(rightPad2Len(secret, "=", 32))
+}
+
func (gw *Gateway) saveRPCPoliciesBackup(list string) error {
if !json.Valid([]byte(list)) {
return errors.New("--> RPC Backup save failure: wrong format, skipping.")
@@ -132,8 +136,7 @@ func (gw *Gateway) saveRPCPoliciesBackup(list string) error {
return errors.New("--> RPC Backup save failed: redis connection failed")
}
- secret := rightPad2Len(gw.GetConfig().Secret, "=", 32)
- cryptoText := encrypt([]byte(secret), list)
+ cryptoText := encrypt(getPaddedSecret(gw.GetConfig().Secret), list)
err := store.SetKey(BackupPolicyKeyBase+tagList, cryptoText, -1)
if err != nil {
return errors.New("Failed to store node backup: " + err.Error())
diff --git a/gateway/rpc_storage_handler.go b/gateway/rpc_storage_handler.go
index 12027bb373c..ba01c439fc6 100644
--- a/gateway/rpc_storage_handler.go
+++ b/gateway/rpc_storage_handler.go
@@ -1,3 +1,4 @@
+//nolint:revive
package gateway
import (
@@ -12,7 +13,6 @@ import (
"github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/rpc"
- "github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/storage"
"github.com/sirupsen/logrus"
@@ -23,13 +23,13 @@ var (
"Login": func(clientAddr, userKey string) bool {
return false
},
- "LoginWithGroup": func(clientAddr string, groupData *apidef.GroupLoginRequest) bool {
+ "LoginWithGroup": func(clientAddr string, groupData *model.GroupLoginRequest) bool {
return false
},
"GetKey": func(keyName string) (string, error) {
return "", nil
},
- "SetKey": func(ibd *apidef.InboundData) error {
+ "SetKey": func(ibd *model.InboundData) error {
return nil
},
"GetExp": func(keyName string) (int64, error) {
@@ -44,10 +44,10 @@ var (
"DeleteRawKey": func(keyName string) (bool, error) {
return true, nil
},
- "GetKeysAndValues": func(searchString string) (*apidef.KeysValuesPair, error) {
+ "GetKeysAndValues": func(searchString string) (*model.KeysValuesPair, error) {
return nil, nil
},
- "GetKeysAndValuesWithFilter": func(searchString string) (*apidef.KeysValuesPair, error) {
+ "GetKeysAndValuesWithFilter": func(searchString string) (*model.KeysValuesPair, error) {
return nil, nil
},
"DeleteKeys": func(keys []string) (bool, error) {
@@ -59,16 +59,16 @@ var (
"Decrement": func(keyName string) error {
return nil
},
- "IncrememntWithExpire": func(ibd *apidef.InboundData) (int64, error) {
+ "IncrememntWithExpire": func(ibd *model.InboundData) (int64, error) {
return 0, nil
},
- "AppendToSet": func(ibd *apidef.InboundData) error {
+ "AppendToSet": func(ibd *model.InboundData) error {
return nil
},
- "SetRollingWindow": func(ibd *apidef.InboundData) (int, error) {
+ "SetRollingWindow": func(ibd *model.InboundData) (int, error) {
return 0, nil
},
- "GetApiDefinitions": func(dr *apidef.DefRequest) (string, error) {
+ "GetApiDefinitions": func(dr *model.DefRequest) (string, error) {
return "", nil
},
"GetPolicies": func(orgId string) (string, error) {
@@ -83,13 +83,13 @@ var (
"GetKeySpaceUpdate": func(clientAddr, orgId string) ([]string, error) {
return nil, nil
},
- "GetGroupKeySpaceUpdate": func(clientAddr string, groupData *apidef.GroupKeySpaceRequest) ([]string, error) {
+ "GetGroupKeySpaceUpdate": func(clientAddr string, groupData *model.GroupKeySpaceRequest) ([]string, error) {
return nil, nil
},
"Ping": func() bool {
return false
},
- "Disconnect": func(clientAddr string, groupData *apidef.GroupLoginRequest) error {
+ "Disconnect": func(clientAddr string, groupData *model.GroupLoginRequest) error {
return nil
},
}
@@ -163,7 +163,7 @@ func (r *RPCStorageHandler) buildNodeInfo() []byte {
}
r.Gw.getHostDetails(r.Gw.GetConfig().PIDFileLocation)
- node := apidef.NodeData{
+ node := model.NodeData{
NodeID: r.Gw.GetNodeID(),
GroupID: config.SlaveOptions.GroupID,
APIKey: config.SlaveOptions.APIKey,
@@ -172,7 +172,7 @@ func (r *RPCStorageHandler) buildNodeInfo() []byte {
NodeIsSegmented: config.DBAppConfOptions.NodeIsSegmented,
Tags: config.DBAppConfOptions.Tags,
Health: r.Gw.getHealthCheckInfo(),
- Stats: apidef.GWStats{
+ Stats: model.GWStats{
APIsCount: r.Gw.apisByIDLen(),
PoliciesCount: r.Gw.PolicyCount(),
},
@@ -193,7 +193,7 @@ func (r *RPCStorageHandler) buildNodeInfo() []byte {
}
func (r *RPCStorageHandler) Disconnect() error {
- request := apidef.GroupLoginRequest{
+ request := model.GroupLoginRequest{
UserKey: r.Gw.GetConfig().SlaveOptions.APIKey,
GroupID: r.Gw.GetConfig().SlaveOptions.GroupID,
Node: r.buildNodeInfo(),
@@ -205,7 +205,7 @@ func (r *RPCStorageHandler) Disconnect() error {
func (r *RPCStorageHandler) getGroupLoginCallback(synchroniserEnabled bool) func(userKey string, groupID string) interface{} {
groupLoginCallbackFn := func(userKey string, groupID string) interface{} {
- return apidef.GroupLoginRequest{
+ return model.GroupLoginRequest{
UserKey: userKey,
GroupID: groupID,
Node: r.buildNodeInfo(),
@@ -347,7 +347,7 @@ func (r *RPCStorageHandler) SetExp(keyName string, timeout int64) error {
// SetKey will create (or update) a key value in the store
func (r *RPCStorageHandler) SetKey(keyName, session string, timeout int64) error {
start := time.Now() // get current time
- ibd := apidef.InboundData{
+ ibd := model.InboundData{
KeyName: r.fixKey(keyName),
SessionState: session,
Timeout: timeout,
@@ -410,7 +410,7 @@ func (r *RPCStorageHandler) Decrement(keyName string) {
// IncrementWithExpire will increment a key in redis
func (r *RPCStorageHandler) IncrememntWithExpire(keyName string, expire int64) int64 {
- ibd := apidef.InboundData{
+ ibd := model.InboundData{
KeyName: keyName,
Expire: expire,
}
@@ -475,8 +475,8 @@ func (r *RPCStorageHandler) GetKeysAndValuesWithFilter(filter string) map[string
returnValues := make(map[string]string)
- for i, v := range kvPair.(*apidef.KeysValuesPair).Keys {
- returnValues[r.cleanKey(v)] = kvPair.(*apidef.KeysValuesPair).Values[i]
+ for i, v := range kvPair.(*model.KeysValuesPair).Keys {
+ returnValues[r.cleanKey(v)] = kvPair.(*model.KeysValuesPair).Values[i]
}
return returnValues
@@ -501,8 +501,8 @@ func (r *RPCStorageHandler) GetKeysAndValues() map[string]string {
}
returnValues := make(map[string]string)
- for i, v := range kvPair.(*apidef.KeysValuesPair).Keys {
- returnValues[r.cleanKey(v)] = kvPair.(*apidef.KeysValuesPair).Values[i]
+ for i, v := range kvPair.(*model.KeysValuesPair).Keys {
+ returnValues[r.cleanKey(v)] = kvPair.(*model.KeysValuesPair).Values[i]
}
return returnValues
@@ -635,7 +635,7 @@ func (r *RPCStorageHandler) GetAndDeleteSet(keyName string) []interface{} {
}
func (r *RPCStorageHandler) AppendToSet(keyName, value string) {
- ibd := apidef.InboundData{
+ ibd := model.InboundData{
KeyName: keyName,
Value: value,
}
@@ -661,7 +661,7 @@ func (r *RPCStorageHandler) AppendToSet(keyName, value string) {
// SetScrollingWindow is used in the rate limiter to handle rate limits fairly.
func (r *RPCStorageHandler) SetRollingWindow(keyName string, per int64, val string, pipeline bool) (int, []interface{}) {
start := time.Now() // get current time
- ibd := apidef.InboundData{
+ ibd := model.InboundData{
KeyName: keyName,
Per: per,
Expire: -1,
@@ -725,7 +725,7 @@ func (r RPCStorageHandler) IsRetriableError(err error) bool {
// GetAPIDefinitions will pull API definitions from the RPC server
func (r *RPCStorageHandler) GetApiDefinitions(orgId string, tags []string) string {
- dr := apidef.DefRequest{
+ dr := model.DefRequest{
OrgId: orgId,
Tags: tags,
LoadOAS: true,
@@ -813,6 +813,8 @@ func (r *RPCStorageHandler) CheckForReload(orgId string) bool {
r.CheckForReload(orgId)
}
} else if !strings.Contains(err.Error(), "Cannot obtain response during") {
+ forcer := rpc.NewSyncForcer(r.Gw.StorageConnectionHandler, r.buildNodeInfo)
+ forcer.SetFirstConnection(true)
log.Warning("[RPC STORE] RPC Reload Checker encountered unexpected error: ", err)
}
@@ -889,7 +891,7 @@ func (r *RPCStorageHandler) CheckForKeyspaceChanges(orgId string) {
reqData["orgId"] = orgId
} else {
funcName = "GetGroupKeySpaceUpdate"
- req = apidef.GroupKeySpaceRequest{
+ req = model.GroupKeySpaceRequest{
OrgID: orgId,
GroupID: groupID,
}
diff --git a/gateway/rpc_storage_handler_test.go b/gateway/rpc_storage_handler_test.go
index c02c6269bc1..b063d9c08a6 100644
--- a/gateway/rpc_storage_handler_test.go
+++ b/gateway/rpc_storage_handler_test.go
@@ -11,7 +11,6 @@ import (
"github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/rpc"
- "github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/config"
"github.com/lonelycode/osin"
@@ -300,21 +299,21 @@ func TestGetGroupLoginCallback(t *testing.T) {
syncEnabled bool
givenKey string
givenGroup string
- expectedCallbackResponse apidef.GroupLoginRequest
+ expectedCallbackResponse model.GroupLoginRequest
}{
{
testName: "sync disabled",
syncEnabled: false,
givenKey: "key",
givenGroup: "group",
- expectedCallbackResponse: apidef.GroupLoginRequest{UserKey: "key", GroupID: "group"},
+ expectedCallbackResponse: model.GroupLoginRequest{UserKey: "key", GroupID: "group"},
},
{
testName: "sync enabled",
syncEnabled: true,
givenKey: "key",
givenGroup: "group",
- expectedCallbackResponse: apidef.GroupLoginRequest{UserKey: "key", GroupID: "group", ForceSync: true},
+ expectedCallbackResponse: model.GroupLoginRequest{UserKey: "key", GroupID: "group", ForceSync: true},
},
}
@@ -332,7 +331,7 @@ func TestGetGroupLoginCallback(t *testing.T) {
Gw: ts.Gw,
}
- expectedNodeInfo := apidef.NodeData{
+ expectedNodeInfo := model.NodeData{
NodeID: ts.Gw.GetNodeID(),
GroupID: "",
APIKey: "",
@@ -340,7 +339,7 @@ func TestGetGroupLoginCallback(t *testing.T) {
Tags: nil,
NodeVersion: VERSION,
Health: ts.Gw.getHealthCheckInfo(),
- Stats: apidef.GWStats{
+ Stats: model.GWStats{
APIsCount: 0,
PoliciesCount: 0,
},
@@ -357,7 +356,7 @@ func TestGetGroupLoginCallback(t *testing.T) {
tc.expectedCallbackResponse.Node = nodeData
fn := rpcListener.getGroupLoginCallback(tc.syncEnabled)
- groupLogin, ok := fn(tc.givenKey, tc.givenGroup).(apidef.GroupLoginRequest)
+ groupLogin, ok := fn(tc.givenKey, tc.givenGroup).(model.GroupLoginRequest)
assert.True(t, ok)
assert.Equal(t, tc.expectedCallbackResponse, groupLogin)
})
@@ -369,7 +368,7 @@ func TestRPCStorageHandler_BuildNodeInfo(t *testing.T) {
tcs := []struct {
testName string
givenTs func() *Test
- expectedNodeInfo apidef.NodeData
+ expectedNodeInfo model.NodeData
}{
{
testName: "base",
@@ -378,13 +377,13 @@ func TestRPCStorageHandler_BuildNodeInfo(t *testing.T) {
})
return ts
},
- expectedNodeInfo: apidef.NodeData{
+ expectedNodeInfo: model.NodeData{
GroupID: "",
APIKey: "",
TTL: 10,
Tags: nil,
NodeVersion: VERSION,
- Stats: apidef.GWStats{
+ Stats: model.GWStats{
APIsCount: 0,
PoliciesCount: 0,
},
@@ -402,13 +401,13 @@ func TestRPCStorageHandler_BuildNodeInfo(t *testing.T) {
return ts
},
- expectedNodeInfo: apidef.NodeData{
+ expectedNodeInfo: model.NodeData{
GroupID: "group",
APIKey: "apikey-test",
TTL: 1,
Tags: []string{"tag1"},
NodeVersion: VERSION,
- Stats: apidef.GWStats{
+ Stats: model.GWStats{
APIsCount: 0,
PoliciesCount: 0,
},
@@ -439,12 +438,12 @@ func TestRPCStorageHandler_BuildNodeInfo(t *testing.T) {
})
return ts
},
- expectedNodeInfo: apidef.NodeData{
+ expectedNodeInfo: model.NodeData{
GroupID: "group",
TTL: 1,
Tags: []string{"tag1"},
NodeVersion: VERSION,
- Stats: apidef.GWStats{
+ Stats: model.GWStats{
APIsCount: 1,
PoliciesCount: 1,
},
@@ -462,13 +461,13 @@ func TestRPCStorageHandler_BuildNodeInfo(t *testing.T) {
ts.Gw.SetNodeID("test-node-id")
return ts
},
- expectedNodeInfo: apidef.NodeData{
+ expectedNodeInfo: model.NodeData{
NodeID: "test-node-id",
GroupID: "group",
TTL: 1,
Tags: []string{"tag1", "tag2"},
NodeVersion: VERSION,
- Stats: apidef.GWStats{
+ Stats: model.GWStats{
APIsCount: 0,
PoliciesCount: 0,
},
@@ -487,14 +486,14 @@ func TestRPCStorageHandler_BuildNodeInfo(t *testing.T) {
ts.Gw.SetNodeID("test-node-id")
return ts
},
- expectedNodeInfo: apidef.NodeData{
+ expectedNodeInfo: model.NodeData{
NodeID: "test-node-id",
GroupID: "group",
TTL: 1,
Tags: []string{"tag1", "tag2"},
NodeIsSegmented: true,
NodeVersion: VERSION,
- Stats: apidef.GWStats{
+ Stats: model.GWStats{
APIsCount: 0,
PoliciesCount: 0,
},
diff --git a/gateway/rpc_test.go b/gateway/rpc_test.go
index c768d0c25c1..ff5b7959507 100644
--- a/gateway/rpc_test.go
+++ b/gateway/rpc_test.go
@@ -1,3 +1,4 @@
+//nolint:revive
package gateway
import (
@@ -7,8 +8,8 @@ import (
"time"
"github.com/TykTechnologies/gorpc"
- "github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/config"
+ "github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/rpc"
"github.com/TykTechnologies/tyk/test"
)
@@ -132,7 +133,7 @@ func TestSyncAPISpecsRPCFailure_CheckGlobals(t *testing.T) {
}
}()
dispatcher := gorpc.NewDispatcher()
- dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *apidef.DefRequest) (string, error) {
+ dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *model.DefRequest) (string, error) {
// the first time called is when we start the slave gateway
return a()
})
@@ -170,7 +171,7 @@ func TestSyncAPISpecsRPCSuccess(t *testing.T) {
rpc.UseSyncLoginRPC = true
var GetKeyCounter int
dispatcher := gorpc.NewDispatcher()
- dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *apidef.DefRequest) (string, error) {
+ dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *model.DefRequest) (string, error) {
return jsonMarshalString(BuildAPI(func(spec *APISpec) {
spec.UseKeylessAccess = false
})), nil
@@ -260,7 +261,7 @@ func TestSyncAPISpecsRPCSuccess(t *testing.T) {
rpc.ResetEmergencyMode()
dispatcher := gorpc.NewDispatcher()
- dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *apidef.DefRequest) (string, error) {
+ dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *model.DefRequest) (string, error) {
return jsonMarshalString(BuildAPI(
func(spec *APISpec) { spec.UseKeylessAccess = false },
func(spec *APISpec) { spec.UseKeylessAccess = false },
@@ -348,7 +349,7 @@ func TestSyncAPISpecsRPC_redis_failure(t *testing.T) {
test.Flaky(t) // TT-9117
dispatcher := gorpc.NewDispatcher()
- dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *apidef.DefRequest) (string, error) {
+ dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *model.DefRequest) (string, error) {
return jsonMarshalString(BuildAPI(func(spec *APISpec) {
spec.UseKeylessAccess = false
})), nil
diff --git a/gateway/server.go b/gateway/server.go
index aa20ee19eea..bc7b40a1ec4 100644
--- a/gateway/server.go
+++ b/gateway/server.go
@@ -125,6 +125,8 @@ type Gateway struct {
HostCheckTicker chan struct{}
HostCheckerClient *http.Client
TracerProvider otel.TracerProvider
+ // UpstreamOAuthCache is used to cache upstream OAuth tokens
+ UpstreamOAuthCache UpstreamOAuthCache
keyGen DefaultKeyGenerator
diff --git a/gateway/testutil.go b/gateway/testutil.go
index 5a3b1d4cd38..bfe93381a3a 100644
--- a/gateway/testutil.go
+++ b/gateway/testutil.go
@@ -37,6 +37,7 @@ import (
"github.com/gorilla/websocket"
"github.com/TykTechnologies/tyk/internal/httputil"
+ "github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/internal/otel"
"github.com/TykTechnologies/tyk/internal/uuid"
@@ -971,7 +972,7 @@ func TestReq(t testing.TB, method, urlStr string, body interface{}) *http.Reques
func (gw *Gateway) CreateDefinitionFromString(defStr string) *APISpec {
loader := APIDefinitionLoader{Gw: gw}
def := loader.ParseDefinition(strings.NewReader(defStr))
- spec, _ := loader.MakeSpec(&nestedApiDefinition{APIDefinition: &def}, nil)
+ spec, _ := loader.MakeSpec(&model.MergedAPI{APIDefinition: &def}, nil)
return spec
}
diff --git a/gateway/tracing.go b/gateway/tracing.go
index c02146443ca..e7b75520b15 100644
--- a/gateway/tracing.go
+++ b/gateway/tracing.go
@@ -12,6 +12,7 @@ import (
"github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/internal/httputil"
+ "github.com/TykTechnologies/tyk/internal/model"
)
type traceHttpRequest struct {
@@ -117,7 +118,7 @@ func (gw *Gateway) traceHandler(w http.ResponseWriter, r *http.Request) {
loader := &APIDefinitionLoader{Gw: gw}
- spec, err := loader.MakeSpec(&nestedApiDefinition{APIDefinition: traceReq.Spec}, logrus.NewEntry(logger))
+ spec, err := loader.MakeSpec(&model.MergedAPI{APIDefinition: traceReq.Spec}, logrus.NewEntry(logger))
if err != nil {
doJSONWrite(w, http.StatusBadRequest, traceResponse{Message: "error", Logs: logStorage.String()})
return
diff --git a/go.mod b/go.mod
index df1c473074e..d4758961558 100644
--- a/go.mod
+++ b/go.mod
@@ -10,9 +10,9 @@ require (
github.com/TykTechnologies/drl v0.0.0-20231218155806-88e4363884a2
github.com/TykTechnologies/goautosocket v0.0.0-20190430121222-97bfa5e7e481
github.com/TykTechnologies/gojsonschema v0.0.0-20170222154038-dcb3e4bb7990
- github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9
+ github.com/TykTechnologies/gorpc v0.0.0-20241016124253-606484472fbb
github.com/TykTechnologies/goverify v0.0.0-20220808203004-1486f89e7708
- github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20240705065952-ae6008677a48
+ github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20240926103032-6eca9f4b5e30
github.com/TykTechnologies/graphql-translator v0.0.0-20240319092712-4ba87e4c06ff
github.com/TykTechnologies/leakybucket v0.0.0-20170301023702-71692c943e3c
github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632
@@ -100,6 +100,7 @@ require (
go.opentelemetry.io/otel v1.24.0
go.opentelemetry.io/otel/trace v1.24.0
go.uber.org/mock v0.4.0
+ golang.org/x/oauth2 v0.21.0
gopkg.in/yaml.v2 v2.4.0
)
@@ -208,6 +209,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/clbanning/mxj/v2 v2.7.0 // indirect
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
+ github.com/coder/websocket v1.8.12 // indirect
github.com/colinmarc/hdfs v1.1.3 // indirect
github.com/containerd/containerd v1.7.18 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -471,7 +473,6 @@ require (
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/mod v0.18.0 // indirect
- golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sys v0.25.0 // indirect
golang.org/x/term v0.24.0 // indirect
golang.org/x/text v0.18.0 // indirect
@@ -505,9 +506,3 @@ require (
modernc.org/token v1.1.0 // indirect
nhooyr.io/websocket v1.8.10 // indirect
)
-
-//replace github.com/TykTechnologies/graphql-go-tools => ../graphql-go-tools
-
-//replace github.com/TykTechnologies/graphql-go-tools/v2 => ../graphql-go-tools/v2
-//
-//replace github.com/warpstreamlabs/bento => ../benthos
diff --git a/go.sum b/go.sum
index 1395f23998a..0e38dbbd3b4 100644
--- a/go.sum
+++ b/go.sum
@@ -187,12 +187,12 @@ github.com/TykTechnologies/goautosocket v0.0.0-20190430121222-97bfa5e7e481 h1:fP
github.com/TykTechnologies/goautosocket v0.0.0-20190430121222-97bfa5e7e481/go.mod h1:CtF8OunV123VfKa8Z9kKcIPHgcd67hSAwFMLlS7FvS4=
github.com/TykTechnologies/gojsonschema v0.0.0-20170222154038-dcb3e4bb7990 h1:CJRTgg13M3vJG9S7k7kpnvDRMGMywm5OsN6eUE8VwJE=
github.com/TykTechnologies/gojsonschema v0.0.0-20170222154038-dcb3e4bb7990/go.mod h1:SQT0NBrY4/pMikBgwFIrWCjcHBxg015Y8is0kAnMtug=
-github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 h1:fbxHiuw/244CQ4TEirzgL/CIMXDUx2szZn8cuuMlCy0=
-github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9/go.mod h1:v6v7Mlj08+EmEcXOfpuTxGt2qYU9yhqqtv4QF9Wf50E=
+github.com/TykTechnologies/gorpc v0.0.0-20241016124253-606484472fbb h1:4ZQmRdKvOgE/KSlwT7Ze/imlWaC/z9kynUL4lADCz3Y=
+github.com/TykTechnologies/gorpc v0.0.0-20241016124253-606484472fbb/go.mod h1:v6v7Mlj08+EmEcXOfpuTxGt2qYU9yhqqtv4QF9Wf50E=
github.com/TykTechnologies/goverify v0.0.0-20220808203004-1486f89e7708 h1:cmXjlMzcexhc/Cg+QB/c2CPUVs1ux9xn6162qaf/LC4=
github.com/TykTechnologies/goverify v0.0.0-20220808203004-1486f89e7708/go.mod h1:mkS8jKcz8otdfEXhJs1QQ/DKoIY1NFFsRPKS0RwQENI=
-github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20240705065952-ae6008677a48 h1:dtK8xAF/inLBkjKRsHG+zwW5czgwRKidiTfToDxDvcQ=
-github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20240705065952-ae6008677a48/go.mod h1:DCYkq1ZoUZ/pGESE+j3C7wuyDPSt1Mlu0jVgIoDABJY=
+github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20240926103032-6eca9f4b5e30 h1:SxmD3nMD7AyhJGcOBG4QHwslMFvwLM0e3jH1enmWxZk=
+github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20240926103032-6eca9f4b5e30/go.mod h1:NE9MYGcB5eWKW0OefGOjZZEPv6S20AQ0OTsO+npGR8I=
github.com/TykTechnologies/graphql-go-tools/v2 v2.0.0-20240509085643-e95cdc317e1d h1:ntmDECMD7475TK/cxshJ1b8KJvUM1wppohd0qZjqUqI=
github.com/TykTechnologies/graphql-go-tools/v2 v2.0.0-20240509085643-e95cdc317e1d/go.mod h1:eOt2cxB9sZ80mz4q1UKzzk9CE4QZaS6jP20X2LwPwy8=
github.com/TykTechnologies/graphql-translator v0.0.0-20240319092712-4ba87e4c06ff h1:kFA240S1Y4snsEL4Ng4Ch1Ib2N04A15Y+9KYumK6uCg=
@@ -420,6 +420,8 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg=
github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc=
+github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
+github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/colinmarc/hdfs v1.1.3 h1:662salalXLFmp+ctD+x0aG+xOg62lnVnOJHksXYpFBw=
github.com/colinmarc/hdfs v1.1.3/go.mod h1:0DumPviB681UcSuJErAbDIOx6SIaJWj463TymfZG02I=
github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c=
diff --git a/internal/httputil/context.go b/internal/httputil/context.go
new file mode 100644
index 00000000000..2ac0b07ab2c
--- /dev/null
+++ b/internal/httputil/context.go
@@ -0,0 +1,43 @@
+package httputil
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/TykTechnologies/tyk/internal/model"
+)
+
+// ContextKey is the key type to be used for context interactions.
+type ContextKey string
+
+const (
+ upstreamAuth = ContextKey("upstream-auth")
+)
+
+// SetContext updates the context of a request.
+func SetContext(r *http.Request, ctx context.Context) {
+ r2 := r.WithContext(ctx)
+ *r = *r2
+}
+
+// SetUpstreamAuth stores the upstream authentication provider in the request context.
+func SetUpstreamAuth(r *http.Request, auth model.UpstreamAuthProvider) {
+ ctx := r.Context()
+ ctx = context.WithValue(ctx, upstreamAuth, auth)
+ SetContext(r, ctx)
+}
+
+// GetUpstreamAuth retrieves the upstream authentication provider from the request context, or nil if none is set.
+func GetUpstreamAuth(r *http.Request) model.UpstreamAuthProvider {
+ auth := r.Context().Value(upstreamAuth)
+ if auth == nil {
+ return nil
+ }
+
+ provider, ok := auth.(model.UpstreamAuthProvider)
+ if !ok {
+ return nil
+ }
+
+ return provider
+}
diff --git a/internal/httputil/context_test.go b/internal/httputil/context_test.go
new file mode 100644
index 00000000000..2fffe60b167
--- /dev/null
+++ b/internal/httputil/context_test.go
@@ -0,0 +1,94 @@
+package httputil_test
+
+import (
+ "context"
+ "net/http"
+ "testing"
+
+ "github.com/TykTechnologies/tyk/internal/httputil"
+
+ "github.com/TykTechnologies/tyk/internal/model"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func createReq(tb testing.TB) *http.Request {
+ tb.Helper()
+ req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://example.com", nil)
+ assert.NoError(tb, err)
+ return req
+}
+
+func TestUpstreamAuth(t *testing.T) {
+ t.Run("valid auth provider", func(t *testing.T) {
+ mockAuthProvider := &model.MockUpstreamAuthProvider{}
+ req := createReq(t)
+
+ httputil.SetUpstreamAuth(req, mockAuthProvider)
+
+ // Retrieve the auth provider from the request's context to verify it was set
+ retrievedAuth := httputil.GetUpstreamAuth(req)
+ assert.NotNil(t, retrievedAuth)
+ assert.Equal(t, mockAuthProvider, retrievedAuth)
+ })
+
+ t.Run("no auth provider", func(t *testing.T) {
+ req := createReq(t)
+
+ retrievedAuth := httputil.GetUpstreamAuth(req)
+ assert.Nil(t, retrievedAuth)
+ })
+
+ t.Run("invalid auth provider", func(t *testing.T) {
+ req := createReq(t)
+
+ // Set a context with a value that is not of type proxy.UpstreamAuthProvider
+ ctx := context.WithValue(req.Context(), httputil.ContextKey("upstream-auth"), "invalid-type")
+ httputil.SetContext(req, ctx)
+
+ retrievedAuth := httputil.GetUpstreamAuth(req)
+ assert.Nil(t, retrievedAuth)
+ })
+}
+
+func TestSetContext(t *testing.T) {
+ t.Run("add key", func(t *testing.T) {
+ req := createReq(t)
+
+ // Create a new context with a key-value pair
+ ctx := context.WithValue(context.Background(), httputil.ContextKey("key"), "value")
+
+ // Call SetContext to update the request's context
+ httputil.SetContext(req, ctx)
+
+ // Verify that the request's context has been updated
+ retrievedValue := req.Context().Value(httputil.ContextKey("key"))
+ assert.Equal(t, "value", retrievedValue)
+ })
+
+ t.Run("override key", func(t *testing.T) {
+
+ req := createReq(t)
+ existingCtx := context.WithValue(context.Background(), httputil.ContextKey("existingKey"), "existingValue")
+ req = req.WithContext(existingCtx)
+
+ // Create a new context to override the existing context
+ newCtx := context.WithValue(context.Background(), httputil.ContextKey("newKey"), "newValue")
+
+ // Call SetContext to update the request's context with the new context
+ httputil.SetContext(req, newCtx)
+
+ assert.Nil(t, req.Context().Value(httputil.ContextKey("existingKey")))
+ assert.Equal(t, "newValue", req.Context().Value(httputil.ContextKey("newKey")))
+ })
+
+ t.Run("empty context", func(t *testing.T) {
+ req := createReq(t)
+
+ emptyCtx := context.Background()
+
+ httputil.SetContext(req, emptyCtx)
+
+ assert.Equal(t, emptyCtx, req.Context())
+ })
+}
diff --git a/internal/httputil/headers.go b/internal/httputil/headers.go
new file mode 100644
index 00000000000..4aed5f6749e
--- /dev/null
+++ b/internal/httputil/headers.go
@@ -0,0 +1,26 @@
+package httputil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+)
+
+// CORSHeaders is a list of CORS headers.
+var CORSHeaders = []string{
+ "Access-Control-Allow-Origin",
+ "Access-Control-Expose-Headers",
+ "Access-Control-Max-Age",
+ "Access-Control-Allow-Credentials",
+ "Access-Control-Allow-Methods",
+ "Access-Control-Allow-Headers",
+}
+
+// AuthHeader will take username and password and return
+// "Basic " + base64 encoded `username:password` for use
+// in an Authorization header.
+func AuthHeader(username, password string) string {
+ toEncode := strings.Join([]string{username, password}, ":")
+ encodedPass := base64.StdEncoding.EncodeToString([]byte(toEncode))
+ return fmt.Sprintf("Basic %s", encodedPass)
+}
diff --git a/internal/middleware/const.go b/internal/middleware/const.go
new file mode 100644
index 00000000000..0b26e05ad56
--- /dev/null
+++ b/internal/middleware/const.go
@@ -0,0 +1,5 @@
+package middleware
+
+// StatusRespond should be returned by a middleware to stop processing
+// further middleware from the middleware chain.
+const StatusRespond = 666
diff --git a/internal/model/host_details.go b/internal/model/host_details.go
deleted file mode 100644
index 8b87f55cdde..00000000000
--- a/internal/model/host_details.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package model
-
-// HostDetails contains information about a host machine,
-// including its hostname, process ID (PID), and IP address.
-type HostDetails struct {
- Hostname string
- PID int
- Address string
-}
diff --git a/internal/model/interfaces.go b/internal/model/interfaces.go
new file mode 100644
index 00000000000..db761b30255
--- /dev/null
+++ b/internal/model/interfaces.go
@@ -0,0 +1,66 @@
+package model
+
+import (
+ "net/http"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/TykTechnologies/tyk/config"
+ "github.com/TykTechnologies/tyk/user"
+)
+
+// Gateway is a collection of well defined gateway interfaces. It should only
+// be implemented in full by gateway.Gateway, and is used for a build-time
+// type assertion. Do not use the symbol elsewhere, use the smaller interfaces.
+type Gateway interface {
+ ConfigProvider
+ PolicyProvider
+
+ ReplaceTykVariables
+}
+
+// Middleware is a subset of the gateway.Middleware interface, that can be
+// implemented outside of gateway scope.
+type Middleware interface {
+ Init()
+ Name() string
+ Logger() *logrus.Entry
+ ProcessRequest(w http.ResponseWriter, r *http.Request, conf interface{}) (error, int) // Handles request
+ EnabledForSpec() bool
+}
+
+// LoggerProvider returns a new *logrus.Entry for the request.
+// It's implemented by gateway and middleware. Middleware typically
+// adds the `mw` field with the middleware name.
+type LoggerProvider interface {
+ Logger() *logrus.Entry
+}
+
+// ConfigProvider provides a typical config getter signature.
+type ConfigProvider interface {
+ GetConfig() config.Config
+}
+
+// PolicyProvider is a storage interface encapsulating policy retrieval.
+type PolicyProvider interface {
+ PolicyCount() int
+ PolicyIDs() []string
+ PolicyByID(string) (user.Policy, bool)
+}
+
+// These are utility methods without any real data model design around them.
+type (
+ // ReplaceTykVariables is a request-based template replacement hook.
+ // Implemented by gateway.Gateway.
+ ReplaceTykVariables interface {
+ ReplaceTykVariables(r *http.Request, in string, escape bool) string
+ }
+
+ // StripListenPath is the interface implemented by APISpec.StripListenPath.
+ StripListenPath interface {
+ StripListenPath(string) string
+ }
+
+ // StripListenPathFunc is the function signature for StripListenPath.
+ StripListenPathFunc func(string) string
+)
diff --git a/internal/model/merged_apis.go b/internal/model/merged_apis.go
new file mode 100644
index 00000000000..5f3a09b9327
--- /dev/null
+++ b/internal/model/merged_apis.go
@@ -0,0 +1,61 @@
+package model
+
+import (
+ "github.com/TykTechnologies/tyk/apidef"
+ "github.com/TykTechnologies/tyk/apidef/oas"
+)
+
+// MergedAPIList is the response body for FromDashboardService.
+type MergedAPIList struct {
+ Message []MergedAPI
+ Nonce string
+}
+
+func NewMergedAPIList(apis ...MergedAPI) *MergedAPIList {
+ return &MergedAPIList{
+ Message: apis,
+ }
+}
+
+// MergedAPI embeds the classic API definition and adds the OAS API definition as a field.
+type MergedAPI struct {
+ *apidef.APIDefinition `json:"api_definition,inline"`
+ OAS *oas.OAS `json:"oas"`
+}
+
+// SetClassic appends the given classic API definitions to the MergedAPIList.
+func (f *MergedAPIList) SetClassic(defs []*apidef.APIDefinition) {
+ for _, def := range defs {
+ f.Message = append(f.Message, MergedAPI{APIDefinition: def})
+ }
+}
+
+// Filter, if enabled=true, will filter the internal api definitions by their tags.
+func (f *MergedAPIList) Filter(enabled bool, tags ...string) []MergedAPI {
+ if !enabled {
+ return f.Message
+ }
+
+ if len(tags) == 0 {
+ return nil
+ }
+
+ tagMap := map[string]bool{}
+ for _, tag := range tags {
+ tagMap[tag] = true
+ }
+
+ result := make([]MergedAPI, 0, len(f.Message))
+ for _, v := range f.Message {
+ if v.TagsDisabled {
+ continue
+ }
+ for _, tag := range v.Tags {
+ if ok := tagMap[tag]; ok {
+ result = append(result, MergedAPI{v.APIDefinition, v.OAS})
+ break
+ }
+ }
+ }
+ return result
+}
diff --git a/internal/model/rpc.go b/internal/model/rpc.go
new file mode 100644
index 00000000000..c44a95efcbc
--- /dev/null
+++ b/internal/model/rpc.go
@@ -0,0 +1,34 @@
+package model
+
+import "github.com/TykTechnologies/tyk/apidef"
+
+// This contains the shim for rpc data model types.
+// They are used from tests, and just pipe through
+// the apidef types to avoid import cycles.
+type (
+ GroupLoginRequest = apidef.GroupLoginRequest
+ GroupKeySpaceRequest = apidef.GroupKeySpaceRequest
+ DefRequest = apidef.DefRequest
+ InboundData = apidef.InboundData
+ KeysValuesPair = apidef.KeysValuesPair
+)
+
+// These are health check shims.
+type (
+ HealthCheckItem = apidef.HealthCheckItem
+ HealthCheckResponse = apidef.HealthCheckResponse
+ HealthCheckStatus = apidef.HealthCheckStatus
+
+ HostDetails = apidef.HostDetails
+ NodeData = apidef.NodeData
+ GWStats = apidef.GWStats
+)
+
+// Other.
+const (
+ Pass = apidef.Pass
+ Warn = apidef.Warn
+ Fail = apidef.Fail
+ System = apidef.System
+ Datastore = apidef.Datastore
+)
diff --git a/internal/model/upstream_auth.go b/internal/model/upstream_auth.go
new file mode 100644
index 00000000000..a064b42e212
--- /dev/null
+++ b/internal/model/upstream_auth.go
@@ -0,0 +1,16 @@
+package model
+
+import "net/http"
+
+// UpstreamAuthProvider is an interface that can fill in upstream authentication details to the request.
+type UpstreamAuthProvider interface {
+ Fill(r *http.Request)
+}
+
+// MockUpstreamAuthProvider is a mock implementation of UpstreamAuthProvider.
+type MockUpstreamAuthProvider struct{}
+
+// Fill is a mock implementation to be used in tests.
+func (m *MockUpstreamAuthProvider) Fill(_ *http.Request) {
+ // empty mock implementation.
+}
diff --git a/internal/policy/Taskfile.yml b/internal/policy/Taskfile.yml
index 8f5cf09cff2..97c1c82735d 100644
--- a/internal/policy/Taskfile.yml
+++ b/internal/policy/Taskfile.yml
@@ -19,8 +19,24 @@ tasks:
- defer: { task: services:down }
- goimports -w .
- go fmt ./...
+ - task: test
+ vars:
+ run: '{{.run}}'
+
+ test:
+ desc: "Run tests"
+ requires:
+ vars: [run]
+ cmds:
- go test -count=1 -run='({{.run}})' -cover -coverprofile=pkg.cov -v .
+ stress:
+ desc: "Run stress tests"
+ requires:
+ vars: [run]
+ cmds:
+ - go test -count=2000 -run='({{.run}})' -cover -coverprofile=pkg.cov .
+
cover:
desc: "Show source coverage"
aliases: [coverage, cov]
diff --git a/internal/policy/apply.go b/internal/policy/apply.go
index 6baafaecf2c..380a34922e0 100644
--- a/internal/policy/apply.go
+++ b/internal/policy/apply.go
@@ -6,6 +6,7 @@ import (
"github.com/sirupsen/logrus"
+ "github.com/TykTechnologies/tyk/internal/model"
"github.com/TykTechnologies/tyk/user"
)
@@ -14,23 +15,16 @@ var (
ErrMixedPartitionAndPerAPIPolicies = errors.New("cannot apply multiple policies when some have per_api set and some are partitioned")
)
-// Repository is a storage encapsulating policy retrieval.
-// Gateway implements this object to decouple this package.
-type Repository interface {
- PolicyCount() int
- PolicyIDs() []string
- PolicyByID(string) (user.Policy, bool)
-}
-
+// Service represents the implementation for apply policies logic.
type Service struct {
- storage Repository
+ storage model.PolicyProvider
logger *logrus.Logger
// used for validation if not empty
orgID *string
}
-func New(orgID *string, storage Repository, logger *logrus.Logger) *Service {
+func New(orgID *string, storage model.PolicyProvider, logger *logrus.Logger) *Service {
return &Service{
orgID: orgID,
storage: storage,
@@ -107,7 +101,8 @@ func (t *Service) Apply(session *user.SessionState) error {
)
storage := t.storage
- customPolicies, err := session.CustomPolicies()
+
+ customPolicies, err := session.GetCustomPolicies()
if err != nil {
policyIDs = session.PolicyIDs()
} else {
@@ -242,8 +237,9 @@ func (t *Service) Apply(session *user.SessionState) error {
return nil
}
-func (t *Service) Logger() *logrus.Logger {
- return t.logger
+// Logger implements a typical logger signature with service context.
+func (t *Service) Logger() *logrus.Entry {
+ return logrus.NewEntry(t.logger)
}
// ApplyRateLimits will write policy limits to session and apiLimits.
@@ -349,14 +345,22 @@ func (t *Service) applyPartitions(policy user.Policy, session *user.SessionState
return ErrMixedPartitionAndPerAPIPolicies
}
+ // Ensure `rights` is filled with known APIs to ensure that
+ // a policy with acl rights gets honored even if not first.
+ for k := range policy.AccessRights {
+ if _, ok := rights[k]; ok {
+ continue
+ }
+ rights[k] = user.AccessDefinition{}
+ }
+
for k, v := range policy.AccessRights {
- ar := v
+ // Use rights[k], which holds previously seen/merged policy access rights.
+ ar := rights[k]
if !usePartitions || policy.Partitions.Acl {
applyState.didAcl[k] = true
- ar.AllowedURLs = copyAllowedURLs(v.AllowedURLs)
-
// Merge ACLs for the same API
if r, ok := rights[k]; ok {
// If GQL introspection is disabled, keep that configuration.
@@ -365,32 +369,28 @@ func (t *Service) applyPartitions(policy user.Policy, session *user.SessionState
}
r.Versions = appendIfMissing(rights[k].Versions, v.Versions...)
- for _, u := range v.AllowedURLs {
- found := false
- for ai, au := range r.AllowedURLs {
- if u.URL == au.URL {
- found = true
- r.AllowedURLs[ai].Methods = appendIfMissing(au.Methods, u.Methods...)
- }
- }
-
- if !found {
- r.AllowedURLs = append(r.AllowedURLs, v.AllowedURLs...)
- }
- }
+ r.AllowedURLs = MergeAllowedURLs(r.AllowedURLs, v.AllowedURLs)
- for _, t := range v.RestrictedTypes {
- for ri, rt := range r.RestrictedTypes {
- if t.Name == rt.Name {
- r.RestrictedTypes[ri].Fields = intersection(rt.Fields, t.Fields)
+ if len(r.RestrictedTypes) == 0 {
+ r.RestrictedTypes = v.RestrictedTypes
+ } else {
+ for _, t := range v.RestrictedTypes {
+ for ri, rt := range r.RestrictedTypes {
+ if t.Name == rt.Name {
+ r.RestrictedTypes[ri].Fields = intersection(rt.Fields, t.Fields)
+ }
}
}
}
- for _, t := range v.AllowedTypes {
- for ri, rt := range r.AllowedTypes {
- if t.Name == rt.Name {
- r.AllowedTypes[ri].Fields = intersection(rt.Fields, t.Fields)
+ if len(r.AllowedTypes) == 0 {
+ r.AllowedTypes = v.AllowedTypes
+ } else {
+ for _, t := range v.AllowedTypes {
+ for ri, rt := range r.AllowedTypes {
+ if t.Name == rt.Name {
+ r.AllowedTypes[ri].Fields = intersection(rt.Fields, t.Fields)
+ }
}
}
}
@@ -401,17 +401,21 @@ func (t *Service) applyPartitions(policy user.Policy, session *user.SessionState
}
}
- for _, far := range v.FieldAccessRights {
- exists := false
- for i, rfar := range r.FieldAccessRights {
- if far.TypeName == rfar.TypeName && far.FieldName == rfar.FieldName {
- exists = true
- mergeFieldLimits(&r.FieldAccessRights[i].Limits, far.Limits)
+ if len(r.FieldAccessRights) == 0 {
+ r.FieldAccessRights = v.FieldAccessRights
+ } else {
+ for _, far := range v.FieldAccessRights {
+ exists := false
+ for i, rfar := range r.FieldAccessRights {
+ if far.TypeName == rfar.TypeName && far.FieldName == rfar.FieldName {
+ exists = true
+ mergeFieldLimits(&r.FieldAccessRights[i].Limits, far.Limits)
+ }
}
- }
- if !exists {
- r.FieldAccessRights = append(r.FieldAccessRights, far)
+ if !exists {
+ r.FieldAccessRights = append(r.FieldAccessRights, far)
+ }
}
}
@@ -423,8 +427,8 @@ func (t *Service) applyPartitions(policy user.Policy, session *user.SessionState
if !usePartitions || policy.Partitions.Quota {
applyState.didQuota[k] = true
- if greaterThanInt64(policy.QuotaMax, ar.Limit.QuotaMax) {
+ if greaterThanInt64(policy.QuotaMax, ar.Limit.QuotaMax) {
ar.Limit.QuotaMax = policy.QuotaMax
if greaterThanInt64(policy.QuotaMax, session.QuotaMax) {
session.QuotaMax = policy.QuotaMax
diff --git a/internal/policy/apply_test.go b/internal/policy/apply_test.go
index 3e889c44a39..e22da5c5b05 100644
--- a/internal/policy/apply_test.go
+++ b/internal/policy/apply_test.go
@@ -3,10 +3,15 @@ package policy_test
import (
"embed"
"encoding/json"
+ "fmt"
+ "slices"
+ "sort"
"testing"
+ "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/graphql"
"github.com/TykTechnologies/tyk/internal/policy"
"github.com/TykTechnologies/tyk/user"
)
@@ -15,9 +20,9 @@ import (
var testDataFS embed.FS
func TestApplyRateLimits_PolicyLimits(t *testing.T) {
- svc := &policy.Service{}
-
t.Run("policy limits unset", func(t *testing.T) {
+ svc := &policy.Service{}
+
session := &user.SessionState{
Rate: 5,
Per: 10,
@@ -37,6 +42,8 @@ func TestApplyRateLimits_PolicyLimits(t *testing.T) {
})
t.Run("policy limits apply all", func(t *testing.T) {
+ svc := &policy.Service{}
+
session := &user.SessionState{
Rate: 5,
Per: 10,
@@ -62,6 +69,8 @@ func TestApplyRateLimits_PolicyLimits(t *testing.T) {
// changes are applied to api limits, but skipped on
// the session as the session has a higher allowance.
t.Run("policy limits apply per-api", func(t *testing.T) {
+ svc := &policy.Service{}
+
session := &user.SessionState{
Rate: 15,
Per: 10,
@@ -86,6 +95,8 @@ func TestApplyRateLimits_PolicyLimits(t *testing.T) {
// As the policy defined a lower rate than apiLimits,
// no changes to api limits are applied.
t.Run("policy limits skip", func(t *testing.T) {
+ svc := &policy.Service{}
+
session := &user.SessionState{
Rate: 5,
Per: 10,
@@ -110,28 +121,72 @@ func TestApplyRateLimits_PolicyLimits(t *testing.T) {
func TestApplyRateLimits_FromCustomPolicies(t *testing.T) {
svc := &policy.Service{}
- t.Run("Custom policies", func(t *testing.T) {
- session := &user.SessionState{}
- session.SetCustomPolicies([]user.Policy{
- {
- ID: "pol1",
- Partitions: user.PolicyPartitions{RateLimit: true},
- Rate: 8,
- Per: 1,
- AccessRights: map[string]user.AccessDefinition{"a": {}},
- },
- {
- ID: "pol2",
- Partitions: user.PolicyPartitions{RateLimit: true},
- Rate: 10,
- Per: 1,
- AccessRights: map[string]user.AccessDefinition{"a": {}},
+ session := &user.SessionState{}
+ session.SetCustomPolicies([]user.Policy{
+ {
+ ID: "pol1",
+ Partitions: user.PolicyPartitions{RateLimit: true},
+ Rate: 8,
+ Per: 1,
+ AccessRights: map[string]user.AccessDefinition{"a": {}},
+ },
+ {
+ ID: "pol2",
+ Partitions: user.PolicyPartitions{RateLimit: true},
+ Rate: 10,
+ Per: 1,
+ AccessRights: map[string]user.AccessDefinition{"a": {}},
+ },
+ })
+
+ assert.NoError(t, svc.Apply(session))
+ assert.Equal(t, 10, int(session.Rate))
+}
+
+func TestApplyACL_FromCustomPolicies(t *testing.T) {
+ svc := &policy.Service{}
+
+ pol1 := user.Policy{
+ ID: "pol1",
+ Partitions: user.PolicyPartitions{RateLimit: true},
+ Rate: 8,
+ Per: 1,
+ AccessRights: map[string]user.AccessDefinition{
+ "a": {},
+ },
+ }
+
+ pol2 := user.Policy{
+ ID: "pol2",
+ Partitions: user.PolicyPartitions{Acl: true},
+ Rate: 10,
+ Per: 1,
+ AccessRights: map[string]user.AccessDefinition{
+ "a": {
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/user", Methods: []string{"GET", "POST"}},
+ {URL: "/companies", Methods: []string{"GET", "POST"}},
+ },
},
- })
+ },
+ }
+
+ t.Run("RateLimit first", func(t *testing.T) {
+ session := &user.SessionState{}
+ session.SetCustomPolicies([]user.Policy{pol1, pol2})
- svc.Apply(session)
+ assert.NoError(t, svc.Apply(session))
+ assert.Equal(t, pol2.AccessRights["a"].AllowedURLs, session.AccessRights["a"].AllowedURLs)
+ assert.Equal(t, 8, int(session.Rate))
+ })
- assert.Equal(t, 10, int(session.Rate))
+ t.Run("ACL first", func(t *testing.T) {
+ session := &user.SessionState{}
+ session.SetCustomPolicies([]user.Policy{pol2, pol1})
+
+ assert.NoError(t, svc.Apply(session))
+ assert.Equal(t, pol2.AccessRights["a"].AllowedURLs, session.AccessRights["a"].AllowedURLs)
+ assert.Equal(t, 8, int(session.Rate))
})
}
@@ -157,3 +212,1101 @@ func TestApplyEndpointLevelLimits(t *testing.T) {
}
}
+
+type testApplyPoliciesData struct {
+ name string
+ policies []string
+ errMatch string // substring
+ sessMatch func(*testing.T, *user.SessionState) // ignored if nil
+ session *user.SessionState
+ // reverseOrder executes the tests in reversed order of policies,
+ // in addition to the order specified in policies
+ reverseOrder bool
+}
+
+func testPrepareApplyPolicies(tb testing.TB) (*policy.Service, []testApplyPoliciesData) {
+ tb.Helper()
+
+ f, err := testDataFS.ReadFile("testdata/policies.json")
+ assert.NoError(tb, err)
+
+ var policies = make(map[string]user.Policy)
+ err = json.Unmarshal(f, &policies)
+ assert.NoError(tb, err)
+
+ var repoPols = make(map[string]user.Policy)
+ err = json.Unmarshal(f, &repoPols)
+ assert.NoError(tb, err)
+
+ store := policy.NewStoreMap(repoPols)
+ orgID := ""
+ service := policy.New(&orgID, store, logrus.StandardLogger())
+
+ // splitting tests for readability
+ var tests []testApplyPoliciesData
+
+ nilSessionTCs := []testApplyPoliciesData{
+ {
+ "Empty", nil,
+ "", nil, nil, false,
+ },
+ {
+ "Single", []string{"nonpart1"},
+ "", nil, nil, false,
+ },
+ {
+ "Missing", []string{"nonexistent"},
+ "not found", nil, nil, false,
+ },
+ {
+ "DiffOrg", []string{"difforg"},
+ "different org", nil, nil, false,
+ },
+ }
+ tests = append(tests, nilSessionTCs...)
+
+ nonPartitionedTCs := []testApplyPoliciesData{
+ {
+ name: "MultiNonPart",
+ policies: []string{"nonpart1", "nonpart2", "nonexistent"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ want := map[string]user.AccessDefinition{
+ "a": {
+ Limit: user.APILimit{},
+ AllowanceScope: "p1",
+ },
+ "b": {
+ Limit: user.APILimit{},
+ AllowanceScope: "p2",
+ },
+ }
+
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ {
+ name: "MultiACLPolicy",
+ policies: []string{"nonpart3"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ want := map[string]user.AccessDefinition{
+ "a": {
+ Limit: user.APILimit{},
+ },
+ "b": {
+ Limit: user.APILimit{},
+ },
+ }
+
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ }
+ tests = append(tests, nonPartitionedTCs...)
+
+ quotaPartitionTCs := []testApplyPoliciesData{
+ {
+ "QuotaPart with unlimited", []string{"unlimited-quota"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.QuotaMax != -1 {
+ t.Fatalf("want unlimited quota to be -1")
+ }
+ }, nil, false,
+ },
+ {
+ "QuotaPart", []string{"quota1"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.QuotaMax != 2 {
+ t.Fatalf("want QuotaMax to be 2")
+ }
+ }, nil, false,
+ },
+ {
+ "QuotaParts", []string{"quota1", "quota2"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.QuotaMax != 3 {
+ t.Fatalf("Should pick bigger value")
+ }
+ }, nil, false,
+ },
+ {
+ "QuotaParts with acl", []string{"quota5", "quota4"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.Equal(t, int64(4), s.QuotaMax)
+ }, nil, false,
+ },
+ {
+ "QuotaPart with access rights", []string{"quota3"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.QuotaMax != 3 {
+ t.Fatalf("quota should be the same as policy quota")
+ }
+ }, nil, false,
+ },
+ {
+ "QuotaPart with access rights in multi-policy", []string{"quota4", "nonpart1"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.QuotaMax != 3 {
+ t.Fatalf("quota should be the same as policy quota")
+ }
+
+ // Don't apply api 'b' coming from quota4 policy
+ want := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{}}}
+ assert.Equal(t, want, s.AccessRights)
+ }, nil, false,
+ },
+ }
+ tests = append(tests, quotaPartitionTCs...)
+
+ rateLimitPartitionTCs := []testApplyPoliciesData{
+ {
+ "RatePart with unlimited", []string{"unlimited-rate"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.True(t, s.Rate <= 0, "want unlimited rate to be <= 0")
+ }, nil, false,
+ },
+ {
+ "RatePart", []string{"rate1"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.Rate != 3 {
+ t.Fatalf("want Rate to be 3")
+ }
+ }, nil, false,
+ },
+ {
+ "RateParts", []string{"rate1", "rate2"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.Rate != 4 {
+ t.Fatalf("Should pick bigger value")
+ }
+ }, nil, false,
+ },
+ {
+ "RateParts with acl", []string{"rate5", "rate4"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.Equal(t, float64(10), s.Rate)
+ }, nil, false,
+ },
+ {
+ "RateParts with acl respected by session", []string{"rate4", "rate5"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.Equal(t, float64(10), s.Rate)
+ }, &user.SessionState{Rate: 20}, false,
+ },
+ {
+ "Rate with no partition respected by session", []string{"rate-no-partition"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.Equal(t, float64(12), s.Rate)
+ }, &user.SessionState{Rate: 20}, false,
+ },
+ }
+ tests = append(tests, rateLimitPartitionTCs...)
+
+ complexityPartitionTCs := []testApplyPoliciesData{
+ {
+ "ComplexityPart with unlimited", []string{"unlimitedComplexity"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.MaxQueryDepth != -1 {
+ t.Fatalf("unlimitied query depth should be -1")
+ }
+ }, nil, false,
+ },
+ {
+ "ComplexityPart", []string{"complexity1"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.MaxQueryDepth != 2 {
+ t.Fatalf("want MaxQueryDepth to be 2")
+ }
+ }, nil, false,
+ },
+ {
+ "ComplexityParts", []string{"complexity1", "complexity2"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.MaxQueryDepth != 3 {
+ t.Fatalf("Should pick bigger value")
+ }
+ }, nil, false,
+ },
+ }
+ tests = append(tests, complexityPartitionTCs...)
+
+ aclPartitionTCs := []testApplyPoliciesData{
+ {
+ "AclPart", []string{"acl1"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{}}}
+
+ assert.Equal(t, want, s.AccessRights)
+ }, nil, false,
+ },
+ {
+ "AclPart", []string{"acl1", "acl2"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{}}, "b": {Limit: user.APILimit{}}}
+ assert.Equal(t, want, s.AccessRights)
+ }, nil, false,
+ },
+ {
+ "Acl for a and rate for a,b", []string{"acl1", "rate-for-a-b"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{RateLimit: user.RateLimit{Rate: 4, Per: 1}}}}
+ assert.Equal(t, want, s.AccessRights)
+ }, nil, false,
+ },
+ {
+ "Acl for a,b and individual rate for a,b", []string{"acl-for-a-b", "rate-for-a", "rate-for-b"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{
+ "a": {Limit: user.APILimit{RateLimit: user.RateLimit{Rate: 4, Per: 1}}},
+ "b": {Limit: user.APILimit{RateLimit: user.RateLimit{Rate: 2, Per: 1}}},
+ }
+ assert.Equal(t, want, s.AccessRights)
+ }, nil, false,
+ },
+ {
+ "RightsUpdate", []string{"acl-for-a-b"},
+ "", func(t *testing.T, ses *user.SessionState) {
+ t.Helper()
+ expectedAccessRights := map[string]user.AccessDefinition{"a": {Limit: user.APILimit{}}, "b": {Limit: user.APILimit{}}}
+ assert.Equal(t, expectedAccessRights, ses.AccessRights)
+ }, &user.SessionState{
+ AccessRights: map[string]user.AccessDefinition{
+ "c": {Limit: user.APILimit{}},
+ },
+ }, false,
+ },
+ }
+ tests = append(tests, aclPartitionTCs...)
+
+ inactiveTCs := []testApplyPoliciesData{
+ {
+ "InactiveMergeOne", []string{"tags1", "inactive1"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if !s.IsInactive {
+ t.Fatalf("want IsInactive to be true")
+ }
+ }, nil, false,
+ },
+ {
+ "InactiveMergeAll", []string{"inactive1", "inactive2"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if !s.IsInactive {
+ t.Fatalf("want IsInactive to be true")
+ }
+ }, nil, false,
+ },
+ {
+ "InactiveWithSession", []string{"tags1", "tags2"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if !s.IsInactive {
+ t.Fatalf("want IsInactive to be true")
+ }
+ }, &user.SessionState{
+ IsInactive: true,
+ }, false,
+ },
+ }
+ tests = append(tests, inactiveTCs...)
+
+ perAPITCs := []testApplyPoliciesData{
+ {
+ name: "Per API is set with other partitions to true",
+ policies: []string{"per_api_and_partitions"},
+ errMatch: "cannot apply policy per_api_and_partitions which has per_api and any of partitions set",
+ },
+ {
+ name: "Per API is set to true with some partitions set to true",
+ policies: []string{"per_api_and_some_partitions"},
+ errMatch: "cannot apply policy per_api_and_some_partitions which has per_api and any of partitions set",
+ },
+ {
+ name: "Per API is set to true with no other partitions set to true",
+ policies: []string{"per_api_and_no_other_partitions"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ want := map[string]user.AccessDefinition{
+ "c": {
+ Limit: user.APILimit{
+ RateLimit: user.RateLimit{
+ Rate: 2000,
+ Per: 60,
+ },
+ QuotaMax: -1,
+ },
+ AllowanceScope: "c",
+ },
+ "d": {
+ Limit: user.APILimit{
+ RateLimit: user.RateLimit{
+ Rate: 20,
+ Per: 1,
+ },
+ QuotaMax: 1000,
+ QuotaRenewalRate: 3600,
+ },
+ AllowanceScope: "d",
+ },
+ }
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ {
+ name: "several policies with Per API set to true specifying limit for the same API",
+ policies: []string{"per_api_and_no_other_partitions", "per_api_with_api_d"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{
+ "c": {
+ Limit: user.APILimit{
+ RateLimit: user.RateLimit{
+ Rate: 2000,
+ Per: 60,
+ },
+ QuotaMax: -1,
+ },
+ AllowanceScope: "c",
+ },
+ "d": {
+ Limit: user.APILimit{
+ RateLimit: user.RateLimit{
+ Rate: 200,
+ Per: 10,
+ },
+ QuotaMax: 5000,
+ QuotaRenewalRate: 3600,
+ },
+ AllowanceScope: "d",
+ },
+ }
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ {
+ name: "several policies with Per API set to true specifying limit for the same APIs",
+ policies: []string{"per_api_and_no_other_partitions", "per_api_with_api_d", "per_api_with_api_c"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{
+ "c": {
+ Limit: user.APILimit{
+ RateLimit: user.RateLimit{
+ Rate: 3000,
+ Per: 10,
+ },
+ QuotaMax: -1,
+ },
+ AllowanceScope: "c",
+ },
+ "d": {
+ Limit: user.APILimit{
+ RateLimit: user.RateLimit{
+ Rate: 200,
+ Per: 10,
+ },
+ QuotaMax: 5000,
+ QuotaRenewalRate: 3600,
+ },
+ AllowanceScope: "d",
+ },
+ }
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ {
+ name: "several policies, mixed the one which has Per API set to true and partitioned ones",
+ policies: []string{"per_api_with_api_d", "quota1"},
+ errMatch: "cannot apply multiple policies when some have per_api set and some are partitioned",
+ },
+ {
+ name: "several policies, mixed the one which has Per API set to true and partitioned ones (different order)",
+ policies: []string{"rate1", "per_api_with_api_d"},
+ errMatch: "cannot apply multiple policies when some have per_api set and some are partitioned",
+ },
+ {
+ name: "Per API is set to true and some API gets limit set from policy's fields",
+ policies: []string{"per_api_with_limit_set_from_policy"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{
+ "e": {
+ Limit: user.APILimit{
+ QuotaMax: -1,
+ RateLimit: user.RateLimit{
+ Rate: 300,
+ Per: 1,
+ },
+ },
+ AllowanceScope: "per_api_with_limit_set_from_policy",
+ },
+ "d": {
+ Limit: user.APILimit{
+ QuotaMax: 5000,
+ QuotaRenewalRate: 3600,
+ RateLimit: user.RateLimit{
+ Rate: 200,
+ Per: 10,
+ },
+ },
+ AllowanceScope: "d",
+ },
+ }
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ {
+ name: "Per API with limits override",
+ policies: []string{
+ "per_api_with_limit_set_from_policy",
+ "per_api_with_api_d",
+ "per_api_with_higher_rate_on_api_d",
+ },
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{
+ "e": {
+ Limit: user.APILimit{
+ QuotaMax: -1,
+ RateLimit: user.RateLimit{
+ Rate: 300,
+ Per: 1,
+ },
+ },
+ AllowanceScope: "per_api_with_limit_set_from_policy",
+ },
+ "d": {
+ Limit: user.APILimit{
+ QuotaMax: 5000,
+ QuotaRenewalRate: 3600,
+ RateLimit: user.RateLimit{
+ Rate: 200,
+ Per: 10,
+ },
+ },
+ AllowanceScope: "d",
+ },
+ }
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ }
+ tests = append(tests, perAPITCs...)
+
+ graphQLTCs := []testApplyPoliciesData{
+ {
+ name: "Merge per path rules for the same API",
+ policies: []string{"per-path2", "per-path1"},
+ sessMatch: func(t *testing.T, sess *user.SessionState) {
+ t.Helper()
+ want := map[string]user.AccessDefinition{
+ "a": {
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/user", Methods: []string{"GET", "POST"}},
+ {URL: "/companies", Methods: []string{"GET", "POST"}},
+ },
+ Limit: user.APILimit{},
+ },
+ "b": {
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/", Methods: []string{"PUT"}},
+ },
+ Limit: user.APILimit{},
+ },
+ }
+
+ gotPolicy, ok := store.PolicyByID("per-path2")
+
+ assert.True(t, ok)
+ assert.Equal(t, user.AccessSpec{
+ URL: "/user", Methods: []string{"GET"},
+ }, gotPolicy.AccessRights["a"].AllowedURLs[0])
+
+ assert.Equal(t, want, sess.AccessRights)
+ },
+ },
+ {
+ name: "Merge restricted fields for the same GraphQL API",
+ policies: []string{"restricted-types1", "restricted-types2"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ want := map[string]user.AccessDefinition{
+ "a": { // It should get intersection of restricted types.
+ RestrictedTypes: []graphql.Type{
+ {Name: "Country", Fields: []string{"code"}},
+ {Name: "Person", Fields: []string{"name"}},
+ },
+ Limit: user.APILimit{},
+ },
+ }
+
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ {
+ name: "Merge allowed fields for the same GraphQL API",
+ policies: []string{"allowed-types1", "allowed-types2"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ want := map[string]user.AccessDefinition{
+ "a": { // It should get intersection of restricted types.
+ AllowedTypes: []graphql.Type{
+ {Name: "Country", Fields: []string{"code"}},
+ {Name: "Person", Fields: []string{"name"}},
+ },
+ Limit: user.APILimit{},
+ },
+ }
+
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ {
+ name: "If GQL introspection is disabled, it remains disabled after merging",
+ policies: []string{"introspection-disabled", "introspection-enabled"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ want := map[string]user.AccessDefinition{
+ "a": {
+ DisableIntrospection: true, // If GQL introspection is disabled, it remains disabled after merging.
+ Limit: user.APILimit{},
+ },
+ }
+
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ {
+ name: "Merge field level depth limit for the same GraphQL API",
+ policies: []string{"field-level-depth-limit1", "field-level-depth-limit2"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ want := map[string]user.AccessDefinition{
+ "graphql-api": {
+ Limit: user.APILimit{},
+ FieldAccessRights: []user.FieldAccessDefinition{
+ {TypeName: "Query", FieldName: "people", Limits: user.FieldLimits{MaxQueryDepth: 4}},
+ {TypeName: "Mutation", FieldName: "putPerson", Limits: user.FieldLimits{MaxQueryDepth: -1}},
+ {TypeName: "Query", FieldName: "countries", Limits: user.FieldLimits{MaxQueryDepth: 3}},
+ {TypeName: "Query", FieldName: "continents", Limits: user.FieldLimits{MaxQueryDepth: 4}},
+ },
+ },
+ }
+
+ assert.Equal(t, want, s.AccessRights)
+ },
+ },
+ }
+ tests = append(tests, graphQLTCs...)
+
+ throttleTCs := []testApplyPoliciesData{
+ {
+ "Throttle interval from policy", []string{"throttle1"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ if s.ThrottleInterval != 9 {
+ t.Fatalf("Throttle interval should be 9 inherited from policy")
+ }
+ }, nil, false,
+ },
+ {
+ name: "Throttle retry limit from policy",
+ policies: []string{"throttle1"},
+ errMatch: "",
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ if s.ThrottleRetryLimit != 99 {
+ t.Fatalf("Throttle interval should be 9 inherited from policy")
+ }
+ },
+ session: nil,
+ },
+ }
+ tests = append(tests, throttleTCs...)
+
+ tagsTCs := []testApplyPoliciesData{
+ {
+ "TagMerge", []string{"tags1", "tags2"},
+ "", func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ want := []string{"key-tag", "tagA", "tagX", "tagY"}
+ sort.Strings(s.Tags)
+
+ assert.Equal(t, want, s.Tags)
+ }, &user.SessionState{
+ Tags: []string{"key-tag"},
+ }, false,
+ },
+ }
+ tests = append(tests, tagsTCs...)
+
+ partitionTCs := []testApplyPoliciesData{
+ {
+ "NonpartAndPart", []string{"nonpart1", "quota1"},
+ "", nil, nil, false,
+ },
+ {
+ name: "inherit quota and rate from partitioned policies",
+ policies: []string{"quota1", "rate3"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ if s.QuotaMax != 2 {
+ t.Fatalf("quota should be the same as quota policy")
+ }
+ if s.Rate != 4 {
+ t.Fatalf("rate should be the same as rate policy")
+ }
+ if s.Per != 4 {
+ t.Fatalf("Rate per seconds should be the same as rate policy")
+ }
+ },
+ },
+ {
+ name: "inherit quota and rate from partitioned policies applied in different order",
+ policies: []string{"rate3", "quota1"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+
+ if s.QuotaMax != 2 {
+ t.Fatalf("quota should be the same as quota policy")
+ }
+ if s.Rate != 4 {
+ t.Fatalf("rate should be the same as rate policy")
+ }
+ if s.Per != 4 {
+ t.Fatalf("Rate per seconds should be the same as rate policy")
+ }
+ },
+ },
+ }
+ tests = append(tests, partitionTCs...)
+
+ endpointRLTCs := []testApplyPoliciesData{
+ {
+ name: "Per API and per endpoint policies",
+ policies: []string{"per_api_with_limit_set_from_policy", "per_api_with_endpoint_limits_on_d_and_e"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ endpointsConfig := user.Endpoints{
+ {
+ Path: "/get",
+ Methods: user.EndpointMethods{
+ {
+ Name: "GET",
+ Limit: user.RateLimit{
+ Rate: -1,
+ },
+ },
+ },
+ },
+ {
+ Path: "/post",
+ Methods: user.EndpointMethods{
+ {
+ Name: "POST",
+ Limit: user.RateLimit{
+ Rate: 300,
+ Per: 10,
+ },
+ },
+ },
+ },
+ }
+ want := map[string]user.AccessDefinition{
+ "e": {
+ Limit: user.APILimit{
+ QuotaMax: -1,
+ RateLimit: user.RateLimit{
+ Rate: 500,
+ Per: 1,
+ },
+ },
+ AllowanceScope: "per_api_with_endpoint_limits_on_d_and_e",
+ Endpoints: endpointsConfig,
+ },
+ "d": {
+ Limit: user.APILimit{
+ QuotaMax: 5000,
+ QuotaRenewalRate: 3600,
+ RateLimit: user.RateLimit{
+ Rate: 200,
+ Per: 10,
+ },
+ },
+ AllowanceScope: "d",
+ Endpoints: endpointsConfig,
+ },
+ }
+ assert.Equal(t, want, s.AccessRights)
+ },
+ reverseOrder: true,
+ },
+ {
+ name: "Endpoint level limits overlapping",
+ policies: []string{
+ "per_api_with_limit_set_from_policy",
+ "per_api_with_endpoint_limits_on_d_and_e",
+ "per_endpoint_limits_different_on_api_d",
+ },
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ apiEEndpoints := user.Endpoints{
+ {
+ Path: "/get",
+ Methods: user.EndpointMethods{
+ {
+ Name: "GET",
+ Limit: user.RateLimit{
+ Rate: -1,
+ },
+ },
+ },
+ },
+ {
+ Path: "/post",
+ Methods: user.EndpointMethods{
+ {
+ Name: "POST",
+ Limit: user.RateLimit{
+ Rate: 300,
+ Per: 10,
+ },
+ },
+ },
+ },
+ }
+
+ assert.ElementsMatch(t, apiEEndpoints, s.AccessRights["e"].Endpoints)
+
+ apiDEndpoints := user.Endpoints{
+ {
+ Path: "/get",
+ Methods: user.EndpointMethods{
+ {
+ Name: "GET",
+ Limit: user.RateLimit{
+ Rate: -1,
+ },
+ },
+ },
+ },
+ {
+ Path: "/post",
+ Methods: user.EndpointMethods{
+ {
+ Name: "POST",
+ Limit: user.RateLimit{
+ Rate: 400,
+ Per: 11,
+ },
+ },
+ },
+ },
+ {
+ Path: "/anything",
+ Methods: user.EndpointMethods{
+ {
+ Name: "PUT",
+ Limit: user.RateLimit{
+ Rate: 500,
+ Per: 10,
+ },
+ },
+ },
+ },
+ }
+
+ assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
+
+ apiELimits := user.APILimit{
+ QuotaMax: -1,
+ RateLimit: user.RateLimit{
+ Rate: 500,
+ Per: 1,
+ },
+ }
+ assert.Equal(t, apiELimits, s.AccessRights["e"].Limit)
+
+ apiDLimits := user.APILimit{
+ QuotaMax: 5000,
+ QuotaRenewalRate: 3600,
+ RateLimit: user.RateLimit{
+ Rate: 200,
+ Per: 10,
+ },
+ }
+ assert.Equal(t, apiDLimits, s.AccessRights["d"].Limit)
+ },
+ reverseOrder: true,
+ },
+ {
+ name: "endpoint_rate_limits_on_acl_partition_only",
+ policies: []string{"endpoint_rate_limits_on_acl_partition_only"},
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.NotEmpty(t, s.AccessRights)
+ assert.Empty(t, s.AccessRights["d"].Endpoints)
+ },
+ },
+ {
+ name: "endpoint_rate_limits_when_acl_and_quota_partitions_combined",
+ policies: []string{
+ "endpoint_rate_limits_on_acl_partition_only",
+ "endpoint_rate_limits_on_quota_partition_only",
+ },
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.NotEmpty(t, s.AccessRights)
+ assert.Empty(t, s.AccessRights["d"].Endpoints)
+ },
+ reverseOrder: true,
+ },
+ }
+
+ tests = append(tests, endpointRLTCs...)
+
+ combinedEndpointRLTCs := []testApplyPoliciesData{
+ {
+ name: "combine_non_partitioned_policies_with_endpoint_rate_limits_configured_on_api_d",
+ policies: []string{
+ "api_d_get_endpoint_rl_1_configure_on_non_partitioned_policy",
+ "api_d_get_endpoint_rl_2_configure_on_non_partitioned_policy",
+ },
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.NotEmpty(t, s.AccessRights)
+ apiDEndpoints := user.Endpoints{
+ {
+ Path: "/get",
+ Methods: user.EndpointMethods{
+ {
+ Name: "GET",
+ Limit: user.RateLimit{
+ Rate: 20,
+ Per: 60,
+ },
+ },
+ },
+ },
+ }
+
+ assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
+ },
+ reverseOrder: true,
+ },
+ {
+ name: "combine_non_partitioned_policies_with_endpoint_rate_limits_no_bound_configured_on_api_d",
+ policies: []string{
+ "api_d_get_endpoint_rl_1_configure_on_non_partitioned_policy",
+ "api_d_get_endpoint_rl_2_configure_on_non_partitioned_policy",
+ "api_d_get_endpoint_rl_3_configure_on_non_partitioned_policy",
+ },
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.NotEmpty(t, s.AccessRights)
+ apiDEndpoints := user.Endpoints{
+ {
+ Path: "/get",
+ Methods: user.EndpointMethods{
+ {
+ Name: "GET",
+ Limit: user.RateLimit{
+ Rate: -1,
+ },
+ },
+ },
+ },
+ }
+
+ assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
+ },
+ reverseOrder: true,
+ },
+ {
+ name: "combine_non_partitioned_policies_with_multiple_endpoint_rate_limits_configured_on_api_d",
+ policies: []string{
+ "api_d_get_endpoint_rl_1_configure_on_non_partitioned_policy",
+ "api_d_get_endpoint_rl_2_configure_on_non_partitioned_policy",
+ "api_d_get_endpoint_rl_3_configure_on_non_partitioned_policy",
+ "api_d_post_endpoint_rl_1_configure_on_non_partitioned_policy",
+ },
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.NotEmpty(t, s.AccessRights)
+ apiDEndpoints := user.Endpoints{
+ {
+ Path: "/get",
+ Methods: user.EndpointMethods{
+ {
+ Name: "GET",
+ Limit: user.RateLimit{
+ Rate: -1,
+ },
+ },
+ },
+ },
+ {
+ Path: "/post",
+ Methods: user.EndpointMethods{
+ {
+ Name: "POST",
+ Limit: user.RateLimit{
+ Rate: 20,
+ Per: 60,
+ },
+ },
+ },
+ },
+ }
+
+ assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
+ },
+ reverseOrder: true,
+ },
+ {
+ name: "combine_non_partitioned_policies_with_endpoint_rate_limits_configured_on_api_d_and_e",
+ policies: []string{
+ "api_d_get_endpoint_rl_1_configure_on_non_partitioned_policy",
+ "api_d_get_endpoint_rl_2_configure_on_non_partitioned_policy",
+ "api_d_get_endpoint_rl_3_configure_on_non_partitioned_policy",
+ "api_d_post_endpoint_rl_1_configure_on_non_partitioned_policy",
+ "api_e_get_endpoint_rl_1_configure_on_non_partitioned_policy",
+ },
+ sessMatch: func(t *testing.T, s *user.SessionState) {
+ t.Helper()
+ assert.NotEmpty(t, s.AccessRights)
+ apiDEndpoints := user.Endpoints{
+ {
+ Path: "/get",
+ Methods: user.EndpointMethods{
+ {
+ Name: "GET",
+ Limit: user.RateLimit{
+ Rate: -1,
+ },
+ },
+ },
+ },
+ {
+ Path: "/post",
+ Methods: user.EndpointMethods{
+ {
+ Name: "POST",
+ Limit: user.RateLimit{
+ Rate: 20,
+ Per: 60,
+ },
+ },
+ },
+ },
+ }
+
+ assert.ElementsMatch(t, apiDEndpoints, s.AccessRights["d"].Endpoints)
+
+ apiEEndpoints := user.Endpoints{
+ {
+ Path: "/get",
+ Methods: user.EndpointMethods{
+ {
+ Name: "GET",
+ Limit: user.RateLimit{
+ Rate: 100,
+ Per: 60,
+ },
+ },
+ },
+ },
+ }
+
+ assert.ElementsMatch(t, apiEEndpoints, s.AccessRights["e"].Endpoints)
+ },
+ reverseOrder: true,
+ },
+ }
+
+ tests = append(tests, combinedEndpointRLTCs...)
+
+ return service, tests
+}
+
+func TestService_Apply(t *testing.T) {
+ service, tests := testPrepareApplyPolicies(t)
+
+ for _, tc := range tests {
+ pols := [][]string{tc.policies}
+ if tc.reverseOrder {
+ var copyPols = make([]string, len(tc.policies))
+ copy(copyPols, tc.policies)
+ slices.Reverse(copyPols)
+ pols = append(pols, copyPols)
+ }
+
+ for i, policies := range pols {
+ name := tc.name
+ if i == 1 {
+ name = fmt.Sprintf("%s, reversed=%t", name, true)
+ }
+
+ t.Run(name, func(t *testing.T) {
+ sess := tc.session
+ if sess == nil {
+ sess = &user.SessionState{}
+ }
+ sess.SetPolicies(policies...)
+ if err := service.Apply(sess); err != nil {
+ assert.ErrorContains(t, err, tc.errMatch)
+ return
+ }
+
+ if tc.sessMatch != nil {
+ tc.sessMatch(t, sess)
+ }
+ })
+ }
+ }
+}
+
+func BenchmarkService_Apply(b *testing.B) {
+ b.ReportAllocs()
+
+ service, tests := testPrepareApplyPolicies(b)
+
+ for i := 0; i < b.N; i++ {
+ for _, tc := range tests {
+ sess := &user.SessionState{}
+ sess.SetPolicies(tc.policies...)
+ err := service.Apply(sess)
+ assert.NoError(b, err)
+ }
+ }
+}
diff --git a/internal/policy/rpc.go b/internal/policy/rpc.go
new file mode 100644
index 00000000000..31c1db8b9ab
--- /dev/null
+++ b/internal/policy/rpc.go
@@ -0,0 +1,42 @@
+package policy
+
+import (
+ "encoding/json"
+
+ "github.com/TykTechnologies/tyk/internal/model"
+ "github.com/TykTechnologies/tyk/user"
+)
+
+// RPCDataLoaderMock is a policy-related test utility.
+type RPCDataLoaderMock struct {
+ ShouldConnect bool
+ Policies []user.Policy
+ Apis []model.MergedAPI
+}
+
+// Connect will return the connection status.
+func (s *RPCDataLoaderMock) Connect() bool {
+ return s.ShouldConnect
+}
+
+// GetApiDefinitions returns the internal Apis as a json string.
+func (s *RPCDataLoaderMock) GetApiDefinitions(_ string, tags []string) string {
+ if len(tags) > 1 {
+ panic("not implemented")
+ }
+
+ apiList, err := json.Marshal(s.Apis)
+ if err != nil {
+ return ""
+ }
+ return string(apiList)
+}
+
+// GetPolicies returns the internal Policies as a json string.
+func (s *RPCDataLoaderMock) GetPolicies(_ string) string {
+ policyList, err := json.Marshal(s.Policies)
+ if err != nil {
+ return ""
+ }
+ return string(policyList)
+}
diff --git a/internal/policy/store.go b/internal/policy/store.go
index 909c53e8bca..7829659db89 100644
--- a/internal/policy/store.go
+++ b/internal/policy/store.go
@@ -4,20 +4,27 @@ import (
"github.com/TykTechnologies/tyk/user"
)
-// Store is an in-memory policy storage object that
-// implements the repository for policy access. We
-// do not implement concurrency protections here.
+// Store is an in-memory policy storage object that implements the
+// repository for policy access. We do not implement concurrency
+// protections here. Where order is important, use this.
type Store struct {
- policies map[string]user.Policy
+ policies []user.Policy
}
-func NewStore(policies map[string]user.Policy) *Store {
+// NewStore returns a new policy.Store.
+func NewStore(policies []user.Policy) *Store {
return &Store{
policies: policies,
}
}
+// PolicyIDs returns a list policy IDs in the store.
+// It will return nil if no policies exist.
func (s *Store) PolicyIDs() []string {
+ if len(s.policies) == 0 {
+ return nil
+ }
+
policyIDs := make([]string, 0, len(s.policies))
for _, val := range s.policies {
policyIDs = append(policyIDs, val.ID)
@@ -25,11 +32,17 @@ func (s *Store) PolicyIDs() []string {
return policyIDs
}
+// PolicyByID returns a policy by ID.
func (s *Store) PolicyByID(id string) (user.Policy, bool) {
- v, ok := s.policies[id]
- return v, ok
+ for _, pol := range s.policies {
+ if pol.ID == id {
+ return pol, true
+ }
+ }
+ return user.Policy{}, false
}
+// PolicyCount returns the number of policies in the store.
func (s *Store) PolicyCount() int {
return len(s.policies)
}
diff --git a/internal/policy/store_map.go b/internal/policy/store_map.go
new file mode 100644
index 00000000000..a035c320a4a
--- /dev/null
+++ b/internal/policy/store_map.go
@@ -0,0 +1,46 @@
+package policy
+
+import (
+ "github.com/TykTechnologies/tyk/user"
+)
+
+// StoreMap is same as Store, but doesn't preserve order.
+type StoreMap struct {
+ policies map[string]user.Policy
+}
+
+// NewStoreMap returns a new policy.StoreMap.
+func NewStoreMap(policies map[string]user.Policy) *StoreMap {
+ if len(policies) == 0 {
+ policies = make(map[string]user.Policy)
+ }
+
+ return &StoreMap{
+ policies: policies,
+ }
+}
+
+// PolicyIDs returns a list policy IDs in the store.
+// It will return nil if no policies exist.
+func (s *StoreMap) PolicyIDs() []string {
+ if len(s.policies) == 0 {
+ return nil
+ }
+
+ policyIDs := make([]string, 0, len(s.policies))
+ for _, val := range s.policies {
+ policyIDs = append(policyIDs, val.ID)
+ }
+ return policyIDs
+}
+
+// PolicyByID returns a policy by ID.
+func (s *StoreMap) PolicyByID(id string) (user.Policy, bool) {
+ v, ok := s.policies[id]
+ return v, ok
+}
+
+// PolicyCount returns the number of policies in the store.
+func (s *StoreMap) PolicyCount() int {
+ return len(s.policies)
+}
diff --git a/gateway/testdata/policies.json b/internal/policy/testdata/policies.json
similarity index 96%
rename from gateway/testdata/policies.json
rename to internal/policy/testdata/policies.json
index c841f57c331..b56485d0fc9 100644
--- a/gateway/testdata/policies.json
+++ b/internal/policy/testdata/policies.json
@@ -904,5 +904,42 @@
"rate_limit": true,
"quota": true
}
+ },
+ "acl_with_allowed_url": {
+ "id": "acl_with_allowed_url",
+ "rate": 500,
+ "per": 1,
+ "quota_max": -1,
+ "access_rights": {
+ "d": {
+ "allowed_urls": [
+ {
+ "url": "/anything",
+ "methods": [
+ "GET",
+ "POST"
+ ]
+ }
+ ]
+ }
+ },
+ "partitions": {
+ "acl": true
+ }
+ },
+ "rate_limit": {
+ "id": "rate_limit",
+ "rate": 1000,
+ "per": 1,
+ "quota_max": -1,
+ "access_rights": {
+ "d": {
+ "api_name": "d",
+ "api_id": "d"
+ }
+ },
+ "partitions": {
+ "rate_limit": true
+ }
}
}
diff --git a/internal/policy/util.go b/internal/policy/util.go
index ed34211c0f4..8558fed0800 100644
--- a/internal/policy/util.go
+++ b/internal/policy/util.go
@@ -1,42 +1,62 @@
package policy
import (
+ "slices"
+
"github.com/TykTechnologies/tyk/user"
)
-// appendIfMissing ensures dest slice is unique with new items.
-func appendIfMissing(src []string, in ...string) []string {
- // Use map for uniqueness
- srcMap := map[string]bool{}
- for _, v := range src {
- srcMap[v] = true
- }
- for _, v := range in {
- srcMap[v] = true
- }
-
- // Produce unique []string, maintain sort order
- uniqueSorted := func(src []string, keys map[string]bool) []string {
- result := make([]string, 0, len(keys))
- for _, v := range src {
- // append missing value
- if val := keys[v]; val {
- result = append(result, v)
- delete(keys, v)
+// MergeAllowedURLs will merge s1 and s2 to produce a merged result.
+// It maintains order of keys in s1 and s2 as they are seen.
+// If the result is an empty set, nil is returned.
+func MergeAllowedURLs(s1, s2 []user.AccessSpec) []user.AccessSpec {
+ order := []string{}
+ merged := map[string][]string{}
+
+ // Loop input sets and merge through a map.
+ for _, src := range [][]user.AccessSpec{s1, s2} {
+ for _, r := range src {
+ url := r.URL
+ v, ok := merged[url]
+ if !ok {
+ // First time we see the spec
+ merged[url] = r.Methods
+
+ // Maintain order
+ order = append(order, url)
+
+ continue
}
+ merged[url] = appendIfMissing(v, r.Methods...)
}
- return result
}
- // no new items from `in`
- if len(srcMap) == len(src) {
- return src
+ // Early exit without allocating.
+ if len(order) == 0 {
+ return nil
}
- src = uniqueSorted(src, srcMap)
- in = uniqueSorted(in, srcMap)
+ // Provide results in desired order.
+ result := make([]user.AccessSpec, 0, len(order))
+ for _, key := range order {
+ spec := user.AccessSpec{
+ Methods: merged[key],
+ URL: key,
+ }
+ result = append(result, spec)
+ }
+ return result
+}
- return append(src, in...)
+// appendIfMissing ensures dest slice is unique with new items.
+func appendIfMissing(dest []string, in ...string) []string {
+ for _, v := range in {
+ if slices.Contains(dest, v) {
+ continue
+ }
+ dest = append(dest, v)
+ }
+ return dest
}
// intersection gets intersection of the given two slices.
@@ -56,30 +76,6 @@ func intersection(a []string, b []string) (inter []string) {
return
}
-// contains checks whether the given slice contains the given item.
-func contains(s []string, i string) bool {
- for _, a := range s {
- if a == i {
- return true
- }
- }
- return false
-}
-
-// greaterThanFloat64 checks whether first float64 value is bigger than second float64 value.
-// -1 means infinite and the biggest value.
-func greaterThanFloat64(first, second float64) bool {
- if first == -1 {
- return true
- }
-
- if second == -1 {
- return false
- }
-
- return first > second
-}
-
// greaterThanInt64 checks whether first int64 value is bigger than second int64 value.
// -1 means infinite and the biggest value.
func greaterThanInt64(first, second int64) bool {
@@ -107,23 +103,3 @@ func greaterThanInt(first, second int) bool {
return first > second
}
-
-func copyAllowedURLs(input []user.AccessSpec) []user.AccessSpec {
- if input == nil {
- return nil
- }
-
- copied := make([]user.AccessSpec, len(input))
-
- for i, as := range input {
- copied[i] = user.AccessSpec{
- URL: as.URL,
- }
- if as.Methods != nil {
- copied[i].Methods = make([]string, len(as.Methods))
- copy(copied[i].Methods, as.Methods)
- }
- }
-
- return copied
-}
diff --git a/internal/policy/util_test.go b/internal/policy/util_test.go
new file mode 100644
index 00000000000..460d0cfb119
--- /dev/null
+++ b/internal/policy/util_test.go
@@ -0,0 +1,64 @@
+package policy_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/TykTechnologies/tyk/internal/policy"
+ "github.com/TykTechnologies/tyk/user"
+)
+
+func TestMergeAllowedURLs(t *testing.T) {
+ svc := &policy.Service{}
+
+ session := &user.SessionState{}
+ policies := []user.Policy{
+ {
+ ID: "pol1",
+ AccessRights: map[string]user.AccessDefinition{
+ "a": {
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/user", Methods: []string{"GET"}},
+ {URL: "/companies", Methods: []string{"GET"}},
+ },
+ },
+ },
+ },
+ {
+ ID: "pol2",
+ AccessRights: map[string]user.AccessDefinition{
+ "a": {
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/user", Methods: []string{"POST", "PATCH", "PUT"}},
+ {URL: "/companies", Methods: []string{"POST"}},
+ {URL: "/admin", Methods: []string{"GET", "POST"}},
+ },
+ },
+ },
+ },
+ {
+ ID: "pol3",
+ AccessRights: map[string]user.AccessDefinition{
+ "a": {
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/admin/cache", Methods: []string{"DELETE"}},
+ },
+ },
+ },
+ },
+ }
+
+ session.SetCustomPolicies(policies)
+
+ assert.NoError(t, svc.Apply(session))
+
+ want := []user.AccessSpec{
+ {URL: "/user", Methods: []string{"GET", "POST", "PATCH", "PUT"}},
+ {URL: "/companies", Methods: []string{"GET", "POST"}},
+ {URL: "/admin", Methods: []string{"GET", "POST"}},
+ {URL: "/admin/cache", Methods: []string{"DELETE"}},
+ }
+
+ assert.Equal(t, want, session.AccessRights["a"].AllowedURLs)
+}
diff --git a/rpc/rpc_client.go b/rpc/rpc_client.go
index f1870ebe061..052baeb261b 100644
--- a/rpc/rpc_client.go
+++ b/rpc/rpc_client.go
@@ -49,7 +49,6 @@ var (
// UseSyncLoginRPC for tests where we dont need to execute as a goroutine
UseSyncLoginRPC bool
- connectionDialingWG sync.WaitGroup
AnalyticsSerializers []serializer.AnalyticsSerializer
)
@@ -254,14 +253,10 @@ func Connect(connConfig Config, suppressRegister bool, dispatcherFuncs map[strin
clientSingleton.OnConnect = onConnectFunc
clientSingleton.Conns = values.Config().RPCPoolSize
- if clientSingleton.Conns == 0 {
+ if clientSingleton.Conns <= 0 {
clientSingleton.Conns = 5
}
- for i := 0; i < clientSingleton.Conns; i++ {
- connectionDialingWG.Add(1)
- }
-
clientSingleton.Dial = func(addr string) (conn net.Conn, err error) {
dialer := &net.Dialer{
Timeout: 10 * time.Second,
@@ -298,8 +293,6 @@ func Connect(connConfig Config, suppressRegister bool, dispatcherFuncs map[strin
conn.Write([]byte("proto2"))
conn.Write([]byte{byte(len(connID))})
conn.Write([]byte(connID))
- // only mark as done is connection is established
- connectionDialingWG.Done()
return conn, nil
}
@@ -311,9 +304,8 @@ func Connect(connConfig Config, suppressRegister bool, dispatcherFuncs map[strin
if funcClientSingleton == nil {
funcClientSingleton = dispatcher.NewFuncClient(clientSingleton)
}
-
// wait until all the pool connections are dialed so we can call login
- connectionDialingWG.Wait()
+ clientSingleton.WaitForConnection()
handleLogin()
if !suppressRegister {
register()
diff --git a/rpc/rpc_client_test.go b/rpc/rpc_client_test.go
index 1cd00676547..e6a1b6bd739 100644
--- a/rpc/rpc_client_test.go
+++ b/rpc/rpc_client_test.go
@@ -2,7 +2,7 @@ package rpc
import "testing"
-func TestRecoveryFromEmregencyMode(t *testing.T) {
+func TestRecoveryFromEmergencyMode(t *testing.T) {
if IsEmergencyMode() {
t.Fatal("expected not to be in emergency mode before initiating login attempt")
}
diff --git a/rpc/synchronization_forcer.go b/rpc/synchronization_forcer.go
index a391d0f332f..07fd7bd0aaa 100644
--- a/rpc/synchronization_forcer.go
+++ b/rpc/synchronization_forcer.go
@@ -2,30 +2,55 @@ package rpc
import (
"errors"
+ "sync"
"github.com/TykTechnologies/tyk/apidef"
"github.com/TykTechnologies/tyk/storage"
)
type SyncronizerForcer struct {
- store *storage.RedisCluster
- getNodeDataFunc func() []byte
+ store *storage.RedisCluster
+ getNodeDataFunc func() []byte
+ isFirstConnection bool
}
+var (
+ syncForcerInstance *SyncronizerForcer
+ syncForcerOnce sync.Once
+)
+
// NewSyncForcer returns a new syncforcer with a connected redis with a key prefix synchronizer-group- for group synchronization control.
func NewSyncForcer(controller *storage.ConnectionHandler, getNodeDataFunc func() []byte) *SyncronizerForcer {
- sf := &SyncronizerForcer{}
- sf.getNodeDataFunc = getNodeDataFunc
- sf.store = &storage.RedisCluster{KeyPrefix: "synchronizer-group-", ConnectionHandler: controller}
- sf.store.Connect()
+ syncForcerOnce.Do(func() {
+ sf := &SyncronizerForcer{}
+ sf.store = &storage.RedisCluster{KeyPrefix: "synchronizer-group-", ConnectionHandler: controller}
+ sf.store.Connect()
+ sf.getNodeDataFunc = getNodeDataFunc
+ sf.isFirstConnection = true
+
+ syncForcerInstance = sf
+ })
+
+ if syncForcerInstance != nil {
+ syncForcerInstance.getNodeDataFunc = getNodeDataFunc
+ }
+
+ return syncForcerInstance
+}
+
+func (sf *SyncronizerForcer) SetFirstConnection(isFirstConnection bool) {
+ sf.isFirstConnection = isFirstConnection
+}
- return sf
+func (sf *SyncronizerForcer) GetIsFirstConnection() bool {
+ return sf.isFirstConnection
}
// GroupLoginCallback checks if the groupID key exists in the storage to turn on/off ForceSync param.
// If the the key doesn't exists in the storage, it creates it and set ForceSync to true
func (sf *SyncronizerForcer) GroupLoginCallback(userKey string, groupID string) interface{} {
- shouldForce := false
+ shouldForce := sf.isFirstConnection
+ sf.SetFirstConnection(false)
_, err := sf.store.GetKey(groupID)
if err != nil && errors.Is(err, storage.ErrKeyNotFound) {
diff --git a/test/http.go b/test/http.go
index 5c09b3e4389..e46463df46b 100644
--- a/test/http.go
+++ b/test/http.go
@@ -16,6 +16,7 @@ import (
"time"
)
+type TestCases []TestCase
type TestCase struct {
Host string `json:",omitempty"`
Method string `json:",omitempty"`
diff --git a/tests/policy/Taskfile.yml b/tests/policy/Taskfile.yml
new file mode 100644
index 00000000000..f5ab84d0aa9
--- /dev/null
+++ b/tests/policy/Taskfile.yml
@@ -0,0 +1,53 @@
+---
+version: "3"
+
+includes:
+ services:
+ taskfile: ../../docker/services/Taskfile.yml
+ dir: ../../docker/services
+
+vars:
+ coverage: policy.cov
+ testArgs: -v
+
+tasks:
+ test:
+ desc: "Run tests (requires redis)"
+ deps: [ services:up ]
+ cmds:
+ - defer:
+ task: services:down
+ - task: fmt
+ - go test {{.testArgs}} -count=1 -cover -coverprofile={{.coverage}} -coverpkg=./... ./...
+
+ bench:
+ desc: "Run benchmarks"
+ cmds:
+ - task: fmt
+ - go test {{.testArgs}} -count=1 -tags integration -run=^$ -bench=. -benchtime=10s -benchmem ./...
+
+ fmt:
+ internal: true
+ desc: "Invoke fmt"
+ cmds:
+ - goimports -w .
+ - go fmt ./...
+
+ cover:
+ desc: "Show source coverage"
+ aliases: [coverage, cov]
+ cmds:
+ - go tool cover -func={{.coverage}}
+
+ uncover:
+ desc: "Show uncovered source"
+ cmds:
+ - uncover {{.coverage}}
+
+ install:uncover:
+ desc: "Install uncover"
+ internal: true
+ env:
+ GOBIN: /usr/local/bin
+ cmds:
+ - go install github.com/gregoryv/uncover/...@latest
diff --git a/tests/policy/allowed_urls_test.go b/tests/policy/allowed_urls_test.go
new file mode 100644
index 00000000000..a81cc5c3ed2
--- /dev/null
+++ b/tests/policy/allowed_urls_test.go
@@ -0,0 +1,172 @@
+package policy
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/TykTechnologies/tyk/internal/policy"
+ "github.com/TykTechnologies/tyk/internal/uuid"
+ "github.com/TykTechnologies/tyk/test"
+ "github.com/TykTechnologies/tyk/user"
+)
+
+// TestAllowedURLs is an integration test verifying that allowed URLs from multiple applied policies are merged into the session's access rights.
+func TestAllowedURLs(t *testing.T) {
+ ts := StartTest(nil)
+ t.Cleanup(ts.Close)
+
+ policyBase := user.Policy{
+ ID: uuid.New(),
+ Per: 1,
+ Rate: 1000,
+ QuotaMax: 50,
+ QuotaRenewalRate: 3600,
+ OrgID: DefaultOrg,
+ AccessRights: map[string]user.AccessDefinition{
+ "api1": {
+ Versions: []string{"v1"},
+ Limit: user.APILimit{
+ QuotaMax: 100,
+ QuotaRenewalRate: 3600,
+ RateLimit: user.RateLimit{
+ Rate: 1000,
+ Per: 1,
+ },
+ },
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/user", Methods: []string{"GET"}},
+ {URL: "/companies", Methods: []string{"GET"}},
+ },
+ },
+ "api2": {
+ Versions: []string{"v1"},
+ Limit: user.APILimit{
+ QuotaMax: 200,
+ QuotaRenewalRate: 3600,
+ RateLimit: user.RateLimit{
+ Rate: 1000,
+ Per: 1,
+ },
+ },
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/user", Methods: []string{"POST", "PATCH", "PUT"}},
+ {URL: "/companies", Methods: []string{"POST"}},
+ {URL: "/admin", Methods: []string{"GET", "POST"}},
+ },
+ },
+ "api3": {
+ Versions: []string{"v1"},
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/admin/cache", Methods: []string{"DELETE"}},
+ },
+ },
+ },
+ }
+
+ policyWithPaths := user.Policy{
+ ID: uuid.New(),
+ Per: 1,
+ Rate: 1000,
+ QuotaMax: 50,
+ QuotaRenewalRate: 3600,
+ OrgID: DefaultOrg,
+ AccessRights: map[string]user.AccessDefinition{
+ "api1": {
+ Versions: []string{"v1"},
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/appended", Methods: []string{"GET"}},
+ },
+ },
+ "api2": {
+ Versions: []string{"v1"},
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/appended", Methods: []string{"GET"}},
+ },
+ },
+ "api3": {
+ Versions: []string{"v1"},
+ AllowedURLs: []user.AccessSpec{
+ {URL: "/appended", Methods: []string{"GET"}},
+ },
+ },
+ },
+ }
+
+ ts.Gw.SetPoliciesByID(policyBase, policyWithPaths)
+
+ // load APIs
+ ts.Gw.BuildAndLoadAPI(
+ func(spec *APISpec) {
+ spec.Name = "api 1"
+ spec.APIID = "api1"
+ spec.UseKeylessAccess = false
+ spec.Proxy.ListenPath = "/api1"
+ spec.OrgID = DefaultOrg
+ },
+ func(spec *APISpec) {
+ spec.Name = "api 2"
+ spec.APIID = "api2"
+ spec.UseKeylessAccess = false
+ spec.Proxy.ListenPath = "/api2"
+ spec.OrgID = DefaultOrg
+ },
+ func(spec *APISpec) {
+ spec.Name = "api 3"
+ spec.APIID = "api3"
+ spec.UseKeylessAccess = false
+ spec.Proxy.ListenPath = "/api3"
+ spec.OrgID = DefaultOrg
+ },
+ )
+
+ // create test session
+ session := &user.SessionState{
+ ApplyPolicies: []string{policyBase.ID, policyWithPaths.ID},
+ OrgID: DefaultOrg,
+ AccessRights: map[string]user.AccessDefinition{
+ "api1": {
+ APIID: "api1",
+ Versions: []string{"v1"},
+ },
+ "api2": {
+ APIID: "api2",
+ Versions: []string{"v1"},
+ },
+ "api3": {
+ APIID: "api3",
+ Versions: []string{"v1"},
+ },
+ },
+ }
+
+ // create key
+ key := uuid.New()
+ ts.Run(t, test.TestCase{Method: http.MethodPost, Path: "/tyk/keys/" + key, Data: session, AdminAuth: true, Code: 200})
+
+ // check key session
+ t.Run("Check key session", func(t *testing.T) {
+ ts.Run(t, []test.TestCase{
+ {
+ Method: http.MethodGet,
+ Path: fmt.Sprintf("/tyk/keys/%v?org_id=%v", key, DefaultOrg),
+ AdminAuth: true,
+ Code: http.StatusOK,
+ BodyMatchFunc: func(data []byte) bool {
+ session := user.SessionState{}
+ assert.NoError(t, json.Unmarshal(data, &session))
+
+ for _, apiName := range []string{"api1", "api2", "api3"} {
+ want := policy.MergeAllowedURLs(policyBase.AccessRights[apiName].AllowedURLs, policyWithPaths.AccessRights[apiName].AllowedURLs)
+ assert.Equal(t, want, session.AccessRights[apiName].AllowedURLs, fmt.Sprintf("api %q allowed urls don't match", apiName))
+ }
+
+ return true
+ },
+ },
+ }...)
+ })
+}
diff --git a/tests/policy/shim.go b/tests/policy/shim.go
new file mode 100644
index 00000000000..cf6fe5f61af
--- /dev/null
+++ b/tests/policy/shim.go
@@ -0,0 +1,9 @@
+package policy
+
+import "github.com/TykTechnologies/tyk/gateway"
+
+const DefaultOrg = "default-org-id"
+
+type APISpec = gateway.APISpec
+
+var StartTest = gateway.StartTest
diff --git a/tests/quota/Taskfile.yml b/tests/quota/Taskfile.yml
index d65f4e35699..a0c0fa22df7 100644
--- a/tests/quota/Taskfile.yml
+++ b/tests/quota/Taskfile.yml
@@ -2,7 +2,9 @@
version: "3"
includes:
- services: ../../docker/services/Taskfile.yml
+ services:
+ taskfile: ../../docker/services/Taskfile.yml
+ dir: ../../docker/services
vars:
coverage: quota.cov
diff --git a/tests/rate/per_api_limit_test.go b/tests/rate/per_api_limit_test.go
index 3935bf9f30f..e36bca1a9f9 100644
--- a/tests/rate/per_api_limit_test.go
+++ b/tests/rate/per_api_limit_test.go
@@ -2,9 +2,11 @@ package rate_test
import (
"encoding/json"
- "fmt"
+ "net/http"
"testing"
+ "github.com/TykTechnologies/tyk/apidef"
+
"github.com/stretchr/testify/assert"
. "github.com/TykTechnologies/tyk/gateway"
@@ -12,7 +14,7 @@ import (
"github.com/TykTechnologies/tyk/test"
)
-func buildPathRateLimitAPI(tb testing.TB, gw *Gateway, pathName string, rate, per int64) {
+func buildPathRateLimitAPI(tb testing.TB, gw *Gateway, per int64, rateLimits []apidef.RateLimitMeta) {
tb.Helper()
gw.BuildAndLoadAPI(func(spec *APISpec) {
@@ -23,39 +25,34 @@ func buildPathRateLimitAPI(tb testing.TB, gw *Gateway, pathName string, rate, pe
spec.GlobalRateLimit.Per = float64(per)
version := spec.VersionData.Versions["v1"]
- versionJSON := []byte(fmt.Sprintf(`{
- "use_extended_paths": true,
- "extended_paths": {
- "rate_limit": [{
- "method": "GET",
- "rate": %d,
- "per": %d
- }]
- }
- }`, rate, per))
- err := json.Unmarshal(versionJSON, &version)
- assert.NoError(tb, err)
-
- version.ExtendedPaths.RateLimit[0].Path = pathName
+ version.UseExtendedPaths = true
+ version.ExtendedPaths.RateLimit = rateLimits
spec.VersionData.Versions["v1"] = version
})
}
-func testRateLimit(tb testing.TB, ts *Test, testPath string, want int) {
+func testRateLimit(tb testing.TB, ts *Test, testPath string, testMethod string, want int) {
tb.Helper()
// single request
_, _ = ts.Run(tb, test.TestCase{
- Path: "/ratelimit" + testPath,
- BodyMatch: fmt.Sprintf(`"Url":"%s"`, testPath),
+ Path: "/ratelimit" + testPath,
+ Method: testMethod,
+ BodyMatchFunc: func(bytes []byte) bool {
+ res := map[string]any{}
+ err := json.Unmarshal(bytes, &res)
+ assert.NoError(tb, err)
+ return assert.Equal(tb, testPath, res["Url"]) && assert.Equal(tb, testMethod, res["Method"])
+ },
})
// and 50 more
var ok, failed int = 1, 0
for i := 0; i < 50; i++ {
res, err := ts.Run(tb, test.TestCase{
- Path: "/ratelimit" + testPath,
+ Path: "/ratelimit" + testPath,
+ Method: testMethod,
})
assert.NoError(tb, err)
@@ -82,8 +79,16 @@ func TestPerAPILimit(t *testing.T) {
forPath := "/" + uuid.New()
testPath := "/miss"
- buildPathRateLimitAPI(t, ts.Gw, forPath, 30, 60)
- testRateLimit(t, ts, testPath, 15)
+ rateLimits := []apidef.RateLimitMeta{
+ {
+ Method: http.MethodGet,
+ Path: forPath,
+ Rate: 30,
+ Per: 60,
+ },
+ }
+ buildPathRateLimitAPI(t, ts.Gw, 60, rateLimits)
+ testRateLimit(t, ts, testPath, http.MethodGet, 15)
})
t.Run("hit per-endpoint rate limit", func(t *testing.T) {
@@ -93,7 +98,40 @@ func TestPerAPILimit(t *testing.T) {
forPath := "/" + uuid.New()
testPath := forPath
- buildPathRateLimitAPI(t, ts.Gw, forPath, 30, 60)
- testRateLimit(t, ts, testPath, 30)
+ rateLimits := []apidef.RateLimitMeta{
+ {
+ Method: http.MethodGet,
+ Path: forPath,
+ Rate: 30,
+ Per: 60,
+ },
+ }
+ buildPathRateLimitAPI(t, ts.Gw, 60, rateLimits)
+ testRateLimit(t, ts, testPath, http.MethodGet, 30)
+ })
+
+ t.Run("[TT-12990][regression] hit per-endpoint per-method rate limit", func(t *testing.T) {
+ ts := StartTest(nil)
+ defer ts.Close()
+
+ forPath := "/anything/" + uuid.New()
+ testPath := forPath
+ rateLimits := []apidef.RateLimitMeta{
+ {
+ Method: http.MethodGet,
+ Path: forPath,
+ Rate: 20,
+ Per: 60,
+ },
+ {
+ Method: http.MethodPost,
+ Path: forPath,
+ Rate: 30,
+ Per: 60,
+ },
+ }
+ buildPathRateLimitAPI(t, ts.Gw, 60, rateLimits)
+ testRateLimit(t, ts, testPath, http.MethodGet, 20)
+ testRateLimit(t, ts, testPath, http.MethodPost, 30)
})
}
diff --git a/user/custom_policies.go b/user/custom_policies.go
index bdbb7f3d12a..3ac8c852b92 100644
--- a/user/custom_policies.go
+++ b/user/custom_policies.go
@@ -6,10 +6,26 @@ import (
"fmt"
)
+// CustomPolicies returns a map of custom policies on the session.
+// To preserve policy order, use GetCustomPolicies instead.
func (s *SessionState) CustomPolicies() (map[string]Policy, error) {
+ customPolicies, err := s.GetCustomPolicies()
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string]Policy, len(customPolicies))
+ for i := 0; i < len(customPolicies); i++ {
+ result[customPolicies[i].ID] = customPolicies[i]
+ }
+
+ return result, nil
+}
+
+// GetCustomPolicies is like CustomPolicies but returns the list, preserving order.
+func (s *SessionState) GetCustomPolicies() ([]Policy, error) {
var (
customPolicies []Policy
- ret map[string]Policy
)
metadataPolicies, found := s.MetaData["policies"].([]interface{})
@@ -22,16 +38,14 @@ func (s *SessionState) CustomPolicies() (map[string]Policy, error) {
return nil, fmt.Errorf("failed to marshal metadata policies: %w", err)
}
- _ = json.Unmarshal(polJSON, &customPolicies)
-
- ret = make(map[string]Policy, len(customPolicies))
- for i := 0; i < len(customPolicies); i++ {
- ret[customPolicies[i].ID] = customPolicies[i]
+ if err := json.Unmarshal(polJSON, &customPolicies); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal metadata policies: %w", err)
}
- return ret, nil
+ return customPolicies, err
}
+// SetCustomPolicies sets custom policies into session metadata.
func (s *SessionState) SetCustomPolicies(list []Policy) {
if s.MetaData == nil {
s.MetaData = make(map[string]interface{})
diff --git a/user/session.go b/user/session.go
index 929e8efe2d3..ec6ebfbf11b 100644
--- a/user/session.go
+++ b/user/session.go
@@ -10,11 +10,8 @@ import (
"github.com/TykTechnologies/graphql-go-tools/pkg/graphql"
"github.com/TykTechnologies/tyk/apidef"
- logger "github.com/TykTechnologies/tyk/log"
)
-var log = logger.Get()
-
type HashType string
const (