diff --git a/.fossa.yml b/.fossa.yml
index d8ea192bc..a813f66c1 100644
--- a/.fossa.yml
+++ b/.fossa.yml
@@ -2,4 +2,10 @@ version: 3
 server: https://app.fossa.com
 project:
   id: "splunk-connect-for-snmp"
-  team: "TA-Automation"
\ No newline at end of file
+  team: "TA-Automation"
+
+paths:
+  exclude:
+    - ui_tests
+    - test
+    - integration_tests
\ No newline at end of file
diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml
index 9d27fc14e..df409b653 100644
--- a/.github/workflows/ci-main.yaml
+++ b/.github/workflows/ci-main.yaml
@@ -40,7 +40,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - name: run fossa anlyze and create report
+      - name: run fossa analyze and create report
        run: |
          curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash
          fossa analyze --debug
@@ -48,7 +48,7 @@
        env:
          FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }}
      - name: upload THIRDPARTY file
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
          name: THIRDPARTY
          path: /tmp/THIRDPARTY
@@ -63,14 +63,14 @@
    if: (github.actor != 'dependabot[bot]')
    steps:
      - uses: actions/checkout@v4
-      - uses: returntocorp/semgrep-action@v1
+      - uses: semgrep/semgrep-action@v1
        with:
          publishToken: ${{ secrets.SEMGREP_APP_TOKEN }}
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
      - uses: pre-commit/action@v3.0.0
  test-unit:
    name: Test Unit Python ${{ matrix.python-version }}
@@ -78,11 +78,11 @@
    strategy:
      matrix:
        python-version:
-          - 3.9
+          - "3.10"
    steps:
      - uses: actions/checkout@v4
      - name: Setup python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install Poetry
@@ -102,7 +102,7 @@
          fail_ci_if_error: true
          path_to_write_report: ./coverage/codecov_report.txt
          verbose: true
-      - uses: actions/upload-artifact@v3 # upload test results
+      - uses: actions/upload-artifact@v4 # upload test results
        if: success() || failure() # run this step even if previous step failed
        with:
          name: test-results-unit-python_${{ matrix.python-version }}
@@ -144,7 +144,7 @@
          sudo apt-get install snmp -y
          sudo apt-get install python3-dev -y
      - name: run automatic_setup.sh
-        run: integration_tests/automatic_setup.sh
+        run: integration_tests/automatic_setup.sh integration
      - name: run tests
        working-directory: integration_tests
        run: |
diff --git a/.github/workflows/ci-release-pr.yaml b/.github/workflows/ci-release-pr.yaml
index f465fe9db..269549f2c 100644
--- a/.github/workflows/ci-release-pr.yaml
+++ b/.github/workflows/ci-release-pr.yaml
@@ -70,7 +70,7 @@ jobs:
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
          cache-to: type=inline
-      - uses: actions/setup-node@v3
+      - uses: actions/setup-node@v4
        with:
          node-version: "14"
diff --git a/.github/workflows/ci-release.yaml b/.github/workflows/ci-release.yaml
index 65a20feb3..77316c5ba 100644
--- a/.github/workflows/ci-release.yaml
+++ b/.github/workflows/ci-release.yaml
@@ -72,7 +72,7 @@ jobs:
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
          cache-to: type=inline
-      - uses: actions/setup-node@v3
+      - uses: actions/setup-node@v4
        with:
          node-version: "14"
      - name: Semantic Release
diff --git a/.github/workflows/ci-ui-tests.yaml b/.github/workflows/ci-ui-tests.yaml
new file mode 100644
index 000000000..037a3da13
--- /dev/null
+++ b/.github/workflows/ci-ui-tests.yaml
@@ -0,0 +1,95 @@
+name: ui-tests
+on:
+  push:
+    branches:
+      - "main"
+      - "develop"
+      - "next"
+      - "sc4snmp-ui-tests"
+  pull_request:
+    branches:
+      - "main"
+      - "develop"
+      - "next"
+jobs:
+  ui-tests-check:
+    name: Check if ui tests should run
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Project
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 2
+      - name: Get commit message
+        id: get_commit_message
+        run: |
+          if [[ '${{ github.event_name }}' == 'push' ]]; then
+            echo ::set-output name=commit_message::$(git log --format=%B -n 1 HEAD)
+          elif [[ '${{ github.event_name }}' == 'pull_request' ]]; then
+            echo ::set-output name=commit_message::$(git log --format=%B -n 1 HEAD^2)
+          fi
+    outputs:
+      commit_message: ${{ steps.get_commit_message.outputs.commit_message }}
+
+  run-ui-e2e-tests:
+    name: run UI e2e tests
+    needs:
+      - ui-tests-check
+    runs-on: ubuntu-latest
+    if: "contains(needs.ui-tests-check.outputs.commit_message, '[run-ui-tests]')"
+    timeout-minutes: 120
+    env:
+      CI_EXECUTION_TYPE: ci
+
+    strategy:
+      matrix:
+        execution-type: ["basic", "extended"]
+
+    steps:
+      - name: Checkout Project
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.10"
+
+      - name: remove unused docker images
+        run: |
+          echo "docker rmi"
+          docker rmi $(docker images -q) || true
+          docker images
+          echo "check disk free space"
+          df -h
+
+      - name: prepare values.yaml for configuration storing
+        working-directory: integration_tests
+        run: |
+          cp ./../ui_tests/config/ui_values.yaml ./values.yaml
+          sed -i "s|/home/splunker|$(pwd)|g" values.yaml
+
+      - name: install microk8s
+        run: |
+          sudo snap install microk8s --classic --channel=1.25/stable
+          sudo apt-get install snmp -y
+          sudo apt-get install python3-dev -y
+
+      - name: run automatic_setup.sh
+        run: integration_tests/automatic_setup.sh
+
+      - name: install dependencies
+        working-directory: ui_tests
+        run: |
+          pip install -r requirements.txt
+          export PATH="/home/ubuntu/.local/bin:$PATH"
+
+      - name: run tests
+        working-directory: ui_tests
+        run: |
+          sudo microk8s kubectl get pod -n sc4snmp
+          echo "check if UI is available - curl"
+          curl http://localhost:30001
+          echo "run tests"
+          pytest -vvv --splunk-user=admin --splunk-password="changeme2" --splunk-host="localhost" --device-simulator="$(hostname -I | cut -d " " -f1)" -k ${{ matrix.execution-type }}
+
diff --git a/.github/workflows/mike.yaml b/.github/workflows/mike.yaml
index cff2363e2..54cdbb74d 100644
--- a/.github/workflows/mike.yaml
+++ b/.github/workflows/mike.yaml
@@ -45,7 +45,7 @@ jobs:
          sudo apt install gnome-keyring
          BRANCH=$(echo $GITHUB_REF | cut -d / -f 3)
          echo $BRANCH
-          pip3 install poetry=="1.2.2"
+          pip3 install poetry=="1.5.1"
          poetry install
          poetry run pip install 'setuptools==65.6.3'
          poetry run mike deploy -p $BRANCH
diff --git a/.github/workflows/offline-installation.yaml b/.github/workflows/offline-installation.yaml
index 42c54ee64..13038c796 100644
--- a/.github/workflows/offline-installation.yaml
+++ b/.github/workflows/offline-installation.yaml
@@ -19,7 +19,7 @@ jobs:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Setup python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install ruamel.yaml
@@ -36,7 +36,7 @@ jobs:
          GET_YAML_FIELD_SCRIPT: get_yaml_fields.py
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Create artifact
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
          name: ${{ env.ARTIFACT_NAME }}
          path: /tmp/package/packages
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fcff951bf..a96a1ee4a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,19 @@
 
 ## Unreleased
 
+## [1.10.0]
+
+### Changed
+- add sc4snmp ui
+- add reverse dns lookup in traps
+- upgrade chart dependencies: redis to ~18.5.0, mibserver to 1.15.7
+- add beta support for docker-compose deployment
+- add log messages for invalid traps configuration
+- review and update of documentation
+
+### Fixed
+- error handling to be more precise
+
 ## [1.9.3]
 
 ### Changed
diff --git a/Dockerfile b/Dockerfile
index 6db68e491..1f401e456 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM python:3.10.0-bullseye as base
+FROM python:3.10.0-bullseye AS base
 
 ENV PYTHONFAULTHANDLER=1 \
     PYTHONHASHSEED=random \
@@ -6,7 +6,7 @@ ENV PYTHONFAULTHANDLER=1 \
 RUN mkdir /app
 WORKDIR /app
 
-FROM base as builder
+FROM base AS builder
 
 RUN pip install --upgrade pip ;\
     pip install poetry
@@ -18,10 +18,7 @@ RUN poetry config virtualenvs.in-project true ;\
     . /app/.venv/bin/activate ;\
     pip install dist/*.whl
 
-
-
-
-FROM base as final
+FROM base AS final
 
 COPY --from=builder /app/.venv /app/.venv
 COPY entrypoint.sh ./
diff --git a/charts/splunk-connect-for-snmp/Chart.lock b/charts/splunk-connect-for-snmp/Chart.lock
index 35165f04a..364fc0e71 100644
--- a/charts/splunk-connect-for-snmp/Chart.lock
+++ b/charts/splunk-connect-for-snmp/Chart.lock
@@ -4,9 +4,9 @@ dependencies:
   version: 13.18.5
 - name: redis
   repository: https://charts.bitnami.com/bitnami
-  version: 18.1.6
+  version: 18.5.0
 - name: mibserver
   repository: https://pysnmp.github.io/mibs/charts/
-  version: 1.15.5
-digest: sha256:a3b073e1425f293d0df10b58f5c4ea599a6ac8b550affc6be45c6dcd01478720
-generated: "2023-10-23T01:58:29.993501241Z"
+  version: 1.15.7
+digest: sha256:692c53672741f1c2f75c021c3f75ae45290195e5efaeffc9a06b5cb5b0d6ac12
+generated: "2023-12-11T13:21:44.273403415Z"
diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml
index c5c8b2c3b..8e833eff6 100644
--- a/charts/splunk-connect-for-snmp/Chart.yaml
+++ b/charts/splunk-connect-for-snmp/Chart.yaml
@@ -14,19 +14,19 @@ type: application
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
 
-version: 1.9.3
+version: 1.10.0-beta.6
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "1.9.3" +appVersion: "1.10.0-beta.6" # dependencies: - name: mongodb version: ~13.18.0 repository: https://charts.bitnami.com/bitnami - name: redis - version: ~18.1.0 + version: ~18.5.0 repository: https://charts.bitnami.com/bitnami - name: mibserver version: ~1.15 diff --git a/charts/splunk-connect-for-snmp/templates/inventory/job.yaml b/charts/splunk-connect-for-snmp/templates/inventory/job.yaml index 0bae393d8..675b2defe 100644 --- a/charts/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/charts/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -48,6 +48,8 @@ spec: value: {{ .Values.scheduler.logLevel | default "INFO" }} - name: CHAIN_OF_TASKS_EXPIRY_TIME value: {{ .Values.scheduler.tasksExpiryTime | quote }} + - name: CONFIG_FROM_MONGO + value: {{ quote .Values.UI.enable | default "false" }} volumeMounts: - name: config mountPath: "/app/config" diff --git a/charts/splunk-connect-for-snmp/templates/ui/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/ui/_helpers.tpl new file mode 100644 index 000000000..b56314c55 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/ui/_helpers.tpl @@ -0,0 +1,117 @@ +{{/* +Create a job config template which will be included in configMap for UI backend. +*/}} +{{- define "splunk-connect-for-snmp.job-config" -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "splunk-connect-for-snmp.inventory.fullname" . }} + labels: + {{- include "splunk-connect-for-snmp.inventory.labels" . | nindent 4 }} +spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + {{- with .Values.inventory.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + + labels: + {{- include "splunk-connect-for-snmp.inventory.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-inventory + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + ["inventory"] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: {{ include "splunk-connect-for-snmp.redis_url" . }} + - name: INVENTORY_PATH + value: /app/inventory/inventory.csv + - name: CELERY_BROKER_URL + value: {{ include "splunk-connect-for-snmp.celery_url" . }} + - name: MONGO_URI + value: {{ include "splunk-connect-for-snmp.mongo_uri" . }} + - name: MIB_SOURCES + value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/asn1/@mib@" + - name: MIB_INDEX + value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/index.csv" + - name: MIB_STANDARD + value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/standard.txt" + - name: LOG_LEVEL + value: {{ .Values.scheduler.logLevel | default "INFO" }} + - name: CONFIG_FROM_MONGO + value: {{ quote .Values.UI.enable | default "false" }} + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: inventory + mountPath: "/app/inventory" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: {{ include "splunk-connect-for-snmp.name" . 
+            # An array of keys from the ConfigMap to create as files
+            items:
+              - key: "config.yaml"
+                path: "config.yaml"
+        - name: inventory
+          configMap:
+            # Provide the name of the ConfigMap you want to mount.
+            name: {{ include "splunk-connect-for-snmp.name" . }}-inventory
+            # An array of keys from the ConfigMap to create as files
+            items:
+              - key: "inventory.csv"
+                path: "inventory.csv"
+        - name: pysnmp-cache-volume
+          emptyDir: {}
+        - name: tmp
+          emptyDir: {}
+      restartPolicy: OnFailure
+{{- end }}
+
+{{/*
+Return the full image for the UI front end.
+*/}}
+{{- define "splunk-connect-for-snmp.uiFrontImage" -}}
+{{ .Values.UI.frontEnd.repository }}:{{ .Values.UI.frontEnd.tag | default "latest" }}
+{{- end }}
+
+{{/*
+Return the full image for the UI back end.
+*/}}
+{{- define "splunk-connect-for-snmp.uiBackImage" -}}
+{{ .Values.UI.backEnd.repository }}:{{ .Values.UI.backEnd.tag | default "latest" }}
+{{- end }}
+
+{{- define "splunk-connect-for-snmp-ui.celery_url" -}}
+{{- printf "redis://%s-redis-headless:6379/2" .Release.Name }}
+{{- end }}
+
+{{- define "splunk-connect-for-snmp-ui.redis_url" -}}
+{{- printf "redis://%s-redis-headless:6379/3" .Release.Name }}
+{{- end }}
+
+{{- define "splunk-connect-for-snmp-ui.hostMountPath" -}}
+/var/values_dir
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml b/charts/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml
new file mode 100644
index 000000000..6f653c886
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml
@@ -0,0 +1,11 @@
+{{ if .Values.UI.enable }}
+{{ $ui := .Values.UI }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "splunk-connect-for-snmp.name" . }}-job-configmap
+  namespace: {{ .Release.Namespace | quote }}
+data:
+  job_config: |
+    {{- include "splunk-connect-for-snmp.job-config" . | nindent 4 }}
+{{- end }}
diff --git a/charts/splunk-connect-for-snmp/templates/ui/deployment-backend-worker.yaml b/charts/splunk-connect-for-snmp/templates/ui/deployment-backend-worker.yaml
new file mode 100644
index 000000000..23a01ead9
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/deployment-backend-worker.yaml
@@ -0,0 +1,53 @@
+{{ if .Values.UI.enable }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ui-backend-worker-deployment
+  labels:
+    app: ui-backend-worker
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ui-backend-worker
+  template:
+    metadata:
+      labels:
+        app: ui-backend-worker
+    spec:
+      containers:
+        - name: ui-backend-worker
+          image: {{ include "splunk-connect-for-snmp.uiBackImage" . | quote }}
+          imagePullPolicy: {{ .Values.UI.backEnd.pullPolicy }}
+          command: ["sh","-c","/celery_start.sh"]
+          env:
+            - name: MONGO_URI
+              value: {{ include "splunk-connect-for-snmp.mongo_uri" . }}
+            - name: REDIS_URL
+              value: {{ include "splunk-connect-for-snmp-ui.redis_url" . }}
+            - name: JOB_CONFIG_PATH
+              value: /config/job_config.yaml
+            - name: JOB_NAMESPACE
+              value: sc4snmp
+            - name: CELERY_BROKER_URL
+              value: {{ include "splunk-connect-for-snmp-ui.celery_url" . }}
+            - name: VALUES_DIRECTORY
+              {{- if .Values.UI.valuesFileDirectory }}
+              value: {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }}
+              {{- else }}
+              value:
+              {{- end }}
+          ports:
+            - containerPort: 5000
+          volumeMounts:
+            - name: backend-configmap
+              mountPath: /config
+      serviceAccountName: job-robot
+      volumes:
+        - name: backend-configmap
+          configMap:
+            name: {{ include "splunk-connect-for-snmp.name" . }}-job-configmap
+            items:
+              - key: job_config
+                path: job_config.yaml
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/deployment-backend.yaml b/charts/splunk-connect-for-snmp/templates/ui/deployment-backend.yaml
new file mode 100644
index 000000000..b5bc0a490
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/deployment-backend.yaml
@@ -0,0 +1,88 @@
+{{ if .Values.UI.enable }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ui-backend-deployment
+  labels:
+    app: ui-backend
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ui-backend
+  template:
+    metadata:
+      labels:
+        app: ui-backend
+    spec:
+      securityContext:
+        runAsUser: 10000
+        runAsGroup: 10000
+        fsGroup: 10000
+      {{- if .Values.UI.valuesFileDirectory }}
+      initContainers:
+        - name: patch-log-dirs
+          image: {{ .Values.UI.init.repository }}
+          imagePullPolicy: {{ .Values.UI.init.pullPolicy }}
+          command: [ 'sh', '-c', '
+            mkdir -p {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }};
+            chmod -v g+rwxs {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }};
+            if [ -d "{{ include "splunk-connect-for-snmp-ui.hostMountPath" . }}" ];
+            then
+                setfacl -n -Rm d:m::rwx,m::rwx,d:g:10000:rwx,g:10000:rwx {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }};
+            fi;' ]
+          securityContext:
+            runAsUser: 0
+          volumeMounts:
+            - name: values-directory
+              mountPath: {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }}
+      {{- end }}
+      containers:
+        - name: ui-backend
+          image: {{ include "splunk-connect-for-snmp.uiBackImage" . | quote }}
+          imagePullPolicy: {{ .Values.UI.backEnd.pullPolicy }}
+          command: ["sh","-c","/flask_start.sh"]
+          env:
+            - name: MONGO_URI
+              value: {{ include "splunk-connect-for-snmp.mongo_uri" . }}
+            - name: REDIS_URL
+              value: {{ include "splunk-connect-for-snmp-ui.redis_url" . }}
+            - name: JOB_CONFIG_PATH
+              value: /config/job_config.yaml
+            - name: JOB_NAMESPACE
+              value: sc4snmp
+            - name: CELERY_BROKER_URL
+              value: {{ include "splunk-connect-for-snmp-ui.celery_url" . }}
+            - name: VALUES_DIRECTORY
+              {{- if .Values.UI.valuesFileDirectory }}
+              value: {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }}
+              {{- else }}
+              value:
+              {{- end }}
+            - name: VALUES_FILE
+              value: {{ .Values.UI.valuesFileName }}
+            - name: KEEP_TEMP_FILES
+              value: {{ quote .Values.UI.keepSectionFiles | default "false" }}
+          ports:
+            - containerPort: 5000
+          volumeMounts:
+            - name: backend-configmap
+              mountPath: /config
+            {{- if .Values.UI.valuesFileDirectory }}
+            - name: values-directory
+              mountPath: {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }}
+            {{- end }}
+      serviceAccountName: job-robot
+      volumes:
+        - name: backend-configmap
+          configMap:
+            name: {{ include "splunk-connect-for-snmp.name" . }}-job-configmap
+            items:
+              - key: job_config
+                path: job_config.yaml
+        {{- if .Values.UI.valuesFileDirectory }}
+        - name: values-directory
+          hostPath:
+            path: {{ .Values.UI.valuesFileDirectory }}
+        {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/deployment-frontend.yaml b/charts/splunk-connect-for-snmp/templates/ui/deployment-frontend.yaml
new file mode 100644
index 000000000..43ceab6a2
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/deployment-frontend.yaml
@@ -0,0 +1,29 @@
+{{ if .Values.UI.enable }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ui-frontend-deployment
+  labels:
+    app: ui-frontend
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ui-frontend
+  template:
+    metadata:
+      labels:
+        app: ui-frontend
+    spec:
+      containers:
+        - name: ui-frontend
+          image: {{ include "splunk-connect-for-snmp.uiFrontImage" . | quote }}
+          imagePullPolicy: {{ .Values.UI.frontEnd.pullPolicy }}
+          env:
+            - name: REACT_APP_FLASK_PORT
+              value: {{ quote .Values.UI.backEnd.NodePort }}
+            - name: DEMO_PORT
+              value: {{ quote .Values.UI.frontEnd.NodePort }}
+          ports:
+            - containerPort: {{ .Values.UI.frontEnd.NodePort }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/revert-patch-log-dirs.yaml b/charts/splunk-connect-for-snmp/templates/ui/revert-patch-log-dirs.yaml
new file mode 100644
index 000000000..ce5e766cd
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/revert-patch-log-dirs.yaml
@@ -0,0 +1,27 @@
+{{- if .Values.UI.valuesFileDirectory }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: revert-patch-log-dirs
+  annotations:
+    "helm.sh/hook": post-delete
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+spec:
+  restartPolicy: Never
+  containers:
+    - name: revert-patch-log-dirs
+      image: {{ .Values.UI.init.repository }}
+      imagePullPolicy: {{ .Values.UI.init.pullPolicy }}
+      securityContext:
+        runAsUser: 0
+      command: ['sh', '-c', '
+        setfacl --recursive --remove-all {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }};
+        ']
+      volumeMounts:
+        - name: values-directory
+          mountPath: {{ include "splunk-connect-for-snmp-ui.hostMountPath" . }}
+  volumes:
+    - name: values-directory
+      hostPath:
+        path: {{ .Values.UI.valuesFileDirectory }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/role-binding.yaml b/charts/splunk-connect-for-snmp/templates/ui/role-binding.yaml
new file mode 100644
index 000000000..a2c0caa41
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/role-binding.yaml
@@ -0,0 +1,15 @@
+{{ if .Values.UI.enable }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: job-robot
+  namespace: sc4snmp
+subjects:
+- kind: ServiceAccount
+  name: job-robot # Name of the ServiceAccount
+  namespace: sc4snmp
+roleRef:
+  kind: Role # This must be Role or ClusterRole
+  name: job-robot # This must match the name of the Role or ClusterRole you wish to bind to
+  apiGroup: rbac.authorization.k8s.io
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/role.yaml b/charts/splunk-connect-for-snmp/templates/ui/role.yaml
new file mode 100644
index 000000000..bb4c29c0d
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/role.yaml
@@ -0,0 +1,14 @@
+{{ if .Values.UI.enable }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  namespace: sc4snmp
+  name: job-robot
+rules:
+- apiGroups: [""] # "" indicates the core API group
+  resources: ["pods"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["batch", "extensions"]
+  resources: ["jobs"]
+  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/service-account.yaml b/charts/splunk-connect-for-snmp/templates/ui/service-account.yaml
new file mode 100644
index 000000000..e92e9b562
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/service-account.yaml
@@ -0,0 +1,7 @@
+{{ if .Values.UI.enable }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: job-robot
+  namespace: sc4snmp
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/service-backend.yaml b/charts/splunk-connect-for-snmp/templates/ui/service-backend.yaml
new file mode 100644
index 000000000..c2a209d18
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/service-backend.yaml
@@ -0,0 +1,14 @@
+{{ if .Values.UI.enable }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: ui-backend-entrypoint
+spec:
+  selector:
+    app: ui-backend
+  type: NodePort
+  ports:
+    - port: 5000
+      targetPort: 5000
+      nodePort: {{ .Values.UI.backEnd.NodePort }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/ui/service-frontend.yaml b/charts/splunk-connect-for-snmp/templates/ui/service-frontend.yaml
new file mode 100644
index 000000000..56caaffa7
--- /dev/null
+++ b/charts/splunk-connect-for-snmp/templates/ui/service-frontend.yaml
@@ -0,0 +1,14 @@
+{{ if .Values.UI.enable }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: ui-frontend-entrypoint
+spec:
+  type: NodePort
+  selector:
+    app: ui-frontend
+  ports:
+    - port: {{ .Values.UI.frontEnd.NodePort }}
+      targetPort: {{ .Values.UI.frontEnd.NodePort }}
+      nodePort: {{ .Values.UI.frontEnd.NodePort }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl
index 6c6f11109..6eeb5e578 100644
--- a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl
+++ b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl
@@ -181,4 +181,10 @@ Common labels
   value: {{ .Values.worker.trap.concurrency | default "2" | quote }}
 - name: PREFETCH_COUNT
   value: {{ .Values.worker.trap.prefetch | default "1" | quote }}
+- name: RESOLVE_TRAP_ADDRESS
+  value: {{ .Values.worker.trap.resolveAddress.enabled | default "false" | quote }}
+- name: MAX_DNS_CACHE_SIZE_TRAPS
+  value: {{ .Values.worker.trap.resolveAddress.cacheSize | default "500" | quote }}
+- name: TTL_DNS_CACHE_TRAPS
+  value: {{ .Values.worker.trap.resolveAddress.cacheTTL | default "1800" | quote }}
 {{- end }}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/values.yaml b/charts/splunk-connect-for-snmp/values.yaml
index 085e148e4..95ee9a978 100644
--- a/charts/splunk-connect-for-snmp/values.yaml
+++ b/charts/splunk-connect-for-snmp/values.yaml
@@ -17,6 +17,37 @@ image:
 # Secrets to attach to the respective serviceaccount to pull docker images
 imagePullSecrets: []
 
+# SC4SNMP UI configuration
+UI:
+  enable: false
+  frontEnd:
+    NodePort: 30001
+    repository: ghcr.io/splunk/sc4snmp-ui/frontend/container
+    tag: "main"
+    pullPolicy: "Always"
+  backEnd:
+    NodePort: 30002
+    repository: ghcr.io/splunk/sc4snmp-ui/backend/container
+    tag: "main"
+    pullPolicy: "Always"
+  init:
+    repository: registry.access.redhat.com/ubi9/ubi
+    pullPolicy: IfNotPresent
+
+  # valuesFileDirectory is obligatory if the UI is used. It is the location of values.yaml and of the configuration files generated from the UI.
+  valuesFileDirectory: ""
+
+  # valuesFileName is the exact name of the yaml file with the user's configuration, located inside the directory specified in
+  # valuesFileDirectory. It is optional. If it is provided, this file will be updated with the configuration from the UI.
+  # If valuesFileName is empty, or the provided file name can't be found inside the valuesFileDirectory directory,
+  # then the configuration from the UI will be saved in a few files, one file per section, inside the
+  # valuesFileDirectory directory.
+  valuesFileName: ""
+
+  # If keepSectionFiles is set to true, separate configuration files for the different sections will be saved in the
+  # valuesFileDirectory directory, regardless of whether valuesFileName is properly configured.
+  keepSectionFiles: false
+
 ################################################################################
 # Splunk Cloud / Splunk Enterprise configuration.
 ################################################################################
@@ -236,6 +267,11 @@ worker:
   trap:
     # number of the trap replicas when autoscaling is set to false
     replicaCount: 2
+    # Use reverse DNS lookup of the trap IP address and send the hostname to Splunk
+    resolveAddress:
+      enabled: false
+      cacheSize: 500 # maximum number of records in cache
+      cacheTTL: 1800 # time to live of the cached record in seconds
     # minimum number of threads in a pod
     concurrency: 4
     # how many tasks are consumed from the queue at once
diff --git a/create_packages.sh b/create_packages.sh
index 2bf30953a..54300a38a 100755
--- a/create_packages.sh
+++ b/create_packages.sh
@@ -115,6 +115,41 @@ pull_dependencies_images_sc4snmp(){
   fi
 }
 
+images_ui_to_pack=""
+pull_ui_images() {
+  chart_dir="$1"
+  if [ -d "$chart_dir" ] && { [ -a "$chart_dir/values.yaml" ] || [ -a "$chart_dir/values.yml" ]; }
+  then
+    if [ -a "$chart_dir/values.yaml" ]
+    then
+      values_file="$chart_dir/values.yaml"
+    else
+      values_file="$chart_dir/values.yml"
+    fi
+    backend_image_repository=$(python3 "$python_script" "$values_file" "UI.backEnd.repository")
+    backend_image_tag=$(python3 "$python_script" "$values_file" "UI.backEnd.tag")
+    docker_pull_image=$(combine_image_name "" "$backend_image_repository" "$backend_image_tag" "")
+    echo "docker pull $docker_pull_image" >> /tmp/package/packages/pull_gui_images.sh
+    images_ui_to_pack="$images_ui_to_pack""$docker_pull_image "
+
+    frontend_image_repository=$(python3 "$python_script" "$values_file" "UI.frontEnd.repository")
+    frontend_image_tag=$(python3 "$python_script" "$values_file" "UI.frontEnd.tag")
+    docker_pull_image=$(combine_image_name "" "$frontend_image_repository" "$frontend_image_tag" "")
+    echo "docker pull $docker_pull_image" >> /tmp/package/packages/pull_gui_images.sh
+    images_ui_to_pack="$images_ui_to_pack""$docker_pull_image "
+
+    init_image_repository=$(python3 "$python_script" "$values_file" "UI.init.repository")
+    docker_pull_image=$(combine_image_name "" "$init_image_repository" "" "")
+    echo "docker pull $docker_pull_image" >> /tmp/package/packages/pull_gui_images.sh
+    images_ui_to_pack="$images_ui_to_pack""$docker_pull_image "
+
+    echo "docker save $images_ui_to_pack > sc4snmp-gui-images.tar" >> /tmp/package/packages/pull_gui_images.sh
+  else
+    echo "Invalid directory"
+    exit 0
+  fi
+}
+
 helm repo add bitnami https://charts.bitnami.com/bitnami
 helm repo add pysnmp-mibs https://pysnmp.github.io/mibs/charts
 
@@ -173,6 +208,7 @@ do
   fi
 done
 
+pull_ui_images "/tmp/package/$SPLUNK_DIR"
 docker save $images_to_pack > /tmp/package/packages/dependencies-images.tar
 cd ../..
 tar -czvf packages/splunk-connect-for-snmp-chart.tar splunk-connect-for-snmp
diff --git a/docs/bestpractices.md b/docs/bestpractices.md
index 9552d1153..c5d296b77 100644
--- a/docs/bestpractices.md
+++ b/docs/bestpractices.md
@@ -1,25 +1,55 @@
 # Debug Splunk Connect for SNMP
 
-## Pieces of Advice
-
-### Check when SNMP WALK was executed last time for the device
+## Check when SNMP WALK was executed last time for the device
 1. [Configure Splunk OpenTelemetry Collector for Kubernetes](gettingstarted/sck-installation.md)
 2. Go to your Splunk and execute search: `index="em_logs" "Sending due task" "sc4snmp;;walk"` and replace with the pertinent IP Address.
 
-### Installing Splunk Connect for SNMP on Linux RedHat
+## Installing Splunk Connect for SNMP on Linux RedHat
 Installation of RedHat may be blocking ports required by microk8s. Installing microk8s on RedHat
-requires checking to see if the firewall is not blocking any of [required microk8s ports](https://microk8s.io/docs/ports).
+requires checking to see if the firewall is not blocking any of the [required microk8s ports](https://microk8s.io/docs/ports).
+
+### Accessing SC4SNMP logs
+
+SC4SNMP logs can be browsed in Splunk in the `em_logs` index, provided that [sck-otel](gettingstarted/sck-installation.md)
+is installed. Logs can also be accessed directly in Kubernetes using the terminal.
+
+#### Accessing logs via Splunk
+If [sck-otel](gettingstarted/sck-installation.md) is installed, browse the `em_logs` index. Logs can be further filtered,
+for example, by the sourcetype field. An example search command to get logs from the poller:
+```
+index=em_logs sourcetype="kube:container:splunk-connect-for-snmp-worker-poller"
+```
+
+#### Accessing logs in Kubernetes
+To access logs directly in Kubernetes, first run `microk8s kubectl -n sc4snmp get pods`. This will output all pods:
+```
+NAME                                                          READY   STATUS    RESTARTS   AGE
+snmp-splunk-connect-for-snmp-worker-trap-99f49c557-j9jwx      1/1     Running   0          29m
+snmp-splunk-connect-for-snmp-trap-56f75f9754-kmlgb            1/1     Running   0          29m
+snmp-splunk-connect-for-snmp-scheduler-7bb8c79855-rgjkj       1/1     Running   0          29m
+snmp-mibserver-784bd599fd-6xzfj                               1/1     Running   0          29m
+snmp-splunk-connect-for-snmp-worker-poller-78b46d668f-59mv4   1/1     Running   0          29m
+snmp-splunk-connect-for-snmp-worker-sender-6f8496bfbf-cvt9l   1/1     Running   0          29m
+snmp-mongodb-7579dc7867-mlnst                                 2/2     Running   0          29m
+snmp-redis-master-0                                           1/1     Running   0          29m
+```
+
+Now select the desired pod and run the `microk8s kubectl -n sc4snmp logs pod/<pod-name>` command. An example command to retrieve
+logs from `splunk-connect-for-snmp-worker-poller`:
+```
+microk8s kubectl -n sc4snmp logs pod/snmp-splunk-connect-for-snmp-worker-poller-78b46d668f-59mv4
+```
 
 ## Issues
 
 ### "Empty SNMP response message" problem
 
-In case you see the following line in the worker's logs:
+If you see the following line in the worker's logs:
 
 ```log
 [2022-01-04 11:44:22,553: INFO/ForkPoolWorker-1] Task splunk_connect_for_snmp.snmp.tasks.walk[8e62fc62-569c-473f-a765-ff92577774e5] retry: Retry in 3489s: SnmpActionError('An error of SNMP isWalk=True for a host 192.168.10.20 occurred: Empty SNMP response message')
 ```
 
-that causes infinite retry of walk operation, add `worker.ignoreEmptyVarbinds` parameter to `values.yaml` and set it to true.
+this line causes an infinite retry of the walk operation. To resolve it, add the `worker.ignoreEmptyVarbinds` parameter to `values.yaml` and set it to true.
 
 An example configuration for a worker in `values.yaml` is:
 
@@ -45,36 +75,51 @@ worker:
     - "127.0.0.6"
 ```
 
-If you put only IP address (ex. `127.0.0.1`), then errors will be ignored for all of its devices (like `127.0.0.1:161`,
-`127.0.0.1:163`...). If you put IP address and host structured as `{host}:{port}`, that means the error will be ignored only for this device.
+If you put in only the IP address (for example, `127.0.0.1`), then errors will be ignored for all of its devices (like `127.0.0.1:161`,
+`127.0.0.1:163`...). If you put in the address as `{host}:{port}`, the error will be ignored only for this device.
 
 ### Walking a device takes too much time
 
-Enable small walk functionality with the following instruction: [Configure small walk profile](../configuration/configuring-profiles/#walk-profile).
+See [Configure small walk profile](../configuration/configuring-profiles/#walk-profile) to enable the small walk functionality.
-### An error of SNMP isWalk=True blocks traffic on SC4SNMP instance
+### An error of SNMP isWalk=True blocks traffic on the SC4SNMP instance
 
-If you see many `An error of SNMP isWalk=True` errors in logs, that means that there is a connection problem with the hosts you're polling from.
-Walk will try to retry multiple times, which will eventually cause a worker to be blocked for the retries time. In this case, you might want to limit
-the maximum retries time. You can do this by setting the variable `worker.walkRetryMaxInterval`, for example:
+If you see many `An error of SNMP isWalk=True` errors in your logs, it means that there is a connection problem with the hosts you're polling.
+Walk will retry multiple times, which will eventually cause a worker to be blocked while it retries. In that case, you might want to limit
+the maximum retry time. You can do this by setting the variable `worker.walkRetryMaxInterval`, for example:
 
 ```yaml
 worker:
     walkRetryMaxInterval: 60
 ```
 
-With the configuration from the above, 'walk' will retry exponentially from 30 seconds until it reaches 60 seconds. Default value for `worker.walkRetryMaxInterval` is 180.
+With the previous configuration, 'walk' will retry exponentially from 30 seconds until it reaches 60 seconds. The default value for `worker.walkRetryMaxInterval` is 180.
 
 ### SNMP Rollover
 
-The Rollover problem is due to the integer value stored (especially when the value is 32-bit) being finite.
+The Rollover problem is due to a finite stored integer value (especially when the value is 32-bit).
 When it reaches its maximum, it gets rolled down to 0 again. This causes a strange drop in Analytics data.
-The most common case of this issue is interface speed on high speed ports. As a solution to this problem, SNMPv2 SMI defined a new object type, counter64, for 64-bit counters ([read more about it](https://www.cisco.com/c/en/us/support/docs/ip/simple-network-management-protocol-snmp/26007-faq-snmpcounter.html)).
+The most common case of this issue is interface speed on high speed ports. As a solution to this problem, SNMPv2 SMI defined a new object type, counter64, for 64-bit counters, see https://www.cisco.com/c/en/us/support/docs/ip/simple-network-management-protocol-snmp/26007-faq-snmpcounter.html.
 Not all the devices support it, but if they do, poll the counter64 type OID instead of the counter32 one.
 For example, use `ifHCInOctets` instead of `ifInOctets`.
 
-If 64-bit counter are not supported on your device, you can write your own Splunk queries that calculate the shift based on
-maximum integer value + current state. The same works for values big enough that they're not fitting a 64-bit value.
-An example for a SPLUNK query like that (interface counter), would be:
+If 64-bit counters are not supported on your device, you can write your own Splunk queries that calculate the shift based on
+the maximum integer value and the current state. The same works for values large enough that they don't fit into a 64-bit value.
+An example of such a Splunk query (for an interface counter) is shown after the following sections:
+
+
+### Unknown USM user
+When polling SNMPv3 devices, the `Unknown USM user` error suggests a wrong username. Verify
+that the Kubernetes secret with the correct username has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)).
+
+### Wrong SNMP PDU digest
+When polling SNMPv3 devices, the `Wrong SNMP PDU digest` error suggests a wrong authentication key. Verify
+that the Kubernetes secret with the correct authentication key has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)).
+
+### No SNMP response received before timeout
+The `No SNMP response received before timeout` error might have several root causes. Some of them are:
+- a wrong device IP or port
+- a wrong SNMPv2c community string
+- a wrong SNMPv3 privacy key
 
 ```
 | streamstats current=f last(ifInOctets) as p_ifInOctets last(ifOutOctets) as p_ifOutOctets by ifAlias
@@ -93,14 +138,107 @@ Error: UPGRADE FAILED: cannot patch "snmp-splunk-connect-for-snmp-inventory" wit
 The immutable error is due to the limitation placed on an inventory job. As the SC4SNMP requires several checks before
 applying updates, it is designed to allow changes in the inventory task after 5 minutes.
 
-The status of the inventory can be checked with a command:
+The status of the inventory can be checked with the following command:
 ```
 microk8s kubectl -n sc4snmp get pods | grep inventory
 ```
-If the command is not empty, wait and execute it again after the inventory job finishes (no longer visible in the output).
+If the command output is not empty, wait and execute it again after the inventory job finishes. This is when it is no longer visible in the output.
 
-If the changes are required to be applied immedietly, the previous inventory job can be deleted with the command:
+If the changes need to be applied immediately, the previous inventory job can be deleted with the following command:
 ```
 microk8s kubectl delete job/snmp-splunk-connect-for-snmp-inventory -n sc4snmp
 ```
-The upgrade command can be executed again.
\ No newline at end of file
+The upgrade command can be executed again.
+
+### Identifying trap issues
+
+#### Wrong IP or port
+The first possible reason why traps are not sent to Splunk is that SNMP agents send trap messages to the wrong IP
+address or port. To check the correct address of the trap server, run the following command:
+
+```
+microk8s kubectl -n sc4snmp get services
+```
+
+This command should output data similar to the following:
+```
+NAME                                TYPE           CLUSTER-IP       EXTERNAL-IP       PORT(S)         AGE
+snmp-redis-headless                 ClusterIP      None             <none>            6379/TCP        113s
+snmp-mibserver                      ClusterIP      10.152.183.163   <none>            80/TCP          113s
+snmp-mongodb                        ClusterIP      10.152.183.118   <none>            27017/TCP       113s
+snmp-redis-master                   ClusterIP      10.152.183.61    <none>            6379/TCP        113s
+snmp-mongodb-metrics                ClusterIP      10.152.183.50    <none>            9216/TCP        113s
+snmp-splunk-connect-for-snmp-trap   LoadBalancer   10.152.183.190   114.241.233.134   162:32180/UDP   113s
+```
+
+Check the `EXTERNAL-IP` of `snmp-splunk-connect-for-snmp-trap` and the second port number for this service. In this case
+the full `snmp-splunk-connect-for-snmp-trap` address will be `114.241.233.134:32180`.
+
+
+If agents send traps to the correct address, but there is still no data in the `netops` index, there might be some
+issues with credentials. These errors can be seen in the logs of the `snmp-splunk-connect-for-snmp-trap` pod.
+
+#### Unknown SNMP community name encountered
+When a community string is used for authentication, the following error should be expected if the arriving trap
+has a community string not configured in SC4SNMP:
+```
+2024-02-06 15:42:14,885 ERROR Security Model failure for device ('18.226.181.199', 42514): Unknown SNMP community name encountered
+```
+
+If this error occurs, check if the appropriate community is defined under `traps.communities` in `values.yaml`. See the
See the +following example of a `public` community configuration: +```yaml +traps: + communities: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +``` + +#### Unknown SNMP security name encountered + +While sending SNMP v3 traps in case of wrong username or engine id configuration, the following error should be expected: +``` +2024-02-06 15:42:14,091 ERROR Security Model failure for device ('18.226.181.199', 46066): Unknown SNMP security name encountered +``` + +If this error occurs, verify that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)). +After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. Check that the correct snmp engine id +is configured under `traps.securityEngineId`. See the following example of a `values.yaml` with configured secret and engine id: +```yaml +traps: + usernameSecrets: + - my-secret-name + securityEngineId: + - "090807060504030201" +``` + +#### Authenticator mismatched + +While sending SNMP v3 traps in case of wrong authentication protocol or password configuration, the following error should be expected: +``` +2024-02-06 15:42:14,642 ERROR Security Model failure for device ('18.226.181.199', 54806): Authenticator mismatched +``` +If this error occurs, verify that the kubernetes secret with the correct authentication protocol and password has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)). +After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. See the following example of a `values.yaml` with configured secret: +```yaml +traps: + usernameSecrets: + - my-secret-name +``` + +#### Ciphering services not available or ciphertext is broken +While sending SNMP v3 traps in case of wrong privacy protocol or password configuration, the following error should be expected: +``` +2024-02-06 15:42:14,780 ERROR Security Model failure for device ('18.226.181.199', 48249): Ciphering services not available or ciphertext is broken +``` +If this error occurs, verify that the kubernetes secret with the correct privacy protocol and password has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)). +After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. See the following example of a `values.yaml` with configured secret: +```yaml +traps: + usernameSecrets: + - my-secret-name +``` diff --git a/docs/configuration/configuring-groups.md b/docs/configuration/configuring-groups.md index dae8c12f2..55c616606 100644 --- a/docs/configuration/configuring-groups.md +++ b/docs/configuration/configuring-groups.md @@ -1,16 +1,16 @@ # Configuring Groups It is common to configure whole groups of devices instead of just single ones. -SC4SNMP allows both types of configuration. Group consists of many hosts. Each of them is configured in the `values.yaml` +SC4SNMP allows both types of configuration. A group consists of many hosts. Each of them is configured in the `values.yaml` file, in the `scheduler` section. After configuring a group, its name can be used in the `address` field in the inventory record. All settings specified in the inventory record will be assigned to hosts from the given group, unless specific host configuration overrides it. -- Group configuration example and documentation can be found in the [Scheduler Configuration](scheduler-configuration.md#define-groups-of-hosts) page. 
-- Use of groups in the inventory can be found in the [Poller Configuration](poller-configuration.md#configure-inventory) page.
+- See the [Scheduler Configuration](scheduler-configuration.md#define-groups-of-hosts) page for group examples and documentation.
+- See the [Poller Configuration](poller-configuration.md#configure-inventory) page for information about groups in the inventory.
 
-If the host is configured in the group and both the group and the single host are included in the inventory (like in the example below), the
-configuration for the single host will be ignored in favour of group configuration:
+If the host is configured in the group and both the group and the single host are included in the inventory, the
+configuration for the single host will be ignored in favor of the group configuration. See the following example:
 
 ```yaml
 scheduler:
@@ -31,7 +31,7 @@ poller:
 ```
 
 If the specific host from the group has to be configured separately, first it must be deleted from the group configuration,
-and then it can be inserted as a new record in the inventory (like in the example below):
+and then it can be inserted as a new record in the inventory. See the following example:
 
 ```yaml
 scheduler:
diff --git a/docs/configuration/configuring-profiles.md b/docs/configuration/configuring-profiles.md
index e0809f7f0..5a54c61fc 100644
--- a/docs/configuration/configuring-profiles.md
+++ b/docs/configuration/configuring-profiles.md
@@ -1,23 +1,23 @@
 # Configuring profiles
 
-Profiles are the units where you can configure what you want to poll, and then assign them to the device. The definition of profile can be found in the `values.yaml` file
+Profiles are where you can configure what you want to poll, and then assign them to the device. The definition of a profile can be found in the `values.yaml` file
 under the `scheduler` section.
 
-Here are the instructions on how to use profiles: [Update Inventory and Profile](../poller-configuration/#update-inventory).
+See the following instructions on how to use profiles: [Update Inventory and Profile](../poller-configuration/#update-inventory).
 
 There are two types of profiles in general:
 
-1. Static profile - polling starts when the profile is added to the `profiles` field in the `inventory` of the device.
-2. Smart profile - polling starts when configured conditions are fulfilled, and the device to poll from has `smart_profiles` enabled in inventory.
-Smart profiles are useful when we have many devices of a certain kind, and we don't want to configure each of them individually with static profiles.
+1. Static profile: Polling starts when the profile is added to the `profiles` field in the `inventory` of the device.
+2. Smart profile: Polling starts when configured conditions are fulfilled, and the device to poll from has `smart_profiles` enabled in inventory.
+Smart profiles are useful when you have many devices of the same kind, and you don't want to configure each of them individually with static profiles.
 
 In order to configure smart profile, do the following:
 
     1. Choose one of the fields polled from the device, most commonly sysDescr.
     2. Set the filter to match all the devices of this kind.
-    3. Setup polling of the profile by enabling smart profiles for devices you want to be polled.
+    3. Set up polling of the profile by enabling the smart profiles for the devices that you want to be polled.
-The template of the profile looks like the following:
+The profile template looks like the following:
 
 ```yaml
 scheduler:
@@ -42,7 +42,7 @@ scheduler:
           - ['SNMPv2-MIB', 'sysUpTime',0]
 ```
 
-For example, we have configured two profiles. One is smart, and the other one is static:
+In the following example, two profiles are configured. One is smart, and the other one is static:
 
 ```yaml
 scheduler:
@@ -64,7 +64,7 @@ scheduler:
           - ['IP-MIB']
 ```
 
-If we want to enable only `static_profile` polling for the host `10.202.4.202`, we will configure similar inventory:
+If you want to enable only `static_profile` polling for the host `10.202.4.202`, you would configure a similar inventory:
 
 ```yaml
 poller:
@@ -73,7 +73,7 @@ poller:
       10.202.4.202,,2c,public,,,2000,static_profile,f,
 ```
 
-If we want to enable checking the `10.202.4.202` device against smart profiles, we need to set `smart_profiles` to `t`:
+If you want to enable checking the `10.202.4.202` device against smart profiles, you need to set `smart_profiles` to `t`:
 
 ```yaml
 poller:
@@ -82,7 +82,7 @@ poller:
      10.202.4.202,,2c,public,,,2000,,t,
 ```
 
-Then, if the device `sysDescr` matches the `'.*linux.*'` filter, the `smart_profile` profile will be polled.
+Afterwards, if the device `sysDescr` matches the `'.*linux.*'` filter, the `smart_profile` profile will be polled.
 
 ## varBinds configuration
 
@@ -92,15 +92,15 @@ subsection of each profile. The syntax configuration of `varBinds` looks like th
 
     [ "MIB-Component", "MIB object"[Optional], "MIB index number"[Optional]]
 
-  - `MIB-Component` - The SNMP MIB itself consists of distinct component MIBs, each of which refers to a specific
-    defined collection of management information that is part of the overall SNMP MIB, eg., `SNMPv2-MIB`.
+  - `MIB-Component`: The SNMP MIB itself consists of distinct component MIBs, each of which refers to a specific
+collection of management information that is part of the overall SNMP MIB, for example, `SNMPv2-MIB`.
     If only the `MIB-Component` is set, then the SC4SNMP will get the whole subtree.
-  - `MIB object` - The SNMP MIB stores only simple data types: scalars and two-dimensional arrays of scalars,
+  - `MIB object`: The SNMP MIB stores only simple data types: scalars and two-dimensional arrays of scalars,
    called tables. The keywords SYNTAX, ACCESS, and DESCRIPTION as well as other keywords such as STATUS and
    INDEX are used to define the SNMP MIB managed objects.
-  - `MIB index number` - Define index number for given MIB Object eg. `0`.
+  - `MIB index number`: Define the index number for a given MIB Object, for example, `0`.
 
-Example:
+See the following example:
 ```yaml
   varBinds:
     # Syntax: [ "MIB-Component", "MIB object name"[Optional], "MIB index number"[Optional]]
@@ -110,16 +110,16 @@ Example:
 ```
 
 ## Static Profile configuration
 
-Static Profile is used when they are defined on a list of profiles in the inventory configuration in the `poller`
+Static Profile is used when a list of profiles is defined in the `poller`
 service [Inventory configuration](../poller-configuration/#configure-inventory).
 Static Profiles are executed even if the SmartProfile flag in inventory is set to false.
 
-To configure Static Profile value needs to be set in the `profiles` section:
+To configure Static Profile, the following values need to be set in the `profiles` section:
 
-  - `ProfileName` - define as subsection key in `profiles`.
-  - `frequency` - define interval between executing SNMP gets in second.
-  - `varBinds` - define var binds to query.
+  - Define `ProfileName` as a subsection key in `profiles`.
+  - Define `frequency` as the interval between SNMP executions in seconds.
+  - Define `varBinds` as var binds to query.
 
-Example:
+See the following example:
 ```yaml
 scheduler:
   profiles: |
@@ -137,7 +137,7 @@ Sometimes static profiles have additional functionalities to be used in specific
 
 #### WALK profile
 
-If you would like to limit the scope of the walk, you should set one of the profiles in the inventory to point to the profile definition of type `walk`:
+If you would like to limit the scope of the walk, you should set one of the profiles in the inventory to point to the profile definition of the `walk` type:
 ```yaml
 scheduler:
   profiles: |
@@ -148,9 +148,9 @@ scheduler:
         - ['UDP-MIB']
 ```
 This profile should be placed in the profiles section of the inventory definition. It will be executed with the frequency defined in `walk_interval`.
-If multiple profiles of type `walk` is placed in profiles, the last one will be used.
+If multiple profiles of the `walk` type are placed in profiles, the last one will be used.
 
-This is how to use `walk` profiles:
+See the following example of how to use `walk` in profiles:
 
 ```yaml
 poller:
@@ -160,29 +160,29 @@ poller:
 ```
 NOTE: When small walk is configured, `SNMPv2-MIB` is enabled by default (we need it to create the state of the device in the database).
-For example, if you've decided to use `small_walk` from the example above, you'll be able to poll only `UDP-MIB`, and `SNMPv2-MIB` OIDs.
+For example, if you used `small_walk` from the previous example, you'll only be able to poll `UDP-MIB` and `SNMPv2-MIB` OIDs.
 
 ## SmartProfile configuration
 
-SmartProfile is executed when the SmartProfile flag in inventory is set to true and the condition defined in profile match.
-More information about configuring inventory can be found in [Inventory configuration](../poller-configuration/#configure-inventory).
+SmartProfile is executed when the SmartProfile flag in the inventory is set to true and the conditions defined in the profile match.
+See [Inventory configuration](../poller-configuration/#configure-inventory) for more information.
 
-To configure Smart Profile, the following value needs to be set in the `profiles` section:
+To configure SmartProfile, the following values need to be set in the `profiles` section:
 
-  - `ProfileName` - define as subsection key in `profiles`.
-  - `frequency` - define an interval between executing SNMP's gets in second.
-  - `condition` - section define conditions to match profile
-  - `type` - key of `condition` section which defines type of condition. The allowed values are `base` and `field` (`walk` type is also allowed here, but it's not part of smart profiles).
-    - `base` type of condition will be executed when `SmartProfile` in inventory is set to true.
-    - `field` type of condition will be executed if it matches `pattern` for defined `field`. Supported fields are:
+  - For `ProfileName`, define it as a subsection key in `profiles`.
+  - For `frequency`, define it as the interval between SNMP executions in seconds.
+  - For `condition`, define the conditions to match the profile.
+  - For `type`, define it as the key for the `condition` section that defines the type of condition. The allowed values are `base` or `field` (`walk` type is also allowed here, but it's not part of smart profiles).
+    - The `base` type of condition will be executed when `SmartProfile` in inventory is set to true.
+    - The `field` type of condition will be executed if it matches `pattern` for the defined `field`. Supported fields are:
        - "SNMPv2-MIB.sysDescr"
        - "SNMPv2-MIB.sysObjectID"
-  - `field` Define field name for condition type field.
-  - `pattern` Define list of regular expression patterns for MIB object field defined in `field` section. For example:
-    - ".*linux.*"
-  - `varBinds` - define var binds to query.
+  - For `field`, define the field name for the field condition type.
+  - For `pattern`, define the list of regular expression patterns for the MIB object field defined in the `field` section, for example:
+    - ".*linux.*"
+  - For `varBinds`, define var binds to query.
 
-Example of `base` type profile:
+See the following example of a `base` type profile:
 ```yaml
 scheduler:
   profiles: |
@@ -195,7 +195,7 @@ scheduler:
           - ['SNMPv2-MIB', 'sysName']
 ```
 
-Example of `field` type profile, also called an automatic profile:
+See the following example of a `field` type profile, also called an automatic profile:
 ```yaml
 scheduler:
   profiles: |
@@ -211,14 +211,14 @@ scheduler:
           - ['SNMPv2-MIB', 'sysName']
 ```
 
-NOTE: Be aware that profile changes may not be reflected immediately. It can take up to 1 minute for changes to propagate. In case you changed frequency, or a profile type, the change will be reflected only after the next walk.
-There is also 5 minute TTL for an inventory pod. Basically, SC4SNMP allows one inventory upgrade and then block updates for the next 5 minutes.
+NOTE: Be aware that profile changes may not be reflected immediately. It can take up to 1 minute for changes to propagate. If you change the frequency, or a profile type, the change will be reflected only after the next walk.
+There is also a 5 minute time to live (TTL) for an inventory pod. SC4SNMP allows one inventory upgrade and then it blocks updates for the next 5 minutes.
 
 ## Conditional profiles
 
-There is a way to not explicitly give what SNMP objects we want to poll - only the conditions that must be fulfilled to
-qualify object for polling.
+There is a way to not explicitly list what SNMP objects you want to poll, but, instead, only give the conditions that must be fulfilled to
+qualify an object for polling.
 
-An example of a conditional profile is:
+See the following example of a conditional profile:
 
 ```yaml
 IF_conditional_profile:
@@ -238,15 +238,15 @@ IF_conditional_profile:
 ```
 
 When the such profile is defined and added to a device in an inventory, it will poll all interfaces where `ifAdminStatus`
-and `ifOperStatus` is up. Note that conditional profiles are being evaluated during the walk process (on every `walk_interval`)
-and if the status changes in between, the scope of the conditional profile won't be modified.
+and `ifOperStatus` is up. Conditional profiles are being evaluated during the walk process (on every `walk_interval`),
+and, if the status changes in between, the scope of the conditional profile won't be modified. Therefore, status changes are only implemented when walk_interval is executed.
 
-These are operations possible to use in conditional profiles:
+See the following operations that can be used in conditional profiles:
 
-1. `equals` - value gathered from `field` is equal to `value`
-2. `gt` - value gathered from `field` is bigger than `value` (works only for numeric values)
-3. `lt` - value gathered from `field` is smaller than `value` (works only for numeric values)
-4. `in` - value gathered from `field` is equal to one of the elements provided in `value`, for ex.:
 
 When the such profile is defined and added to a device in an inventory, it will poll all interfaces where `ifAdminStatus`
-and `ifOperStatus` is up. Note that conditional profiles are being evaluated during the walk process (on every `walk_interval`)
-and if the status changes in between, the scope of the conditional profile won't be modified.
+and `ifOperStatus` is up. Conditional profiles are being evaluated during the walk process (on every `walk_interval`),
+and, if the status changes in between, the scope of the conditional profile won't be modified. Therefore, status changes are only implemented when walk_interval is executed.
 
-These are operations possible to use in conditional profiles:
+See the following operations that can be used in conditional profiles:
 
-1. `equals` - value gathered from `field` is equal to `value`
-2. `gt` - value gathered from `field` is bigger than `value` (works only for numeric values)
-3. `lt` - value gathered from `field` is smaller than `value` (works only for numeric values)
-4. `in` - value gathered from `field` is equal to one of the elements provided in `value`, for ex.:
+1. `equals`: the value gathered from `field` is equal to the `value`.
+2. `gt`: the value gathered from `field` is bigger than `value` (works only for numeric values).
+3. `lt`: the value gathered from `field` is smaller than `value` (works only for numeric values).
+4. `in`: the value gathered from `field` is equal to one of the elements provided in `value`, for example:
 
 ```yaml
 conditions:
@@ -257,8 +257,8 @@ conditions:
       - 0
 ```
 
-5. `regex` - value gathered from `field` match the pattern provided in `value`.
-You can add options for regular expression after `/`. Possible options match ones used in [mongodb regex operator](https://www.mongodb.com/docs/manual/reference/operator/query/regex/).
+5. `regex`: the value gathered from `field` matches the pattern provided in `value`.
+You can add options for regular expression after `/`. Possible options match ones used in [mongodb regex operator](https://www.mongodb.com/docs/manual/reference/operator/query/regex/), for example:
 
 ```yaml
 conditions:
@@ -267,7 +267,7 @@ conditions:
     value: ".own/i"
 ```
 
-To negate operation you can add flag `negate_operation: "true"` to specified `field`.
+To negate an operation you can add the flag `negate_operation: "true"` to the specified `field`, for example:
 ```yaml
 conditions:
   - field: IF-MIB.ifAdminStatus
@@ -275,31 +275,28 @@ conditions:
     value: "up"
     negate_operation: "true"
 ```
-It will negate the operator specified in `operation`. Possible negation:
+This will negate the operator specified in `operation`. See the following:
 
-1. `negate_operation + equals` - value gathered from `field` is NOT equal to `value`
-2. `negate_operation + gt` - value gathered from `field` is SMALLER or EQUAL to `value` (works only for numeric values)
-3. `negate_operation + lt` - value gathered from `field` is BIGGER or EQUAL to `value` (works only for numeric values)
-4. `negate_operation + in` - value gathered from `field` is NOT equal to any of the elements provided in `value`
-5. `negate_operation + regex` - value gathered from `field` is NOT matching the pattern provided in `value`.
+1. `negate_operation + equals`: value gathered from `field` is NOT equal to `value`.
+2. `negate_operation + gt`: value gathered from `field` is SMALLER or EQUAL to `value` (works only for numeric values).
+3. `negate_operation + lt`: value gathered from `field` is BIGGER or EQUAL to `value` (works only for numeric values).
+4. `negate_operation + in`: value gathered from `field` is NOT equal to any of the elements provided in `value`.
+5. `negate_operation + regex`: value gathered from `field` does NOT match the pattern provided in `value`.
 
-`field` part of `conditions` must fulfill the pattern `MIB-family.field`. Fields must represent textual value (not metric one),
-you can learn more about it [here](snmp-data-format.md).
+The `field` parameter in `conditions` must fulfill the pattern `MIB-family.field`. The field must represent a textual value (rather than a metric one).
+See [snmp data format](snmp-data-format.md) for more information.
 
-You have to explicitly define `varBinds` (not only the MIB family but also the field to poll), so such config:
+You have to explicitly define `varBinds` (not only the MIB family but also the field to poll). See the following **incorrect** example:
 ```yaml
 varBinds:
   - [ 'IF-MIB' ]
 ```
-is not correct.
-
-
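+A corresponding correct entry spells out the field as well (`ifDescr` is an illustrative field name):
+
+```yaml
+varBinds:
+  - [ 'IF-MIB', 'ifDescr' ]
+```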
 ## Custom translations
 If the user wants to use custom names/translations of MIB names, it can be configured under the customTranslations section under scheduler config.
-Translations are grouped by MIB family. In the example below IF-MIB.ifInDiscards will be translated to IF-MIB.myCustomName1:
+Translations are grouped by the MIB family. In the following example, IF-MIB.ifInDiscards will be translated to IF-MIB.myCustomName1:
 ```yaml
 scheduler:
   customTranslations:
diff --git a/docs/configuration/coredns-configuration.md b/docs/configuration/coredns-configuration.md
new file mode 100644
index 000000000..5f5dfb7ff
--- /dev/null
+++ b/docs/configuration/coredns-configuration.md
@@ -0,0 +1,141 @@
+# Configuration of CoreDNS in microk8s to use different nameservers for different domains and IP ranges
+
+
+In MicroK8s, CoreDNS is enabled by running the following command: `microk8s enable dns`.
+
+Alternatively, you can specify a list of DNS servers by running the command: `microk8s enable dns:8.8.8.8,1.1.1.1`.
+
+The servers in the provided list are expected to be capable of resolving the same addresses.
+If one of these servers is unreachable, another one is used.
+If the requirement is to use different DNS servers for various domains or different IP ranges in the case of reverse lookup, the configuration differs.
+
+Before executing `microk8s enable dns`, the first step is to edit `coredns.yaml`, located inside the MicroK8s installation folder.
+An example path is: `/var/snap/microk8s/common/addons/core/addons/dns/coredns.yaml`.
+
+
+Inside `coredns.yaml`, there is a complete configuration for the CoreDNS deployment.
+The only section that requires editing is the ConfigMap:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+    k8s-app: kube-dns
+data:
+  Corefile: |
+    .:53 {
+        errors
+        health {
+          lameduck 5s
+        }
+        ready
+        log . {
+          class error
+        }
+        kubernetes cluster.local in-addr.arpa ip6.arpa {
+          pods insecure
+          fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        forward . $NAMESERVERS
+        cache 30
+        loop
+        reload
+        loadbalance
+    }
+```
+
+Changes should be made in `data.Corefile` within this ConfigMap. This documentation explains the basic configuration.
+For more details, refer to the official CoreDNS [documentation](https://coredns.io/manual/toc/).
+
+
+Updated ConfigMap:
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+    k8s-app: kube-dns
+data:
+  Corefile: |
+    .:53 {
+        errors
+        health {
+          lameduck 5s
+        }
+        ready
+        log . {
+          class error
+        }
+        kubernetes cluster.local in-addr.arpa ip6.arpa {
+          pods insecure
+          fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        forward . $NAMESERVERS
+        cache 1
+        loop
+        reload
+        loadbalance
+    }
+    dummyhost.com:53 {
+        errors
+        health {
+          lameduck 5s
+        }
+        ready
+        log . {
+          class error
+        }
+        kubernetes cluster.local in-addr.arpa ip6.arpa {
+          pods insecure
+          fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        forward . 4.3.2.1
+        cache 1
+        loop
+        reload
+        loadbalance
+    }
+    2.1.in-addr.arpa:53 {
+        errors
+        health {
+          lameduck 5s
+        }
+        ready
+        log . {
+          class error
+        }
+        kubernetes cluster.local in-addr.arpa ip6.arpa {
+          pods insecure
+          fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        forward . 4.3.2.1
+        cache 1
+        loop
+        reload
+        loadbalance
+    }
+```
+
+Two server blocks, `dummyhost.com:53` and `2.1.in-addr.arpa:53`, have been added.
+
+The `dummyhost.com:53` server block is used to resolve all hosts within the `dummyhost.com` domain.
+The DNS server used for these hosts is specified in the forward plugin as `4.3.2.1`.
+Additional information about the forward plugin can be found in the official CoreDNS [documentation](https://coredns.io/plugins/forward/).
+
+The `2.1.in-addr.arpa:53` server block is added for reverse DNS lookup for all devices in the IPv4 range `1.2.0.0/16`.
+The DNS server is the same as in the `dummyhost.com:53` server block.
+
+All other DNS requests will be handled by the `8.8.8.8` server if `microk8s enable dns` is run without providing a list of DNS servers.
+Alternatively, if the command was run with a list of servers,
+i.e., `microk8s enable dns:8.8.8.8,1.1.1.1`, one of the provided servers will be used.
\ No newline at end of file
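+
+One way to sanity-check the configuration after enabling DNS is to resolve a test name from a throwaway pod. This is a sketch; `server.dummyhost.com` is an illustrative hostname, and it assumes the `busybox` image can be pulled by the cluster:
+
+```bash
+microk8s kubectl run dns-test --image=busybox:1.36 --restart=Never --rm -it -- nslookup server.dummyhost.com
+```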
diff --git a/docs/configuration/deployment-configuration.md b/docs/configuration/deployment-configuration.md
index 460ee4d98..5cf546090 100644
--- a/docs/configuration/deployment-configuration.md
+++ b/docs/configuration/deployment-configuration.md
@@ -1,6 +1,6 @@
 #Deployment Configuration
 
-`values.yaml` is the main point of SC4SNMP management. You can check all the default values of Helm dependencies using:
+`values.yaml` is the main point of SC4SNMP management. You can check all the default values of Helm dependencies using the following command:
 
 ```
 microk8s helm3 inspect values splunk-connect-for-snmp/splunk-connect-for-snmp > values.yaml
@@ -8,7 +8,7 @@ microk8s helm3 inspect values splunk-connect-for-snmp/splunk-connect-for-snmp >
 
 The whole file is divided into the following parts:
 
-For configuring endpoint for sending SNMP data:
+To configure the endpoint for sending SNMP data:
 
 - `splunk` - in case you use Splunk Enterprise/Cloud
 - `sim` - in case you use Splunk Observability Cloud. More details: [sim configuration](sim-configuration.md)
@@ -29,7 +29,7 @@ Shared components:
 - `redis` - more details: [redis configuration](redis-configuration.md)
 
 ### Shared values
-All the components have the `resources` field for adjusting memory resources:
+All the components have the following `resources` field for adjusting memory resources:
 ```yaml
   resources:
     limits:
@@ -39,11 +39,11 @@ All the components have the `resources` field for adjusting memory resources:
       cpu: 1000m
       memory: 2Gi
 ```
-More information about the concept of `resources` can be found in the [kuberentes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+For more information about the concept of `resources`, see the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
 
-There is an option to create common annotations across all the services. It can be set by:
+There is an option to create common annotations across all services. It can be set by:
 ```yaml
 commonAnnotations:
   annotation_key: annotation_value
-```
\ No newline at end of file
+```
diff --git a/docs/configuration/mongo-configuration.md b/docs/configuration/mongo-configuration.md
index af535bce6..ab532a57e 100644
--- a/docs/configuration/mongo-configuration.md
+++ b/docs/configuration/mongo-configuration.md
@@ -1,12 +1,13 @@
-#Mongo DB Configuration
+# Mongo DB Configuration
+
 Mongo DB is used as the database for keeping schedules.
 
-### Mongo DB configuration file
+### MongoDB configuration file
 
-Mongo DB configuration is kept in the `values.yaml` file in the `mongodb` section.
+MongoDB configuration is kept in the `values.yaml` file in the `mongodb` section.
 `values.yaml` is used during the installation process for configuring kubernetes values.
 
-Example:
+See the following example:
 ```yaml
 mongodb:
   #Architecture, Architecture for Mongo deployments is immutable to move from standalone to replicaset will require a uninstall.
@@ -30,4 +31,4 @@ mongodb:
     enabled: true
 ```
 
-It is recommended not to change this setting. If it is necessary to change it, see: [MongoDB on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/mongodb/)
+It is recommended not to change this setting. If it is necessary to change it, see [MongoDB on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/mongodb/).
diff --git a/docs/configuration/poller-configuration.md b/docs/configuration/poller-configuration.md
index 99b285eb6..0e58897f0 100644
--- a/docs/configuration/poller-configuration.md
+++ b/docs/configuration/poller-configuration.md
@@ -1,12 +1,13 @@
-#Poller Configuration
+# Poller Configuration
+
 Poller is a service which is responsible for querying
-SNMP devices using the SNMP GET, and the SNMP WALK functionality. Poller executes two main types of tasks:
+SNMP devices using the SNMP GET and WALK functionalities. Poller executes two main types of tasks:
 
-- Walk task - executes SNMP walk. SNMP walk is an SNMP application that uses SNMP GETNEXT requests to
-collect SNMP data from the network and infrastructure SNMP-enabled devices, such as switches and routers. It is a time-consuming task,
+- The Walk task executes SNMP walk. SNMP walk is an SNMP application that uses SNMP GETNEXT requests to
+collect SNMP data from the network and infrastructure of SNMP-enabled devices, such as switches and routers. It is a time-consuming task,
 which may overload the SNMP device when executed too often. It is used by the SC4SNMP to collect and push all OID values, which the provided ACL has access to.
 
-- Get task - it is a lightweight task whose goal is to query a subset of OIDs defined by the customer. The task serves frequent monitoring OIDs, like memory or CPU utilization.
+- The Get task is a lightweight task that queries a subset of OIDs defined by the customer. This task serves OIDs that need frequent monitoring, such as memory or CPU utilization.
 
 Poller has an `inventory`, which defines what and how often SC4SNMP has to poll.
 
@@ -15,7 +16,7 @@ Poller has an `inventory`, which defines what and how often SC4SNMP has to poll.
 The poller configuration is kept in a `values.yaml` file in the `poller` section.
 `values.yaml` is used during the installation process for configuring Kubernetes values.
 
-Poller example configuration:
+See the following poller example configuration:
 ```yaml
 poller:
   usernameSecrets:
@@ -27,7 +28,7 @@ poller:
     10.202.4.202,,2c,public,,,2000,,,
 ```
 
-NOTE: header's line (`address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`) is necessary for the correct execution of SC4SNMP. Do not remove it.
+NOTE: The header line (`address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`) is necessary for the correct execution of SC4SNMP. Do not remove it.
 
 ### Define log level
 The log level for poller can be set by changing the value for the key `logLevel`. The allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
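+A minimal sketch of setting it (the chosen level is illustrative):
+
+```yaml
+poller:
+  logLevel: "DEBUG"
+```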
@@ -38,8 +39,7 @@ Secrets are required to run SNMPv3 polling. To add v3 authentication details, cr
 
 ### Append OID index part to the metrics
 
-Not every SNMP metric object is structured the way it has its index as a one of the field value.
-We can append the index part of OID with:
+Not every SNMP metric object is structured with its index as one of the field values. We can append the index part of the OID using:
 
 ```yaml
 poller:
@@ -66,19 +66,10 @@ out of this object:
 }
 ```
 
-Not every SNMP metric object is structured the way it has its index as a one of the field value.
-We can append the index part of OID with:
-
-```yaml
-poller:
-  metricsIndexingEnabled: true
-```
-
-
 ### Disable automatic polling of base profiles
 
-There are [two profiles](https://github:com/splunk/splunk-connect-for-snmp/blob/main/splunk_connect_for_snmp/profiles/base.yaml) that are being polled by default - so that even without any configuration you can see
-the data in Splunk. You can disable it with `pollBaseProfiles` parameter.
+There are [two profiles](https://github.com/splunk/splunk-connect-for-snmp/blob/main/splunk_connect_for_snmp/profiles/base.yaml) that are being polled by default, so that even without any configuration set up, you can see
+the data in Splunk. You can disable it with the following `pollBaseProfiles` parameter:
 
 ```yaml
 poller:
@@ -87,25 +78,24 @@ poller:
 
 ### Configure inventory
 
-To update inventory, see: [Update Inventory and Profile](#update-inventory).
-
-`inventory` section in `poller` has the following fields to configure:
-
- - `address` [REQUIRED] - IP address which SC4SNMP should connect to collect data from or name of the group of hosts. General
-information about groups can be found on [Configuring Groups](configuring-groups.md) page.
- - `port` [OPTIONAL] - SNMP listening port. Default value `161`.
- - `version` [REQUIRED] - SNMP version, allowed values: `1`, `2c` or `3`
- - `community` [OPTIONAL] - SNMP community string, filed is required when `version` is `1` or `2c`
- - `secret` [OPTIONAL] - reference to the secret from `poller.usernameSecrets` that should be used to poll from the device
- - `security_engine` [OPTIONAL] - security engine ID required by SNMPv3. If not provided for version `3` it is autogenerated.
- - `walk_interval` [OPTIONAL] - interval in seconds for SNMP walk, default value `42000`. This value needs to be between `1800` and `604800`
- - `profiles` [OPTIONAL] - list of SNMP profiles used for the device. More than one profile can be added by semicolon
-separation eg. `profile1;profile2`. More about profiles in [Profile Configuration](../configuring-profiles)
- - `smart_profiles` [OPTIONAL] - enabled smart profiles, by default it's `true`. Allowed value: `true`, `false`.
- - `delete` [OPTIONAL] - flags which define if inventory should be deleted from scheduled tasks for WALKs and GETs.
-Allowed value: `true`, `false`. Default value is `false`.
-
-Example:
+To update inventory, see [Update Inventory and Profile](#update-inventory).
+
+The `inventory` section in `poller` has the following fields to configure:
+
+ - `address` (REQUIRED) is the IP address which SC4SNMP should collect data from, or the name of the group of hosts. General
+information about groups can be found on the [Configuring Groups](configuring-groups.md) page.
+ - `port` (OPTIONAL) is an SNMP listening port. The default value is `161`.
+ - `version` (REQUIRED) is the SNMP version, and the allowed values are `1`, `2c`, or `3`.
+ - `community` (OPTIONAL) is the SNMP community string. The field is required when `version` is `1` or `2c`.
+ - `secret` (OPTIONAL) is the reference to the secret from `poller.usernameSecrets` that should be used to poll from the device.
+ - `security_engine` (OPTIONAL) is the security engine ID required by SNMPv3. If it is not provided for version `3`, it will be autogenerated.
+ - `walk_interval` (OPTIONAL) is the interval in seconds for SNMP walk, with a default value of `42000`. This value needs to be between `1800` and `604800`.
+ - `profiles` (OPTIONAL) is a list of SNMP profiles used for the device. More than one profile can be added by a semicolon
+separation, for example, `profile1;profile2`. For more information about profiles, see [Profile Configuration](../configuring-profiles).
+ - `smart_profiles` (OPTIONAL) enables smart profiles, and by default it's set to `true`. Its allowed values are `true` or `false`.
+ - `delete` (OPTIONAL) is a flag that defines if the inventory should be deleted from the scheduled tasks for WALKs and GETs. Its allowed values are `true` or `false`. The default value is `false`.
+
+See the following example:
 ```yaml
 poller:
   inventory: |
@@ -116,23 +106,20 @@ poller:
 
 ### Update Inventory
 
-Adding new devices for `values.yaml` is quite expensive from the Splunk Connect for SNMP perspective.
-As it interacts with real, networking devices, it requires several checks before applying changes. SC4SNMP was designed to prevent changes in inventory
-task more often than every 5 min.
-
+Adding new devices to `values.yaml` is resource-expensive, and can impact performance. As it interacts with hardware networking devices, the updating process requires several checks before applying changes. SC4SNMP was designed to prevent changes in inventory tasks more often than every 5 minutes.
+
 To apply inventory changes in `values.yaml`, the following steps need to be executed:
 
 1. Edit `values.yaml`
-2. Check if inventory pod is still running by the execute command:
+2. Check if the inventory pod is still running using the following command:
 
 ```shell
 microk8s kubectl -n sc4snmp get pods | grep inventory
 ```
 
-If the command does not return any pods, follow the next step. In another case, wait and execute the command again until the moment
-when inventory job finishes.
+If the command does not return any pods, proceed to the next step. Otherwise, wait and execute the command again until the inventory job finishes.
 
-If you really need to apply changes immediately, you can get around the limitation by deleting the inventory job with:
+If you really need to apply changes immediately, you can get around the limitation by deleting the inventory job using the following command:
 
 ```shell
 microk8s kubectl delete job/snmp-splunk-connect-for-snmp-inventory -n sc4snmp
@@ -147,20 +134,20 @@ microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/spl
 ```
 
 NOTE: If you decide to change the frequency of the profile without changing the inventory data, the change will be reflected after
-next the walk process for the host. The walk happens every `walk_interval`, or on any change in inventory.
+the next walk process for the host. The walk happens every `walk_interval`, or on any change in inventory.
 
 #### Upgrade with the csv file
 
-There is a possibility to update inventory by making changes outside the `values.yaml`. It can be put to separate csv file and upgraded passing `--set-file poller.inventory=`.
+You can update the inventory by making changes outside of `values.yaml`. The inventory can be put into a separate CSV file and upgraded using `--set-file poller.inventory=`.
 
-Example of the CSV file configuration:
+See the following example of a CSV file configuration:
 
 ```csv
 address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
 10.202.4.202,,2c,public,,,3000,my_profile,,
 ```
 
-Example of upgrade command with the csv file:
+See the following example of an upgrade command with a CSV file:
 
 ```shell
 microk8s helm3 upgrade --install snmp -f values.yaml --set-file poller.inventory=inventory.csv splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
diff --git a/docs/configuration/redis-configuration.md b/docs/configuration/redis-configuration.md
index e66793e29..6efb9f110 100644
--- a/docs/configuration/redis-configuration.md
+++ b/docs/configuration/redis-configuration.md
@@ -1,4 +1,4 @@
-#Redis configuration
+# Redis configuration
 
 Recently, RabbitMQ was replaced with Redis as a queue service and periodic task database. The reason for this is to increase SC4SNMP performance and protect against bottlenecks.
 
@@ -7,6 +7,6 @@ Redis both manages periodic tasks and queues the SC4SNMP service. It queues task
 ### Redis configuration file
 
 Redis configuration is kept in the `values.yaml` file in the `redis` section.
-`values.yaml` is being used during the installation process for configuring Kubernetes values.
+`values.yaml` is used during the installation process to configure Kubernetes values.
 
-To edit the configuration, see: [Redis on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/redis)
+To edit the configuration, see [Redis on Kubernetes](https://github.com/bitnami/charts/tree/master/bitnami/redis).
diff --git a/docs/configuration/scheduler-configuration.md b/docs/configuration/scheduler-configuration.md
index 8eca783bd..8fc074887 100644
--- a/docs/configuration/scheduler-configuration.md
+++ b/docs/configuration/scheduler-configuration.md
@@ -1,13 +1,13 @@
 # Scheduler configuration
 
-The scheduler is a service with is responsible for managing schedules for SNMP walks and GETs. Schedules definition
-are stored in Mongo DB.
+The scheduler is a service that manages schedules for SNMP walks and GETs. The definitions of the schedules
+are stored in MongoDB.
 
 ### Scheduler configuration file
 
-Scheduler configuration is kept in `values.yaml` file in section `scheduler`.
-`values.yaml` is being used during the installation process for configuring Kubernetes values.
+Scheduler configuration is kept in the `values.yaml` file, in the `scheduler` section.
+`values.yaml` is used during the installation process to configure Kubernetes values.
 
-Example:
+See the following example:
 ```yaml
 scheduler:
   logLevel: "WARN"
@@ -25,8 +25,8 @@ scheduler:
 ```
 
 ### Define log level
-Log level for scheduler can be set by changing the value for key `logLevel`. Allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
-The default value is `WARNING`
+The log level for the scheduler can be set by changing the value for the `logLevel` key. The allowed values are `DEBUG`, `INFO`, `WARNING`, or `ERROR`.
+The default value is `WARNING`.
 
 ### Define resource requests and limits
 ```yaml
@@ -43,9 +43,9 @@ scheduler:
 ```
 
 ### Define groups of hosts
-To get the general idea when groups are useful see [Configuring Groups](configuring-groups.md).
+For more information on when to use groups, see [Configuring Groups](configuring-groups.md).
 
-Example group configuration:
+See the following example group configuration:
 ```yaml
 scheduler:
   groups: |
@@ -67,12 +67,12 @@ scheduler:
 ```
 
 The one obligatory field for the host configuration is `address`. If `port` isn't configured its default value is `161`.
-Other fields that can be modified here are: `community`, `secret`, `version`, `security_engine`.
-However, if they remain unspecified in the host configuration, they will be derived from the inventory record regarding this specific group.
+Other fields that can be modified here are: `community`, `secret`, `version`, and `security_engine`.
+However, if they remain unspecified in the host configuration, they will be derived from the inventory record.
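+
+A minimal sketch of what the elided `groups` snippet above might contain (the group name and addresses are illustrative; the `port: 163` line shows a per-host override):
+
+```yaml
+scheduler:
+  groups: |
+    switch_group:
+      - address: 10.202.4.201
+      - address: 10.202.4.202
+        port: 163
+```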
 
-### Define tasks expiry time
+### Define the expiration time for tasks
 
-Define time in second after which polling or walk tasks, that haven't been picked up by the worker, will be revoked. Check the [celery documentation](https://docs.celeryq.dev/en/stable/userguide/calling.html#expiration) for more details.
+Define the time, in seconds, after which polling or walk tasks that haven't been picked up by the worker will be revoked. See the [celery documentation](https://docs.celeryq.dev/en/stable/userguide/calling.html#expiration) for more details.
 ```yaml
 scheduler:
     tasksExpiryTime: 300
diff --git a/docs/configuration/sim-configuration.md b/docs/configuration/sim-configuration.md
index 2fa0b4b86..1861f1c87 100644
--- a/docs/configuration/sim-configuration.md
+++ b/docs/configuration/sim-configuration.md
@@ -13,7 +13,7 @@ sim:
 
 You need to specify Splunk Observability Cloud token and realm. There are two ways of configuring them:
 
-1. Pass those in a plain text via `values.yaml` so at the end sim element looks like this:
+1. Pass those in plain text using `values.yaml`, so at the end, the sim element looks like the following:
 
 ```yaml
 sim:
@@ -22,7 +22,7 @@ sim:
   signalfxRealm: us0
 ```
 
-2. Alternatively, create microk8s secret by yourself and pass its name in `values.yaml` file. Create secret:
+2. Alternatively, create the microk8s secret by yourself and pass its name to the `values.yaml` file. Use the following command to create it:
 
 ```
 microk8s kubectl create -n  secret generic \
@@ -30,9 +30,9 @@ microk8s kubectl create -n  secret generic \
 --from-literal=signalfxRealm=
 ```
 
-Modify `sim.secret` section of `values.yaml`. Disable creation of the secret with `sim.secret.create` and provide the
-`` matching the one from the previous step. Pass it via `sim.secret.name`. For example, for ``=`signalfx`
-the `sim` section would look like:
+Modify the `sim.secret` section of `values.yaml`. Disable the creation of the secret with `sim.secret.create` and provide the
+``, matching the one from the previous step. Pass it using `sim.secret.name`. For example, for ``=`signalfx`,
+the `sim` section would look like the following:
 
 ```yaml
 sim:
@@ -47,7 +47,7 @@ of `values.yaml` (given by `sim.secret.name`), you need to roll out the deployme
 
 ### Define annotations
 
-In case you need to append some annotations to the `sim` service, you can do it by setting `sim.service.annotations`, for ex.:
+In case you need to append some annotations to the `sim` service, you can do it by setting `sim.service.annotations`, for example:
 
 ```yaml
 sim:
diff --git a/docs/configuration/snmp-data-format.md b/docs/configuration/snmp-data-format.md
index e218611de..e4fb2c899 100644
--- a/docs/configuration/snmp-data-format.md
+++ b/docs/configuration/snmp-data-format.md
@@ -1,9 +1,9 @@
 # SNMP Data Format
 
-SC4SNMP classifies SNMP data elements as metrics or textual fields. We assume that the metric types are the indicators worth monitoring,
-that changes dynamically, and textual fields are the context helpful to understand what an SNMP object really means.
+SC4SNMP classifies SNMP data elements as metrics or textual fields. Metric types are usually the indicators worth monitoring,
+which change dynamically, while textual fields are helpful context to understand what an SNMP object means.
 
-SC4SNMP classify the data element as a metric when its type is one of:
+SC4SNMP classifies the data element as a metric when its type is one of the following:
 
 - `Unsigned`
 - `Counter`
@@ -13,8 +13,7 @@ SC4SNMP classify the data element as a metric when its type is one of:
 
 Every other type is interpreted as a field value.
 
-Sometimes, the MIB file indicates a field as an `INTEGER`, but there is also some mapping defined, like for
-example in case of `IF-MIB.ifOperStatus`:
+Sometimes, the MIB file indicates a field as an `INTEGER`, but there is also some mapping defined. See the following `IF-MIB.ifOperStatus` example:
 
 ```
 ifOperStatus OBJECT-TYPE
@@ -32,11 +31,11 @@ ifOperStatus OBJECT-TYPE
 ```
 [source](https://www.circitor.fr/Mibs/Mib/I/IF-MIB.mib)
 
-Here we expect some numeric value, but actually what SNMP Agents gets from the device is a `string` value,
-like `up`. To avoid setting textual value as a metrics, SC4SNMP does an additional check and tries to cast the
-numeric value to float. If the check fails, the values is classified as a textual field.
+Here a numeric value is expected, but what the SNMP agent actually ends up receiving from the device is a `string` value,
+like `up`. To avoid setting a textual value as a metric, SC4SNMP does an additional check and tries to cast the
+numeric value to float. If the check fails, the value is classified as a textual field.
 
-Let's go through a simple example. We've just added a device and didn't configure anything special. The data from a walk
+See the following simple example. You just added a device and didn't configure anything special. The data from a walk
 in Splunk's metrics index is:
 
 ```
@@ -62,7 +61,7 @@ in Splunk's metrics index is:
 }
 ```
 
-Clearly we can see the textual part:
+You can see a textual part:
 
 ```
 ifAdminStatus: up
@@ -94,7 +93,7 @@ And a metric one:
 
 ### Metric index
 
 The rule is, if we poll a profile with AT LEAST one metric value, it will go to the metric index and will be
-enriched with all the textual fields we have for this object. For example, when polling:
+enriched with all the textual fields you have for the object. For example, when polling:
 
 ```yaml
 profile_with_one_metric:
@@ -104,7 +103,7 @@ profile_with_one_metric:
       - ['IF-MIB', 'ifInUcastPkts']
 ```
 
-The record that we'll see in Splunk `| mpreview index=net*` for the same case as above would be:
+The record that you'll see in Splunk `| mpreview index=net*` for the same case as the previous one would be:
 
 ```
 ifAdminStatus: up
@@ -117,12 +116,10 @@ The record that we'll see in Splunk `| mpreview index=net*` for the same case as
 metric_name:sc4snmp.IF-MIB.ifInUcastPkts: 47512921
 ```
 
-Note, that only fields specified in `varBinds` are actively polled form the device. In case of `profile_with_one_metric`
-shown above, the textual fields `ifAdminStatus`, `ifDescr`, `ifIndex`, `ifOperStatus` and `ifPhysAddress` are taken from
-the database cache, which is updated on every `walk` process. This is fine for the most of the cases, as things like
-MAC address, interface type or interface status shouldn't change frequently if ever.
+Only fields specified in `varBinds` are actively polled from the device. In the case of the previous `profile_with_one_metric`, the textual fields `ifAdminStatus`, `ifDescr`, `ifIndex`, `ifOperStatus` and `ifPhysAddress` are taken from the database cache, which is updated on every walk process. This is fine in most cases, as values such as
+MAC address, interface type, or interface status shouldn't change frequently, if at all.
 
-If you want to keep `ifOperStatus` and `ifAdminStatus` up to date all the time, define profile like:
+If you want to keep `ifOperStatus` and `ifAdminStatus` up to date all the time, define the profile using the following example:
 
 ```yaml
 profile_with_one_metric:
@@ -138,8 +135,8 @@ The result in Splunk will look the same, but `ifOperStatus` and `ifAdminStatus`
 
 ### Event index
 
-It is possible to create an event without a single metric value, in such scenario it will go to an event index.
-An example of such profile would be:
+It is possible to create an event without a single metric value. In such a scenario, it will go to an event index.
+See the following example of a profile for that scenario:
 
 ```yaml
 profile_with_only_textual_fields:
@@ -150,7 +147,7 @@ profile_with_only_textual_fields:
       - ['IF-MIB', 'ifOperStatus']
 ```
 
-In this case no additional enrichment will be done. The events in event index `index=netops` of Splunk will look like:
+In this case, no additional enrichment will be done. The events in the event index `index=netops` of Splunk would look like:
 
 ```
 { [-]
@@ -176,4 +173,4 @@ In this case no additional enrichment will be done. The events in event index `i
    value: up
  }
 }
-```
\ No newline at end of file
+```
diff --git a/docs/configuration/snmpv3-configuration.md b/docs/configuration/snmpv3-configuration.md
index 6865f0a88..cd288c1ae 100644
--- a/docs/configuration/snmpv3-configuration.md
+++ b/docs/configuration/snmpv3-configuration.md
@@ -1,4 +1,4 @@
-### Create SNMP v3 users
+# SNMPv3 user configuration
 
 Configuration of SNMP v3, when supported by the monitored devices, is the most secure choice available
 for authentication and data privacy. Each set of credentials will be stored as "Secret" objects in k8s,
diff --git a/docs/configuration/step-by-step-poll.md b/docs/configuration/step-by-step-poll.md
index 5bca728b2..be4dcd65a 100644
--- a/docs/configuration/step-by-step-poll.md
+++ b/docs/configuration/step-by-step-poll.md
@@ -1,6 +1,6 @@
 # An example of a polling scenario
 
-We have 4 hosts we want to poll from:
+In the following example, there are 4 hosts you want to poll from:
 
 1. `10.202.4.201:161`
 2. `10.202.4.202:161`
@@ -10,7 +10,7 @@ To retrieve data from the device efficiently, first determine the specific data
 needed. Instead of walking through the entire `1.3.6.1`, limit the walk to poll only the necessary data. Configure
 the `IF-MIB` family for interfaces and the `UCD-SNMP-MIB` for CPU-related statistics. In the `scheduler` section of `values.yaml`, define the target group and
-establish the polling parameters, known as the profile, to gather the desired data precisely:
+establish the polling parameters, known as the profile, to gather the desired data precisely. See the following example:
 
 ```yaml
 scheduler:
@@ -48,7 +48,7 @@ scheduler:
         port: 163
 ```
 
-Then it is required to pass the proper instruction of what to do for SC4SNMP instance. This can be done by appending a new row
+Next, you need to pass the proper instructions to the SC4SNMP instance. To do this, append a new row
 to `poller.inventory`:
 
 ```yaml
@@ -59,14 +59,14 @@ poller:
     switch_group,,2c,public,,,2000,small_walk;switch_profile,,
 ```
 
-The provided configuration will make:
+The provided example configuration will:
 
 1. Walk devices from `switch_group` with `IF-MIB` and `UCD-SNMP-MIB` every 2000 seconds
 2. Poll specific `IF-MIB` fields and the whole `UCD-SNMP-MIB` every 60 seconds
 
-Note: you could as well limit walk profile even more if you want to enhance the performance.
+Note: You can also limit the walk profile even more if you want to enhance the performance.
 
-It makes sense to put in the walk the textual values that don't required to be constantly monitored, and monitor only the metrics
+It makes sense to put the textual values that don't need to be constantly monitored in the walk, and monitor only the metrics
 you're interested in:
 
 ```
@@ -92,23 +92,22 @@ switch_profile:
   - ["IF-MIB", "ifOutQLen"]
 ```
 
-Then every metric object will be enriched with the textual values gathered from a walk process. Learn more about
-SNMP format [here](snmp-data-format.md).
+Afterwards, every metric object will be enriched with the textual values gathered from a walk process. See [here](snmp-data-format.md) for more information about SNMP format.
 
-Now we're ready to reload SC4SNMP. We run the `helm3 upgrade` command:
+Now you're ready to reload SC4SNMP. Run the following `helm3 upgrade` command:
 
 ```yaml
 microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
 ```
 
-We should see the new pod with `Running` -> `Completed` state:
+Use the following command to watch the new pod go from the `Running` to the `Completed` state:
 
 ```yaml
 microk8s kubectl get pods -n sc4snmp -w
 ```
 
-Example output:
+See the following example output:
 ```yaml
 NAME                                                          READY   STATUS      RESTARTS   AGE
 snmp-splunk-connect-for-snmp-worker-sender-5bc5cf864b-cwmfw   1/1     Running     0          5h52m
@@ -127,13 +126,13 @@ snmp-splunk-connect-for-snmp-inventory-g4bs7                  0/1     Completed
 snmp-splunk-connect-for-snmp-inventory-g4bs7                  0/1     Completed   0          7s
 ```
 
-We can check the pod's logs to make sure everything was reloaded right, with:
+Check the pod's logs to make sure everything was reloaded correctly, using the following command:
 
 ```yaml
 microk8s kubectl logs -f snmp-splunk-connect-for-snmp-inventory-g4bs7 -n sc4snmp
 ```
 
-Example output:
+See the following example output:
 
 ```yaml
 Successfully connected to redis://snmp-redis-headless:6379/0
@@ -147,22 +146,22 @@ Successfully connected to http://snmp-mibserver/index.csv
 {"message": "New Record address='10.202.4.204' port=163 version='2c' community='public' secret=None security_engine=None walk_interval=2000 profiles=['switch_profile'] smart_profiles=True delete=False", "time": "2022-09-05T14:30:30.607641", "level": "INFO"}
 ```
 
-In some time (depending on how long the walk takes), we'll see events under:
+In some time (depending on how long the walk takes), you'll see events using the following query:
 
 ```yaml
 | mpreview index=netmetrics | search profiles=switch_profile
 ```
 
-query in Splunk. When groups are used, we can also use querying by the group name:
+When groups are used, you can also query by the group name, for example:
 
 ```yaml
 | mpreview index=netmetrics | search group=switch_group
 ```
 
-Keep in mind, that querying by profiles/group in Splunk is only possible in the metrics index. Every piece of data being sent
+Querying by profiles/group in Splunk is only possible in the metrics index. Every piece of data being sent
 by SC4SNMP is formed based on the MIB file's definition of the SNMP object's index. The object is forwarded to an event
 index only if it doesn't have any metric value inside.
 
-The `raw` metrics in Splunk example is:
+The following is a Splunk `raw` metrics example:
 
 ```json
 {
diff --git a/docs/configuration/trap-configuration.md b/docs/configuration/trap-configuration.md
index 9985e1bc1..e321615fa 100644
--- a/docs/configuration/trap-configuration.md
+++ b/docs/configuration/trap-configuration.md
@@ -1,12 +1,13 @@
-#Trap Configuration
-A trap service is a simple server that can handle SNMP traps sent by SNMP devices like routers or switches.
+# Trap Configuration
+
+A trap service is a simple server that can handle SNMP traps sent by SNMP devices, such as routers or switches.
 
 ### Trap configuration file
 
 The trap configuration is kept in the `values.yaml` file in section traps.
 `values.yaml` is used during the installation process for configuring Kubernetes values.
 
-Trap example configuration:
+See the following trap example configuration:
 ```yaml
 traps:
   communities:
@@ -35,10 +36,10 @@ traps:
 ```
 
 ### Define communities
-`communities` define a version of SNMP protocol and SNMP community string, which should be used.
-`communities` key is split by protocol version, supported values are `1` and `2c`. Under the `version` section, SNMP community string can be defined.
+`communities` defines a version of an SNMP protocol and an SNMP community string, which should be used.
+The `communities` key is split by protocol version, with `1` and `2c` as supported values. Under the `version` section, you can define the SNMP community string.
 
-Example:
+See the following example:
 ```yaml
 traps:
   communities:
@@ -50,11 +51,11 @@ traps:
 ```
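+
+The hunk above elides the example body. A minimal sketch of a `2c` community definition (the `public` string is illustrative):
+
+```yaml
+traps:
+  communities:
+    2c:
+      - public
+```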
 
 ### Configure user secrets for SNMPv3
-The `usernameSecrets` key in the `traps` section define SNMPv3 secrets for trap messages sent by SNMP device. `usernameSecrets` define which secrets
+The `usernameSecrets` key in the `traps` section defines the SNMPv3 secrets for the trap messages sent by the SNMP device. `usernameSecrets` defines which secrets
 in "Secret" objects in k8s should be used, as a value it needs the name of "Secret" objects.
-More information on how to define the "Secret" object for SNMPv3 can be found in [SNMPv3 Configuration](snmpv3-configuration.md).
+For more information on how to define the "Secret" object for SNMPv3, see [SNMPv3 Configuration](snmpv3-configuration.md).
 
-Example:
+See the following example:
 ```yaml
 traps:
   usernameSecrets:
@@ -68,17 +69,16 @@ SNMPv3 TRAPs require the configuration SNMP Engine ID of the TRAP sending applic
 application for each USM user. The SNMP Engine ID is usually unique for the device, and the SC4SNMP as a trap receiver
 has to be aware of which security engine IDs to accept. Define all of them under `traps.securityEngineId` in `values.yaml`.
-By default, it is set to one-element list: `[80003a8c04]`.
+By default, it is set to a one-element list: `[80003a8c04]`, for example:
 
-Example:
 ```yaml
 traps:
   securityEngineId:
     - "80003a8c04"
 ```
 
-Security engine ID is a substitute of the `-e` variable in `snmptrap`.
-An example of SNMPv3 trap is:
+The security engine ID is a substitute of the `-e` variable in `snmptrap`.
+The following is an example of an SNMPv3 trap:
 
 ```yaml
 snmptrap -v3 -e 80003a8c04 -l authPriv -u snmp-poller -a SHA -A PASSWORD1 -x AES -X PASSWORD1 10.202.13.233 '' 1.3.6.1.2.1.2.2.1.1.1
@@ -86,16 +86,16 @@ snmptrap -v3 -e 80003a8c04 -l authPriv -u snmp-poller -a SHA -A PASSWORD1 -x AES
 
 ### Define external gateway for traps
 
-If you use SC4SNMP standalone, configure `loadBalancerIP`.
+If you use SC4SNMP on a single machine, configure `loadBalancerIP`.
 `loadBalancerIP` is the IP address in the metallb pool.
-Example:
+See the following example:
 ```yaml
 traps:
   loadBalancerIP: 10.202.4.202
 ```
 
-If you want to use SC4SNMP trap receiver in K8S cluster, configure `NodePort` instead. The snippet of config is:
+If you want to use the SC4SNMP trap receiver in a K8S cluster, configure `NodePort` instead. Use the following configuration:
 
 ```yaml
 traps:
@@ -105,16 +105,15 @@ traps:
     nodePort: 30000
 ```
 
-Using this method, SNMP trap will always be forwarded to one of the trap receiver pods listening on port 30000 (as in the
-example above, remember - you can configure any other port). So doesn't matter IP address of which node you use, adding
-nodePort will make it end up in a correct place everytime.
+Using this method, the SNMP trap will always be forwarded to one of the trap receiver pods listening on port 30000 (like in the
+example above; you can configure any other port). So, it doesn't matter which node's IP address you use. Adding
+nodePort will make the trap end up in the correct place every time.
 
-Here, good practice is to create IP floating address/Anycast pointing to the healthy nodes, so the traffic is forwarded in case of the
-failover. The best way is to create external LoadBalancer which balance the traffic between nodes.
+A good practice is to create a floating IP address/Anycast pointing to the healthy nodes, so the traffic is forwarded in case of
+failover. To do this, create an external LoadBalancer that balances the traffic between nodes.
 
 ### Define number of traps server replica
 
-`replicaCount` defines that the number of replicas for trap container should be 2x number of nodes. The default value is `2`.
-Example:
+`replicaCount` defines the number of trap container replicas. For production deployments, it should be at least 2 times the number of nodes; the default value is `2`.
 ```yaml
 traps:
   #For production deployments the value should be at least 2x the number of nodes
@@ -124,11 +123,11 @@ traps:
 ```
 
 ### Define log level
-The log level for trap can be set by changing the value for the `logLevel` key. The allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
+The log level for trap can be set by changing the value for the `logLevel` key. The allowed values are `DEBUG`, `INFO`, `WARNING`, or `ERROR`.
 The default value is `WARNING`.
 
 ### Define annotations
-In case you need to append some annotations to the `trap` service, you can do so by setting `traps.service.annotations`, for ex.:
+In case you need to append some annotations to the `trap` service, you can do so by setting `traps.service.annotations`, for example:
 
 ```yaml
 traps:
@@ -138,20 +137,19 @@ traps:
 ```
 
 ### Aggregate traps
-In case you want to see traps events collected as one event inside splunk you can enable it by setting `traps.aggregateTrapsEvents`.
-Example:
+In case you want to see trap events collected as one event inside Splunk, you can enable it by setting `traps.aggregateTrapsEvents`, for example:
 ```yaml
 traps:
   aggregateTrapsEvents: "true"
 ```
 
 ### Updating trap configuration
-If you need to update part of traps configuration, you can do it by editing the `values.yaml` and then running below command to restart the pod deployment.
+If you need to update part of the traps configuration, you can do it by editing the `values.yaml` and then running the following command to restart the pod deployment:
 ```
 microk8s kubectl rollout restart deployment snmp-splunk-connect-for-snmp-trap -n sc4snmp
 ```
-NOTE: Name of the deployment can differ based on helm installation name and can be checked with:
+NOTE: The name of the deployment can differ based on the helm installation name. This can be checked with the following command:
 ```
 microk8s kubectl get deployments -n sc4snmp
-```
\ No newline at end of file
+```
diff --git a/docs/configuration/worker-configuration.md b/docs/configuration/worker-configuration.md
index b0171b6c0..6734cd2c6 100644
--- a/docs/configuration/worker-configuration.md
+++ b/docs/configuration/worker-configuration.md
@@ -16,7 +16,7 @@ SC4SNMP has two base functionalities: monitoring traps and polling. These operat
 
 Worker configuration is kept in the `values.yaml` file in the `worker` section. `worker` has 3 subsections: `poller`, `sender`, or `trap`, that refer to the workers' types.
 `values.yaml` is used during the installation process for configuring Kubernetes values.
 
-The `worker` default configuration is:
+The `worker` default configuration is the following:
 
 ```yaml
 worker:
@@ -24,6 +24,11 @@ worker:
   trap:
     # replicaCount: number of trap-worker pods which consumes trap tasks
     replicaCount: 2
+    # Use reverse dns lookup of trap ip address and send the hostname to splunk
+    resolveAddress:
+      enabled: false
+      cacheSize: 500 # maximum number of records in cache
+      cacheTTL: 1800 # time to live of the cached record in seconds
     #autoscaling: use it instead of replicaCount in order to make pods scalable by itself
     #autoscaling:
     #  enabled: true
@@ -61,12 +66,12 @@ All parameters are described in the [Worker parameters](#worker-parameters) sect
 
 You can adjust worker pods in two ways: set fixed value in `replicaCount`, or enable `autoscaling`, which scales pods automatically.
 
-#### Real life scenario: I use SC4SNMP for only trap monitoring, I want to use my resources effectively.
+#### Real life scenario: I use SC4SNMP for only trap monitoring, and I want to use my resources effectively.
 
 If you don't use polling at all, set `worker.poller.replicaCount` to `0`.
-If you'll want to use polling in the future, you need to increase `replicaCount`. To monitor traps, adjust `worker.trap.replicaCount` depending on your needs and `worker.sender.replicaCount` to send traps to Splunk. Usually you need much less sender pods than trap ones.
+If you want to use polling in the future, you need to increase `replicaCount`. To monitor traps, adjust `worker.trap.replicaCount` depending on your needs and `worker.sender.replicaCount` to send traps to Splunk. Usually, you need significantly fewer sender pods than trap pods.
 
-This is the example of `values.yaml` without using autoscaling:
+The following is an example of `values.yaml` without using autoscaling:
 
 ```yaml
 worker:
@@ -79,7 +84,7 @@ worker:
     logLevel: "WARNING"
 ```
 
-This is the example of `values.yaml` with autoscaling:
+The following is an example of `values.yaml` with autoscaling:
 
 ```yaml
 worker:
@@ -100,12 +105,12 @@ worker:
     logLevel: "WARNING"
 ```
 
-In the example above both trap and sender pods are autoscaled. During an upgrade process, the number of pods is created through
+In the previous example, both trap and sender pods are autoscaled. During an upgrade process, the number of pods is created through
 `minReplicas`, and then new ones are created only if the CPU threshold exceeds the `targetCPUUtilizationPercentage`, which by default is 80%.
 This solution helps you to keep resources usage adjusted to what you actually need.
 
-After helm upgrade process, you will see `horizontalpodautoscaler` in `microk8s kubectl get all -n sc4snmp`:
+After the helm upgrade process, you will see `horizontalpodautoscaler` in `microk8s kubectl get all -n sc4snmp`:
 
 ```yaml
 NAME                                                                        REFERENCE                                              TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
@@ -114,18 +119,18 @@ horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-sender
 horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-trap   Deployment/snmp-splunk-connect-for-snmp-worker-trap   1%/80%    4         10        4          28m
 ```
 
-If you see `/80%` in `TARGETS` section instead of the CPU percentage, you probably don't have the `metrics-server` add-on enabled.
+If you see `/80%` in the `TARGETS` section instead of the CPU percentage, you probably don't have the `metrics-server` add-on enabled.
#### Real life scenario: I have a significant delay in polling Sometimes when polling is configured to be run frequently and on many devices, workers get overloaded -and there is a delay in delivering data to Splunk. To avoid such situations, we can scale poller and sender pods. -Because of the walk cycles (walk is a costly operation ran once in a while), poller workers require more resources +and there is a delay in delivering data to Splunk. To avoid these situations, scale poller and sender pods. +Because of the walk cycles, (walk is a costly operation that is only run once in a while), poller workers require more resources for a short time. For this reason, enabling autoscaling is recommended. -This is the example of `values.yaml` with autoscaling: +See the following example of `values.yaml` with autoscaling: ```yaml worker: @@ -150,8 +155,7 @@ worker: logLevel: "WARNING" ``` -Remember, that the system won't scale itself infinitely, there is a finite amount of resources that you can allocate. -By default, every worker has configured the following resources: +Remember that the system won’t scale itself infinitely. There is a finite amount of resources that you can allocate. By default, every worker has configured the following resources: ```yaml resources: @@ -164,13 +168,13 @@ By default, every worker has configured the following resources: #### I have autoscaling enabled and experience problems with Mongo and Redis pod -If MongoDB and Redis pods are crushing, and some of the pods are in infinite `Pending` state, that means -you're over your resources and SC4SNMP cannot scale more. You should decrease the number of `maxReplicas` in +If MongoDB and Redis pods are crushing, and some of the pods are in an infinite `Pending` state, that means +you've exhausted your resources and SC4SNMP cannot scale more. You should decrease the number of `maxReplicas` in workers, so that it's not going beyond the available CPU. #### I don't know how to set autoscaling parameters and how many replicas I need -The best way to see if pods are overloaded is to run: +The best way to see if pods are overloaded is to run the following command: ```yaml microk8s kubectl top pods -n sc4snmp @@ -191,37 +195,55 @@ snmp-splunk-connect-for-snmp-worker-trap-5474db6fc6-46zhf 3m 259Mi snmp-splunk-connect-for-snmp-worker-trap-5474db6fc6-mjtpv 4m 259Mi ``` -Here you can see how much CPU and Memory is being used by the pods. If the CPU is close to 500m (which is the limit for one pod by default), -you should enable autoscaling/increase maxReplicas or increase replicaCount with autoscaling off. +Here you can see how much CPU and Memory is being used by the pods. If the CPU is close to 500m, which is the limit for one pod by default, +enable autoscaling/increase maxReplicas or increase replicaCount with autoscaling off. -Here you can read about Horizontal Autoscaling and how to adjust maximum replica value to the resources you have: [Horizontal Autoscaling.](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) +See [Horizontal Autoscaling.](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to adjust the maximum replica value to the resources you have. 
+### Reverse DNS lookup in trap worker
+
+If you want to see the hostname instead of the IP address of the incoming traps in Splunk, you can enable reverse DNS lookup
+for the incoming traps using the following configuration:
+
+```yaml
+worker:
+  trap:
+    resolveAddress:
+      enabled: true
+      cacheSize: 500 # maximum number of records in cache
+      cacheTTL: 1800 # time to live of the cached record in seconds
+```
+
+The trap worker uses an in-memory cache to store the results of the reverse DNS lookup. If you restart the worker, the cache is cleared.
 
 ### Worker parameters
 
-| variable | description | default |
-| --- | --- |---------|
-| worker.taskTimeout | task timeout in seconds (usually necessary when walk process takes a long time) | 2400 |
-| worker.walkRetryMaxInterval | maximum time interval between walk attempts | 180 |
-| worker.poller.replicaCount | number of poller worker replicas | 2 |
-| worker.poller.autoscaling.enabled | enabling autoscaling for poller worker pods | false |
-| worker.poller.autoscaling.minReplicas | minimum number of running poller worker pods when autoscaling is enabled | 2 |
-| worker.poller.autoscaling.maxReplicas | maximum number of running poller worker pods when autoscaling is enabled | 40 |
-| worker.poller.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on poller worker pods to spawn another replica | 80 |
-| worker.poller.resources.limits | the resources limits for poller worker container | {} |
-| worker.poller.resources.requests | the requested resources for poller worker container | {} |
-| worker.trap.replicaCount | number of trap worker replicas | 2 |
-| worker.trap.autoscaling.enabled | enabling autoscaling for trap worker pods | false |
-| worker.trap.autoscaling.minReplicas | minimum number of running trap worker pods when autoscaling is enabled | 2 |
-| worker.trap.autoscaling.maxReplicas | maximum number of running trap worker pods when autoscaling is enabled | 40 |
-| worker.trap.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on trap worker pods to spawn another replica | 80 |
-| worker.trap.resources.limits | the resources limits for poller worker container | {} |
-| worker.trap.resources.requests | the requested resources for poller worker container | {} |
-| worker.sender.replicaCount | number of sender worker replicas | 2 |
-| worker.sender.autoscaling.enabled | enabling autoscaling for sender worker pods | false |
-| worker.sender.autoscaling.minReplicas | minimum number of running sender worker pods when autoscaling is enabled | 2 |
-| worker.sender.autoscaling.maxReplicas | maximum number of running sender worker pods when autoscaling is enabled | 40 |
-| worker.sender.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on sender worker pods to spawn another replica | 80 |
-| worker.sender.resources.limits | the resources limits for poller worker container | {} |
-| worker.sender.resources.requests | the requested resources for poller worker container | {} |
+| Variable | Description | Default |
+|-----------------------------------------------------------|--------------------------------------------------------------------------------------|---------|
+| worker.taskTimeout | Task timeout in seconds (usually necessary when the walk process takes a long time) | 2400 |
+| worker.walkRetryMaxInterval | Maximum time interval between walk attempts | 180 |
+| worker.poller.replicaCount | Number of poller worker replicas | 2 |
+| worker.poller.autoscaling.enabled | Enabling autoscaling for poller worker pods | false |
+| worker.poller.autoscaling.minReplicas | Minimum number of running poller worker pods when autoscaling is enabled | 2 |
+| worker.poller.autoscaling.maxReplicas | Maximum number of running poller worker pods when autoscaling is enabled | 40 |
+| worker.poller.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on poller worker pods to spawn another replica | 80 |
+| worker.poller.resources.limits | The resources limits for the poller worker container | {} |
+| worker.poller.resources.requests | The requested resources for the poller worker container | {} |
+| worker.trap.replicaCount | Number of trap worker replicas | 2 |
+| worker.trap.resolveAddress.enabled | Enable reverse DNS lookup of the IP address of the processed trap | false |
+| worker.trap.resolveAddress.cacheSize | Maximum number of reverse DNS lookup result records stored in cache | 500 |
+| worker.trap.resolveAddress.cacheTTL | Time to live of the cached reverse DNS lookup record in seconds | 1800 |
+| worker.trap.autoscaling.enabled | Enabling autoscaling for trap worker pods | false |
+| worker.trap.autoscaling.minReplicas | Minimum number of running trap worker pods when autoscaling is enabled | 2 |
+| worker.trap.autoscaling.maxReplicas | Maximum number of running trap worker pods when autoscaling is enabled | 40 |
+| worker.trap.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on trap worker pods to spawn another replica | 80 |
+| worker.trap.resources.limits | The resource limit for the trap worker container | {} |
+| worker.trap.resources.requests | The requested resources for the trap worker container | {} |
+| worker.sender.replicaCount | The number of sender worker replicas | 2 |
+| worker.sender.autoscaling.enabled | Enabling autoscaling for sender worker pods | false |
+| worker.sender.autoscaling.minReplicas | Minimum number of running sender worker pods when autoscaling is enabled | 2 |
+| worker.sender.autoscaling.maxReplicas | Maximum number of running sender worker pods when autoscaling is enabled | 40 |
+| worker.sender.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on sender worker pods to spawn another replica | 80 |
+| worker.sender.resources.limits | The resource limit for the sender worker container | {} |
+| worker.sender.resources.requests | The requested resources for the sender worker container | {} |
diff --git a/docs/docker-compose.md b/docs/docker-compose.md
new file mode 100644
index 000000000..42783d09e
--- /dev/null
+++ b/docs/docker-compose.md
@@ -0,0 +1,8 @@
+# Deployment using docker compose
+
+SC4SNMP now offers beta support for docker-compose, providing an alternative method for project deployment.
+While this is in beta, we encourage users to explore it. Although we've conducted extensive testing, occasional issues may arise.
+Your feedback during this phase is crucial for refining and optimizing the support, and can be shared using [issues](https://github.com/splunk/splunk-connect-for-snmp/issues).
+To get started, refer to the [documentation](https://github.com/splunk/splunk-connect-for-snmp/tree/fix/docker-compose/docs/dockercompose)
+and the `fix/docker-compose` branch for instructions on setting up and running SC4SNMP using docker-compose.
+Your involvement in testing docker-compose support is pivotal, and we look forward to hearing about your experiences.
\ No newline at end of file diff --git a/docs/gettingstarted/mk8s/k8s-microk8s.md b/docs/gettingstarted/mk8s/k8s-microk8s.md index 97e89b6c9..7d8084bf4 100644 --- a/docs/gettingstarted/mk8s/k8s-microk8s.md +++ b/docs/gettingstarted/mk8s/k8s-microk8s.md @@ -16,8 +16,8 @@ Three node minimum per node: # MicroK8s installation on Ubuntu -The following quick start guidance is based on Ubuntu 20.04LTS with MicroK8s with internet access. Other deployment options -may be found in the MicroK8s [documentation](https://microk8s.io/docs) including offline and with proxy. +The following quick start guidance is based on Ubuntu 20.04LTS with MicroK8s and internet access. See other deployment options +in the MicroK8s [documentation](https://microk8s.io/docs), including offline and with proxy. ## Install MicroK8s using Snap @@ -37,18 +37,18 @@ Wait for Installation of Mk8S to complete: microk8s status --wait-ready ``` -## Add additional nodes (optional) +## Add nodes (optional) -* Repeat the steps above for each additional node (minimum total 3) -* On the first node issue the following to return the instructions to join: +* Repeat the steps above for each additional node (with a minimum of 3 nodes). +* On the first node, use the following command to see the instructions to join: ```bash microk8s add-node ``` -* On each additional node, use the output from the command above +* On each additional node, use the output from the command above. -## Install basic services required for sc4snmp +## Install required services for SC4SNMP The following commands can be issued from any one node in a cluster: @@ -62,7 +62,7 @@ microk8s status --wait-ready ``` Install the DNS server for mk8s and configure the forwarding DNS servers. Replace the IP addressed below (opendns) with -allowed values for your network: +the allowed values for your network: ```bash microk8s enable dns:208.67.222.222,208.67.220.220 @@ -71,8 +71,8 @@ microk8s status --wait-ready ## Install Metallb -Note: when installing Metallb you will be prompted for one or more IPs to use as entry points -into the cluster. If your plan to enable clustering, this IP should not be assigned to the host (floats). +When installing Metallb, you will be prompted for one or more IPs to use as entry points +into the cluster. If you plan to enable clustering, this IP should not be assigned to the host (floats). If you do not plan to cluster, then this IP should be the IP of your host. Note2: a single IP in cidr format is x.x.x.x/32. Use CIDR or range syntax for single server installations. This can be diff --git a/docs/gettingstarted/sc4snmp-installation.md b/docs/gettingstarted/sc4snmp-installation.md index 6937987f5..0a8e709c8 100644 --- a/docs/gettingstarted/sc4snmp-installation.md +++ b/docs/gettingstarted/sc4snmp-installation.md @@ -32,9 +32,9 @@ splunk-connect-for-snmp/splunk-connect-for-snmp 1.0.0 1.0.0 #### Download and modify values.yaml -The installation of SC4SNMP requires the creation of a `values.yaml` file, which serves as the configuration file. To configure this file, follow these steps: +The installation of SC4SNMP requires the creation of a `values.yaml` file, which serves as the configuration file. To configure this file, see the following steps: -1. Start with checking out the [basic configuration template][basic_template_link] +1. Review the [basic configuration template][basic_template_link]. 2. Review the [examples][examples_link] to determine which areas require configuration. 3. 
For more advanced configuration options, refer to the complete default [values.yaml](https://github.com/splunk/splunk-connect-for-snmp/blob/main/charts/splunk-connect-for-snmp/values.yaml) or download it directly from Helm using the command `microk8s helm3 show values splunk-connect-for-snmp/splunk-connect-for-snmp`.
@@ -51,7 +51,7 @@ microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp/splunk-connec
 ```
 From now on, when editing SC4SNMP configuration, the configuration change must be
-inserted in the corresponding section of `values.yaml`. For more details check [configuration](../configuration/deployment-configuration.md) section.
+inserted in the corresponding section of `values.yaml`. For more details, see the [configuration](../configuration/deployment-configuration.md) section.
 Use the following command to propagate configuration changes:
 ``` bash
@@ -81,11 +81,11 @@ snmp-splunk-connect-for-snmp-trap-78759bfc8b-79m6d 1/1 Running
 snmp-splunk-connect-for-snmp-inventory-mjccw 0/1 Completed 0 6s
 ```
-The output may vary depending on the configuration. In the above example, both polling and traps are configured,
+The output might vary depending on the configuration. In the above example, both polling and traps are configured,
 and the data is being sent to Splunk.
-If you have `traps` configured, you should see `EXTERNAL-IP` in `snmp-splunk-connect-for-snmp-trap` service.
-Check it using the command:
+If you have `traps` configured, you should see `EXTERNAL-IP` in the `snmp-splunk-connect-for-snmp-trap` service.
+Check it using the following command:
 ```bash
 microk8s kubectl get svc -n sc4snmp
@@ -103,11 +103,10 @@ snmp-mongodb-metrics ClusterIP 10.152.183.217
 snmp-splunk-connect-for-snmp-trap LoadBalancer 10.152.183.33 10.202.9.21 162:30161/UDP 33h
 ```
-If there's `<pending>` communicate instead of the IP address, that means you either provided the wrong IP address
+If you see `<pending>` instead of the IP address, it means that you either provided the wrong IP address
 in `traps.loadBalancerIP` or there's something wrong with the `metallb` microk8s addon.
-For the sake of the example, let's assume we haven't changed the default indexes names and the metric data goes to `netmetrics`
-and the events goes to `netops`.
+In the following example, the default indexes are used, the metric data goes to `netmetrics`, and the events go to `netops`.
 #### Test SNMP Traps
@@ -119,9 +118,9 @@ apt-get install snmpd
 snmptrap -v2c -c public EXTERNAL-IP 123 1.3.6.1.2.1.1.4 1.3.6.1.2.1.1.4 s test
 ```
-Remember to replace `EXTERNAL-IP` with the ip address of the `snmp-splunk-connect-for-snmp-trap` service from the above.
+Remember to replace `EXTERNAL-IP` with the IP address of the `snmp-splunk-connect-for-snmp-trap` service from the previous output.
-2. Search Splunk: You should see one event per trap command with the host value of the test machine `EXTERNAL-IP` IP address.
+2. After running the following search in the Splunk search box, you should see one event per trap command, with the host value set to the test machine's `EXTERNAL-IP` address:
 ``` bash
index="netops" sourcetype="sc4snmp:traps"
 ```
 #### Test SNMP Poller
 1. To test SNMP poller, you can either use the device you already have, or configure snmpd on your Linux system.
-Snmpd needs to be configured to listen on the external IP.
To enable listening snmpd to external IP, go to the `/etc/snmp/snmpd.conf` configuration file, and replace the IP address `10.0.101.22` with the server IP address where snmpd is configured:
-`agentaddress 10.0.101.22,127.0.0.1,[::1]`. Restart snmpd through the execute command:
+Snmpd needs to be configured to listen on the external IP. To enable snmpd to listen on the external IP, go to the `/etc/snmp/snmpd.conf` configuration file, and replace the IP address `10.0.101.22` with the IP address of the server where snmpd is configured:
+`agentaddress 10.0.101.22,127.0.0.1,[::1]`. Restart snmpd using the following commands:
 ``` bash
 service snmpd stop
@@ -139,7 +138,7 @@ service snmpd start
 ```
 2. Configure SC4SNMP Poller to test and add the IP address which you want to poll. Add the configuration entry into the `values.yaml` file by
-replacing the IP address `10.0.101.22` with the server IP address where the snmpd was configured.
+replacing the IP address `10.0.101.22` with the IP address of the server where snmpd was configured. See the following:
 ``` bash
poller:
  inventory: |
@@ -147,13 +146,13 @@ poller:
    10.0.101.22,,2c,public,,,42000,,,
 ```
-3. Load `values.yaml` file into SC4SNMP
+3. Load the `values.yaml` file into SC4SNMP using the following command:
 ``` bash
microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
 ```
-4. Verify if the records appeared in Splunk:
+4. Verify that the records appeared in Splunk using the following searches:
 ``` bash
index="netops" sourcetype="sc4snmp:event"
@@ -163,18 +162,18 @@ index="netops" sourcetype="sc4snmp:event"
| mpreview index="netmetrics" | search sourcetype="sc4snmp:metric"
 ```
-NOTE: Before polling starts, SC4SNMP must perform SNMP WALK process on the device. It is run first time after configuring the new device, and then the run time in every `walk_interval`.
+NOTE: Before polling starts, SC4SNMP must perform the SNMP WALK process on the device. It runs for the first time after the new device is configured, and then at every `walk_interval`.
 Its purpose is to gather all the data and provide meaningful context for the polling records. For example, it might report that your device is so large that the walk takes too long, so the scope of walking needs to be limited.
-In such cases, enable the small walk. See: [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time).
+In such cases, enable the small walk, sketched at the end of this page. See [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time).
 When the walk finishes, events appear in Splunk.
 ## Next Steps
 A good way to start with SC4SNMP polling is to follow the [Step by Step guide for polling](../configuration/step-by-step-poll.md).
-Advanced configuration of polling is available in [Poller configuration](../configuration/poller-configuration.md) section.
-SNMP data format is explained in [SNMP data format](../configuration/snmp-data-format.md) section.
+Advanced configuration of polling is available in the [Poller configuration](../configuration/poller-configuration.md) section.
+The SNMP data format is explained in the [SNMP data format](../configuration/snmp-data-format.md) section.
-For advanced trap configuration, check the [Traps configuration](../configuration/trap-configuration.md) section.
+For advanced trap configuration, see the [Traps configuration](../configuration/trap-configuration.md) section.
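+
+Returning to the small walk mentioned in the NOTE above, the following is a rough sketch of what such a profile can look like in `values.yaml`. The profile name and MIB list are illustrative only; see [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time) for the authoritative syntax:
+
+```yaml
+scheduler:
+  profiles: |
+    small_walk:
+      # a profile with a walk condition replaces the full walk
+      condition:
+        type: "walk"
+      varBinds:
+        - ['IF-MIB']
+        - ['IP-MIB']
+        - ['SNMPv2-MIB']
+```
+
+Such a profile is typically assigned to the device in the poller inventory, which limits the walk to the listed MIB families instead of the whole device tree.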
## Uninstall Splunk Connect for SNMP To uninstall SC4SNMP run the following commands: @@ -186,4 +185,4 @@ To uninstall SC4SNMP run the following commands: [examples_link]: https://github.com/splunk/splunk-connect-for-snmp/tree/main/examples -[basic_template_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/basic_template.md \ No newline at end of file +[basic_template_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/basic_template.md diff --git a/docs/gettingstarted/sck-installation.md b/docs/gettingstarted/sck-installation.md index 4097a9624..aa6daf0e9 100644 --- a/docs/gettingstarted/sck-installation.md +++ b/docs/gettingstarted/sck-installation.md @@ -1,16 +1,15 @@ # Splunk OpenTelemetry Collector for Kubernetes installation -Splunk OpenTelemetry Collector for Kubernetes is not required for SC4SNMP installation. This is the tool that sends logs -and metrics from a k8s cluster to a Splunk instance, which makes SC4SNMP easier to debug. +Splunk OpenTelemetry Collector for Kubernetes is not required for SC4SNMP installation. However, Splunk OpenTelemetry Collector for Kubernetes sends logs and metrics from a k8s cluster to a Splunk instance, which makes SC4SNMP easier to debug. You can do the same using the `microk8s kubectl logs` command on instances you're interested in, but if you're not proficient in Kubernetes, -Splunk OpenTelemetry Collector for Kubernetes is strongly advised. +Splunk OpenTelemetry Collector for Kubernetes is recommended. -The below steps are sufficient for a Splunk OpenTelemetry Collector installation for the SC4SNMP project with Splunk Enterprise/Enterprise Cloud. +The following steps are sufficient for a Splunk OpenTelemetry Collector installation for the SC4SNMP project with Splunk Enterprise/Enterprise Cloud. In order to learn more about Splunk OpenTelemetry Collector, visit [Splunk OpenTelemetry Collector](https://github.com/signalfx/splunk-otel-collector-chart). ### Offline installation -For offline installation instructions see [this page](../offlineinstallation/offline-sck.md). +For offline installation instructions see [Splunk OpenTelemetry Collector for Kubernetes offline installation](../offlineinstallation/offline-sck.md). 
### Add Splunk OpenTelemetry Collector repository to HELM @@ -20,7 +19,7 @@ microk8s helm3 repo add splunk-otel-collector-chart https://signalfx.github.io/s ## Install Splunk OpenTelemetry Collector with HELM for a Splunk Platform -In order to run Splunk OpenTelemetry Collector on your environment, replace `<>` variables according to the description presented below +In order to run Splunk OpenTelemetry Collector on your environment, replace `<>` variables based on the following description: ```bash microk8s helm3 upgrade --install sck \ --set="clusterName=" \ @@ -44,7 +43,7 @@ microk8s helm3 upgrade --install sck \ | splunk_token | Splunk HTTP Event Collector token | 450a69af-16a9-4f87-9628-c26f04ad3785 | | cluster_name | name of the cluster | my-cluster | -An example of filled up command is: +See the following example of a correctly filled up command: ```bash microk8s helm3 upgrade --install sck \ --set="clusterName=my-cluster" \ @@ -59,7 +58,7 @@ microk8s helm3 upgrade --install sck \ ## Install Splunk OpenTelemetry Collector with HELM for Splunk Observability for Kubernetes -To run Splunk OpenTelemetry Collector on your environment, replace the `<>` variables according to the description presented below: +To run Splunk OpenTelemetry Collector on your environment, replace the `<>` variables based on the following description: ```bash @@ -86,7 +85,7 @@ splunk-otel-collector-chart/splunk-otel-collector | ingest_url | Ingest URL from the Splunk Observability Cloud environment | https://ingest..signalfx.com | | api_url | API URL from the Splunk Observability Cloud environment | https://api..signalfx.com | -An example of a filled up command is: +See the following example of a correctly filled up command: ```bash microk8s helm3 upgrade --install sck --set="clusterName=my_cluster" diff --git a/docs/gettingstarted/splunk-requirements.md b/docs/gettingstarted/splunk-requirements.md index 4d3b092bd..cc5161113 100644 --- a/docs/gettingstarted/splunk-requirements.md +++ b/docs/gettingstarted/splunk-requirements.md @@ -1,28 +1,24 @@ -# Splunk requirements +# Prerequisites for the Splunk Connect for SNMP -## Prepare Splunk +See the following prerequisites for the Splunk Connect for SNMP. -See the following prerequisites for the Splunk Connect for SNMP. - -### Requirements (Splunk Enterprise/Enterprise Cloud) +### Requirements for Splunk Enterprise or Enterprise Cloud 1. Manually create the following indexes in Splunk: - * Indexes for logs and metrics from SC4SNMP Connector: + * Indexes to store Splunk Connect for SNMP logs and metrics: * em_metrics (metrics type) * em_logs (event type) - * Indexes where SNMP Data will be forwarded: + * Destination indexes for forwarding SNMP data: * netmetrics (metrics type) * netops (event type) Note: `netmetrics` and `netops` are the default names of SC4SNMP indexes. You can use the index names of your choice and -reference it in the `values.yaml` file later on. See parameters and instructions for details: [SC4SNMP Parameters](sc4snmp-installation.md#configure-splunk-enterprise-or-splunk-cloud-connection). - +reference it in the `values.yaml` file later on. See [SC4SNMP Parameters](sc4snmp-installation.md#configure-splunk-enterprise-or-splunk-cloud-connection) for details. 2. Create or obtain a new Splunk HTTP Event Collector token and the correct HTTPS endpoint. -3. Verify the token using [curl](https://docs.splunk.com/Documentation/Splunk/8.1.3/Data/FormateventsforHTTPEventCollector). Note: The endpoint must use a publicly trusted certificate authority. - -4. 
The SHARED IP address to be used for SNMP Traps. Note Simple and POC deployments will use the same IP as the host server. If HA deployment will be used, the IP must be in addition to the management interface of each cluster member.
+3. Verify the token using [curl](https://docs.splunk.com/Documentation/Splunk/8.1.3/Data/FormateventsforHTTPEventCollector). The endpoint must use a publicly trusted certificate authority.
+4. Use the shared IP address for SNMP traps. Simple and POC deployments will use the same IP address as the host server. For an HA deployment, the shared IP address must be in addition to the management interface address of each cluster member.
 5. Obtain the IP address of an internal DNS server that can resolve the Splunk Endpoint.
 ### Requirements (Splunk Infrastructure Monitoring)
diff --git a/docs/gui/apply-changes.md b/docs/gui/apply-changes.md new file mode 100644 index 000000000..6530e3316 --- /dev/null +++ b/docs/gui/apply-changes.md
@@ -0,0 +1,18 @@
+# Apply changes
+
+
+In order to apply changes from the GUI to the core SC4SNMP, press the `Apply changes` button. An update can be made a minimum of 5 minutes
+after the previous one was applied. If the `Apply changes` button is clicked earlier, a new update will be scheduled automatically
+and the following message with the ETA will be displayed:
+
+
+![ETA](../images/ui_docs/apply_changes/update_time.png){ style="border:2px solid; width:500px; height:auto" }
+
+
+A scheduled update triggers a new Kubernetes job, `job/snmp-splunk-connect-for-snmp-inventory`. If the ETA has elapsed and the
+previous `job/snmp-splunk-connect-for-snmp-inventory` is still present in the `sc4snmp` Kubernetes namespace,
+creation of the new job will be retried 10 times. If `Apply changes` is clicked during the retries, the following message
+will be displayed:
+
+
+![Retries](../images/ui_docs/apply_changes/retries.png){ style="border:2px solid; width:500px; height:auto" }
\ No newline at end of file
diff --git a/docs/gui/enable-gui.md b/docs/gui/enable-gui.md new file mode 100644 index 000000000..bd4fe3628 --- /dev/null +++ b/docs/gui/enable-gui.md
@@ -0,0 +1,33 @@
+# SC4SNMP GUI
+
+The SC4SNMP GUI is deployed in Kubernetes and can be accessed through a web browser.
+
+## Enabling GUI
+
+To enable the GUI, the following section must be added to the `values.yaml` file, and the `UI.enable` variable must be set to `true`:
+
+```yaml
+UI:
+  enable: true
+  frontEnd:
+    NodePort: 30001
+    pullPolicy: "Always"
+  backEnd:
+    NodePort: 30002
+    pullPolicy: "Always"
+  valuesFileDirectory: ""
+  valuesFileName: ""
+  keepSectionFiles: true
+```
+
+- `NodePort`: the port number on which the GUI will be accessible. It has to be in the range `30000-32767`.
+- `pullPolicy`: the [Kubernetes pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy)
+- `valuesFileDirectory`: an obligatory field if the UI is used. It is an absolute directory path on the host machine where configuration files from the GUI will be generated. It is used to keep all the changes from the GUI so that users can easily switch back from the UI to the current SC4SNMP version. It is advised to create a new folder for these files, because this directory is mounted to the Kubernetes pod and the GUI application has full write access to it.
+- `valuesFileName`: [OPTIONAL] the full name of the configuration file (e.g. `values.yaml`) stored inside the `valuesFileDirectory` directory. If this file name is provided, and the file exists in this directory, the GUI will update the appropriate sections in the provided `values.yaml` file. 
If this file name is not provided, or the provided file name can't be found inside `valuesFileDirectory`, three files with the latest GUI configuration of groups, profiles, and inventory will be created inside that directory. These configurations can be copied and pasted into the appropriate sections of the original `values.yaml` file.
+- `keepSectionFiles`: if a valid `valuesFileName` was provided, the user can decide whether to keep the additional files with the groups, profiles, and inventory configuration by setting this variable to `true` or `false`. If a valid `valuesFileName` was NOT provided, those files are created regardless of this variable.
+
+
+To access the GUI, type the IP address of your Microk8s cluster, followed by the NodePort number from the `frontEnd` section, into the browser, e.g. `192.168.123.13:30001`.
+
+
+
diff --git a/docs/gui/groups-gui.md b/docs/gui/groups-gui.md new file mode 100644 index 000000000..a07bb9e4e --- /dev/null +++ b/docs/gui/groups-gui.md
@@ -0,0 +1,35 @@
+# Configuring groups in GUI
+
+SC4SNMP [groups](../configuration/configuring-groups.md) can be configured in the `Groups` tab.
+
+![Groups tab](../images/ui_docs/groups/groups_tab.png){ style="border:2px solid" }
+
+
+After pressing the `Add group` button or the plus sign next to `Group`, a new group can be added.
+
+
+![New group](../images/ui_docs/groups/add_group.png){style="border:2px solid; width:500px; height:auto" }
+
+
+Configured groups are displayed on the left-hand side, under the `Group name` label. After clicking a group name,
+all devices belonging to the given group are displayed. To add a new device, click the plus sign next to the group name.
+Configuration of the device is the same as in the `values.yaml` file [(check here)](../configuration/configuring-groups.md); a rough sketch follows the screenshot below.
+
+
+![Add a device](../images/ui_docs/groups/add_device.png){style="border:2px solid; width:500px; height:auto" }
+
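+A minimal sketch of what such a group looks like in `values.yaml` (the group name, addresses, and ports here are illustrative; see [configuring groups](../configuration/configuring-groups.md) for the authoritative syntax):
+
+```yaml
+scheduler:
+  groups: |
+    example_group_1:
+      # each entry is one device in the group
+      - address: 10.202.4.202
+        port: 161
+      - address: 10.202.4.203
+        port: 161
+```
+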
+ +To edit a group name, click the pencil icon next to the group name. + + +![Edit group](../images/ui_docs/groups/edit_group.png){style="border:2px solid; width:500px; height:auto" } + +
+
+To edit a device, click the pencil icon in the row of the given device.
+
+
+![Edit device](../images/ui_docs/groups/edit_device.png){style="border:2px solid; width:500px; height:auto" }
\ No newline at end of file
diff --git a/docs/gui/inventory-gui.md b/docs/gui/inventory-gui.md new file mode 100644 index 000000000..0c6f52aa0 --- /dev/null +++ b/docs/gui/inventory-gui.md
@@ -0,0 +1,21 @@
+# Configuring inventory in GUI
+
+SC4SNMP [inventory](../configuration/poller-configuration.md#poller-configuration-file) can be configured in the `Inventory` tab.
+
+![Inventory tab](../images/ui_docs/inventory/inventory_tab.png){ style="border:2px solid" }
+
+
+After pressing the `Add device/group` button, a new single device or group can be added.
+Configuration of the device is the same as in the `inventory.yaml` file [(check here)](../configuration/poller-configuration.md#poller-configuration-file); a rough sketch follows the screenshot below.
+
+
+![New device/group](../images/ui_docs/inventory/add_device.png){style="border:2px solid; width:500px; height:auto" }
+
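+For comparison, a single-device entry in the poller inventory of `values.yaml` is a CSV line like the one used in the installation guide; the address and walk interval below are illustrative:
+
+```yaml
+poller:
+  inventory: |
+    10.202.4.202,,2c,public,,,2000,,,
+```
+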
+
+To edit a device or group, click the pencil icon in the desired row.
+
+
+![Edit device](../images/ui_docs/inventory/edit_device.png){style="border:2px solid; width:500px; height:auto" }
+![Edit group](../images/ui_docs/inventory/edit_group.png){style="border:2px solid; width:500px; height:auto" }
\ No newline at end of file
diff --git a/docs/gui/profiles-gui.md b/docs/gui/profiles-gui.md new file mode 100644 index 000000000..4294a2082 --- /dev/null +++ b/docs/gui/profiles-gui.md
@@ -0,0 +1,35 @@
+# Configuring profiles in GUI
+
+SC4SNMP [profiles](../configuration/configuring-profiles.md) can be configured in the `Profiles` tab.
+
+![Profiles tab](../images/ui_docs/profiles/profiles_list.png){ style="border:2px solid" }
+
+
+After pressing the `Add profile` button, a new profile can be added.
+Configuration of the profile is the same as in the `values.yaml` file [(check here)](../configuration/configuring-profiles.md); a rough sketch follows the screenshot below.
+
+
+![Add standard profile](../images/ui_docs/profiles/add_standard_profile.png){style="border:2px solid; width:500px; height:auto" }
+
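+A rough `values.yaml` equivalent of a standard profile created in the GUI; the profile name, frequency, and varBinds are illustrative, and the exact fields are described in [configuring profiles](../configuration/configuring-profiles.md):
+
+```yaml
+scheduler:
+  profiles: |
+    standard_profile:
+      # polling interval in seconds
+      frequency: 300
+      varBinds:
+        - ['IF-MIB', 'ifDescr']
+        - ['IF-MIB', 'ifInOctets']
+```
+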
+
+The type of the profile can be changed:
+
+
+![Profile types](../images/ui_docs/profiles/profiles_types.png){ style="border:2px solid; width:500px; height:auto" }
+
+
+See the following examples of `Smart` and `Conditional` profile configurations; a rough `values.yaml` sketch follows the screenshots below:
+
+
+![Smart profile](../images/ui_docs/profiles/add_smart_profile.png){ style="border:2px solid; width:500px; height:auto" }
+![Conditional profile](../images/ui_docs/profiles/add_conditional.png){ style="border:2px solid; width:500px; height:auto" }
+
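+As a sketch only: a `Smart` profile adds a `condition` section to a standard profile. The field and pattern below are illustrative assumptions; the exact syntax is documented in [configuring profiles](../configuration/configuring-profiles.md):
+
+```yaml
+scheduler:
+  profiles: |
+    smart_profile:
+      frequency: 300
+      # the profile is applied automatically to devices whose
+      # sysDescr matches one of the patterns
+      condition:
+        type: "field"
+        field: "SNMPv2-MIB.sysDescr"
+        patterns:
+          - '.*linux.*'
+      varBinds:
+        - ['IF-MIB']
+```
+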
+
+All configured profiles can be edited by clicking the pencil icon:
+
+
+![Edit conditional profile](../images/ui_docs/profiles/edit_conditional.png){ style="border:2px solid; width:500px; height:auto" }
\ No newline at end of file
diff --git a/docs/ha.md b/docs/ha.md index 1f46bf8a4..ea811088d 100644
--- a/docs/ha.md
+++ b/docs/ha.md
@@ -1,11 +1,10 @@
-## High Availability Considerations
+# High Availability
 The SNMP protocol uses UDP as the transport protocol. Network reliability is a constraint. Consider network architecture when designing for high availability:
-* When using a single node collector, ensure automatic recovery from virtual infrastructure (i.e. VMware, Openstack, etc).
-* When using a multi-node cluster, ensure nodes are not located such that a simple majority of nodes can
-be lost. For example, consider row, rack, network, power, and storage.
-* When determining the placement of clusters, the closest location by the number of network hops should be utilized.
+* When using a single node collector, ensure automatic recovery from virtual infrastructure, such as VMware or Openstack.
+* When using a multi-node cluster, ensure nodes are not located in a way where a simple majority of nodes can be lost. For example, consider row, rack, network, power, and storage.
+* When determining the placement of clusters, the closest location by the number of network hops should be used.
 * For "data center" applications, collection should be local to the data center.
-* Consider IP Anycast.
+* Consider using IP Anycast.
diff --git a/docs/images/ui_docs/apply_changes/retries.png b/docs/images/ui_docs/apply_changes/retries.png new file mode 100644 index 000000000..dd06b8dbb Binary files /dev/null and b/docs/images/ui_docs/apply_changes/retries.png differ
diff --git a/docs/images/ui_docs/apply_changes/update_time.png b/docs/images/ui_docs/apply_changes/update_time.png new file mode 100644 index 000000000..a7c5eca1e Binary files /dev/null and b/docs/images/ui_docs/apply_changes/update_time.png differ
diff --git a/docs/images/ui_docs/groups/add_device.png b/docs/images/ui_docs/groups/add_device.png new file mode 100644 index 000000000..882100321 Binary files /dev/null and b/docs/images/ui_docs/groups/add_device.png differ
diff --git a/docs/images/ui_docs/groups/add_group.png b/docs/images/ui_docs/groups/add_group.png new file mode 100644 index 000000000..2058bcc7c Binary files /dev/null and b/docs/images/ui_docs/groups/add_group.png differ
diff --git a/docs/images/ui_docs/groups/edit_device.png b/docs/images/ui_docs/groups/edit_device.png new file mode 100644 index 000000000..325bf442f Binary files /dev/null and b/docs/images/ui_docs/groups/edit_device.png differ
diff --git a/docs/images/ui_docs/groups/edit_group.png b/docs/images/ui_docs/groups/edit_group.png new file mode 100644 index 000000000..3de963f70 Binary files /dev/null and b/docs/images/ui_docs/groups/edit_group.png differ
diff --git a/docs/images/ui_docs/groups/groups_tab.png b/docs/images/ui_docs/groups/groups_tab.png new file mode 100644 index 000000000..9ae0fe2f4 Binary files /dev/null and b/docs/images/ui_docs/groups/groups_tab.png differ
diff --git a/docs/images/ui_docs/inventory/add_device.png b/docs/images/ui_docs/inventory/add_device.png new file mode 100644 index 000000000..881b647cd Binary files /dev/null and b/docs/images/ui_docs/inventory/add_device.png differ
diff --git a/docs/images/ui_docs/inventory/edit_device.png b/docs/images/ui_docs/inventory/edit_device.png new file mode 100644 index 000000000..e3648cee5 Binary
files /dev/null and b/docs/images/ui_docs/inventory/edit_device.png differ diff --git a/docs/images/ui_docs/inventory/edit_group.png b/docs/images/ui_docs/inventory/edit_group.png new file mode 100644 index 000000000..92769f838 Binary files /dev/null and b/docs/images/ui_docs/inventory/edit_group.png differ diff --git a/docs/images/ui_docs/inventory/inventory_tab.png b/docs/images/ui_docs/inventory/inventory_tab.png new file mode 100644 index 000000000..40d35f2d3 Binary files /dev/null and b/docs/images/ui_docs/inventory/inventory_tab.png differ diff --git a/docs/images/ui_docs/profiles/add_conditional.png b/docs/images/ui_docs/profiles/add_conditional.png new file mode 100644 index 000000000..bc2d419ce Binary files /dev/null and b/docs/images/ui_docs/profiles/add_conditional.png differ diff --git a/docs/images/ui_docs/profiles/add_smart_profile.png b/docs/images/ui_docs/profiles/add_smart_profile.png new file mode 100644 index 000000000..c71e9b051 Binary files /dev/null and b/docs/images/ui_docs/profiles/add_smart_profile.png differ diff --git a/docs/images/ui_docs/profiles/add_standard_profile.png b/docs/images/ui_docs/profiles/add_standard_profile.png new file mode 100644 index 000000000..6bce35268 Binary files /dev/null and b/docs/images/ui_docs/profiles/add_standard_profile.png differ diff --git a/docs/images/ui_docs/profiles/edit_conditional.png b/docs/images/ui_docs/profiles/edit_conditional.png new file mode 100644 index 000000000..60d543c30 Binary files /dev/null and b/docs/images/ui_docs/profiles/edit_conditional.png differ diff --git a/docs/images/ui_docs/profiles/profiles_list.png b/docs/images/ui_docs/profiles/profiles_list.png new file mode 100644 index 000000000..d3fdf29c7 Binary files /dev/null and b/docs/images/ui_docs/profiles/profiles_list.png differ diff --git a/docs/images/ui_docs/profiles/profiles_types.png b/docs/images/ui_docs/profiles/profiles_types.png new file mode 100644 index 000000000..772dbf5de Binary files /dev/null and b/docs/images/ui_docs/profiles/profiles_types.png differ diff --git a/docs/index.md b/docs/index.md index 3e64ee392..ee4a75864 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,18 +1,18 @@ # Splunk Connect for SNMP Splunk welcomes your experimentation and feedback. Let your -account team know you are testing Splunk Connect for SNMP. +account team know that you are testing Splunk Connect for SNMP. Splunk Connect for SNMP is an edge-deployed, containerized, and highly available solution for collecting SNMP data for Splunk Enterprise, -Splunk Enterprise Cloud and Splunk Infrastructure Monitoring. +Splunk Enterprise Cloud, and Splunk Infrastructure Monitoring. SC4SNMP provides context-full information. It not only forwards SNMP data to Splunk, but also integrates the data into meaningful objects. For example, you don't need to write queries in order to gather information about interfaces of the device, because SC4SNMP does that automatically: [![Interface metrics](images/interface_metrics.png)](images/interface_metrics.png) -What makes it easy to visualize the data in Analytics of Splunk: +This makes it easy to visualize the data in Splunk Analytics: [![Interface analytics](images/interface_analytics.png)](images/interface_analytics.png) @@ -20,6 +20,6 @@ Here is a short presentation of how to browse SNMP data in Splunk: ![type:video](videos/setting_analytics.mov) -SC4SNMP can also easily monitor trap events sent by different SNMP devices. Trap events are JSON formatted, and are being stored under `netops` index. 
+SC4SNMP can also easily monitor trap events sent by different SNMP devices. Trap events are JSON formatted, and are stored under the `netops` index.
 [![Trap example](images/trap.png)](images/trap.png)
diff --git a/docs/mib-request.md b/docs/mib-request.md index 10d4fb09c..c63224524 100644
--- a/docs/mib-request.md
+++ b/docs/mib-request.md
@@ -1,49 +1,48 @@
 # MIB submission process
 To achieve human-readable OIDs, the corresponding MIB files are necessary.
-They are being stored in one of the components of SC4SNMP - the MIB server.
+They are stored in the MIB server, which is one of the components of SC4SNMP.
-The list of currently available MIBs is here:
+See the following link for a list of currently available MIBs:
 [https://pysnmp.github.io/mibs/index.csv](https://pysnmp.github.io/mibs/index.csv)
-An alternative way to check if the MIB you're interested in is being served is to check the link:
-`https://pysnmp.github.io/mibs/asn1/@mib@` where `@mib@` is the name of MIB (for example `IF-MIB`). If the file
-is downloading, that means the MIB file exists in the mib server.
+An alternative way to check if the MIB you're interested in is being served is to check the following link:
+`https://pysnmp.github.io/mibs/asn1/@mib@` where `@mib@` is the name of the MIB, for example, `IF-MIB`. If the file
+is downloading, that means the MIB file exists in the MIB server.
 ## Submit a new MIB file
-In case you want to add a new MIB file to the MIB server, follow the steps:
+If you want to add a new MIB file to the MIB server, see the following steps:
-1. Create a fork of the [https://github.com/pysnmp/mibs](https://github.com/pysnmp/mibs) repository
+1. Create a fork of the [https://github.com/pysnmp/mibs](https://github.com/pysnmp/mibs) repository.
-2. Put MIB file/s under `src/vendor/@vendor_name@` where `@vendor_name@` is the name of the MIB file's vendor (in case
there is no directory of vendors you need, create it by yourself)
+2. Put one or more MIB files under `src/vendor/@vendor_name@` where `@vendor_name@` is the name of the MIB file's vendor. If the directory for your vendor does not exist yet, create it yourself.
-3. Create a pull request to a `main` branch
+3. Create a pull request to the `main` branch.
-4. Name the pull request the following way: `feat: add @vendor_name@ MIB files`
+4. Name the pull request the following way: `feat: add @vendor_name@ MIB files`.
-An alternative way of adding MIBs to the MIB server is to create an issue on
+An alternative way of adding MIBs to the MIB server is to create an issue in the
 [https://github.com/pysnmp/mibs](https://github.com/pysnmp/mibs) repository, attaching the files and information about the vendor.
 ## Update your instance of SC4SNMP with the newest MIB server
-Usually SC4SNMP is released with the newest version of MIB server every time the new MIB files were added.
-But, if you want to use the newest MIB server right after its released, you can do it manually via the `values.yaml` file.
+Usually, SC4SNMP is released with the newest version of the MIB server every time new MIB files are added.
+But, if you want to use the newest MIB server right after it's released, you can do it manually using the `values.yaml` file:
-1. 
Append the `mibserver` configuration to the values.yaml, with `mibserver.image.tag` set to the newest `mibserver` version, for example:
 ```
 mibserver:
   image:
     tag: "1.14.5"
 ```
-Check all the MIB server releases [here](https://github.com/pysnmp/mibs/releases).
+Check all the MIB server releases at https://github.com/pysnmp/mibs/releases.
-2. Run `microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace`
+2. Run `microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace`.
-3. Restart worker-trap and worker-poller deployments:
+3. Restart the worker-trap and worker-poller deployments using the following commands:
 ```
 microk8s kubectl rollout restart deployment snmp-splunk-connect-for-snmp-worker-trap -n sc4snmp
@@ -53,19 +52,19 @@ microk8s kubectl rollout restart deployment snmp-splunk-connect-for-snmp-worker-
 ```
 ## Beta: use MIB server with local MIBs
 From the `1.15.0` version of the MIB server, there is a way to use local MIB files. This may be useful when your MIB
-files are proprietary, or you use SC4SNMP offline - this way you can update necessary MIBs by yourself, without a need
-of going through the MIB request procedure.
+files are proprietary, or you use SC4SNMP offline. This way, you can update necessary MIBs by yourself, without having to
+go through the MIB request procedure.
 In order to add your MIB files to the MIB server in standalone SC4SNMP installation:
-1. Create/Choose a directory on the machine where SC4SNMP is installed. For example: `/home/user/local_mibs`.
+1. Create or choose a directory on the machine where SC4SNMP is installed, for example, `/home/user/local_mibs`.
 2. Create vendor directories inside. For example, if you have MIB files from `VENDOR1` and `VENDOR2`, create
`/home/user/local_mibs/VENDOR1` and `/home/user/local_mibs/VENDOR2` and put files inside accordingly. Putting wrong
vendor names won't make compilation fail, this is more for the logging purposes. Segregating your files will make
troubleshooting easier.
 3. MIB files should be named the same as the contained MIB module. The MIB module name is specified at the beginning
of the MIB file before `::= BEGIN` keyword.
-4. Add following config to the `values.yaml`:
+4. Add the following to the `values.yaml`:
 ```yaml
 mibserver:
   localMibs:
     pathToMibs: "/home/user/local_mibs"
 ```
-To verify if the process of compilation was completed successfully, check the mibserver logs with:
+To verify that the process of compilation was completed successfully, check the mibserver logs using the following command:
 ```bash
 microk8s kubectl logs -f deployments/snmp-mibserver -n sc4snmp
 ```
-It creates a Kubernetes pvc with MIB files inside and maps it to MIB server pod.
-Also, you can change the storageClass and size of persistence according to the `mibserver` schema: [check here](https://github.com/pysnmp/mibs/blob/main/charts/mibserver/values.yaml).
+This creates a Kubernetes pvc with MIB files inside and maps it to the MIB server pod.
+Also, you can change the storageClass and size of persistence according to the `mibserver` schema, see https://github.com/pysnmp/mibs/blob/main/charts/mibserver/values.yaml.
 The default persistence size is 1 Gibibyte, so consider reducing or expanding it to the amount you actually need.
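+For example, a sketch that combines local MIBs with a larger persistence size; the field names follow the `mibserver` chart schema linked above, and the size value is illustrative:
+
+```yaml
+mibserver:
+  localMibs:
+    pathToMibs: "/home/user/local_mibs"
+  persistence:
+    # assumed field from the mibserver chart schema; default is 1Gi
+    size: "2Gi"
+```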
-Whenever you add new MIB files, rollout restart MIB server pods to compile them again:
+Whenever you add new MIB files, rollout restart MIB server pods to compile them again, using the following command:
 ```bash
 microk8s kubectl rollout restart deployment snmp-mibserver -n sc4snmp
 ```
-NOTE: In case of multi-node Kubernetes installation, create pvc beforehand, copy files onto it and add to the MIB server
-using `persistence.existingClaim`. If you go with `localMibs.pathToMibs` solution in case of multi-node installation
+For a multi-node Kubernetes installation, create the pvc beforehand, copy the files onto it, and add it to the MIB server
+using `persistence.existingClaim`. If you go with the `localMibs.pathToMibs` solution for a multi-node installation
 (with `nodeSelector` set up to schedule MIB server pods on the same node where the MIB files are),
-it will work - but when the Node with hostPath mapped fails, you'll use access to the MIB files on another node.
+it will work, but when the Node with the mapped hostPath fails, you'll lose access to the MIB files on that node.
diff --git a/docs/offlineinstallation/offline-microk8s.md b/docs/offlineinstallation/offline-microk8s.md index 246a4df67..0ff5c96c3 100644
--- a/docs/offlineinstallation/offline-microk8s.md
+++ b/docs/offlineinstallation/offline-microk8s.md
@@ -1,18 +1,17 @@
 # Offline Microk8s installation issues
-Offline installation of Microk8s is described [here](https://microk8s.io/docs/install-alternatives#heading--offline), but
-there are additional steps to install microk8s offline.
+See [install alternatives](https://microk8s.io/docs/install-alternatives#heading--offline) for the offline installation of Microk8s; however, there are additional steps required to install microk8s offline. See the following steps:
 ## Importing images
-After running:
+After running the following:
 ```
 snap ack microk8s_{microk8s_version}.assert
 snap install microk8s_{microk8s_version}.snap --classic
 ```
-You should check if the microk8s instance is healthy. Do it with:
+You should check if the microk8s instance is healthy. Do it using the following command:
 ```commandline
 microk8s kubectl get pods -A
@@ -25,11 +24,9 @@
 kube-system calico-kube-controllers-7c9c8dd885-fg8f2 0/1 Pending 0
 kube-system calico-node-zg4c4 0/1 Init:0/3 0 23s
 ```
-The pods are in the `Pending`/`Init` state because they're trying to download images, which is impossible to do offline.
-In order to make them work you need to download all the images on a different server with an internet connection, pack it up, and
-import it to a microk8s image registry on your offline server.
+The pods are in the `Pending`/`Init` state because they're trying to download images, which is impossible to do offline. In order to make them work, you need to download all the images on a different server with an internet connection, pack them up, and import them into the microk8s image registry on your offline server.
-### Packing up images for offline environment
+### Packing up images for an offline environment
 You need to monitor
@@ -44,9 +41,9 @@
 kube-system 0s Warning Failed pod/calico-node-sc784
 kube-system 0s Warning Failed pod/calico-node-sc784 Error: ErrImagePull
 ```
-This shows you that you lack a `docker.io/calico/cni:v3.21.4` image, and need to import it in order to fix the issue.
+The previous output shows that you lack the `docker.io/calico/cni:v3.21.4` image and need to import it in order to fix the issue.
-The process of such action is always:
+The process is always the following:
 ```commandline
 docker pull <image>
 docker save <image> > image.tar
 microk8s ctr image import image.tar
 ```
 ### Example of the offline installation
-For example, `microk8s` version `3597` requires these images to work correctly:
+For example, `microk8s` version `3597` requires the following images to work correctly:
 ```commandline
 docker pull docker.io/calico/kube-controllers:v3.21.4
@@ -98,7 +95,7 @@
 microk8s ctr image import metrics.tar
 ```
 NOTE: for other versions of `microk8s`, tags of images may differ.
-The healthy instance of microk8s, after running:
+After running the following:
 ```commandline
 microk8s enable hostpath-storage
@@ -106,7 +103,7 @@
 microk8s enable rbac
 microk8s enable metrics-server
 ```
-should look like this:
+The healthy microk8s instance should look like the following:
 ```
 NAMESPACE NAME READY STATUS RESTARTS AGE
@@ -118,18 +115,18 @@
 kube-system metrics-server-5f8f64cb86-x7k29 1/1 Running
 ```
 ## Enabling DNS and Metallb
-The `dns` and `metallb` don't require importing any images, so you can enable them simply by:
+`dns` and `metallb` don't require importing any images, so you can enable them simply using the following commands:
 ```yaml
 microk8s enable dns
 microk8s enable metallb
 ```
-More on `metallb` [here](../gettingstarted/mk8s/k8s-microk8s.md#install-metallb).
+For more information on `metallb`, see [Install metallb](../gettingstarted/mk8s/k8s-microk8s.md#install-metallb).
 ## Installing helm3
-The additional problem is the installation of `helm3` add-on. You need to do a few things to make it work.
+Additionally, you need to install the helm3 add-on. See the following steps:
 1. Check your server's platform with:
 ```
 dpkg --print-architecture
 ```
-The output would be for ex.: `amd64`.
+The output would be, for example: `amd64`.
 You need the platform to download the correct version of helm.
 2. Download the helm package from `https://get.helm.sh/helm-v3.8.0-linux-{{arch}}.tar.gz`, where `{{arch}}` should be
-replaced with the result from the previous command. Example: `https://get.helm.sh/helm-v3.8.0-linux-amd64.tar.gz`
+replaced with the result from the previous command, for example: `https://get.helm.sh/helm-v3.8.0-linux-amd64.tar.gz`.
-3. Rename package to `helm.tar.gz` and send it to an offline lab.
-4. Create `tmp` directory in `/var/snap/microk8s/current` and copy the package there:
+3. Rename the package to `helm.tar.gz` and send it to an offline lab.
+4. Create a `tmp` directory in `/var/snap/microk8s/current` and copy the package there:
 ```
 sudo mkdir -p /var/snap/microk8s/current/tmp/helm3
@@ -169,7 +166,7 @@
 Save file.
 ## Verify your instance
-Check if all the add-ons were installed successfully with command: `microk8s status --wait-ready`. An example of
+Check if all the add-ons were installed successfully using the following command: `microk8s status --wait-ready`. An example of
 a correct output is:
 ```commandline
diff --git a/docs/offlineinstallation/offline-sc4snmp.md b/docs/offlineinstallation/offline-sc4snmp.md index f34252689..319e33e8b 100644
--- a/docs/offlineinstallation/offline-sc4snmp.md
+++ b/docs/offlineinstallation/offline-sc4snmp.md
@@ -1,7 +1,9 @@
 # Offline SC4SNMP installation
+See the following options for an offline SC4SNMP installation. 
+
 ## Local machine with internet access
-To install the SC4SNMP offline, first, some packages must be downloaded from the [Github release](https://github.com/splunk/splunk-connect-for-snmp/releases) and then moved
+To install the SC4SNMP offline, first, download some packages from the [Github release](https://github.com/splunk/splunk-connect-for-snmp/releases) and then move them
 to the SC4SNMP installation server. Those packages are:
 - `dependencies-images.tar`
@@ -10,38 +12,41 @@ to the SC4SNMP installation server. Those packages are:
 Additionally, you'll need
 - `pull_mibserver.sh` script
+- `pull_gui_images.sh` script
-to easily pull and export mibserver image.
+to easily pull and export the mibserver image and GUI images.
-Moreover, SC4SNMP Docker image must be pulled, saved as a `.tar` package, and then moved to the server as well.
+Moreover, the SC4SNMP Docker image must be pulled, saved as a `.tar` package, and then moved to the server as well.
 This process requires Docker to be installed locally. Images can be pulled from the following repository:
 `ghcr.io/splunk/splunk-connect-for-snmp/container:<tag>`.
-The latest tag can be found [here](https://github.com/splunk/splunk-connect-for-snmp) under the Releases section with the label `latest`.
+The latest tag can be found in [The Splunk Connect for SNMP Repository](https://github.com/splunk/splunk-connect-for-snmp), under the Releases section with the label `latest`.
-Example of docker pull command:
+See the following example of a docker pull command:
 ```bash
docker pull ghcr.io/splunk/splunk-connect-for-snmp/container:<tag>
 ```
-Then save the image. Directory where this image will be saved can be specified after the `>` sign:
+Afterwards, save the image. The directory where this image will be saved can be specified after the `>` sign:
 ```bash
docker save ghcr.io/splunk/splunk-connect-for-snmp/container:<tag> > snmp_image.tar
 ```
-Another package you have to pull is the mibserver image. You can do it by executing `pull_mibserver.sh` script from
-the Release section, or copy-pasting its content.
+Other packages you have to pull are the mibserver and GUI images. Do this by executing the `pull_mibserver.sh` and
+`pull_gui_images.sh` scripts from the Release section, or by copy-pasting their content. See the following:
 ```bash
chmod a+x pull_mibserver.sh # you'll probably need to make file executable
./pull_mibserver.sh
+chmod a+x pull_gui_images.sh
+./pull_gui_images.sh
 ```
-This script should produce `mibserver.tar` with the image of the mibserver inside.
+Those scripts should produce `mibserver.tar` with the mibserver image and `sc4snmp-gui-images.tar` with the GUI images inside.
-All four packages, `mibserver.tar`, `snmp_image.tar`, `dependencies-images.tar`, and `splunk-connect-for-snmp-chart.tar`, must be moved to the SC4SNMP installation server.
+All five packages, `mibserver.tar`, `snmp_image.tar`, `dependencies-images.tar`, `sc4snmp-gui-images.tar`, and `splunk-connect-for-snmp-chart.tar`, must be moved to the SC4SNMP installation server.
 ## Installation on the server
@@ -51,7 +56,7 @@ On the server, all the images must be imported to the microk8s cluster. This can
 ```
 microk8s ctr image import 
 ```
-In case of this installation the following commands must be run:
+Run the following commands:
 ```bash
 microk8s ctr image import dependencies-images.tar
@@ -59,7 +64,7 @@ microk8s ctr image import snmp_image.tar
 microk8s ctr image import mibserver.tar
 ```
-Then create `values.yaml`. It's a little different from `values.yaml` used in an online installation. 
+Afterwards, create `values.yaml`. It's a little different from the `values.yaml` used in an online installation.
 The difference between the two files is the following, which is used for automatic image pulling:
 ```yaml
image:
  pullPolicy: "Never"
 ```
-Example `values.yaml` file can be found [here][offline_doc_link].
+An example `values.yaml` file can be found in the [Offline SC4SNMP values.yaml template](https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/offline_installation_values.md).
-The next step is to unpack the chart package `splunk-connect-for-snmp-chart.tar`. It will result in creating the `splunk-connect-for-snmp` directory:
+Next, unpack the chart package `splunk-connect-for-snmp-chart.tar`. It will result in creating the `splunk-connect-for-snmp` directory:
 ```bash
tar -xvf splunk-connect-for-snmp-chart.tar --exclude='._*'
@@ -81,4 +86,4 @@ Finally, run the helm install command in the directory where both the `values.ya
microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
 ```
-[offline_doc_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/offline_installation_values.md \ No newline at end of file
+[offline_doc_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/offline_installation_values.md
diff --git a/docs/offlineinstallation/offline-sck.md b/docs/offlineinstallation/offline-sck.md index bec45e4e2..459efdbaa 100644
--- a/docs/offlineinstallation/offline-sck.md
+++ b/docs/offlineinstallation/offline-sck.md
@@ -1,25 +1,27 @@
 # Splunk OpenTelemetry Collector for Kubernetes offline installation
+See the following options to install the Splunk OpenTelemetry Collector for Kubernetes.
+
 ## Local machine with internet access
-To install Splunk OpenTelemetry Collector offline first one must download packed chart `splunk-otel-collector-<version>.tgz` and the otel image `otel_image.tar`
-from github release where `<version>` is the current OpenTelemetry release tag. Both packages must be later moved to the installation server.
+To install Splunk OpenTelemetry Collector offline, first, download the packed chart `splunk-otel-collector-<version>.tgz` and the otel image `otel_image.tar`
+from the Github release, where `<version>` is the current OpenTelemetry release tag. Both packages must be later moved to the installation server. 
## Installation on the server
-Otel image has to be imported to the `microk8s` registry with:
+The Otel image has to be imported to the `microk8s` registry with:
 ```bash
 microk8s ctr image import otel_image.tar
 ```
-Imported package must be unpacked with the following command :
+The imported package must be unpacked with the following command:
 ```bash
 tar -xvf splunk-otel-collector-.tgz --exclude='._*'
 ```
-In order to run Splunk OpenTelemetry Collector on your environment, replace `<>` variables according to the description presented below
+In order to run Splunk OpenTelemetry Collector on your environment, replace `<>` variables according to the following description:
 ```bash
 microk8s helm3 install sck \
 --set="clusterName=" \
@@ -43,7 +45,7 @@
 | splunk_token | Splunk HTTP Event Collector token | 450a69af-16a9-4f87-9628-c26f04ad3785 |
 | cluster_name | name of the cluster | my-cluster |
-An example of filled up command is:
+An example of a correctly filled command is:
 ```bash
 microk8s helm3 install sck \
 --set="clusterName=my-cluster" \
@@ -58,7 +60,7 @@
 ## Install Splunk OpenTelemetry Collector with HELM for Splunk Observability for Kubernetes
-To run Splunk OpenTelemetry Collector on your environment, replace `<>` variables according to the description presented below
+To run Splunk OpenTelemetry Collector on your environment, replace `<>` variables according to the following description:
 ```bash
 microk8s helm3 install sck
@@ -84,7 +86,7 @@
 splunk-otel-collector
 | ingest_url | Ingest URL from the Splunk Observability Cloud environment | https://ingest..signalfx.com |
 | api_url | API URL from the Splunk Observability Cloud environment | https://api..signalfx.com |
-An example of filled up command is:
+An example of a correctly filled command is:
 ```bash
 microk8s helm3 install sck --set="clusterName=my_cluster"
@@ -96,4 +98,4 @@
 --set="splunkObservability.tracesEnabled=false"
 --set="splunkObservability.logsEnabled=false"
 splunk-otel-collector
-``` \ No newline at end of file
+```
diff --git a/docs/planning.md b/docs/planning.md index 64cad2178..f2d0b236c 100644
--- a/docs/planning.md
+++ b/docs/planning.md
@@ -1,7 +1,7 @@
 # Planning
 Splunk Connect for SNMP (SC4SNMP) is a solution that allows the customer
-to \"get\" data from network devices and appliances when a more feature-complete solution, such as the Splunk Universal Forwarder, is not
+to get data from network devices and appliances when a more feature-complete solution, such as the Splunk Universal Forwarder, is not
 available.
 ## Architecture
@@ -18,7 +18,7 @@
 existing firewall.
 - A supported deployment of MicroK8s
-- 16 Core/32 threads x64 architecture server or vm (single instance)
+- 16 Core/32 threads x64 architecture server or virtual machine (single instance)
 12 GB ram
 - HA Requires 3 or more instances (odd numbers) 8 core/16 thread 16 GB
@@ -35,13 +35,12 @@
 existing firewall.
 ## Planning Infrastructure
+When planning infrastructure for Splunk Connect for SNMP (SC4SNMP), remember the following limitations:
+
 A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with
 16 Core/32 threads x64 and 64 GB RAM will be able to handle up to 1500 SNMP
 TRAPs per second.
-A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with
-16 Core/32 threads x64 and 64 GB RAM is able to handle up to 2750 SNMP varbinds per second. 
-As for events per second visible in Splunk, please remember that a single SC4SNMP event can contain more than one varbind inside - auto aggregation/grouping feature (varbinds which are describing same thing ie. network interface will be grouped in one event).
-That is why, depending on configuration, the number of events per second may vary.
+A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with 16 Core/32 threads x64 and 64 GB RAM is able to handle up to 2750 SNMP varbinds per second. As for events per second that are visible in Splunk, a single SC4SNMP event can contain more than one varbind inside, due to the automatic aggregation and grouping feature: varbinds that describe the same thing, such as a single network interface, are grouped into one event. That is why, depending on the configuration, the number of events per second may vary.
+
-When planning infrastructure for Splunk Connect for SNMP, (SC4SNMP) note the limitations highlighted above.
diff --git a/docs/releases.md b/docs/releases.md index edf8fa831..bdbf780a4 100644
--- a/docs/releases.md
+++ b/docs/releases.md
@@ -1,14 +1,14 @@
 # Base Information
 ## Known Issues
-List of open known issues is available under [Known issue link](https://github.com/splunk/splunk-connect-for-snmp/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22)
+The list of open known issues is available under the [Known issue link](https://github.com/splunk/splunk-connect-for-snmp/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22)
 ## Open issues to the product
-To open issue for Splunk Connect for SNMP go to [github SC4SNMP](https://github.com/splunk/splunk-connect-for-snmp/issues)
-project and open issue.
+To open an issue for Splunk Connect for SNMP, go to the [github SC4SNMP](https://github.com/splunk/splunk-connect-for-snmp/issues)
+project and open an issue.
 ## Releases
-To check Splunk Connect for SNMP releases please visit: [SC4SNMP Releases](https://github.com/splunk/splunk-connect-for-snmp/releases)
+To check Splunk Connect for SNMP releases, see [SC4SNMP Releases](https://github.com/splunk/splunk-connect-for-snmp/releases)
diff --git a/docs/security.md b/docs/security.md index 5c9c22f49..f3cbd50da 100644
--- a/docs/security.md
+++ b/docs/security.md
@@ -1,4 +1,3 @@
-
 # Security Considerations
 The SC4SNMP solution implements SNMP in a compatible mode for current and legacy network device gear.
@@ -6,7 +5,7 @@ SNMP is a protocol widely considered to be risky and requires threat mitigation
 * Do not expose SNMP endpoints to untrusted connections such as the internet or general LAN network of a typical enterprise.
 * Do not allow SNMPv1 or SNMPv2 connections to cross a network zone where a man in the middle interception is possible.
-* Be aware many SNMPv3 devices rely on insecure cryptography including DES, MD5, and SHA. Do not presume SNMPv3 devices and connections are secure by default.
-* When possible use SNMPv3 with the most secure protocol options mutually supported.
+* Many SNMPv3 devices rely on insecure cryptography including DES, MD5, and SHA. Do not assume that SNMPv3 devices and connections are secure by default.
+* When possible, use SNMPv3 with the most secure mutually supported protocol options.
 * The default IP of each node should be considered a management interface and should be protected from network access by an untrusted device by a hardware or software firewall. When possible the IP allocated for SNMP communication should not be shared by the management interface. 
diff --git a/docs/small-environment.md b/docs/small-environment.md index 6c886738d..2508be1cb 100644 --- a/docs/small-environment.md +++ b/docs/small-environment.md @@ -1,15 +1,14 @@ # Lightweight SC4SNMP installation SC4SNMP can be successfully installed in small environments with 2 CPUs and 4 GB of memory. -One important thing to remember is that Splunk OpenTelemetry Collector for Kubernetes cannot be installed in such a small -environment along with SC4SNMP. The other difference from normal installation is that the `resources` limits must be set for Kubernetes +However, Splunk OpenTelemetry Collector for Kubernetes cannot be installed in a small +environment along with SC4SNMP. Additionally, the `resources` limits must be set for Kubernetes pods. See the example of `values.yaml` with the appropriate resources [here][lightweight_doc_link]. - -The rest of the installation is the same as in [online](gettingstarted/sc4snmp-installation.md), or the +The rest of the installation is the same as the [online](gettingstarted/sc4snmp-installation.md) or the [offline](offlineinstallation/offline-sc4snmp.md) installation. Keep in mind that a lightweight instance of SC4SNMP won't be able to poll from many devices and may experience delays if there is frequent polling. -[lightweight_doc_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/lightweight_installation.yaml \ No newline at end of file +[lightweight_doc_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/lightweight_installation.yaml diff --git a/docs/upgrade.md b/docs/upgrade.md index 9beb4df3c..3d07ba320 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -1,5 +1,7 @@ # Upgrading SC4SNMP +See the following to upgrade SC4SNMP. + ## Upgrade to the latest version To upgrade SC4SNMP to the latest version, simply run the following command: @@ -13,30 +15,30 @@ Afterwards, run: microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace ``` -SC4SNMP will be upgraded to the newest version. You can see the latest version after hitting the command: +SC4SNMP will be upgraded to the newest version. You can see the latest version after running the following command: ```yaml microk8s helm3 search repo snmp ``` -The output looks like that: +The following should be the output: ``` NAME CHART VERSION APP VERSION DESCRIPTION splunk-connect-for-snmp/splunk-connect-for-snmp 1.6.2 1.6.2 A Helm chart for SNMP Connect for SNMP ``` -So in this case, the latest version is `1.6.2` and it will be installed after `helm3 upgrade` command. +In this case, the latest version is `1.6.2` and it will be installed after running the `helm3 upgrade` command. ## Upgrade to a specific version -Alternatively, you can install one of the previous versions, or a development one. You can list all the previous versions with: +Alternatively, you can install one of the previous versions, or a development one. 
You can list all the previous versions using: ```yaml microk8s helm3 search repo snmp --versions ``` -And all the development versions: +And all the development versions using: ```yaml microk8s helm3 search repo snmp --devel @@ -49,7 +51,7 @@ To upgrade your SC4SNMP instance to any of the listed versions, run `helm3 upgra microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace --version ``` -For example: +See the following example: ```yaml microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace --version 1.6.3-beta.13 diff --git a/integration_tests/automatic_setup.sh b/integration_tests/automatic_setup.sh index 085bf40f5..bcb8e6126 100755 --- a/integration_tests/automatic_setup.sh +++ b/integration_tests/automatic_setup.sh @@ -26,6 +26,7 @@ wait_for_splunk() { } function define_python() { + echo $(yellow "define python") if command -v python &> /dev/null; then PYTHON=python elif command -v python3 &> /dev/null; then @@ -97,6 +98,7 @@ cd integration_tests chmod u+x prepare_splunk.sh echo $(green "Preparing Splunk instance") ./prepare_splunk.sh +./install_sck.sh sed -i "s/###SPLUNK_TOKEN###/$(cat hec_token)/" values.yaml sed -i "s/###LOAD_BALANCER_ID###/$(hostname -I | cut -d " " -f1)/" values.yaml sudo docker run -d -p 161:161/udp tandrup/snmpsim @@ -128,6 +130,7 @@ wait_for_pod_initialization wait_for_sc4snmp_pods_to_be_up check_metallb_status -define_python - -deploy_poetry +if [[ $1 == 'integration' ]]; then + define_python + deploy_poetry +fi diff --git a/integration_tests/install_sck.sh b/integration_tests/install_sck.sh new file mode 100755 index 000000000..96f28173a --- /dev/null +++ b/integration_tests/install_sck.sh @@ -0,0 +1,10 @@ +sudo microk8s helm3 repo add splunk-otel-collector-chart https://signalfx.github.io/splunk-otel-collector-chart +sudo microk8s helm3 upgrade --install sck \ + --set="clusterName=my-cluster" \ + --set="splunkPlatform.endpoint=https://$(hostname -I | cut -d " " -f1):8088/services/collector" \ + --set="splunkPlatform.insecureSkipVerify=true" \ + --set="splunkPlatform.token=$(cat hec_token)" \ + --set="splunkPlatform.metricsEnabled=true" \ + --set="splunkPlatform.metricsIndex=em_metrics" \ + --set="splunkPlatform.index=em_logs" \ + splunk-otel-collector-chart/splunk-otel-collector diff --git a/integration_tests/prepare_splunk.sh b/integration_tests/prepare_splunk.sh index 183e68680..9a0cb091f 100644 --- a/integration_tests/prepare_splunk.sh +++ b/integration_tests/prepare_splunk.sh @@ -1,6 +1,6 @@ create_splunk_indexes() { - index_names=("netmetrics" "netops") - index_types=("metric" "event") + index_names=("netmetrics" "em_metrics" "netops" "em_logs") + index_types=("metric" "metric" "event" "event") for index in "${!index_names[@]}" ; do if ! 
curl -k -u admin:"changeme2" "https://localhost:8089/services/data/indexes" \ -d datatype="${index_types[${index}]}" -d name="${index_names[${index}]}" ; then diff --git a/integration_tests/values.yaml b/integration_tests/values.yaml index 8f92d8a6c..b92b71770 100644 --- a/integration_tests/values.yaml +++ b/integration_tests/values.yaml @@ -62,6 +62,8 @@ scheduler: # - ['SNMPv2-MIB', 'sysName', 0] # - ['IF-MIB'] # - ['TCP-MIB'] + groups: | + {} poller: usernameSecrets: - sv3poller diff --git a/mkdocs.yml b/mkdocs.yml index 2b429381b..acff02b53 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -11,6 +11,8 @@ markdown_extensions: - fenced_code - sane_lists - codehilite + - attr_list + - md_in_html plugins: - search: @@ -53,10 +55,17 @@ nav: - Redis: "configuration/redis-configuration.md" - SNMPv3 configuration: "configuration/snmpv3-configuration.md" - Splunk Infrastructure Monitoring: "configuration/sim-configuration.md" + - CoreDNS: "configuration/coredns-configuration.md" - Offline Installation: - Install Microk8s: "offlineinstallation/offline-microk8s.md" - Install Splunk OpenTelemetry Collector for Kubernetes: "offlineinstallation/offline-sck.md" - Install SC4SNMP: "offlineinstallation/offline-sc4snmp.md" + - GUI: + - Enable GUI: "gui/enable-gui.md" + - Configuring Profiles: "gui/profiles-gui.md" + - Configuring Groups: "gui/groups-gui.md" + - Configuring Inventory: "gui/inventory-gui.md" + - Apply changes: "gui/apply-changes.md" - Lightweight installation: "small-environment.md" - Planning: "planning.md" - Security: "security.md" @@ -65,4 +74,5 @@ nav: - Troubleshooting : "bestpractices.md" - Releases: "releases.md" - High Availability: ha.md + - Docker compose deployment: "docker-compose.md" diff --git a/poetry.lock b/poetry.lock index cb2f8627a..345e3f81a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. [[package]] name = "amqp" @@ -89,13 +89,13 @@ tzdata = ["tzdata"] [[package]] name = "billiard" -version = "4.1.0" +version = "4.2.0" description = "Python multiprocessing fork with improvements and bugfixes" optional = false python-versions = ">=3.7" files = [ - {file = "billiard-4.1.0-py3-none-any.whl", hash = "sha256:0f50d6be051c6b2b75bfbc8bfd85af195c5739c281d3f5b86a5640c65563614a"}, - {file = "billiard-4.1.0.tar.gz", hash = "sha256:1ad2eeae8e28053d729ba3373d34d9d6e210f6e4d8bf0a9c64f92bd053f1edf5"}, + {file = "billiard-4.2.0-py3-none-any.whl", hash = "sha256:07aa978b308f334ff8282bd4a746e681b3513db5c9a514cbdd810cbbdc19714d"}, + {file = "billiard-4.2.0.tar.gz", hash = "sha256:9a3c3184cb275aa17a732f93f65b20c525d3d9f253722d26a82194803ade5a2c"}, ] [[package]] @@ -125,31 +125,31 @@ ujson = ["ujson (>=5.4.0,<6.0.0)"] [[package]] name = "celery" -version = "5.3.4" +version = "5.3.6" description = "Distributed Task Queue." 
optional = false python-versions = ">=3.8" files = [ - {file = "celery-5.3.4-py3-none-any.whl", hash = "sha256:1e6ed40af72695464ce98ca2c201ad0ef8fd192246f6c9eac8bba343b980ad34"}, - {file = "celery-5.3.4.tar.gz", hash = "sha256:9023df6a8962da79eb30c0c84d5f4863d9793a466354cc931d7f72423996de28"}, + {file = "celery-5.3.6-py3-none-any.whl", hash = "sha256:9da4ea0118d232ce97dff5ed4974587fb1c0ff5c10042eb15278487cdd27d1af"}, + {file = "celery-5.3.6.tar.gz", hash = "sha256:870cc71d737c0200c397290d730344cc991d13a057534353d124c9380267aab9"}, ] [package.dependencies] "backports.zoneinfo" = {version = ">=0.2.1", markers = "python_version < \"3.9\""} -billiard = ">=4.1.0,<5.0" +billiard = ">=4.2.0,<5.0" click = ">=8.1.2,<9.0" click-didyoumean = ">=0.3.0" click-plugins = ">=1.1.1" click-repl = ">=0.2.0" -kombu = ">=5.3.2,<6.0" +kombu = ">=5.3.4,<6.0" python-dateutil = ">=2.8.2" tblib = {version = ">=1.5.0", optional = true, markers = "python_version >= \"3.8.0\" and extra == \"tblib\""} tzdata = ">=2022.7" -vine = ">=5.0.0,<6.0" +vine = ">=5.1.0,<6.0" [package.extras] arangodb = ["pyArango (>=2.0.2)"] -auth = ["cryptography (==41.0.3)"] +auth = ["cryptography (==41.0.5)"] azureblockblob = ["azure-storage-blob (>=12.15.0)"] brotli = ["brotli (>=1.0.0)", "brotlipy (>=0.7.0)"] cassandra = ["cassandra-driver (>=3.25.0,<4)"] @@ -159,26 +159,26 @@ couchbase = ["couchbase (>=3.0.0)"] couchdb = ["pycouchdb (==1.14.2)"] django = ["Django (>=2.2.28)"] dynamodb = ["boto3 (>=1.26.143)"] -elasticsearch = ["elasticsearch (<8.0)"] +elasticsearch = ["elastic-transport (<=8.10.0)", "elasticsearch (<=8.11.0)"] eventlet = ["eventlet (>=0.32.0)"] gevent = ["gevent (>=1.5.0)"] librabbitmq = ["librabbitmq (>=2.0.0)"] memcache = ["pylibmc (==1.6.3)"] mongodb = ["pymongo[srv] (>=4.0.2)"] -msgpack = ["msgpack (==1.0.5)"] +msgpack = ["msgpack (==1.0.7)"] pymemcache = ["python-memcached (==1.59)"] pyro = ["pyro4 (==4.82)"] pytest = ["pytest-celery (==0.0.0)"] -redis = ["redis (>=4.5.2,!=4.5.5,<5.0.0)"] +redis = ["redis (>=4.5.2,!=4.5.5,<6.0.0)"] s3 = ["boto3 (>=1.26.143)"] slmq = ["softlayer-messaging (>=1.0.3)"] -solar = ["ephem (==4.1.4)"] +solar = ["ephem (==4.1.5)"] sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"] sqs = ["boto3 (>=1.26.143)", "kombu[sqs] (>=5.3.0)", "pycurl (>=7.43.0.5)", "urllib3 (>=1.26.16)"] tblib = ["tblib (>=1.3.0)", "tblib (>=1.5.0)"] yaml = ["PyYAML (>=3.10)"] zookeeper = ["kazoo (>=1.3.1)"] -zstd = ["zstandard (==0.21.0)"] +zstd = ["zstandard (==0.22.0)"] [[package]] name = "celery-redbeat" @@ -456,6 +456,24 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker perf = ["ipython"] testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +[[package]] +name = "importlib-resources" +version = "6.1.1" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.1.1-py3-none-any.whl", hash = "sha256:e8bf90d8213b486f428c9c39714b920041cb02c184686a3dee24905aaa8105d6"}, + {file = "importlib_resources-6.1.1.tar.gz", hash = "sha256:3893a00122eafde6894c59914446a512f728a0c1a45f9bb9b63721b6bacf0b4a"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", 
"sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"] + [[package]] name = "iniconfig" version = "1.1.1" @@ -496,13 +514,13 @@ files = [ [[package]] name = "kombu" -version = "5.3.2" +version = "5.3.4" description = "Messaging library for Python." optional = false python-versions = ">=3.8" files = [ - {file = "kombu-5.3.2-py3-none-any.whl", hash = "sha256:b753c9cfc9b1e976e637a7cbc1a65d446a22e45546cd996ea28f932082b7dc9e"}, - {file = "kombu-5.3.2.tar.gz", hash = "sha256:0ba213f630a2cb2772728aef56ac6883dc3a2f13435e10048f6e97d48506dbbd"}, + {file = "kombu-5.3.4-py3-none-any.whl", hash = "sha256:63bb093fc9bb80cfb3a0972336a5cec1fa7ac5f9ef7e8237c6bf8dda9469313e"}, + {file = "kombu-5.3.4.tar.gz", hash = "sha256:0bb2e278644d11dea6272c17974a3dbb9688a949f3bb60aeb5b791329c44fadc"}, ] [package.dependencies] @@ -514,14 +532,14 @@ vine = "*" [package.extras] azureservicebus = ["azure-servicebus (>=7.10.0)"] azurestoragequeues = ["azure-identity (>=1.12.0)", "azure-storage-queue (>=12.6.0)"] -confluentkafka = ["confluent-kafka (==2.1.1)"] +confluentkafka = ["confluent-kafka (>=2.2.0)"] consul = ["python-consul2"] librabbitmq = ["librabbitmq (>=2.0.0)"] mongodb = ["pymongo (>=4.1.1)"] msgpack = ["msgpack"] pyro = ["pyro4"] qpid = ["qpid-python (>=0.26)", "qpid-tools (>=0.26)"] -redis = ["redis (>=4.5.2)"] +redis = ["redis (>=4.5.2,!=4.5.5,<6.0.0)"] slmq = ["softlayer-messaging (>=1.0.3)"] sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"] sqs = ["boto3 (>=1.26.143)", "pycurl (>=7.43.0.5)", "urllib3 (>=1.26.16)"] @@ -715,24 +733,27 @@ files = [ [[package]] name = "mike" -version = "1.1.2" +version = "2.0.0" description = "Manage multiple versions of your MkDocs-powered documentation" optional = false python-versions = "*" files = [ - {file = "mike-1.1.2-py3-none-any.whl", hash = "sha256:4c307c28769834d78df10f834f57f810f04ca27d248f80a75f49c6fa2d1527ca"}, - {file = "mike-1.1.2.tar.gz", hash = "sha256:56c3f1794c2d0b5fdccfa9b9487beb013ca813de2e3ad0744724e9d34d40b77b"}, + {file = "mike-2.0.0-py3-none-any.whl", hash = "sha256:87f496a65900f93ba92d72940242b65c86f3f2f82871bc60ebdcffc91fad1d9e"}, + {file = "mike-2.0.0.tar.gz", hash = "sha256:566f1cab1a58cc50b106fb79ea2f1f56e7bfc8b25a051e95e6eaee9fba0922de"}, ] [package.dependencies] -jinja2 = "*" +importlib-metadata = "*" +importlib-resources = "*" +jinja2 = ">=2.7" mkdocs = ">=1.0" +pyparsing = ">=3.0" pyyaml = ">=5.1" verspec = "*" [package.extras] -dev = ["coverage", "flake8 (>=3.0)", "shtab"] -test = ["coverage", "flake8 (>=3.0)", "shtab"] +dev = ["coverage", "flake8 (>=3.0)", "flake8-quotes", "shtab"] +test = ["coverage", "flake8 (>=3.0)", "flake8-quotes", "shtab"] [[package]] name = "mkdocs" @@ -767,13 +788,13 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp [[package]] name = "mkdocs-material" -version = "9.4.6" +version = "9.5.2" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.4.6-py3-none-any.whl", hash = "sha256:78802035d5768a78139c84ad7dce0c6493e8f7dc4861727d36ed91d1520a54da"}, - {file = "mkdocs_material-9.4.6.tar.gz", hash = "sha256:09665e60df7ee9e5ff3a54af173f6d45be718b1ee7dd962bcff3102b81fb0c14"}, + {file = "mkdocs_material-9.5.2-py3-none-any.whl", hash = "sha256:6ed0fbf4682491766f0ec1acc955db6901c2fd424c7ab343964ef51b819741f5"}, + {file = "mkdocs_material-9.5.2.tar.gz", 
hash = "sha256:ca8b9cd2b3be53e858e5a1a45ac9668bd78d95d77a30288bb5ebc1a31db6184c"}, ] [package.dependencies] @@ -782,7 +803,7 @@ colorama = ">=0.4,<1.0" jinja2 = ">=3.0,<4.0" markdown = ">=3.2,<4.0" mkdocs = ">=1.5.3,<2.0" -mkdocs-material-extensions = ">=1.2,<2.0" +mkdocs-material-extensions = ">=1.3,<2.0" paginate = ">=0.5,<1.0" pygments = ">=2.16,<3.0" pymdown-extensions = ">=10.2,<11.0" @@ -796,13 +817,13 @@ recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2. [[package]] name = "mkdocs-material-extensions" -version = "1.2" +version = "1.3.1" description = "Extension pack for Python Markdown and MkDocs Material." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "mkdocs_material_extensions-1.2-py3-none-any.whl", hash = "sha256:c767bd6d6305f6420a50f0b541b0c9966d52068839af97029be14443849fb8a1"}, - {file = "mkdocs_material_extensions-1.2.tar.gz", hash = "sha256:27e2d1ed2d031426a6e10d5ea06989d67e90bb02acd588bc5673106b5ee5eedf"}, + {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, + {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, ] [[package]] @@ -849,13 +870,13 @@ pymongo = ">=2.6.0" [[package]] name = "opentelemetry-api" -version = "1.20.0" +version = "1.21.0" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.7" files = [ - {file = "opentelemetry_api-1.20.0-py3-none-any.whl", hash = "sha256:982b76036fec0fdaf490ae3dfd9f28c81442a33414f737abc687a32758cdcba5"}, - {file = "opentelemetry_api-1.20.0.tar.gz", hash = "sha256:06abe351db7572f8afdd0fb889ce53f3c992dbf6f6262507b385cc1963e06983"}, + {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"}, + {file = "opentelemetry_api-1.21.0.tar.gz", hash = "sha256:d6185fd5043e000075d921822fd2d26b953eba8ca21b1e2fa360dd46a7686316"}, ] [package.dependencies] @@ -864,13 +885,13 @@ importlib-metadata = ">=6.0,<7.0" [[package]] name = "opentelemetry-exporter-jaeger-thrift" -version = "1.20.0" +version = "1.21.0" description = "Jaeger Thrift Exporter for OpenTelemetry" optional = false python-versions = ">=3.7" files = [ - {file = "opentelemetry_exporter_jaeger_thrift-1.20.0-py3-none-any.whl", hash = "sha256:781045dbbce3094772426259fac4602269ddd7934f7767145997ea13f82d67e2"}, - {file = "opentelemetry_exporter_jaeger_thrift-1.20.0.tar.gz", hash = "sha256:ab8416584535f93e3a087eecd6edec534361748763a9f8b609bbd0b44f3d73f9"}, + {file = "opentelemetry_exporter_jaeger_thrift-1.21.0-py3-none-any.whl", hash = "sha256:4364b8dfa6965707c72c43d85942b1491982b7d44f0123d593513e8bedafa9e2"}, + {file = "opentelemetry_exporter_jaeger_thrift-1.21.0.tar.gz", hash = "sha256:41119bc7e5602cec83dd7d7060f061ecbc91de231272e8f515b07ef9a4b6e41c"}, ] [package.dependencies] @@ -880,13 +901,13 @@ thrift = ">=0.10.0" [[package]] name = "opentelemetry-instrumentation" -version = "0.41b0" +version = "0.42b0" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = false python-versions = ">=3.7" files = [ - {file = "opentelemetry_instrumentation-0.41b0-py3-none-any.whl", hash = "sha256:0ef9e5705ceca0205992a4a845ae4251ce6ec15a1206ca07c2b00afb0c5bd386"}, - {file = "opentelemetry_instrumentation-0.41b0.tar.gz", hash = "sha256:214382ba10dfd29d4e24898a4c7ef18b7368178a6277a1aec95cdb75cabf4612"}, + {file = 
"opentelemetry_instrumentation-0.42b0-py3-none-any.whl", hash = "sha256:65ae54ddb90ca2d05d2d16bf6863173e7141eba1bbbf41fc9bbb02446adbe369"}, + {file = "opentelemetry_instrumentation-0.42b0.tar.gz", hash = "sha256:6a653a1fed0f76eea32885321d77c750483e987eeefa4cbf219fc83559543198"}, ] [package.dependencies] @@ -896,67 +917,67 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-celery" -version = "0.41b0" +version = "0.42b0" description = "OpenTelemetry Celery Instrumentation" optional = false python-versions = ">=3.7" files = [ - {file = "opentelemetry_instrumentation_celery-0.41b0-py3-none-any.whl", hash = "sha256:b3e2ee1b0e2acb13716f5318f406e8a8eb57aac6eb6118266694037ac6d8a7e6"}, - {file = "opentelemetry_instrumentation_celery-0.41b0.tar.gz", hash = "sha256:403d48c17478f5a8d42522211c66c0af16a5a7339d5a2b00c27c1b84c963617b"}, + {file = "opentelemetry_instrumentation_celery-0.42b0-py3-none-any.whl", hash = "sha256:25a1c2fc35ee4f4c87c855c9b09af09b0084cba796d3b2972586bb64ef23b6dc"}, + {file = "opentelemetry_instrumentation_celery-0.42b0.tar.gz", hash = "sha256:1b6a55c1f2bd193737643e736aa85988b8522fdbff7ec934edc34ee59257fa5d"}, ] [package.dependencies] opentelemetry-api = ">=1.12,<2.0" -opentelemetry-instrumentation = "0.41b0" -opentelemetry-semantic-conventions = "0.41b0" +opentelemetry-instrumentation = "0.42b0" +opentelemetry-semantic-conventions = "0.42b0" [package.extras] instruments = ["celery (>=4.0,<6.0)"] -test = ["opentelemetry-instrumentation-celery[instruments]", "opentelemetry-test-utils (==0.41b0)", "pytest"] +test = ["opentelemetry-instrumentation-celery[instruments]", "opentelemetry-test-utils (==0.42b0)", "pytest"] [[package]] name = "opentelemetry-instrumentation-logging" -version = "0.41b0" +version = "0.42b0" description = "OpenTelemetry Logging instrumentation" optional = false python-versions = "*" files = [ - {file = "opentelemetry_instrumentation_logging-0.41b0-py2.py3-none-any.whl", hash = "sha256:ab7117886695c32eb30d7a59199292283c5e652e2b9f2d11874fe4359eacc16a"}, - {file = "opentelemetry_instrumentation_logging-0.41b0.tar.gz", hash = "sha256:8ad46e011a99df726323428f0d0a09bf68159ab776b8184ba6d83a7c44f7de81"}, + {file = "opentelemetry_instrumentation_logging-0.42b0-py2.py3-none-any.whl", hash = "sha256:d504103ddfd260e11f0c07d8c0f0ca25694c3c0a96dd0902239baa9790b12c29"}, + {file = "opentelemetry_instrumentation_logging-0.42b0.tar.gz", hash = "sha256:222922cb666bcada986db5b3574656589dbd118ddf976a0f1c75098f2d8fb40f"}, ] [package.dependencies] opentelemetry-api = ">=1.12,<2.0" -opentelemetry-instrumentation = "0.41b0" +opentelemetry-instrumentation = "0.42b0" [package.extras] -test = ["opentelemetry-test-utils (==0.41b0)"] +test = ["opentelemetry-test-utils (==0.42b0)"] [[package]] name = "opentelemetry-sdk" -version = "1.20.0" +version = "1.21.0" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.7" files = [ - {file = "opentelemetry_sdk-1.20.0-py3-none-any.whl", hash = "sha256:f2230c276ff4c63ea09b3cb2e2ac6b1265f90af64e8d16bbf275c81a9ce8e804"}, - {file = "opentelemetry_sdk-1.20.0.tar.gz", hash = "sha256:702e432a457fa717fd2ddfd30640180e69938f85bb7fec3e479f85f61c1843f8"}, + {file = "opentelemetry_sdk-1.21.0-py3-none-any.whl", hash = "sha256:9fe633243a8c655fedace3a0b89ccdfc654c0290ea2d8e839bd5db3131186f73"}, + {file = "opentelemetry_sdk-1.21.0.tar.gz", hash = "sha256:3ec8cd3020328d6bc5c9991ccaf9ae820ccb6395a5648d9a95d3ec88275b8879"}, ] [package.dependencies] -opentelemetry-api = "1.20.0" -opentelemetry-semantic-conventions = 
"0.41b0" +opentelemetry-api = "1.21.0" +opentelemetry-semantic-conventions = "0.42b0" typing-extensions = ">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.41b0" +version = "0.42b0" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.7" files = [ - {file = "opentelemetry_semantic_conventions-0.41b0-py3-none-any.whl", hash = "sha256:45404391ed9e50998183a4925ad1b497c01c143f06500c3b9c3d0013492bb0f2"}, - {file = "opentelemetry_semantic_conventions-0.41b0.tar.gz", hash = "sha256:0ce5b040b8a3fc816ea5879a743b3d6fe5db61f6485e4def94c6ee4d402e1eb7"}, + {file = "opentelemetry_semantic_conventions-0.42b0-py3-none-any.whl", hash = "sha256:5cd719cbfec448af658860796c5d0fcea2fdf0945a2bed2363f42cb1ee39f526"}, + {file = "opentelemetry_semantic_conventions-0.42b0.tar.gz", hash = "sha256:44ae67a0a3252a05072877857e5cc1242c98d4cf12870159f1a94bec800d38ec"}, ] [[package]] @@ -1027,13 +1048,13 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co [[package]] name = "pluggy" -version = "1.0.0" +version = "1.4.0" description = "plugin and hook calling mechanisms for python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, - {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, ] [package.extras] @@ -1193,92 +1214,93 @@ extra = ["pygments (>=2.12)"] [[package]] name = "pymongo" -version = "4.5.0" +version = "4.6.1" description = "Python driver for MongoDB " optional = false python-versions = ">=3.7" files = [ - {file = "pymongo-4.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d4fa1b01fa7e5b7bb8d312e3542e211b320eb7a4e3d8dc884327039d93cb9e0"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux1_i686.whl", hash = "sha256:dfcd2b9f510411de615ccedd47462dae80e82fdc09fe9ab0f0f32f11cf57eeb5"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:3e33064f1984db412b34d51496f4ea785a9cff621c67de58e09fb28da6468a52"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:33faa786cc907de63f745f587e9879429b46033d7d97a7b84b37f4f8f47b9b32"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:76a262c41c1a7cbb84a3b11976578a7eb8e788c4b7bfbd15c005fb6ca88e6e50"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:0f4b125b46fe377984fbaecf2af40ed48b05a4b7676a2ff98999f2016d66b3ec"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:40d5f6e853ece9bfc01e9129b228df446f49316a4252bb1fbfae5c3c9dedebad"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:152259f0f1a60f560323aacf463a3642a65a25557683f49cfa08c8f1ecb2395a"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d64878d1659d2a5bdfd0f0a4d79bafe68653c573681495e424ab40d7b6d6d41"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1bb3a62395ffe835dbef3a1cbff48fbcce709c78bd1f52e896aee990928432b"}, - {file = 
"pymongo-4.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe48f50fb6348511a3268a893bfd4ab5f263f5ac220782449d03cd05964d1ae7"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7591a3beea6a9a4fa3080d27d193b41f631130e3ffa76b88c9ccea123f26dc59"}, - {file = "pymongo-4.5.0-cp310-cp310-win32.whl", hash = "sha256:3a7166d57dc74d679caa7743b8ecf7dc3a1235a9fd178654dddb2b2a627ae229"}, - {file = "pymongo-4.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:21b953da14549ff62ea4ae20889c71564328958cbdf880c64a92a48dda4c9c53"}, - {file = "pymongo-4.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ead4f19d0257a756b21ac2e0e85a37a7245ddec36d3b6008d5bfe416525967dc"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9aff6279e405dc953eeb540ab061e72c03cf38119613fce183a8e94f31be608f"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4c8d6aa91d3e35016847cbe8d73106e3d1c9a4e6578d38e2c346bfe8edb3ca"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08819da7864f9b8d4a95729b2bea5fffed08b63d3b9c15b4fea47de655766cf5"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a253b765b7cbc4209f1d8ee16c7287c4268d3243070bf72d7eec5aa9dfe2a2c2"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8027c9063579083746147cf401a7072a9fb6829678076cd3deff28bb0e0f50c8"}, - {file = "pymongo-4.5.0-cp311-cp311-win32.whl", hash = "sha256:9d2346b00af524757576cc2406414562cced1d4349c92166a0ee377a2a483a80"}, - {file = "pymongo-4.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:c3c3525ea8658ee1192cdddf5faf99b07ebe1eeaa61bf32821126df6d1b8072b"}, - {file = "pymongo-4.5.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e5a27f348909235a106a3903fc8e70f573d89b41d723a500869c6569a391cff7"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9a9a39b7cac81dca79fca8c2a6479ef4c7b1aab95fad7544cc0e8fd943595a2"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:496c9cbcb4951183d4503a9d7d2c1e3694aab1304262f831d5e1917e60386036"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23cc6d7eb009c688d70da186b8f362d61d5dd1a2c14a45b890bd1e91e9c451f2"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fff7d17d30b2cd45afd654b3fc117755c5d84506ed25fda386494e4e0a3416e1"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6422b6763b016f2ef2beedded0e546d6aa6ba87910f9244d86e0ac7690f75c96"}, - {file = "pymongo-4.5.0-cp312-cp312-win32.whl", hash = "sha256:77cfff95c1fafd09e940b3fdcb7b65f11442662fad611d0e69b4dd5d17a81c60"}, - {file = "pymongo-4.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:e57d859b972c75ee44ea2ef4758f12821243e99de814030f69a3decb2aa86807"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2b0176f9233a5927084c79ff80b51bd70bfd57e4f3d564f50f80238e797f0c8a"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:89b3f2da57a27913d15d2a07d58482f33d0a5b28abd20b8e643ab4d625e36257"}, - {file = 
"pymongo-4.5.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:5caee7bd08c3d36ec54617832b44985bd70c4cbd77c5b313de6f7fce0bb34f93"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:1d40ad09d9f5e719bc6f729cc6b17f31c0b055029719406bd31dde2f72fca7e7"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:076afa0a4a96ca9f77fec0e4a0d241200b3b3a1766f8d7be9a905ecf59a7416b"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:3fa3648e4f1e63ddfe53563ee111079ea3ab35c3b09cd25bc22dadc8269a495f"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:44ee985194c426ddf781fa784f31ffa29cb59657b2dba09250a4245431847d73"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b33c17d9e694b66d7e96977e9e56df19d662031483efe121a24772a44ccbbc7e"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d79ae3bb1ff041c0db56f138c88ce1dfb0209f3546d8d6e7c3f74944ecd2439"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d67225f05f6ea27c8dc57f3fa6397c96d09c42af69d46629f71e82e66d33fa4f"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41771b22dd2822540f79a877c391283d4e6368125999a5ec8beee1ce566f3f82"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a1f26bc1f5ce774d99725773901820dfdfd24e875028da4a0252a5b48dcab5c"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3236cf89d69679eaeb9119c840f5c7eb388a2110b57af6bb6baf01a1da387c18"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e1f61355c821e870fb4c17cdb318669cfbcf245a291ce5053b41140870c3e5cc"}, - {file = "pymongo-4.5.0-cp37-cp37m-win32.whl", hash = "sha256:49dce6957598975d8b8d506329d2a3a6c4aee911fa4bbcf5e52ffc6897122950"}, - {file = "pymongo-4.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2227a08b091bd41df5aadee0a5037673f691e2aa000e1968b1ea2342afc6880"}, - {file = "pymongo-4.5.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:435228d3c16a375274ac8ab9c4f9aef40c5e57ddb8296e20ecec9e2461da1017"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:8e559116e4128630ad3b7e788e2e5da81cbc2344dee246af44471fa650486a70"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:840eaf30ccac122df260b6005f9dfae4ac287c498ee91e3e90c56781614ca238"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b4fe46b58010115514b842c669a0ed9b6a342017b15905653a5b1724ab80917f"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:a8127437ebc196a6f5e8fddd746bd0903a400dc6b5ae35df672dd1ccc7170a2a"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:2988ef5e6b360b3ff1c6d55c53515499de5f48df31afd9f785d788cdacfbe2d3"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:e249190b018d63c901678053b4a43e797ca78b93fb6d17633e3567d4b3ec6107"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:1240edc1a448d4ada4bf1a0e55550b6292420915292408e59159fd8bbdaf8f63"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6d2a56fc2354bb6378f3634402eec788a8f3facf0b3e7d468db5f2b5a78d763"}, - {file = 
"pymongo-4.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a0aade2b11dc0c326ccd429ee4134d2d47459ff68d449c6d7e01e74651bd255"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:74c0da07c04d0781490b2915e7514b1adb265ef22af039a947988c331ee7455b"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3754acbd7efc7f1b529039fcffc092a15e1cf045e31f22f6c9c5950c613ec4d"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:631492573a1bef2f74f9ac0f9d84e0ce422c251644cd81207530af4aa2ee1980"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e2654d1278384cff75952682d17c718ecc1ad1d6227bb0068fd826ba47d426a5"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:168172ef7856e20ec024fe2a746bfa895c88b32720138e6438fd765ebd2b62dd"}, - {file = "pymongo-4.5.0-cp38-cp38-win32.whl", hash = "sha256:b25f7bea162b3dbec6d33c522097ef81df7c19a9300722fa6853f5b495aecb77"}, - {file = "pymongo-4.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:b520aafc6cb148bac09ccf532f52cbd31d83acf4d3e5070d84efe3c019a1adbf"}, - {file = "pymongo-4.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8543253adfaa0b802bfa88386db1009c6ebb7d5684d093ee4edc725007553d21"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:bc5d8c3647b8ae28e4312f1492b8f29deebd31479cd3abaa989090fb1d66db83"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:505f8519c4c782a61d94a17b0da50be639ec462128fbd10ab0a34889218fdee3"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:53f2dda54d76a98b43a410498bd12f6034b2a14b6844ca08513733b2b20b7ad8"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:9c04b9560872fa9a91251030c488e0a73bce9321a70f991f830c72b3f8115d0d"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:58a63a26a1e3dc481dd3a18d6d9f8bd1d576cd1ffe0d479ba7dd38b0aeb20066"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:f076b779aa3dc179aa3ed861be063a313ed4e48ae9f6a8370a9b1295d4502111"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:1b1d7d9aabd8629a31d63cd106d56cca0e6420f38e50563278b520f385c0d86e"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37df8f6006286a5896d1cbc3efb8471ced42e3568d38e6cb00857277047b0d63"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56320c401f544d762fc35766936178fbceb1d9261cd7b24fbfbc8fb6f67aa8a5"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbd705d5f3c3d1ff2d169e418bb789ff07ab3c70d567cc6ba6b72b04b9143481"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a167081c75cf66b32f30e2f1eaee9365af935a86dbd76788169911bed9b5d5"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c42748ccc451dfcd9cef6c5447a7ab727351fd9747ad431db5ebb18a9b78a4d"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf62da7a4cdec9a4b2981fcbd5e08053edffccf20e845c0b6ec1e77eb7fab61d"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:b5bbb87fa0511bd313d9a2c90294c88db837667c2bda2ea3fa7a35b59fd93b1f"}, - {file = "pymongo-4.5.0-cp39-cp39-win32.whl", hash = "sha256:465fd5b040206f8bce7016b01d7e7f79d2fcd7c2b8e41791be9632a9df1b4999"}, - {file = "pymongo-4.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:63d8019eee119df308a075b8a7bdb06d4720bf791e2b73d5ab0e7473c115d79c"}, - {file = "pymongo-4.5.0.tar.gz", hash = "sha256:681f252e43b3ef054ca9161635f81b730f4d8cadd28b3f2b2004f5a72f853982"}, + {file = "pymongo-4.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4344c30025210b9fa80ec257b0e0aab5aa1d5cca91daa70d82ab97b482cc038e"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux1_i686.whl", hash = "sha256:1c5654bb8bb2bdb10e7a0bc3c193dd8b49a960b9eebc4381ff5a2043f4c3c441"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:eaf2f65190c506def2581219572b9c70b8250615dc918b3b7c218361a51ec42e"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:262356ea5fcb13d35fb2ab6009d3927bafb9504ef02339338634fffd8a9f1ae4"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:2dd2f6960ee3c9360bed7fb3c678be0ca2d00f877068556785ec2eb6b73d2414"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:ff925f1cca42e933376d09ddc254598f8c5fcd36efc5cac0118bb36c36217c41"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:3cadf7f4c8e94d8a77874b54a63c80af01f4d48c4b669c8b6867f86a07ba994f"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55dac73316e7e8c2616ba2e6f62b750918e9e0ae0b2053699d66ca27a7790105"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:154b361dcb358ad377d5d40df41ee35f1cc14c8691b50511547c12404f89b5cb"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2940aa20e9cc328e8ddeacea8b9a6f5ddafe0b087fedad928912e787c65b4909"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:010bc9aa90fd06e5cc52c8fac2c2fd4ef1b5f990d9638548dde178005770a5e8"}, + {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e470fa4bace5f50076c32f4b3cc182b31303b4fefb9b87f990144515d572820b"}, + {file = "pymongo-4.6.1-cp310-cp310-win32.whl", hash = "sha256:da08ea09eefa6b960c2dd9a68ec47949235485c623621eb1d6c02b46765322ac"}, + {file = "pymongo-4.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:13d613c866f9f07d51180f9a7da54ef491d130f169e999c27e7633abe8619ec9"}, + {file = "pymongo-4.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6a0ae7a48a6ef82ceb98a366948874834b86c84e288dbd55600c1abfc3ac1d88"}, + {file = "pymongo-4.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bd94c503271e79917b27c6e77f7c5474da6930b3fb9e70a12e68c2dff386b9a"}, + {file = "pymongo-4.6.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d4ccac3053b84a09251da8f5350bb684cbbf8c8c01eda6b5418417d0a8ab198"}, + {file = "pymongo-4.6.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:349093675a2d3759e4fb42b596afffa2b2518c890492563d7905fac503b20daa"}, + {file = "pymongo-4.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88beb444fb438385e53dc9110852910ec2a22f0eab7dd489e827038fdc19ed8d"}, + {file = 
"pymongo-4.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8e62d06e90f60ea2a3d463ae51401475568b995bafaffd81767d208d84d7bb1"}, + {file = "pymongo-4.6.1-cp311-cp311-win32.whl", hash = "sha256:5556e306713e2522e460287615d26c0af0fe5ed9d4f431dad35c6624c5d277e9"}, + {file = "pymongo-4.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:b10d8cda9fc2fcdcfa4a000aa10413a2bf8b575852cd07cb8a595ed09689ca98"}, + {file = "pymongo-4.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b435b13bb8e36be11b75f7384a34eefe487fe87a6267172964628e2b14ecf0a7"}, + {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e438417ce1dc5b758742e12661d800482200b042d03512a8f31f6aaa9137ad40"}, + {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b47ebd89e69fbf33d1c2df79759d7162fc80c7652dacfec136dae1c9b3afac7"}, + {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbed8cccebe1169d45cedf00461b2842652d476d2897fd1c42cf41b635d88746"}, + {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c30a9e06041fbd7a7590693ec5e407aa8737ad91912a1e70176aff92e5c99d20"}, + {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8729dbf25eb32ad0dc0b9bd5e6a0d0b7e5c2dc8ec06ad171088e1896b522a74"}, + {file = "pymongo-4.6.1-cp312-cp312-win32.whl", hash = "sha256:3177f783ae7e08aaf7b2802e0df4e4b13903520e8380915e6337cdc7a6ff01d8"}, + {file = "pymongo-4.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:00c199e1c593e2c8b033136d7a08f0c376452bac8a896c923fcd6f419e07bdd2"}, + {file = "pymongo-4.6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6dcc95f4bb9ed793714b43f4f23a7b0c57e4ef47414162297d6f650213512c19"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:13552ca505366df74e3e2f0a4f27c363928f3dff0eef9f281eb81af7f29bc3c5"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:77e0df59b1a4994ad30c6d746992ae887f9756a43fc25dec2db515d94cf0222d"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3a7f02a58a0c2912734105e05dedbee4f7507e6f1bd132ebad520be0b11d46fd"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:026a24a36394dc8930cbcb1d19d5eb35205ef3c838a7e619e04bd170713972e7"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:3b287e814a01deddb59b88549c1e0c87cefacd798d4afc0c8bd6042d1c3d48aa"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:9a710c184ba845afb05a6f876edac8f27783ba70e52d5eaf939f121fc13b2f59"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:30b2c9caf3e55c2e323565d1f3b7e7881ab87db16997dc0cbca7c52885ed2347"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff62ba8ff70f01ab4fe0ae36b2cb0b5d1f42e73dfc81ddf0758cd9f77331ad25"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:547dc5d7f834b1deefda51aedb11a7af9c51c45e689e44e14aa85d44147c7657"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1de3c6faf948f3edd4e738abdb4b76572b4f4fdfc1fed4dad02427e70c5a6219"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a2831e05ce0a4df10c4ac5399ef50b9a621f90894c2a4d2945dc5658765514ed"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:144a31391a39a390efce0c5ebcaf4bf112114af4384c90163f402cec5ede476b"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33bb16a07d3cc4e0aea37b242097cd5f7a156312012455c2fa8ca396953b11c4"}, + {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b7b1a83ce514700276a46af3d9e481ec381f05b64939effc9065afe18456a6b9"}, + {file = "pymongo-4.6.1-cp37-cp37m-win32.whl", hash = "sha256:3071ec998cc3d7b4944377e5f1217c2c44b811fae16f9a495c7a1ce9b42fb038"}, + {file = "pymongo-4.6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2346450a075625c4d6166b40a013b605a38b6b6168ce2232b192a37fb200d588"}, + {file = "pymongo-4.6.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:061598cbc6abe2f382ab64c9caa83faa2f4c51256f732cdd890bcc6e63bfb67e"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d483793a384c550c2d12cb794ede294d303b42beff75f3b3081f57196660edaf"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:f9756f1d25454ba6a3c2f1ef8b7ddec23e5cdeae3dc3c3377243ae37a383db00"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:1ed23b0e2dac6f84f44c8494fbceefe6eb5c35db5c1099f56ab78fc0d94ab3af"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:3d18a9b9b858ee140c15c5bfcb3e66e47e2a70a03272c2e72adda2482f76a6ad"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:c258dbacfff1224f13576147df16ce3c02024a0d792fd0323ac01bed5d3c545d"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:f7acc03a4f1154ba2643edeb13658d08598fe6e490c3dd96a241b94f09801626"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:76013fef1c9cd1cd00d55efde516c154aa169f2bf059b197c263a255ba8a9ddf"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f0e6a6c807fa887a0c51cc24fe7ea51bb9e496fe88f00d7930063372c3664c3"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd1fa413f8b9ba30140de198e4f408ffbba6396864c7554e0867aa7363eb58b2"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d219b4508f71d762368caec1fc180960569766049bbc4d38174f05e8ef2fe5b"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27b81ecf18031998ad7db53b960d1347f8f29e8b7cb5ea7b4394726468e4295e"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56816e43c92c2fa8c11dc2a686f0ca248bea7902f4a067fa6cbc77853b0f041e"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef801027629c5b511cf2ba13b9be29bfee36ae834b2d95d9877818479cdc99ea"}, + {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d4c2be9760b112b1caf649b4977b81b69893d75aa86caf4f0f398447be871f3c"}, + {file = "pymongo-4.6.1-cp38-cp38-win32.whl", hash = "sha256:39d77d8bbb392fa443831e6d4ae534237b1f4eee6aa186f0cdb4e334ba89536e"}, + {file = "pymongo-4.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:4497d49d785482cc1a44a0ddf8830b036a468c088e72a05217f5b60a9e025012"}, + {file = "pymongo-4.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:69247f7a2835fc0984bbf0892e6022e9a36aec70e187fcfe6cae6a373eb8c4de"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:7bb0e9049e81def6829d09558ad12d16d0454c26cabe6efc3658e544460688d9"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6a1810c2cbde714decf40f811d1edc0dae45506eb37298fd9d4247b8801509fe"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e2aced6fb2f5261b47d267cb40060b73b6527e64afe54f6497844c9affed5fd0"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:d0355cff58a4ed6d5e5f6b9c3693f52de0784aa0c17119394e2a8e376ce489d4"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:3c74f4725485f0a7a3862cfd374cc1b740cebe4c133e0c1425984bcdcce0f4bb"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:9c79d597fb3a7c93d7c26924db7497eba06d58f88f58e586aa69b2ad89fee0f8"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:8ec75f35f62571a43e31e7bd11749d974c1b5cd5ea4a8388725d579263c0fdf6"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e641f931c5cd95b376fd3c59db52770e17bec2bf86ef16cc83b3906c054845"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9aafd036f6f2e5ad109aec92f8dbfcbe76cff16bad683eb6dd18013739c0b3ae"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f2b856518bfcfa316c8dae3d7b412aecacf2e8ba30b149f5eb3b63128d703b9"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec31adc2e988fd7db3ab509954791bbc5a452a03c85e45b804b4bfc31fa221d"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9167e735379ec43d8eafa3fd675bfbb12e2c0464f98960586e9447d2cf2c7a83"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1461199b07903fc1424709efafe379205bf5f738144b1a50a08b0396357b5abf"}, + {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3094c7d2f820eecabadae76bfec02669567bbdd1730eabce10a5764778564f7b"}, + {file = "pymongo-4.6.1-cp39-cp39-win32.whl", hash = "sha256:c91ea3915425bd4111cb1b74511cdc56d1d16a683a48bf2a5a96b6a6c0f297f7"}, + {file = "pymongo-4.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:ef102a67ede70e1721fe27f75073b5314911dbb9bc27cde0a1c402a11531e7bd"}, + {file = "pymongo-4.6.1.tar.gz", hash = "sha256:31dab1f3e1d0cdd57e8df01b645f52d43cc1b653ed3afd535d2891f4fc4f9712"}, ] [package.dependencies] @@ -1290,6 +1312,7 @@ encryption = ["certifi", "pymongo[aws]", "pymongocrypt (>=1.6.0,<2.0.0)"] gssapi = ["pykerberos", "winkerberos (>=0.5.0)"] ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] snappy = ["python-snappy"] +test = ["pytest (>=7)"] zstd = ["zstandard"] [[package]] @@ -1349,7 +1372,7 @@ requests = ">=2.26.0,<3.0.0" [[package]] name = "pysnmplib" -version = "5.0.21" +version = "5.0.24" description = "" optional = false python-versions = "^3.8" @@ -1365,17 +1388,17 @@ pysnmp-pysmi = "^1.0.4" type = "git" url = "https://github.com/pysnmp/pysnmp.git" reference = "main" -resolved_reference = "bc1fb3c39764f36c1b7c9551b52ef8246b9aea7c" +resolved_reference = "4891556e7db831a5a9b27d4bad8ff102609b2a2c" [[package]] name = "pytest" -version = "7.4.2" +version = "8.0.0" description = "pytest: 
simple powerful testing with Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-7.4.2-py3-none-any.whl", hash = "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002"}, - {file = "pytest-7.4.2.tar.gz", hash = "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"}, + {file = "pytest-8.0.0-py3-none-any.whl", hash = "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6"}, + {file = "pytest-8.0.0.tar.gz", hash = "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c"}, ] [package.dependencies] @@ -1383,7 +1406,7 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=0.12,<2.0" +pluggy = ">=1.3.0,<2.0" tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] @@ -1423,13 +1446,13 @@ six = ">=1.5" [[package]] name = "python-dotenv" -version = "1.0.0" +version = "1.0.1" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.8" files = [ - {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, - {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, ] [package.extras] @@ -1458,6 +1481,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1465,8 +1489,16 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", 
hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1483,6 +1515,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1490,6 +1523,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = 
"sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -1647,13 +1681,13 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "requests-cache" -version = "1.1.0" +version = "1.1.1" description = "A persistent cache for python requests" optional = false python-versions = ">=3.7,<4.0" files = [ - {file = "requests_cache-1.1.0-py3-none-any.whl", hash = "sha256:178282bce704b912c59e7f88f367c42bddd6cde6bf511b2a3e3cfb7e5332a92a"}, - {file = "requests_cache-1.1.0.tar.gz", hash = "sha256:41b79166aa8e300cc4de982f7ab7c52af914a785160be1eda25c6e9265969a67"}, + {file = "requests_cache-1.1.1-py3-none-any.whl", hash = "sha256:c8420cf096f3aafde13c374979c21844752e2694ffd8710e6764685bb577ac90"}, + {file = "requests_cache-1.1.1.tar.gz", hash = "sha256:764f93d3fa860be72125a568c2cc8eafb151cf29b4dc2515433a56ee657e1c60"}, ] [package.dependencies] @@ -1694,60 +1728,80 @@ requests = ">=2.20" docs = ["furo (>=2022.12,<2023.0)", "myst-parser (>=0.17)", "sphinx (>=5.2,<6.0)", "sphinx-autodoc-typehints (>=1.22,<2.0)", "sphinx-copybutton (>=0.5)"] [[package]] -name = "ruamel.yaml" -version = "0.17.21" +name = "ruamel-yaml" +version = "0.18.5" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" optional = false -python-versions = ">=3" +python-versions = ">=3.7" files = [ - {file = "ruamel.yaml-0.17.21-py3-none-any.whl", hash = "sha256:742b35d3d665023981bd6d16b3d24248ce5df75fdb4e2924e93a05c1f8b61ca7"}, - {file = "ruamel.yaml-0.17.21.tar.gz", hash = "sha256:8b7ce697a2f212752a35c1ac414471dc16c424c9573be4926b56ff3f5d23b7af"}, + {file = "ruamel.yaml-0.18.5-py3-none-any.whl", hash = "sha256:a013ac02f99a69cdd6277d9664689eb1acba07069f912823177c5eced21a6ada"}, + {file = "ruamel.yaml-0.18.5.tar.gz", hash = "sha256:61917e3a35a569c1133a8f772e1226961bf5a1198bea7e23f06a0841dea1ab0e"}, ] [package.dependencies] -"ruamel.yaml.clib" = {version = ">=0.2.6", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.11\""} +"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.13\""} [package.extras] -docs = ["ryd"] +docs = ["mercurial (>5.7)", "ryd"] jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] [[package]] -name = "ruamel.yaml.clib" -version = "0.2.6" +name = "ruamel-yaml-clib" +version = "0.2.8" description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6e7be2c5bcb297f5b82fee9c665eb2eb7001d1050deaba8471842979293a80b0"}, - {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:066f886bc90cc2ce44df8b5f7acfc6a7e2b2e672713f027136464492b0c34d7c"}, - {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:221eca6f35076c6ae472a531afa1c223b9c29377e62936f61bc8e6e8bdc5f9e7"}, - {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-win32.whl", hash = "sha256:1070ba9dd7f9370d0513d649420c3b362ac2d687fe78c6e888f5b12bf8bc7bee"}, - {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-win_amd64.whl", hash = 
"sha256:77df077d32921ad46f34816a9a16e6356d8100374579bc35e15bab5d4e9377de"}, - {file = "ruamel.yaml.clib-0.2.6-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:cfdb9389d888c5b74af297e51ce357b800dd844898af9d4a547ffc143fa56751"}, - {file = "ruamel.yaml.clib-0.2.6-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:7b2927e92feb51d830f531de4ccb11b320255ee95e791022555971c466af4527"}, - {file = "ruamel.yaml.clib-0.2.6-cp35-cp35m-win32.whl", hash = "sha256:ada3f400d9923a190ea8b59c8f60680c4ef8a4b0dfae134d2f2ff68429adfab5"}, - {file = "ruamel.yaml.clib-0.2.6-cp35-cp35m-win_amd64.whl", hash = "sha256:de9c6b8a1ba52919ae919f3ae96abb72b994dd0350226e28f3686cb4f142165c"}, - {file = "ruamel.yaml.clib-0.2.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d67f273097c368265a7b81e152e07fb90ed395df6e552b9fa858c6d2c9f42502"}, - {file = "ruamel.yaml.clib-0.2.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:72a2b8b2ff0a627496aad76f37a652bcef400fd861721744201ef1b45199ab78"}, - {file = "ruamel.yaml.clib-0.2.6-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:d3c620a54748a3d4cf0bcfe623e388407c8e85a4b06b8188e126302bcab93ea8"}, - {file = "ruamel.yaml.clib-0.2.6-cp36-cp36m-win32.whl", hash = "sha256:9efef4aab5353387b07f6b22ace0867032b900d8e91674b5d8ea9150db5cae94"}, - {file = "ruamel.yaml.clib-0.2.6-cp36-cp36m-win_amd64.whl", hash = "sha256:846fc8336443106fe23f9b6d6b8c14a53d38cef9a375149d61f99d78782ea468"}, - {file = "ruamel.yaml.clib-0.2.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0847201b767447fc33b9c235780d3aa90357d20dd6108b92be544427bea197dd"}, - {file = "ruamel.yaml.clib-0.2.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:78988ed190206672da0f5d50c61afef8f67daa718d614377dcd5e3ed85ab4a99"}, - {file = "ruamel.yaml.clib-0.2.6-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:210c8fcfeff90514b7133010bf14e3bad652c8efde6b20e00c43854bf94fa5a6"}, - {file = "ruamel.yaml.clib-0.2.6-cp37-cp37m-win32.whl", hash = "sha256:a49e0161897901d1ac9c4a79984b8410f450565bbad64dbfcbf76152743a0cdb"}, - {file = "ruamel.yaml.clib-0.2.6-cp37-cp37m-win_amd64.whl", hash = "sha256:bf75d28fa071645c529b5474a550a44686821decebdd00e21127ef1fd566eabe"}, - {file = "ruamel.yaml.clib-0.2.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a32f8d81ea0c6173ab1b3da956869114cae53ba1e9f72374032e33ba3118c233"}, - {file = "ruamel.yaml.clib-0.2.6-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7f7ecb53ae6848f959db6ae93bdff1740e651809780822270eab111500842a84"}, - {file = "ruamel.yaml.clib-0.2.6-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:61bc5e5ca632d95925907c569daa559ea194a4d16084ba86084be98ab1cec1c6"}, - {file = "ruamel.yaml.clib-0.2.6-cp38-cp38-win32.whl", hash = "sha256:89221ec6d6026f8ae859c09b9718799fea22c0e8da8b766b0b2c9a9ba2db326b"}, - {file = "ruamel.yaml.clib-0.2.6-cp38-cp38-win_amd64.whl", hash = "sha256:31ea73e564a7b5fbbe8188ab8b334393e06d997914a4e184975348f204790277"}, - {file = "ruamel.yaml.clib-0.2.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc6a613d6c74eef5a14a214d433d06291526145431c3b964f5e16529b1842bed"}, - {file = "ruamel.yaml.clib-0.2.6-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:1866cf2c284a03b9524a5cc00daca56d80057c5ce3cdc86a52020f4c720856f0"}, - {file = "ruamel.yaml.clib-0.2.6-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:1b4139a6ffbca8ef60fdaf9b33dec05143ba746a6f0ae0f9d11d38239211d335"}, - {file = "ruamel.yaml.clib-0.2.6-cp39-cp39-win32.whl", hash = "sha256:3fb9575a5acd13031c57a62cc7823e5d2ff8bc3835ba4d94b921b4e6ee664104"}, - {file = "ruamel.yaml.clib-0.2.6-cp39-cp39-win_amd64.whl", 
hash = "sha256:825d5fccef6da42f3c8eccd4281af399f21c02b32d98e113dbc631ea6a6ecbc7"}, - {file = "ruamel.yaml.clib-0.2.6.tar.gz", hash = "sha256:4ff604ce439abb20794f05613c374759ce10e3595d1867764dd1ae675b85acbd"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win_amd64.whl", hash = "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b"}, + {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win32.whl", hash = "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win32.whl", hash = "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = 
"sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win32.whl", hash = "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win_amd64.whl", hash = "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15"}, + {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, ] [[package]] @@ -1899,13 +1953,13 @@ test = ["coverage", "flake8 (>=3.7)", "mypy", "pretend", "pytest"] [[package]] name = "vine" -version = "5.0.0" -description = "Promises, promises, promises." +version = "5.1.0" +description = "Python promises." optional = false python-versions = ">=3.6" files = [ - {file = "vine-5.0.0-py2.py3-none-any.whl", hash = "sha256:4c9dceab6f76ed92105027c49c823800dd33cacce13bdedc5b914e3514b7fb30"}, - {file = "vine-5.0.0.tar.gz", hash = "sha256:7d3b1624a953da82ef63462013bbd271d3eb75751489f9807598e8f340bd637e"}, + {file = "vine-5.1.0-py3-none-any.whl", hash = "sha256:40fdf3c48b2cfe1c38a49e9ae2da6fda88e4794c810050a728bd7413811fb1dc"}, + {file = "vine-5.1.0.tar.gz", hash = "sha256:8b62e981d35c41049211cf62a0a1242d8c1ee9bd15bb196ce38aefd6799e61e0"}, ] [[package]] @@ -2010,6 +2064,16 @@ files = [ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, + {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, + {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, + {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = 
"sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, + {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, @@ -2075,4 +2139,4 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>= [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "6a309c7dd8f715f17d4c2509cf3af97cfda3c8f3e367a53b329ca10398fe268c" +content-hash = "b3fa129675c8c3082e6cdc84e3019b9130a7b359f021baf714e8393c1a171dc1" diff --git a/pyproject.toml b/pyproject.toml index 3f172703c..e9f897a3d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "splunk-connect-for-snmp" -version = "1.9.3" +version = "1.10.0-beta.6" description = "" authors = ["omrozowicz-splunk "] license = "Apache-2.0" @@ -24,7 +24,7 @@ python_files = ["test_*.py"] python = "^3.8" pymongo = {extras = ["srv"], version = "^4.0.0"} requests = {extras = ["crypto"], version = "^2.31.0"} -celery = {extras = ["tblib"], version = "5.3.4"} +celery = {extras = ["tblib"], version = "5.3.6"} pydantic = "^1.9.0" opentelemetry-api = "^1.6.2" opentelemetry-sdk = "^1.6.2" @@ -35,25 +35,25 @@ pyrate-limiter = "^2.10.0" requests-cache = "^1.0.0" requests-ratelimiter = "^0.4.0" mongoengine = "^0.27.0" -celery-redbeat = {git = "https://github.com/splunk/redbeat", rev = "main"} +celery-redbeat = {git = "https://github.com/splunk/redbeat", branch = "main"} PyYAML = "^6.0" #Note this is temporary PR to upstream project is issued wait-for-dep = {extras = ["redis"], git="https://github.com/omrozowicz-splunk/wait-for-dep.git"} mongolock = "^1.3.4" pika = "^1.2.0" JSON-log-formatter ="^0.5.1" -"ruamel.yaml" = "^0.17.21" -mkdocs-video = "^1.5.0" -pysnmplib = {git = "https://github.com/pysnmp/pysnmp.git", rev = "main"} +"ruamel.yaml" = "^0.18.0" +pysnmplib = {git = "https://github.com/pysnmp/pysnmp.git", branch = "main"} urllib3 = "^1.26.17" [tool.poetry.group.dev.dependencies] -pytest = "^7.2.1" +pytest = "^8.0.0" pytest-cov = "^4.0.0" -mike = "^1.0.1" +mike = "^2.0.0" mkdocs = "^1.2.2" mkdocs-material = "^9.0.0" python-dotenv = "^1.0.0" +mkdocs-video = "^1.5.0" [build-system] requires = ["poetry>=0.12"] diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml index 5f28baeac..40062ecdb 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -46,6 +46,8 @@ spec: value: INFO - name: CHAIN_OF_TASKS_EXPIRY_TIME value: "60" + - name: CONFIG_FROM_MONGO + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml index 5f28baeac..40062ecdb 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml +++ 
b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -46,6 +46,8 @@ spec: value: INFO - name: CHAIN_OF_TASKS_EXPIRY_TIME value: "60" + - name: CONFIG_FROM_MONGO + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/inventory/job.yaml index 5f28baeac..40062ecdb 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -46,6 +46,8 @@ spec: value: INFO - name: CHAIN_OF_TASKS_EXPIRY_TIME value: "60" + - name: CONFIG_FROM_MONGO + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/scheduler-config.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/scheduler-config.yaml new file mode 100644 index 000000000..2f7d00cdd --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/scheduler-config.yaml @@ -0,0 +1,42 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/scheduler-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-config + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: |- + profiles: + IF_profile: + frequency: 600 + varBinds: + - [ "IF-MIB", "ifDescr" ] + - [ "IF-MIB", "ifAdminStatus" ] + - [ "IF-MIB", "ifName" ] + - [ 'IF-MIB','ifAlias' ] + - [ "IF-MIB", "ifInDiscards" ] + - [ "IF-MIB", "ifInErrors" ] + - [ "IF-MIB", "ifInNUcastPkts" ] + - [ "IF-MIB", "ifInOctets" ] + - [ "IF-MIB", "ifInUcastPkts" ] + - [ "IF-MIB", "ifInUnknownProtos" ] + - [ "IF-MIB", "ifOutDiscards" ] + - [ "IF-MIB", "ifOutErrors" ] + - [ "IF-MIB", "ifOutNUcastPkts" ] + - [ "IF-MIB", "ifOutOctets" ] + - [ "IF-MIB", "ifOutQLen" ] + - [ "IF-MIB", "ifOutUcastPkts" ] + + communities: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml new file mode 100644 index 000000000..632980cd1 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml @@ -0,0 +1,16 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/scheduler-inventory.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-inventory + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +data: + inventory.csv: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 54.82.41.24,,2c,public,,,1800,IF_profile,false, diff --git 
a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/splunk-secret.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/splunk-secret.yaml new file mode 100644 index 000000000..21e689f0a --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/splunk-secret.yaml @@ -0,0 +1,9 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/splunk-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: splunk-connect-for-snmp-splunk +type: Opaque +data: + hec_token: "MDAwMDAwMDAtMDAwMC0wMDAwLTAwMDAtMDAwMDAwMDAwMDAw" diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/traps-config.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/traps-config.yaml new file mode 100644 index 000000000..2f4b3f37d --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/traps-config.yaml @@ -0,0 +1,18 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/traps-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-traps + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: |- + communities: + 2c: + - public + - homelab diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/inventory/job.yaml new file mode 100644 index 000000000..3134f724e --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -0,0 +1,87 @@ +--- +# Source: splunk-connect-for-snmp/templates/inventory/job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-splunk-connect-for-snmp-inventory + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-inventory + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-inventory + app.kubernetes.io/instance: release-name + spec: + containers: + - name: splunk-connect-for-snmp-inventory + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + ["inventory"] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/1 + - name: INVENTORY_PATH + value: /app/inventory/inventory.csv + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: LOG_LEVEL + value: INFO + - name: CHAIN_OF_TASKS_EXPIRY_TIME + value: "60" + - name: CONFIG_FROM_MONGO + value: "true" + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: inventory + mountPath: "/app/inventory" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: 
tmp + mountPath: "/tmp/" + readOnly: false + + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: inventory + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-inventory + # An array of keys from the ConfigMap to create as files + items: + - key: "inventory.csv" + path: "inventory.csv" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} + restartPolicy: OnFailure diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/deployment.yaml new file mode 100644 index 000000000..ff2a134ee --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -0,0 +1,98 @@ +--- +# Source: splunk-connect-for-snmp/templates/scheduler/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-splunk-connect-for-snmp-scheduler + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name + template: + metadata: + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: release-name-splunk-connect-for-snmp-user + securityContext: + fsGroup: 10001 + containers: + - name: splunk-connect-for-snmp-scheduler + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + [ + "celery", "beat", + ] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/1 + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: LOG_LEVEL + value: INFO + - name: INVENTORY_REFRESH_RATE + value: "600" + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + resources: + {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to 
mount. + name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/pdb.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/pdb.yaml new file mode 100644 index 000000000..ef36d43af --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/pdb.yaml @@ -0,0 +1,18 @@ +--- +# Source: splunk-connect-for-snmp/templates/scheduler/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: release-name-splunk-connect-for-snmp-scheduler + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + minAvailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-scheduler + app.kubernetes.io/instance: release-name diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/serviceaccount.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/serviceaccount.yaml new file mode 100644 index 000000000..59ae809f1 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +--- +# Source: splunk-connect-for-snmp/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-splunk-connect-for-snmp-user + labels: + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/sim/pdb.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/sim/pdb.yaml new file mode 100644 index 000000000..0f1827e83 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/sim/pdb.yaml @@ -0,0 +1,18 @@ +--- +# Source: splunk-connect-for-snmp/templates/sim/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: release-name-splunk-connect-for-snmp-sim + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-sim + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + minAvailable: 80% + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-sim + app.kubernetes.io/instance: release-name diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/tests/test-connection.yaml new file mode 100644 index 000000000..6851a86ec --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -0,0 +1,35 @@ +--- +# Source: splunk-connect-for-snmp/templates/tests/test-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "release-name-splunk-connect-for-snmp-trap-test-connection" + labels: + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": 
test + "kube-score/ignore": "pod-probes,pod-networkpolicy" +spec: + containers: + - name: wget + image: busybox:1.34.1 + imagePullPolicy: Always + command: ['wget'] + args: ['release-name-splunk-connect-for-snmp-trap:162'] + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + resources: + limits: + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + restartPolicy: Never diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml new file mode 100644 index 000000000..e405f5f39 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -0,0 +1,114 @@ +--- +# Source: splunk-connect-for-snmp/templates/traps/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-splunk-connect-for-snmp-trap + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-trap + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-trap + app.kubernetes.io/instance: release-name + template: + metadata: + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-trap + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: release-name-splunk-connect-for-snmp-user + securityContext: + fsGroup: 10001 + containers: + - name: splunk-connect-for-snmp-traps + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + [ + "trap" + ] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: LOG_LEVEL + value: INFO + - name: SPLUNK_HEC_SCHEME + value: "https" + - name: SPLUNK_HEC_HOST + value: "10.202.18.152" + - name: SPLUNK_HEC_PORT + value: "8088" + - name: SPLUNK_HEC_INSECURESSL + value: "true" + - name: SNMP_V3_SECURITY_ENGINE_ID + value: 80003a8c04 + - name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: splunk-connect-for-snmp-splunk + key: hec_token + ports: + - name: snmp-udp + containerPort: 2162 + protocol: UDP + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + + resources: + {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-trap + app.kubernetes.io/instance: release-name + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. 
+ name: splunk-connect-for-snmp-traps + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/pdb.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/pdb.yaml new file mode 100644 index 000000000..34bb78a7f --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/pdb.yaml @@ -0,0 +1,18 @@ +--- +# Source: splunk-connect-for-snmp/templates/traps/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: release-name-splunk-connect-for-snmp-trap + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-trap + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + minAvailable: 80% + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-trap + app.kubernetes.io/instance: release-name diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/service.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/service.yaml new file mode 100644 index 000000000..d65075ff0 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/service.yaml @@ -0,0 +1,27 @@ +--- +# Source: splunk-connect-for-snmp/templates/traps/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: release-name-splunk-connect-for-snmp-trap + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-trap + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm + annotations: + metallb.universe.tf/allow-shared-ip: "splunk-connect" + +spec: + type: LoadBalancer + externalTrafficPolicy: Local + loadBalancerIP: 10.202.6.213 + ports: + - port: 162 + targetPort: 2162 + protocol: UDP + name: snmp-udp + selector: + app.kubernetes.io/name: splunk-connect-for-snmp-trap + app.kubernetes.io/instance: release-name diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml new file mode 100644 index 000000000..78b8c4ba5 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml @@ -0,0 +1,92 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/configmap-backend.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-job-configmap + namespace: "default" +data: + job_config: | + apiVersion: batch/v1 + kind: Job + metadata: + name: release-name-splunk-connect-for-snmp-inventory + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-inventory + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm + spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-inventory + app.kubernetes.io/instance: release-name + spec: + containers: + - name: splunk-connect-for-snmp-inventory + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + 
imagePullPolicy: Always + args: + ["inventory"] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/1 + - name: INVENTORY_PATH + value: /app/inventory/inventory.csv + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: LOG_LEVEL + value: INFO + - name: CONFIG_FROM_MONGO + value: "true" + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: inventory + mountPath: "/app/inventory" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: inventory + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-inventory + # An array of keys from the ConfigMap to create as files + items: + - key: "inventory.csv" + path: "inventory.csv" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} + restartPolicy: OnFailure diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-backend-worker.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-backend-worker.yaml new file mode 100644 index 000000000..8e645641a --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-backend-worker.yaml @@ -0,0 +1,49 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/deployment-backend-worker.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ui-backend-worker-deployment + labels: + app: ui-backend-worker +spec: + replicas: 1 + selector: + matchLabels: + app: ui-backend-worker + template: + metadata: + labels: + app: ui-backend-worker + spec: + containers: + - name: ui-backend-worker + image: "ghcr.io/splunk/sc4snmp-ui/backend/container:main" + imagePullPolicy: Always + command: ["sh","-c","/celery_start.sh"] + env: + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/3 + - name: JOB_CONFIG_PATH + value: /config/job_config.yaml + - name: JOB_NAMESPACE + value: sc4snmp + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/2 + - name: VALUES_DIRECTORY + value: /var/values_dir + ports: + - containerPort: 5000 + volumeMounts: + - name: backend-configmap + mountPath: /config + serviceAccountName: job-robot + volumes: + - name: backend-configmap + configMap: + name: splunk-connect-for-snmp-job-configmap + items: + - key: job_config + path: job_config.yaml diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-backend.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-backend.yaml new file mode 100644 index 000000000..8cbca80c1 --- /dev/null +++ 
b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-backend.yaml @@ -0,0 +1,78 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/deployment-backend.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ui-backend-deployment + labels: + app: ui-backend +spec: + replicas: 1 + selector: + matchLabels: + app: ui-backend + template: + metadata: + labels: + app: ui-backend + spec: + securityContext: + runAsUser: 10000 + runAsGroup: 10000 + fsGroup: 10000 + initContainers: + - name: patch-log-dirs + image: registry.access.redhat.com/ubi9/ubi + imagePullPolicy: IfNotPresent + command: [ 'sh', '-c', ' + mkdir -p /var/values_dir; + chmod -v g+rwxs /var/values_dir; + if [ -d "/var/values_dir" ]; + then + setfacl -n -Rm d:m::rwx,m::rwx,d:g:10000:rwx,g:10000:rwx /var/values_dir; + fi;' ] + securityContext: + runAsUser: 0 + volumeMounts: + - name: values-directory + mountPath: /var/values_dir + containers: + - name: ui-backend + image: "ghcr.io/splunk/sc4snmp-ui/backend/container:main" + imagePullPolicy: Always + command: ["sh","-c","/flask_start.sh"] + env: + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/3 + - name: JOB_CONFIG_PATH + value: /config/job_config.yaml + - name: JOB_NAMESPACE + value: sc4snmp + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/2 + - name: VALUES_DIRECTORY + value: /var/values_dir + - name: VALUES_FILE + value: values.yaml + - name: KEEP_TEMP_FILES + value: "false" + ports: + - containerPort: 5000 + volumeMounts: + - name: backend-configmap + mountPath: /config + - name: values-directory + mountPath: /var/values_dir + serviceAccountName: job-robot + volumes: + - name: backend-configmap + configMap: + name: splunk-connect-for-snmp-job-configmap + items: + - key: job_config + path: job_config.yaml + - name: values-directory + hostPath: + path: /home/ubuntu diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-frontend.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-frontend.yaml new file mode 100644 index 000000000..612c15143 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/deployment-frontend.yaml @@ -0,0 +1,29 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/deployment-frontend.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ui-frontend-deployment + labels: + app: ui-frontend +spec: + replicas: 1 + selector: + matchLabels: + app: ui-frontend + template: + metadata: + labels: + app: ui-frontend + spec: + containers: + - name: ui-frontend + image: "ghcr.io/splunk/sc4snmp-ui/frontend/container:main" + imagePullPolicy: Always + env: + - name: REACT_APP_FLASK_PORT + value: "30002" + - name: DEMO_PORT + value: "30001" + ports: + - containerPort: 30001 diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/revert-patch-log-dirs.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/revert-patch-log-dirs.yaml new file mode 100644 index 000000000..2ba72c297 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/revert-patch-log-dirs.yaml @@ -0,0 +1,27 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/revert-patch-log-dirs.yaml +apiVersion: v1 +kind: Pod +metadata: + name: revert-patch-log-dirs + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": 
before-hook-creation,hook-succeeded +spec: + restartPolicy: Never + containers: + - name: revert-patch-log-dirs + image: registry.access.redhat.com/ubi9/ubi + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 + command: ['sh', '-c', ' + setfacl --recursive --remove-all /var/values_dir; + '] + volumeMounts: + - name: values-directory + mountPath: /var/values_dir + volumes: + - name: values-directory + hostPath: + path: /home/ubuntu diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/role-binding.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/role-binding.yaml new file mode 100644 index 000000000..46c4c7734 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/role-binding.yaml @@ -0,0 +1,15 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: job-robot + namespace: sc4snmp +subjects: +- kind: ServiceAccount + name: job-robot # Name of the ServiceAccount + namespace: sc4snmp +roleRef: + kind: Role # This must be Role or ClusterRole + name: job-robot # This must match the name of the Role or ClusterRole you wish to bind to + apiGroup: rbac.authorization.k8s.io diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/role.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/role.yaml new file mode 100644 index 000000000..66b15bf7f --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/role.yaml @@ -0,0 +1,14 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: sc4snmp + name: job-robot +rules: +- apiGroups: [""] # "" indicates the core API group + resources: ["pods"] + verbs: ["get", "list", "watch"] +- apiGroups: ["batch", "extensions"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-account.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-account.yaml new file mode 100644 index 000000000..d70de3fc1 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-account.yaml @@ -0,0 +1,7 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/service-account.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: job-robot + namespace: sc4snmp diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-backend.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-backend.yaml new file mode 100644 index 000000000..07a5f7adb --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-backend.yaml @@ -0,0 +1,14 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/service-backend.yaml +apiVersion: v1 +kind: Service +metadata: + name: ui-backend-entrypoint +spec: + selector: + app: ui-backend + type: NodePort + ports: + - port: 5000 + targetPort: 5000 + nodePort: 30002 diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-frontend.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-frontend.yaml new file mode 100644 index 000000000..bc238b6d9 --- /dev/null +++ 
b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/service-frontend.yaml @@ -0,0 +1,14 @@ +--- +# Source: splunk-connect-for-snmp/templates/ui/service-frontend.yaml +apiVersion: v1 +kind: Service +metadata: + name: ui-frontend-entrypoint +spec: + type: NodePort + selector: + app: ui-frontend + ports: + - port: 30001 + targetPort: 30001 + nodePort: 30001 diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/pdb.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/pdb.yaml new file mode 100644 index 000000000..4b3ea594c --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/pdb.yaml @@ -0,0 +1,18 @@ +--- +# Source: splunk-connect-for-snmp/templates/worker/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: release-name-splunk-connect-for-snmp-worker + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + minAvailable: 80% + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker + app.kubernetes.io/instance: release-name diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml new file mode 100644 index 000000000..f08f466db --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -0,0 +1,146 @@ +--- +# Source: splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-splunk-connect-for-snmp-worker-poller + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-poller + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-poller + app.kubernetes.io/instance: release-name + template: + metadata: + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-poller + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: release-name-splunk-connect-for-snmp-user + securityContext: + fsGroup: 10001 + containers: + - name: splunk-connect-for-snmp-worker-poller + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + [ + "celery", "worker-poller", + ] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/1 + - name: SC4SNMP_VERSION + value: CURRENT-VERSION + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: WALK_RETRY_MAX_INTERVAL + value: "180" + - name: WALK_MAX_RETRIES + value: "5" + - name: METRICS_INDEXING_ENABLED + value: "false" + - name: POLL_BASE_PROFILES + value: "true" + - name: LOG_LEVEL + value: INFO + - name: UDP_CONNECTION_TIMEOUT + value: "3" + - name: MAX_OID_TO_PROCESS + value: 
"70" + - name: PROFILES_RELOAD_DELAY + value: "60" + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: SPLUNK_HEC_SCHEME + value: "https" + - name: SPLUNK_HEC_HOST + value: "10.202.18.152" + - name: IGNORE_EMPTY_VARBINDS + value: "false" + - name: SPLUNK_HEC_PORT + value: "8088" + - name: SPLUNK_HEC_INSECURESSL + value: "true" + - name: SPLUNK_AGGREGATE_TRAPS_EVENTS + value: "false" + - name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: splunk-connect-for-snmp-splunk + key: hec_token + - name: SPLUNK_HEC_INDEX_EVENTS + value: netops + - name: SPLUNK_HEC_INDEX_METRICS + value: netmetrics + - name: SPLUNK_SOURCETYPE_TRAPS + value: "sc4snmp:traps" + - name: SPLUNK_SOURCETYPE_POLLING_EVENTS + value: "sc4snmp:event" + - name: SPLUNK_SOURCETYPE_POLLING_METRICS + value: "sc4snmp:metric" + - name: WORKER_CONCURRENCY + value: "4" + - name: PREFETCH_COUNT + value: "1" + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + resources: + limits: + cpu: 500m + requests: + cpu: 250m + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-poller + app.kubernetes.io/instance: release-name + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. 
+ name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml new file mode 100644 index 000000000..911621cba --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -0,0 +1,146 @@ +--- +# Source: splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-splunk-connect-for-snmp-worker-sender + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender + app.kubernetes.io/instance: release-name + template: + metadata: + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: release-name-splunk-connect-for-snmp-user + securityContext: + fsGroup: 10001 + containers: + - name: splunk-connect-for-snmp-worker-sender + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + [ + "celery", "worker-sender", + ] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/1 + - name: SC4SNMP_VERSION + value: CURRENT-VERSION + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: WALK_RETRY_MAX_INTERVAL + value: "180" + - name: WALK_MAX_RETRIES + value: "5" + - name: METRICS_INDEXING_ENABLED + value: "false" + - name: POLL_BASE_PROFILES + value: "true" + - name: LOG_LEVEL + value: INFO + - name: UDP_CONNECTION_TIMEOUT + value: "3" + - name: MAX_OID_TO_PROCESS + value: "70" + - name: PROFILES_RELOAD_DELAY + value: "60" + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: SPLUNK_HEC_SCHEME + value: "https" + - name: SPLUNK_HEC_HOST + value: "10.202.18.152" + - name: IGNORE_EMPTY_VARBINDS + value: "false" + - name: SPLUNK_HEC_PORT + value: "8088" + - name: SPLUNK_HEC_INSECURESSL + value: "true" + - name: SPLUNK_AGGREGATE_TRAPS_EVENTS + value: "false" + - name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: splunk-connect-for-snmp-splunk + key: hec_token + - name: SPLUNK_HEC_INDEX_EVENTS + value: netops + - name: SPLUNK_HEC_INDEX_METRICS + value: netmetrics + - name: SPLUNK_SOURCETYPE_TRAPS + value: "sc4snmp:traps" + - name: SPLUNK_SOURCETYPE_POLLING_EVENTS + value: "sc4snmp:event" + - name: SPLUNK_SOURCETYPE_POLLING_METRICS + value: "sc4snmp:metric" + - name: WORKER_CONCURRENCY + value: "4" + - name: PREFETCH_COUNT + value: "30" + volumeMounts: + - name: config 
+ mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + resources: + limits: + cpu: 500m + requests: + cpu: 250m + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender + app.kubernetes.io/instance: release-name + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml new file mode 100644 index 000000000..ce10d78b0 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -0,0 +1,146 @@ +--- +# Source: splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-splunk-connect-for-snmp-worker-trap + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-trap + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-trap + app.kubernetes.io/instance: release-name + template: + metadata: + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-trap + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: release-name-splunk-connect-for-snmp-user + securityContext: + fsGroup: 10001 + containers: + - name: splunk-connect-for-snmp-worker-trap + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + [ + "celery", "worker-trap", + ] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/1 + - name: SC4SNMP_VERSION + value: CURRENT-VERSION + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: WALK_RETRY_MAX_INTERVAL + value: "180" + - name: WALK_MAX_RETRIES + value: "5" + - name: METRICS_INDEXING_ENABLED + value: "false" + - name: POLL_BASE_PROFILES + value: "true" + - name: LOG_LEVEL + value: INFO + - name: UDP_CONNECTION_TIMEOUT + value: "3" + - name: MAX_OID_TO_PROCESS + value: "70" + - name: PROFILES_RELOAD_DELAY + value: "60" + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: SPLUNK_HEC_SCHEME + value: "https" + - name: SPLUNK_HEC_HOST + value: "10.202.18.152" + - name: IGNORE_EMPTY_VARBINDS + 
value: "false" + - name: SPLUNK_HEC_PORT + value: "8088" + - name: SPLUNK_HEC_INSECURESSL + value: "true" + - name: SPLUNK_AGGREGATE_TRAPS_EVENTS + value: "false" + - name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: splunk-connect-for-snmp-splunk + key: hec_token + - name: SPLUNK_HEC_INDEX_EVENTS + value: netops + - name: SPLUNK_HEC_INDEX_METRICS + value: netmetrics + - name: SPLUNK_SOURCETYPE_TRAPS + value: "sc4snmp:traps" + - name: SPLUNK_SOURCETYPE_POLLING_EVENTS + value: "sc4snmp:event" + - name: SPLUNK_SOURCETYPE_POLLING_METRICS + value: "sc4snmp:metric" + - name: WORKER_CONCURRENCY + value: "4" + - name: PREFETCH_COUNT + value: "30" + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + resources: + limits: + cpu: 500m + requests: + cpu: 250m + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-trap + app.kubernetes.io/instance: release-name + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/inventory/job.yaml index 5f28baeac..40062ecdb 100644 --- a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -46,6 +46,8 @@ spec: value: INFO - name: CHAIN_OF_TASKS_EXPIRY_TIME value: "60" + - name: CONFIG_FROM_MONGO + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml index 5f28baeac..40062ecdb 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -46,6 +46,8 @@ spec: value: INFO - name: CHAIN_OF_TASKS_EXPIRY_TIME value: "60" + - name: CONFIG_FROM_MONGO + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/values_enable_ui.yaml b/rendered/values_enable_ui.yaml new file mode 100644 index 000000000..cc1165a2d --- /dev/null +++ b/rendered/values_enable_ui.yaml @@ -0,0 +1,57 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + repository: ghcr.io/splunk/sc4snmp-ui/frontend/container + tag: "main" + pullPolicy: "Always" + backEnd: + NodePort: 30002 + repository: ghcr.io/splunk/sc4snmp-ui/backend/container + tag: "main" + pullPolicy: "Always" + init: + repository: registry.access.redhat.com/ubi9/ubi + pullPolicy: IfNotPresent + valuesFileDirectory: "/home/ubuntu" + valuesFileName: "values.yaml" + keepSectionFiles: false +splunk: + enabled: true + protocol: https + host: 10.202.18.152 + 
token: 00000000-0000-0000-0000-000000000000 + insecureSSL: "true" + port: "8088" +traps: + communities: + 2c: + - public + - homelab + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 10.202.6.213 +scheduler: + profiles: | + IF_profile: + frequency: 600 + varBinds: + - [ "IF-MIB", "ifDescr" ] + - [ "IF-MIB", "ifAdminStatus" ] + - [ "IF-MIB", "ifName" ] + - [ 'IF-MIB','ifAlias' ] + - [ "IF-MIB", "ifInDiscards" ] + - [ "IF-MIB", "ifInErrors" ] + - [ "IF-MIB", "ifInNUcastPkts" ] + - [ "IF-MIB", "ifInOctets" ] + - [ "IF-MIB", "ifInUcastPkts" ] + - [ "IF-MIB", "ifInUnknownProtos" ] + - [ "IF-MIB", "ifOutDiscards" ] + - [ "IF-MIB", "ifOutErrors" ] + - [ "IF-MIB", "ifOutNUcastPkts" ] + - [ "IF-MIB", "ifOutOctets" ] + - [ "IF-MIB", "ifOutQLen" ] + - [ "IF-MIB", "ifOutUcastPkts" ] +poller: + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 54.82.41.24,,2c,public,,,1800,IF_profile,false, \ No newline at end of file diff --git a/splunk_connect_for_snmp/__init__.py b/splunk_connect_for_snmp/__init__.py index e07986824..b7bcca54b 100644 --- a/splunk_connect_for_snmp/__init__.py +++ b/splunk_connect_for_snmp/__init__.py @@ -15,4 +15,4 @@ # -__version__ = "1.9.3" +__version__ = "1.10.0-beta.6" diff --git a/splunk_connect_for_snmp/celery_config.py b/splunk_connect_for_snmp/celery_config.py index 18d963941..f5fe28e3e 100644 --- a/splunk_connect_for_snmp/celery_config.py +++ b/splunk_connect_for_snmp/celery_config.py @@ -18,7 +18,7 @@ from kombu import Queue -with suppress(ImportError): +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() diff --git a/splunk_connect_for_snmp/common/collection_manager.py b/splunk_connect_for_snmp/common/collection_manager.py index 416b5fd81..ddaf345d1 100644 --- a/splunk_connect_for_snmp/common/collection_manager.py +++ b/splunk_connect_for_snmp/common/collection_manager.py @@ -1,17 +1,19 @@ import os from abc import abstractmethod +from contextlib import suppress import yaml from celery.utils.log import get_task_logger -try: +from splunk_connect_for_snmp.common.hummanbool import human_bool + +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") +CONFIG_FROM_MONGO = human_bool(os.getenv("CONFIG_FROM_MONGO", "false").lower()) logger = get_task_logger(__name__) @@ -67,16 +69,20 @@ class GroupsManager(CollectionManager): def __init__(self, mongo): super().__init__(mongo, "groups") - @staticmethod - def gather_elements(): + def gather_elements(self): groups = {} - try: - with open(CONFIG_PATH, encoding="utf-8") as file: - config_runtime = yaml.safe_load(file) - if "groups" in config_runtime: - groups = config_runtime.get("groups", {}) - except FileNotFoundError: - logger.info(f"File: {CONFIG_PATH} not found") + if CONFIG_FROM_MONGO: + groups_list = list(self.mongo.sc4snmp.groups_ui.find({}, {"_id": 0})) + for gr in groups_list: + groups.update(gr) + else: + try: + with open(CONFIG_PATH, encoding="utf-8") as file: + config_runtime = yaml.safe_load(file) + if "groups" in config_runtime: + groups = config_runtime.get("groups", {}) + except FileNotFoundError: + logger.info(f"File: {CONFIG_PATH} not found") return groups @@ -84,8 +90,7 @@ class ProfilesManager(CollectionManager): def __init__(self, mongo): super().__init__(mongo, "profiles") - @staticmethod - def gather_elements(): + def gather_elements(self): active_profiles = {} pkg_path = os.path.join( @@ 
-100,24 +105,37 @@ def gather_elements(): ) for key, profile in profiles.items(): active_profiles[key] = profile - - try: - with open(CONFIG_PATH, encoding="utf-8") as file: - config_runtime = yaml.safe_load(file) - if "profiles" in config_runtime: - profiles = config_runtime.get("profiles", {}) - logger.info( - f"loading {len(profiles.keys())} profiles from runtime profile group" - ) - for key, profile in profiles.items(): - if key in active_profiles: - if not profile.get("enabled", True): - logger.info(f"disabling profile {key}") - del active_profiles[key] + if CONFIG_FROM_MONGO: + profiles_list = list(self.mongo.sc4snmp.profiles_ui.find({}, {"_id": 0})) + for pr in profiles_list: + key = list(pr.keys())[0] + profile = pr[key] + if key in active_profiles: + if not profile.get("enabled", True): + logger.info(f"disabling profile {key}") + del active_profiles[key] + else: + active_profiles[key] = profile + else: + active_profiles[key] = profile + else: + try: + with open(CONFIG_PATH, encoding="utf-8") as file: + config_runtime = yaml.safe_load(file) + if "profiles" in config_runtime: + profiles = config_runtime.get("profiles", {}) + logger.info( + f"loading {len(profiles.keys())} profiles from runtime profile group" + ) + for key, profile in profiles.items(): + if key in active_profiles: + if not profile.get("enabled", True): + logger.info(f"disabling profile {key}") + del active_profiles[key] + else: + active_profiles[key] = profile else: active_profiles[key] = profile - else: - active_profiles[key] = profile - except FileNotFoundError: - logger.info(f"File: {CONFIG_PATH} not found") + except FileNotFoundError: + logger.info(f"File: {CONFIG_PATH} not found") return active_profiles diff --git a/splunk_connect_for_snmp/common/custom_cache.py b/splunk_connect_for_snmp/common/custom_cache.py new file mode 100644 index 000000000..bc9f50a16 --- /dev/null +++ b/splunk_connect_for_snmp/common/custom_cache.py @@ -0,0 +1,31 @@ +import math +import time +from functools import lru_cache, update_wrapper +from typing import Any, Callable + + +def _ttl_hash_gen(seconds: int): + start_time = time.time() + while True: + yield math.floor((time.time() - start_time) / seconds) + + +def ttl_lru_cache(maxsize: int = 128, ttl: int = -1): + if ttl <= 0: + ttl = 65536 + hash_gen = _ttl_hash_gen(ttl) + + def wrapper(func: Callable) -> Callable: + @lru_cache(maxsize) + def ttl_func(ttl_hash, *args, **kwargs): + return func(*args, **kwargs) + + def wrapped(*args, **kwargs) -> Any: + th = next(hash_gen) + return ttl_func(th, *args, **kwargs) + + setattr(wrapped, "cache_info", ttl_func.cache_info) + setattr(wrapped, "cache_clear", ttl_func.cache_clear) + return update_wrapper(wrapped, func) + + return wrapper diff --git a/splunk_connect_for_snmp/common/custom_translations.py b/splunk_connect_for_snmp/common/custom_translations.py index d6b8c1c63..d0458786d 100644 --- a/splunk_connect_for_snmp/common/custom_translations.py +++ b/splunk_connect_for_snmp/common/custom_translations.py @@ -14,15 +14,14 @@ # limitations under the License. 
# import os +from contextlib import suppress import yaml -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") diff --git a/splunk_connect_for_snmp/common/inventory_processor.py b/splunk_connect_for_snmp/common/inventory_processor.py index 3c10ecd84..5e8c3a4c2 100644 --- a/splunk_connect_for_snmp/common/inventory_processor.py +++ b/splunk_connect_for_snmp/common/inventory_processor.py @@ -1,23 +1,25 @@ import copy import os +from contextlib import suppress from csv import DictReader from typing import List +import pymongo + from splunk_connect_for_snmp.common.collection_manager import GroupsManager +from splunk_connect_for_snmp.common.hummanbool import human_bool from splunk_connect_for_snmp.common.inventory_record import InventoryRecord from splunk_connect_for_snmp.common.task_generator import WalkTaskGenerator from splunk_connect_for_snmp.poller import app -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass - CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") INVENTORY_PATH = os.getenv("INVENTORY_PATH", "/app/inventory/inventory.csv") +CONFIG_FROM_MONGO = human_bool(os.getenv("CONFIG_FROM_MONGO", "false").lower()) ALLOWED_KEYS_VALUES = [ "address", "port", @@ -86,31 +88,36 @@ def get_groups_keys(list_of_groups, group_name, inventory_group_port_mapping): class InventoryProcessor: - def __init__(self, group_manager: GroupsManager, logger): + def __init__(self, group_manager: GroupsManager, logger, inventory_ui_collection): self.inventory_records: List[dict] = [] self.group_manager = group_manager self.logger = logger self.hosts_from_groups: dict = {} self.inventory_group_port_mapping: dict = {} self.single_hosts: List[dict] = [] + self.inventory_ui_collection = inventory_ui_collection def get_all_hosts(self): - self.logger.info(f"Loading inventory from {INVENTORY_PATH}") - with open(INVENTORY_PATH, encoding="utf-8") as csv_file: - ir_reader = DictReader(csv_file) - for inventory_line in ir_reader: - self.process_line(inventory_line) - for source_record in self.single_hosts: - address = source_record["address"] - port = source_record.get("port") - host = transform_address_to_key(address, port) - was_present = self.hosts_from_groups.get(host, None) - if was_present is None: - self.inventory_records.append(source_record) - else: - self.logger.warning( - f"Record: {host} has been already configured in group. Skipping..." - ) + if CONFIG_FROM_MONGO: + self.logger.info("Loading inventory from inventory_ui collection") + ir_reader = list(self.inventory_ui_collection.find({}, {"_id": 0})) + else: + with open(INVENTORY_PATH, encoding="utf-8") as csv_file: + self.logger.info(f"Loading inventory from {INVENTORY_PATH}") + ir_reader = list(DictReader(csv_file)) + for inventory_line in ir_reader: + self.process_line(inventory_line) + for source_record in self.single_hosts: + address = source_record["address"] + port = source_record.get("port") + host = transform_address_to_key(address, port) + was_present = self.hosts_from_groups.get(host, None) + if was_present is None: + self.inventory_records.append(source_record) + else: + self.logger.warning( + f"Record: {host} has been already configured in group. Skipping..." 
+ ) return self.inventory_records, self.inventory_group_port_mapping def process_line(self, source_record): diff --git a/splunk_connect_for_snmp/enrich/tasks.py b/splunk_connect_for_snmp/enrich/tasks.py index 36c4cace9..30c55008f 100644 --- a/splunk_connect_for_snmp/enrich/tasks.py +++ b/splunk_connect_for_snmp/enrich/tasks.py @@ -14,17 +14,16 @@ # limitations under the License. # import time +from contextlib import suppress from pymongo import UpdateOne from splunk_connect_for_snmp import customtaskmanager -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass import os diff --git a/splunk_connect_for_snmp/inventory/loader.py b/splunk_connect_for_snmp/inventory/loader.py index 1db70f286..ef57b21d8 100644 --- a/splunk_connect_for_snmp/inventory/loader.py +++ b/splunk_connect_for_snmp/inventory/loader.py @@ -17,8 +17,11 @@ import logging import os import sys +from contextlib import suppress +from csv import DictReader import pymongo +import yaml from splunk_connect_for_snmp import customtaskmanager from splunk_connect_for_snmp.common.collection_manager import ( @@ -28,6 +31,7 @@ from splunk_connect_for_snmp.common.customised_json_formatter import ( CustomisedJSONFormatter, ) +from splunk_connect_for_snmp.common.hummanbool import human_bool from splunk_connect_for_snmp.common.inventory_processor import ( InventoryProcessor, InventoryRecordManager, @@ -37,12 +41,10 @@ from splunk_connect_for_snmp.common.inventory_record import InventoryRecord from splunk_connect_for_snmp.common.schema_migration import migrate_database -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass formatter = CustomisedJSONFormatter() @@ -60,9 +62,98 @@ MONGO_DB = os.getenv("MONGO_DB", "sc4snmp") CONFIG_PATH = os.getenv("CONFIG_PATH", "/app/config/config.yaml") INVENTORY_PATH = os.getenv("INVENTORY_PATH", "/app/inventory/inventory.csv") +CONFIG_FROM_MONGO = human_bool(os.getenv("CONFIG_FROM_MONGO", "false").lower()) +INVENTORY_KEYS_TRANSFORM = { + "securityEngine": "security_engine", + "SmartProfiles": "smart_profiles", +} +BOOLEAN_INVENTORY_FIELDS = ["delete", "smart_profiles"] CHAIN_OF_TASKS_EXPIRY_TIME = int(os.getenv("CHAIN_OF_TASKS_EXPIRY_TIME", "60")) +def configure_ui_database(mongo_client): + """ + If the UI wasn't used in the previous update and now it is, create UI collections in Mongo + with config from files. Similarly, if the UI was used in the previous update and now it isn't, + drop the UI collections in Mongo.
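+ Which of the two paths runs is controlled by the CONFIG_FROM_MONGO environment variable.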
+ """ + used_ui_collection = mongo_client.sc4snmp.used_ui + used_ui_doc = used_ui_collection.find_one() + if used_ui_doc: + used_ui = used_ui_doc["used_ui"] + else: + used_ui = False + + if CONFIG_FROM_MONGO and not used_ui: + inventory_ui_collection = mongo_client.sc4snmp.inventory_ui + groups_ui_collection = mongo_client.sc4snmp.groups_ui + profiles_ui_collection = mongo_client.sc4snmp.profiles_ui + used_ui_collection.update_one({}, {"$set": {"used_ui": True}}, upsert=True) + + with open(INVENTORY_PATH, encoding="utf-8") as csv_file: + ir_reader = DictReader(csv_file) + for inventory_line in ir_reader: + for key in INVENTORY_KEYS_TRANSFORM.keys(): + if key in inventory_line: + new_key = INVENTORY_KEYS_TRANSFORM[key] + inventory_line[new_key] = inventory_line.pop(key) + + for field in BOOLEAN_INVENTORY_FIELDS: + if inventory_line[field].lower() in ["", "f", "false", "0"]: + inventory_line[field] = False + else: + inventory_line[field] = True + + port = ( + int(inventory_line.get("port", 161)) + if inventory_line.get("port", 161) + else 161 + ) + walk_interval = ( + int(inventory_line["walk_interval"]) + if int(inventory_line["walk_interval"]) >= 1800 + else 1800 + ) + inventory_line["port"] = port + inventory_line["walk_interval"] = walk_interval + if not inventory_line["address"].startswith("#"): + inventory_ui_collection.insert(inventory_line) + + groups = {} + all_profiles = {} + try: + with open(CONFIG_PATH, encoding="utf-8") as file: + config_runtime = yaml.safe_load(file) + if "groups" in config_runtime: + groups = config_runtime.get("groups", {}) + + if "profiles" in config_runtime: + profiles = config_runtime.get("profiles", {}) + logger.info( + f"loading {len(profiles.keys())} profiles from runtime profiles config" + ) + for key, profile in profiles.items(): + all_profiles[key] = profile + except FileNotFoundError: + logger.info(f"File: {CONFIG_PATH} not found") + + groups_list = [{key: value} for key, value in groups.items()] + if groups_list: + groups_ui_collection.insert_many(groups_list) + profiles_list = [{key: value} for key, value in all_profiles.items()] + if profiles_list: + profiles_ui_collection.insert_many(profiles_list) + + elif not CONFIG_FROM_MONGO and used_ui: + used_ui_collection.update_one({}, {"$set": {"used_ui": False}}, upsert=True) + inventory_ui_collection = mongo_client.sc4snmp.inventory_ui + groups_ui_collection = mongo_client.sc4snmp.groups_ui + profiles_ui_collection = mongo_client.sc4snmp.profiles_ui + inventory_ui_collection.drop() + groups_ui_collection.drop() + profiles_ui_collection.drop() + + def load(): inventory_errors = False target = None @@ -75,6 +166,8 @@ def load(): # DB migration in case of update of SC4SNMP migrate_database(mongo_client, periodic_obj) + configure_ui_database(mongo_client) + expiry_time_changed = periodic_obj.did_expiry_time_change( CHAIN_OF_TASKS_EXPIRY_TIME ) @@ -89,11 +182,17 @@ def load(): config_profiles = profiles_manager.return_collection() new_groups = groups_manager.return_collection() - inventory_processor = InventoryProcessor(groups_manager, logger) + inventory_ui_collection = mongo_client.sc4snmp.inventory_ui + inventory_processor = InventoryProcessor( + groups_manager, logger, inventory_ui_collection + ) inventory_record_manager = InventoryRecordManager( mongo_client, periodic_obj, logger ) - logger.info(f"Loading inventory from {INVENTORY_PATH}") + if CONFIG_FROM_MONGO: + logger.info(f"Loading inventory from inventory_ui collection") + else: + logger.info(f"Loading inventory from {INVENTORY_PATH}") 
inventory_lines, inventory_group_port_mapping = inventory_processor.get_all_hosts() # Function to delete inventory records that are @@ -109,6 +208,15 @@ def load(): target = transform_address_to_key(ir.address, ir.port) if ir.delete: inventory_record_manager.delete(target) + if CONFIG_FROM_MONGO: + if ir.group is None: + mongo_client.sc4snmp.inventory_ui.delete_one( + {"address": ir.address, "port": ir.port} + ) + else: + mongo_client.sc4snmp.inventory_ui.delete_one( + {"address": ir.group} + ) else: inventory_record_manager.update( ir, new_source_record, config_profiles, expiry_time_changed @@ -116,6 +224,16 @@ def load(): except Exception as e: inventory_errors = True + if CONFIG_FROM_MONGO and new_source_record["delete"]: + mongo_client.sc4snmp.inventory_ui.delete_one( + { + "address": new_source_record["address"], + "port": new_source_record["port"], + } + ) + target = transform_address_to_key( + new_source_record["address"], new_source_record["port"] + ) logger.exception(f"Exception raised for {target}: {e}") return inventory_errors diff --git a/splunk_connect_for_snmp/inventory/tasks.py b/splunk_connect_for_snmp/inventory/tasks.py index 820d8799d..dccee4dbe 100644 --- a/splunk_connect_for_snmp/inventory/tasks.py +++ b/splunk_connect_for_snmp/inventory/tasks.py @@ -14,6 +14,7 @@ # limitations under the License. # import typing +from contextlib import suppress from splunk_connect_for_snmp.snmp.manager import get_inventory @@ -21,12 +22,11 @@ from ..common.task_generator import PollTaskGenerator from .loader import transform_address_to_key -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass + import os import re diff --git a/splunk_connect_for_snmp/poller.py b/splunk_connect_for_snmp/poller.py index f7572e37d..20eef65d5 100644 --- a/splunk_connect_for_snmp/poller.py +++ b/splunk_connect_for_snmp/poller.py @@ -17,7 +17,7 @@ # Support use of .env file for developers from contextlib import suppress -with suppress(ImportError): +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() diff --git a/splunk_connect_for_snmp/snmp/auth.py b/splunk_connect_for_snmp/snmp/auth.py index 9cad8f54d..b6974cc5f 100644 --- a/splunk_connect_for_snmp/snmp/auth.py +++ b/splunk_connect_for_snmp/snmp/auth.py @@ -43,7 +43,7 @@ def get_secret_value( with open(os.path.join(location, key), encoding="utf-8") as file: result = file.read().replace("\n", "") elif required: - raise Exception(f"Required secret key {key} not found in {location}") + raise FileNotFoundError(f"Required secret key {key} not found in {location}") return result @@ -145,7 +145,7 @@ def getAuthV3(logger, ir: InventoryRecord, snmpEngine: SnmpEngine) -> UsmUserDat ) else: - raise Exception(f"invalid username from secret {ir.secret}") + raise FileNotFoundError(f"invalid username from secret {ir.secret}") def getAuthV2c(ir: InventoryRecord) -> CommunityData: diff --git a/splunk_connect_for_snmp/snmp/manager.py b/splunk_connect_for_snmp/snmp/manager.py index 433507d9c..5167f2d3f 100644 --- a/splunk_connect_for_snmp/snmp/manager.py +++ b/splunk_connect_for_snmp/snmp/manager.py @@ -14,6 +14,7 @@ # limitations under the License. 
# import typing +from contextlib import suppress from pysnmp.proto.errind import EmptyResponse from pysnmp.smi import error @@ -23,12 +24,11 @@ from splunk_connect_for_snmp.inventory.loader import transform_address_to_key from splunk_connect_for_snmp.snmp.varbinds_resolver import ProfileCollection -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass + import csv import os import time @@ -172,7 +172,9 @@ def get_group_key(mib, oid, index) -> str: def valueAsBest(value) -> Union[str, float]: try: return float(value) - except: + except ValueError: + return value + except TypeError: return value @@ -189,7 +191,7 @@ def map_metric_type(t, snmp_value): if metric_type in MTYPES: try: float(snmp_value) - except: + except ValueError: metric_type = "te" return metric_type diff --git a/splunk_connect_for_snmp/snmp/tasks.py b/splunk_connect_for_snmp/snmp/tasks.py index dc0c8d6eb..c8d4c3463 100644 --- a/splunk_connect_for_snmp/snmp/tasks.py +++ b/splunk_connect_for_snmp/snmp/tasks.py @@ -20,14 +20,13 @@ from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass import os +import socket import time import pymongo @@ -36,6 +35,8 @@ from mongolock import MongoLock, MongoLockLocked from pysnmp.smi.rfc1902 import ObjectIdentity, ObjectType +from splunk_connect_for_snmp.common.custom_cache import ttl_lru_cache +from splunk_connect_for_snmp.common.hummanbool import human_bool from splunk_connect_for_snmp.snmp.manager import Poller, get_inventory logger = get_task_logger(__name__) @@ -47,6 +48,9 @@ WALK_MAX_RETRIES = int(os.getenv("WALK_MAX_RETRIES", "5")) SPLUNK_SOURCETYPE_TRAPS = os.getenv("SPLUNK_SOURCETYPE_TRAPS", "sc4snmp:traps") OID_VALIDATOR = re.compile(r"^([0-2])((\.0)|(\.[1-9][0-9]*))*$") +RESOLVE_TRAP_ADDRESS = os.getenv("RESOLVE_TRAP_ADDRESS", "false") +MAX_DNS_CACHE_SIZE_TRAPS = int(os.getenv("MAX_DNS_CACHE_SIZE_TRAPS", "100")) +TTL_DNS_CACHE_TRAPS = int(os.getenv("TTL_DNS_CACHE_TRAPS", "1800")) @shared_task( @@ -129,6 +133,17 @@ def poll(self, **kwargs): return work +@ttl_lru_cache(maxsize=MAX_DNS_CACHE_SIZE_TRAPS, ttl=TTL_DNS_CACHE_TRAPS) +def resolve_address(address: str): + try: + dns_result = socket.gethostbyaddr(address) + result = dns_result[0] + except socket.herror: + logger.info(f"Traps: address {address} can't be resolved.") + result = address + return result + + @shared_task(bind=True, base=Poller) def trap(self, work): @@ -176,6 +191,9 @@ def trap(self, work): _, _, result = self.process_snmp_data(var_bind_table, metrics, work["host"]) + if human_bool(RESOLVE_TRAP_ADDRESS): + work["host"] = resolve_address(work["host"]) + return { "time": time.time(), "result": result, diff --git a/splunk_connect_for_snmp/splunk/tasks.py b/splunk_connect_for_snmp/splunk/tasks.py index 1be1bd196..d38ab5239 100644 --- a/splunk_connect_for_snmp/splunk/tasks.py +++ b/splunk_connect_for_snmp/splunk/tasks.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from contextlib import suppress + from splunk_connect_for_snmp.common.custom_translations import load_custom_translations -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass import json import os @@ -166,7 +166,9 @@ def do_send(data, destination_url, self): def valueAsBest(value) -> Union[str, float]: try: return float(value) - except: + except ValueError: + return value + except TypeError: return value diff --git a/splunk_connect_for_snmp/traps.py b/splunk_connect_for_snmp/traps.py index b7221ca6d..c93cfd5bc 100644 --- a/splunk_connect_for_snmp/traps.py +++ b/splunk_connect_for_snmp/traps.py @@ -14,20 +14,20 @@ # limitations under the License. # import logging +from contextlib import suppress from pysnmp.proto.api import v2c from splunk_connect_for_snmp.snmp.auth import get_secret_value -try: +with suppress(ImportError, OSError): from dotenv import load_dotenv load_dotenv() -except: - pass import asyncio import os +from typing import Any, Dict import yaml from celery import Celery, chain @@ -96,6 +96,14 @@ def cbFun(snmpEngine, stateReference, contextEngineId, contextName, varBinds, cb _ = my_chain.apply_async() +# Callback function for logging trap authentication errors +def authentication_observer_cb_fun(snmpEngine, execpoint, variables, contexts): + logging.error( + f"Security Model failure for device {variables.get('transportAddress', None)}: " + f"{variables.get('statusInformation', {}).get('errorIndication', None)}" + ) + + app.autodiscover_tasks( packages=[ "splunk_connect_for_snmp", @@ -111,9 +119,20 @@ def main(): # Get the event loop for this thread loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) + # Create SNMP engine with autogenernated engineID and pre-bound # to socket transport dispatcher snmpEngine = engine.SnmpEngine() + + # Register a callback function to log trap authentication errors + observer_context: Dict[Any, Any] = {} + snmpEngine.observer.registerObserver( + authentication_observer_cb_fun, + "rfc2576.prepareDataElements:sm-failure", + "rfc3412.prepareDataElements:sm-failure", + cbCtx=observer_context, + ) + + # UDP over IPv4, first listening interface/port config.addTransport( snmpEngine, diff --git a/test/common/test_custom_cache.py b/test/common/test_custom_cache.py new file mode 100644 index 000000000..6022d5a62 --- /dev/null +++ b/test/common/test_custom_cache.py @@ -0,0 +1,67 @@ +from unittest import TestCase +from unittest.mock import patch + +from splunk_connect_for_snmp.common.custom_cache import ttl_lru_cache + + +def result_of_cache(x): + return x + + +class TestCustomCache(TestCase): + @patch("time.time") + def test_ttl(self, m_time): + m_time.side_effect = [5, 5, 10, 15] + cached = ttl_lru_cache(ttl=6)(result_of_cache) + + cached(1) + assert "hits=0" in f"{cached.cache_info()}" + assert "misses=1" in f"{cached.cache_info()}" + + cached(1) + assert "hits=1" in f"{cached.cache_info()}" + assert "misses=1" in f"{cached.cache_info()}" + + cached(1) + assert "hits=1" in f"{cached.cache_info()}" + assert "misses=2" in f"{cached.cache_info()}" + + cached.cache_clear() + + @patch("time.time") + def test_max_size(self, m_time): + m_time.side_effect = [5, 5, 10, 15, 20, 25] + maxsize = 2 + cached = ttl_lru_cache(maxsize=maxsize, ttl=300)(result_of_cache) + + cached(1) + assert "hits=0" in f"{cached.cache_info()}" + assert "misses=1" in f"{cached.cache_info()}" + assert f"maxsize={maxsize}" in f"{cached.cache_info()}" + assert "currsize=1" in f"{cached.cache_info()}" + + 
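# A second call with the same argument inside the TTL window is served from the cache, so hits goes up. + 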
cached(1) + assert "hits=1" in f"{cached.cache_info()}" + assert "misses=1" in f"{cached.cache_info()}" + assert f"maxsize={maxsize}" in f"{cached.cache_info()}" + assert "currsize=1" in f"{cached.cache_info()}" + + cached(2) + assert "hits=1" in f"{cached.cache_info()}" + assert "misses=2" in f"{cached.cache_info()}" + assert f"maxsize={maxsize}" in f"{cached.cache_info()}" + assert "currsize=2" in f"{cached.cache_info()}" + + cached(3) + assert "hits=1" in f"{cached.cache_info()}" + assert "misses=3" in f"{cached.cache_info()}" + assert f"maxsize={maxsize}" in f"{cached.cache_info()}" + assert "currsize=2" in f"{cached.cache_info()}" + + cached(3) + assert "hits=2" in f"{cached.cache_info()}" + assert "misses=3" in f"{cached.cache_info()}" + assert f"maxsize={maxsize}" in f"{cached.cache_info()}" + assert "currsize=2" in f"{cached.cache_info()}" + + cached.cache_clear() diff --git a/test/common/test_groups.py b/test/common/test_groups.py index 6a558056b..d856b1c33 100644 --- a/test/common/test_groups.py +++ b/test/common/test_groups.py @@ -26,6 +26,10 @@ class TestGroups(TestCase): "splunk_connect_for_snmp.common.collection_manager.CONFIG_PATH", return_yaml_groups(), ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) def test_read_one_group(self): active_groups = { "group1": [ @@ -64,6 +68,10 @@ def return_yaml_groups_more_than_one(self): "splunk_connect_for_snmp.common.collection_manager.CONFIG_PATH", return_not_existing_config(), ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) def test_base_files_not_found(self): with self.assertLogs( "splunk_connect_for_snmp.common.collection_manager", level="INFO" diff --git a/test/common/test_inventory_processor.py b/test/common/test_inventory_processor.py index 4a4c0374c..e4b337191 100644 --- a/test/common/test_inventory_processor.py +++ b/test/common/test_inventory_processor.py @@ -210,7 +210,7 @@ def test_get_group_hosts(self): "delete": "", }, ] - inventory_processor = InventoryProcessor(group_manager, Mock()) + inventory_processor = InventoryProcessor(group_manager, Mock(), Mock()) group_manager.return_element.return_value = [ { "group1": [ @@ -239,7 +239,7 @@ def test_get_group_hosts_hostname(self): "SmartProfiles": "f", "delete": "", } - inventory_processor = InventoryProcessor(group_manager, logger) + inventory_processor = InventoryProcessor(group_manager, logger, Mock()) group_manager.return_element.return_value = [] inventory_processor.get_group_hosts( group_object, "ec2-54-91-99-115.compute-1.amazonaws.com" @@ -253,28 +253,44 @@ def test_get_group_hosts_hostname(self): def test_process_line_comment(self): logger = Mock() source_record = {"address": "#54.234.85.76"} - inventory_processor = InventoryProcessor(Mock(), logger) + inventory_processor = InventoryProcessor(Mock(), logger, Mock()) inventory_processor.process_line(source_record) logger.warning.assert_called_with( "Record: #54.234.85.76 is commented out. Skipping..." 
) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", + False, + ) @patch( "builtins.open", new_callable=mock_open, read_data=mock_inventory_only_address ) def test_process_line_host(self, m_inventory): source_record = {"address": "54.234.85.76"} - inventory_processor = InventoryProcessor(Mock(), Mock()) + inventory_processor = InventoryProcessor(Mock(), Mock(), Mock()) inventory_processor.get_all_hosts() self.assertEqual(inventory_processor.inventory_records, [source_record]) def test_process_line_group(self): source_record = {"address": "group1"} - inventory_processor = InventoryProcessor(Mock(), Mock()) + inventory_processor = InventoryProcessor(Mock(), Mock(), Mock()) inventory_processor.get_group_hosts = Mock() inventory_processor.process_line(source_record) inventory_processor.get_group_hosts.assert_called_with(source_record, "group1") + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", + False, + ) @patch( "builtins.open", new_callable=mock_open, @@ -291,7 +307,7 @@ def test_ignore_line_host_configured_in_group(self, m_load_element): ] group_manager = Mock() group_manager.return_element.return_value = returned_group - inventory_processor = InventoryProcessor(group_manager, Mock()) + inventory_processor = InventoryProcessor(group_manager, Mock(), Mock()) expected = [ { "address": "0.0.0.0", diff --git a/test/common/test_profiles.py b/test/common/test_profiles.py index bac781027..fbca9b87a 100644 --- a/test/common/test_profiles.py +++ b/test/common/test_profiles.py @@ -53,6 +53,10 @@ def test_base_files_not_found(self): with self.assertRaises(FileNotFoundError): profiles_manager.gather_elements() + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) @mock.patch( "splunk_connect_for_snmp.common.collection_manager.os.listdir", return_yaml_profiles, @@ -77,6 +81,10 @@ def test_config_file_not_found(self): ) ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) @mock.patch( "splunk_connect_for_snmp.common.collection_manager.os.listdir", return_yaml_profiles, @@ -111,6 +119,10 @@ def test_read_base_profiles(self): profiles = profiles_manager.gather_elements() self.assertEqual(profiles, active_profiles) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) @mock.patch( "splunk_connect_for_snmp.common.collection_manager.os.listdir", return_yaml_empty_profiles, @@ -144,6 +156,10 @@ def test_runtime_profiles(self): profiles = profiles_manager.gather_elements() self.assertEqual(profiles, active_profiles) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) @mock.patch( "splunk_connect_for_snmp.common.collection_manager.os.listdir", return_yaml_profiles, @@ -196,6 +212,10 @@ def test_all_profiles(self): profiles = profiles_manager.gather_elements() self.assertEqual(profiles, active_profiles) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", + False, + ) @mock.patch( "splunk_connect_for_snmp.common.collection_manager.os.listdir", return_yaml_profiles, diff --git a/test/inventory/test_loader.py b/test/inventory/test_loader.py index 7229440e7..a31c013ab 100644 --- a/test/inventory/test_loader.py +++ 
b/test/inventory/test_loader.py @@ -73,6 +73,10 @@ mock_walk_chain_of_tasks, ) class TestLoader(TestCase): + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", + False, + ) def test_walk_task(self): inventory_record_json = { "address": "192.68.0.1", @@ -126,6 +130,10 @@ def test_walk_task(self): self.assertTrue(result["enabled"]) self.assertTrue(result["run_immediately"]) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", + False, + ) def test_walk_task_for_port_161(self): inventory_record_json = { "address": "192.68.0.1", @@ -181,6 +189,13 @@ def test_walk_task_for_port_161(self): self.assertTrue(result["enabled"]) self.assertTrue(result["run_immediately"]) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", False + ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.CONFIG_FROM_MONGO", False) @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory_small_walk) @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @mock.patch("pymongo.collection.Collection.update_one") @@ -197,8 +212,10 @@ def test_walk_task_for_port_161(self): @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_load_new_record_small_walk( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, @@ -243,6 +260,13 @@ def test_load_new_record_small_walk( periodic_obj_mock.manage_task.call_args.kwargs["kwargs"], ) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", False + ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.CONFIG_FROM_MONGO", False) @patch("splunk_connect_for_snmp.common.inventory_processor.gen_walk_task") @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory) @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @@ -260,8 +284,10 @@ def test_load_new_record_small_walk( @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_load_new_record( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, @@ -283,6 +309,13 @@ def test_load_new_record( periodic_obj_mock.manage_task.assert_called_with(**expected_managed_task) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", False + ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.CONFIG_FROM_MONGO", False) @patch("splunk_connect_for_snmp.common.inventory_processor.gen_walk_task") @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory) @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @@ -300,8 +333,10 @@ def test_load_new_record( @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_load_modified_record( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, @@ -323,6 +358,13 
@@ def test_load_modified_record( periodic_obj_mock.manage_task.assert_called_with(**expected_managed_task) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", False + ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.CONFIG_FROM_MONGO", False) @mock.patch( "splunk_connect_for_snmp.inventory.loader.CHAIN_OF_TASKS_EXPIRY_TIME", 180 ) @@ -342,8 +384,10 @@ def test_load_modified_record( @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_load_unchanged_record( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, @@ -365,6 +409,13 @@ def test_load_unchanged_record( periodic_obj_mock.manage_task.assert_not_called() + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", False + ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.CONFIG_FROM_MONGO", False) @mock.patch( "splunk_connect_for_snmp.inventory.loader.CHAIN_OF_TASKS_EXPIRY_TIME", 180 ) @@ -385,8 +436,10 @@ def test_load_unchanged_record( @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_load_unchanged_record_with_new_expiry_time( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, @@ -409,6 +462,9 @@ def test_load_unchanged_record_with_new_expiry_time( periodic_obj_mock.manage_task.assert_called_with(**expected_managed_task) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) @mock.patch( "splunk_connect_for_snmp.inventory.loader.CHAIN_OF_TASKS_EXPIRY_TIME", 180 ) @@ -430,8 +486,10 @@ def test_load_unchanged_record_with_new_expiry_time( @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_ignoring_comment( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, @@ -441,6 +499,7 @@ def test_ignoring_comment( m_taskManager, m_open, ): + periodic_obj_mock = Mock() m_taskManager.return_value = periodic_obj_mock m_taskManager.get_chain_of_task_expiry.return_value = 180 @@ -450,6 +509,13 @@ def test_ignoring_comment( m_mongo_collection.assert_not_called() periodic_obj_mock.manage_task.assert_not_called() + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", False + ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.CONFIG_FROM_MONGO", False) @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory_delete) @patch("splunk_connect_for_snmp.customtaskmanager.CustomPeriodicTaskManager") @mock.patch("pymongo.collection.Collection.delete_one") @@ -467,8 +533,10 @@ def test_ignoring_comment( @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_deleting_record( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, @@ -493,6 +561,13 
@@ def test_deleting_record( self.assertEqual(({"address": "192.168.0.1"},), calls[0].args) self.assertEqual(({"address": "192.168.0.1"},), calls[1].args) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", False + ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.CONFIG_FROM_MONGO", False) @patch( "builtins.open", new_callable=mock_open, @@ -514,8 +589,10 @@ def test_deleting_record( @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_deleting_record_non_default_port( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, @@ -540,6 +617,13 @@ def test_deleting_record_non_default_port( self.assertEqual(({"address": "192.168.0.1:345"},), calls[0].args) self.assertEqual(({"address": "192.168.0.1:345"},), calls[1].args) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False + ) + @mock.patch( + "splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO", False + ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.CONFIG_FROM_MONGO", False) @patch("splunk_connect_for_snmp.common.inventory_processor.gen_walk_task") @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory) @patch("pymongo.collection.Collection.update_one") @@ -562,8 +646,10 @@ def test_deleting_record_non_default_port( @mock.patch( "splunk_connect_for_snmp.common.collection_manager.GroupsManager.return_collection" ) + @mock.patch("splunk_connect_for_snmp.inventory.loader.configure_ui_database") def test_inventory_errors( self, + m_configure_ui_database, m_load_groups, m_update_groups, m_load_profiles, diff --git a/test/snmp/test_tasks.py b/test/snmp/test_tasks.py index 18061b119..321a11a89 100644 --- a/test/snmp/test_tasks.py +++ b/test/snmp/test_tasks.py @@ -248,3 +248,44 @@ def test_trap_retry_translation_failed( }, result, ) + + @patch("splunk_connect_for_snmp.snmp.tasks.RESOLVE_TRAP_ADDRESS", "true") + @patch("splunk_connect_for_snmp.snmp.tasks.resolve_address") + @patch("pysnmp.smi.rfc1902.ObjectType.resolveWithMib") + @patch("splunk_connect_for_snmp.snmp.manager.Poller.process_snmp_data") + @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") + @patch("time.time") + def test_trap_reverse_dns_lookup( + self, + m_time, + m_poller, + m_process_data, + m_resolved, + m_resolve_address, + m_mongo_client, + ): + m_poller.return_value = None + from splunk_connect_for_snmp.snmp.tasks import trap + + m_time.return_value = 1640692955.365186 + + m_resolved.return_value = None + m_resolve_address.return_value = "my.host" + + work = {"data": [("asd", "tre")], "host": "192.168.0.1"} + m_process_data.return_value = (False, [], {"test": "value1"}) + m_poller.builder = MagicMock() + m_poller.trap = trap + m_poller.trap.mib_view_controller = MagicMock() + result = trap(work) + + self.assertEqual( + { + "address": "my.host", + "detectchange": False, + "result": {"test": "value1"}, + "sourcetype": "sc4snmp:traps", + "time": 1640692955.365186, + }, + result, + ) diff --git a/ui_tests/config/config.py b/ui_tests/config/config.py new file mode 100644 index 000000000..c350a8f07 --- /dev/null +++ b/ui_tests/config/config.py @@ -0,0 +1,34 @@ +import os + + +def get_execution_type(): + execution_type = os.environ.get("CI_EXECUTION_TYPE") + if execution_type is None: + 
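# CI_EXECUTION_TYPE is unset, so assume the tests run locally rather than in the CI pipeline. + 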
return EXECUTION_TYPE_LOCAL + else: + return execution_type + + +def get_ui_host_ip_address(): + if EXECUTION_TYPE != EXECUTION_TYPE_LOCAL: + # tests executed in the GitHub Actions pipeline run on the same VM as the UI + return "localhost" + else: + return UI_HOST_FOR_LOCAL_EXECUTION + + +EXECUTION_TYPE_LOCAL = "local" +EXECUTION_TYPE = get_execution_type() + +UI_HOST_FOR_LOCAL_EXECUTION = "PUT_HERE_IP_ADDRESS_OF_SC4SNMP_VM" +UI_HOST = get_ui_host_ip_address() +UI_URL = f"http://{UI_HOST}:30001/" +EVENT_INDEX = "netops" +LOGS_INDEX = "em_logs" + +# timers +IMPLICIT_WAIT_TIMER = 10 + +# yaml file +YAML_FILE_PATH = "./../integration_tests/values.yaml" +DEFAULT_PORT = 161 diff --git a/ui_tests/config/ui_values.yaml b/ui_tests/config/ui_values.yaml new file mode 100644 index 000000000..eca9f5f13 --- /dev/null +++ b/ui_tests/config/ui_values.yaml @@ -0,0 +1,120 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + repository: ghcr.io/splunk/sc4snmp-ui/frontend/container + tag: "develop" + pullPolicy: "Always" + backEnd: + NodePort: 30002 + repository: ghcr.io/splunk/sc4snmp-ui/backend/container + tag: "develop" + pullPolicy: "Always" + init: + image: registry.access.redhat.com/ubi9/ubi + pullPolicy: IfNotPresent + + # valuesFileDirectory is obligatory if the UI is used. It is an absolute directory path on the host machine + # where values.yaml is located and where configuration files from the UI will be generated. + valuesFileDirectory: "/home/splunker" + + # valuesFileName is the exact name of the yaml file with the user's configuration, located inside the directory + # specified in valuesFileDirectory. It is optional. If it is provided, this file will be updated with configuration + # from the UI. If valuesFileName is empty, or the provided file name can't be found inside the valuesFileDirectory + # directory, then configuration from the UI will be saved in a few files, one file per section, inside the + # valuesFileDirectory directory. + + # If keepSectionFiles is set to true, separate configuration files for the different sections will be saved in the + # valuesFileDirectory directory regardless of whether valuesFileName is properly configured.
+ keepSectionFiles: false +splunk: + enabled: true + protocol: https + host: ###LOAD_BALANCER_ID### + token: ###SPLUNK_TOKEN### + insecureSSL: "true" + port: "8088" +image: + repository: "snmp-local" + tag: "latest" + pullPolicy: "Never" +traps: + replicaCount: 1 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: ###LOAD_BALANCER_ID### +worker: + poller: + replicaCount: 1 + #changed replicaCount from 4 to 1 + concurrency: 4 + prefetch: 1 + trap: + autoscaling: + enabled: false + replicaCount: 1 + concurrency: 8 + prefetch: 60 + sender: + replicaCount: 1 + concurrency: 4 + prefetch: 60 + profilesReloadDelay: 1 + # replicas: Number of replicas for worker container should two or more + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: "DEBUG" +scheduler: + logLevel: "INFO" + customTranslations: + IP-MIB: + icmpOutEchoReps: myCustomName1 + profiles: | + v3profile: + frequency: 5 + varBinds: + - ['IF-MIB'] + - ['TCP-MIB'] + - ['UDP-MIB'] +# profiles: | +# generic_switch: +# frequency: 60 +# varBinds: +# - ['SNMPv2-MIB', 'sysDescr'] +# - ['SNMPv2-MIB', 'sysName', 0] +# - ['IF-MIB'] +# - ['TCP-MIB'] + groups: | + {} +poller: + usernameSecrets: + - sv3poller + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + ###LOAD_BALANCER_ID###,,2c,public,,,600,,, +sim: + # sim must be enabled if you want to use signalFx + enabled: false +# signalfxToken: BCwaJ_Ands4Xh7Nrg +# signalfxRealm: us0 +mongodb: + pdb: + create: true + persistence: + storageClass: "microk8s-hostpath" + volumePermissions: + enabled: true +redis: + architecture: standalone + auth: + enabled: false \ No newline at end of file diff --git a/ui_tests/conftest.py b/ui_tests/conftest.py new file mode 100644 index 000000000..6066c2fa9 --- /dev/null +++ b/ui_tests/conftest.py @@ -0,0 +1,32 @@ +import pytest +from logger.logger import Logger +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() + + +def pytest_addoption(parser): + parser.addoption( + "--device-simulator", + action="store", + dest="device-simulator", + default="127.0.0.1", + help="Device Simulator external IP, basically external IP of VM", + ) + + +@pytest.fixture(scope="function") +def setup(request): + config = {} + host = request.config.getoption("--splunk-host") + config["splunkd_url"] = "https://" + host + ":8089" + config["splunk_user"] = request.config.getoption("--splunk-user") + config["splunk_password"] = request.config.getoption("--splunk-password") + config["device_simulator"] = request.config.getoption("device-simulator") + + return config + + +def pytest_unconfigure(): + logger.info("Closing Web Driver") + WebDriverFactory.close_driver() diff --git a/ui_tests/exceptions_tests.py b/ui_tests/exceptions_tests.py new file mode 100644 index 000000000..28d9bdb53 --- /dev/null +++ b/ui_tests/exceptions_tests.py @@ -0,0 +1,4 @@ +class UiTestsException(Exception): + """Exception raised instead of generic Exception in ui tests""" + + pass diff --git a/ui_tests/logger/logger.py b/ui_tests/logger/logger.py new file mode 100644 index 000000000..ba6e261c5 --- /dev/null +++ b/ui_tests/logger/logger.py @@ -0,0 +1,25 @@ +import logging +import sys + +log = None + + +class Logger: + logger = None + + @classmethod + def 
initialize_logger(cls): + logger = logging.getLogger(__name__) + logger.setLevel(logging.INFO) + formatter = logging.Formatter("%(message)s") + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(formatter) + logger.addHandler(handler) + # return logger + cls.logger = logger + + @classmethod + def get_logger(cls): + if cls.logger is None: + cls.initialize_logger() + return cls.logger diff --git a/ui_tests/pages/groups_page.py b/ui_tests/pages/groups_page.py new file mode 100644 index 000000000..a5dc86128 --- /dev/null +++ b/ui_tests/pages/groups_page.py @@ -0,0 +1,282 @@ +import time + +import pages.helper as helper +from logger.logger import Logger +from selenium.webdriver.common.by import By +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory.get_driver() + + +class GroupsPage: + def check_if_groups_table_is_displayed(self): + logger.info("Check if groups page is displayed") + groups_table_xpath = "//div[@data-test='sc4snmp:group-table']" + groups_container = driver.find_element(By.XPATH, groups_table_xpath) + return groups_container.is_displayed() + + def click_add_new_group_button(self): + logger.info(f"Click add new group button") + add_group_button_xpath = "//button[@data-test='sc4snmp:new-item-button']//span" + add_grp_btn = driver.find_element(By.XPATH, add_group_button_xpath) + add_grp_btn.click() + time.sleep(1) + + def set_group_name(self, group_name): + logger.info(f"Set group name: {group_name}") + add_grp_input = self._get_group_name_input() + add_grp_input.send_keys(group_name) + + def _get_group_name_input(self): + add_group_input_xpath = ( + "//div[@data-test='sc4snmp:form:group-name-input']//span//input" + ) + add_grp_input = driver.find_element(By.XPATH, add_group_input_xpath) + return add_grp_input + + def click_submit_button_for_add_group(self): + logger.info(f"Click submit button") + add_group_button_xpath = ( + "//button[@data-test='sc4snmp:form:submit-form-button']" + ) + add_grp_btn = driver.find_element(By.XPATH, add_group_button_xpath) + add_grp_btn.click() + # wait for group to be shown on the list + time.sleep(5) + + def click_submit_button_for_add_device(self): + self.click_submit_button_for_add_group() + + def click_cancel_button_for_add_device(self): + logger.info(f"Click cancel button") + cancel_button_xpath = "//button[@data-test='sc4snmp:form:cancel-button']" + cancel_btn = driver.find_element(By.XPATH, cancel_button_xpath) + cancel_btn.click() + + def check_if_groups_is_on_list(self, group_name): + logger.info(f"Checking if group is configured (is on list)") + group_entry_on_list_xpath = "//div[@data-test='sc4snmp:group']//p" + groups_entries = driver.find_elements(By.XPATH, group_entry_on_list_xpath) + for el in groups_entries: + # logger.info(f"group name > |{el.text}|") # debug + if group_name == el.text: + return True + logger.info("Group has not been found on list") + return False + + def delete_group_from_list(self, group_name): + logger.info(f"Removing group from groups list: {group_name}") + self.click_delete_group_button(group_name) + self.confirm_delete() + self.close_delete_popup() + + def click_delete_group_button(self, group_name): + logger.info(f"Clicking delete group button for: {group_name}") + delete_btn_for_group_with_name_xpath = f"//div[@data-test='sc4snmp:group' and child::*[text()='{group_name}']]//button[@data-test='sc4snmp:group:delete-group-button']" + delete_btn = driver.find_element(By.XPATH, delete_btn_for_group_with_name_xpath) + 
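+ # The XPath above anchors on the group entry whose name text equals group_name and then descends to its + # delete button, so the click targets the right group without relying on positional indexes.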
delete_btn.click() + time.sleep(1) + + def close_delete_popup(self): + logger.info("Closing group delete popup") + close_profile_delete_popup_btn_xpath = ( + "//button[@data-test='sc4snmp:errors-modal:cancel-button']" + ) + close_btn = driver.find_element(By.XPATH, close_profile_delete_popup_btn_xpath) + close_btn.click() + time.sleep(1) + + def click_add_device_to_group(self, group_name): + logger.info(f"Click add device to group: {group_name}") + add_device_for_group_with_name_xpath = f"//div[@data-test='sc4snmp:group' and child::*[text()='{group_name}']]//button[@data-test='sc4snmp:group:new-device-button']" + add_device_btn = driver.find_element( + By.XPATH, add_device_for_group_with_name_xpath + ) + add_device_btn.click() + time.sleep(1) + + def get_error_message_while_adding_device_with_no_data(self): + logger.info(f"getting error message while adding device with no data") + error_msg_xpath = f"//p[@data-test='sc4snmp:ip-error']" + err_msg = driver.find_element(By.XPATH, error_msg_xpath) + return err_msg.text + + def get_number_of_devices_for_group(self, group_name): + logger.info(f"getting number of devices for group: {group_name}") + device_row_xpath = "//tr[@data-test='sc4snmp:group-row']" + rows = driver.find_elements(By.XPATH, device_row_xpath) + return len(rows) + + def set_device_ip(self, device_ip, edit=False): + logger.info(f"set device ip: {device_ip}") + device_ip_field_xpath = "//div[@data-test='sc4snmp:form:ip-input']//span//input" + ip_field = driver.find_element(By.XPATH, device_ip_field_xpath) + if edit: + helper.clear_input(ip_field) + ip_field.send_keys(device_ip) + + def check_if_device_is_configured(self, device_ip): + logger.info(f"Checking if device is configured (is on group list)") + device_entry_on_list_xpath = "//td[@data-test='sc4snmp:host-address']" + devices_entries = driver.find_elements(By.XPATH, device_entry_on_list_xpath) + for el in devices_entries: + # logger.info(f"device name > |{el.text}|") # debug + if device_ip == el.text: + return True + logger.info("Device has not been found on list") + return False + + def edit_group_name(self, group_name, new_group_name): + logger.info(f"change group name: {group_name} -> {new_group_name}") + edit_group_button_xpath = f"//div[@data-test='sc4snmp:group' and child::*[text()='{group_name}']]//button[@data-test='sc4snmp:group:edit-group-button']" + edit_group_btn = driver.find_element(By.XPATH, edit_group_button_xpath) + edit_group_btn.click() + add_grp_input = self._get_group_name_input() + helper.clear_input(add_grp_input) + add_grp_input.send_keys(new_group_name) + self.click_submit_button_for_add_group() + + def get_submit_edited_group_name_popup_message(self): + logger.info(f"Get submit edited group name popup text") + edited_group_popup_text_xpath = f"//div[@data-test='modal']//div//p" + edited_group_popup_text = driver.find_element( + By.XPATH, edited_group_popup_text_xpath + ) + return edited_group_popup_text.text + + def close_edited_profile_popup(self): + logger.info(f"Closing edited group popup") + close_popup_btn_xpath = ( + f"//button[@data-test='sc4snmp:errors-modal:cancel-button']" + ) + close_popup_btn = driver.find_element(By.XPATH, close_popup_btn_xpath) + close_popup_btn.click() + time.sleep(2) + + def delete_device_from_group(self, device_ip): + logger.info(f"Deleting device from group: {device_ip}") + delete_device_btn_xpath = f"//button[@data-test='sc4snmp:group-row-delete' and ancestor::tr//td[text()='{device_ip}']]" + delete_device_btn = driver.find_element(By.XPATH, delete_device_btn_xpath) + 
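+ # The ancestor::tr predicate scopes the delete button to the table row whose cell text equals device_ip.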
delete_device_btn.click() + time.sleep(2) + self.confirm_delete() + self.close_delete_popup() + + def click_edit_device(self, device_ip): + logger.info(f"Click edit device button") + edit_device_btn_xpath = f"//button[@data-test='sc4snmp:group-row-edit' and ancestor::tr//td[text()='{device_ip}']]" + edit_device_btn = driver.find_element(By.XPATH, edit_device_btn_xpath) + edit_device_btn.click() + time.sleep(2) + + def confirm_delete(self): + logger.info(f"Confirm delete device from group popup") + confirm_delete_xpath = ( + "//button[@data-test='sc4snmp:delete-modal:delete-button']" + ) + confirm_btn = driver.find_element(By.XPATH, confirm_delete_xpath) + confirm_btn.click() + time.sleep(1) + + def set_device_port(self, port, edit=False): + logger.info(f"set device port: {port}") + self._set_group_field("port", port, edit) + + def set_community_string(self, community_string, edit=False): + logger.info(f"set device community string: {community_string}") + self._set_group_field("community_string", community_string, edit) + + def set_secret(self, secret, edit=False): + logger.info(f"set device secret: {secret}") + self._set_group_field("secret", secret, edit) + + def set_security_engine(self, security_engine, edit=False): + logger.info(f"set security engine: {security_engine}") + self._set_group_field("security_engine", security_engine, edit) + + def _set_group_field(self, field_name, value, edit=False): + xpath = { + "port": "//div[@data-test='sc4snmp:form:port-input']//span//input", + "community_string": "//div[@data-test='sc4snmp:form:community-input']//span//input", + "secret": "//div[@data-test='sc4snmp:form:secret-input']//span//input", + "security_engine": "//div[@data-test='sc4snmp:form:security-engine-input']//span//input", + } + field_input = driver.find_element(By.XPATH, xpath[field_name]) + if edit: + helper.clear_input(field_input) + field_input.send_keys(value) + + def set_snmp_version(self, snmp_version): + logger.info(f"set device snmp version: {snmp_version}") + options = { + "From inventory": "//button[@data-test='sc4snmp:form:version-from-inventory']", + "1": "//button[@data-test='sc4snmp:form:version-1']", + "2c": "//button[@data-test='sc4snmp:form:version-2c']", + "3": "//button[@data-test='sc4snmp:form:version-3']", + } + snmp_version_expander_xpath = ( + "//button[@data-test='sc4snmp:form:select-version']" + ) + expander = driver.find_element(By.XPATH, snmp_version_expander_xpath) + expander.click() + time.sleep(1) + option = driver.find_element(By.XPATH, options[snmp_version]) + option.click() + + def get_device_port(self, device_ip): + logger.info(f"get device port: {device_ip}") + return self._get_group_field_value("port", device_ip) + + def get_device_snmp_version(self, device_ip): + logger.info(f"get device snmp_version: {device_ip}") + return self._get_group_field_value("snmp_version", device_ip) + + def get_device_community_string(self, device_ip): + logger.info(f"get device community string: {device_ip}") + return self._get_group_field_value("community_string", device_ip) + + def get_device_secret(self, device_ip): + logger.info(f"get device secret: {device_ip}") + return self._get_group_field_value("secret", device_ip) + + def get_device_security_engine(self, device_ip): + logger.info(f"get device security engine {device_ip}") + return self._get_group_field_value("security_engine", device_ip) + + def _get_group_field_value(self, field, device_ip): + xpath = { + "port": f"//td[@data-test='sc4snmp:host-port' and ancestor::tr//td[text()='{device_ip}']]", + 
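+ # Each entry resolves to the requested column's <td> in the row that also contains a cell with device_ip.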
"snmp_version": f"//td[@data-test='sc4snmp:host-version' and ancestor::tr//td[text()='{device_ip}']]", + "community_string": f"//td[@data-test='sc4snmp:host-community' and ancestor::tr//td[text()='{device_ip}']]", + "secret": f"//td[@data-test='sc4snmp:host-secret' and ancestor::tr//td[text()='{device_ip}']]", + "security_engine": f"//td[@data-test='sc4snmp:host-security-engine' and ancestor::tr//td[text()='{device_ip}']]", + } + community = driver.find_element(By.XPATH, xpath[field]) + return community.text + + def get_warning_message_when_removing_group_which_is_configured_in_inventory(self): + logger.info( + f"getting error message while removing group which is configured in inventory" + ) + warning_msg_xpath = ( + f"//div[@data-test-type='warning' and @data-test='message']//div" + ) + warning_msg = driver.find_element(By.XPATH, warning_msg_xpath) + return warning_msg.text + + def clear_groups(self): + logger.info(f"remove all groups") + group_delete_btn_xpath = ( + f"//button[@data-test='sc4snmp:group:delete-group-button']" + ) + delete_btns = driver.find_elements(By.XPATH, group_delete_btn_xpath) + logger.info(f"Need to remove {len(delete_btns)} items") + while len(delete_btns) > 0: + delete_btns[0].click() + time.sleep(1) + self.confirm_delete() + self.close_delete_popup() + time.sleep(1) + delete_btns = driver.find_elements(By.XPATH, group_delete_btn_xpath) + logger.info(f" {len(delete_btns)} more items for removal") diff --git a/ui_tests/pages/header_page.py b/ui_tests/pages/header_page.py new file mode 100644 index 000000000..eee8d5dc0 --- /dev/null +++ b/ui_tests/pages/header_page.py @@ -0,0 +1,83 @@ +import re +import time + +from logger.logger import Logger +from selenium.webdriver.common.by import By +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support.ui import WebDriverWait +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory.get_driver() + + +class HeaderPage: + def switch_to_profiles(self): + self._switch_to_page("profiles") + + def switch_to_groups(self): + self._switch_to_page("groups") + + def switch_to_inventory(self): + self._switch_to_page("inventory") + + def _switch_to_page(self, page_name): + logger.info(f"Switching to {page_name} tab") + page_button_xpath = { + "profiles": "//button[@data-test='sc4snmp:profiles-tab']", + "groups": "//button[@data-test='sc4snmp:groups-tab']", + "inventory": "//button[@data-test='sc4snmp:inventory-tab']", + } + xpath_button = page_button_xpath[page_name] + tab = driver.find_element(By.XPATH, xpath_button) + tab.click() + page_table_xpath = { + "profiles": "//div[@data-test='sc4snmp:profiles-table']", + "groups": "//div[@data-test='sc4snmp:group-table']", + "inventory": "//div[@data-test='sc4snmp:inventory-table']", + } + WebDriverWait(driver, 10).until( + EC.visibility_of_element_located((By.XPATH, page_table_xpath[page_name])) + ) + + def apply_changes(self): + logger.info("Apply changes") + apply_changes_button_xpath = ( + "//button[@data-test='sc4snmp:apply-changes-button']" + ) + apply_btn = driver.find_element(By.XPATH, apply_changes_button_xpath) + apply_btn.click() + time.sleep(3) + + def close_configuration_applied_notification_popup(self): + logger.info("Close configuration applied popup") + popup_xpath = "//button[@data-test='sc4snmp:errors-modal:cancel-button']" + close_popup_button = driver.find_element(By.XPATH, popup_xpath) + close_popup_button.click() + time.sleep(3) + + def get_time_to_upgrade(self): + 
logger.info("Get time to upgrade") + popup_text_xpath = "//div[@data-test='modal']//div//p" + popup_txt_element = driver.find_element(By.XPATH, popup_text_xpath) + text = popup_txt_element.text + matches = re.search(r"\d+", text) + number = int(matches.group()) + logger.info(f"Extracted number: {number}") + time.sleep(3) + return number + + def get_popup_error_message(self): + logger.info("Get popup error message") + popup_text_xpath = "//div[@data-test='modal']//div//div" + popup_txt_element = driver.find_element(By.XPATH, popup_text_xpath) + return popup_txt_element.text + + def close_error_popup(self): # two similar methods on profile page + logger.info("Close popup error message") + close_profile_delete_popup_btn_xpath = ( + "//button[@data-test='sc4snmp:errors-modal:cancel-button']" + ) + close_btn = driver.find_element(By.XPATH, close_profile_delete_popup_btn_xpath) + close_btn.click() + time.sleep(1) diff --git a/ui_tests/pages/helper.py b/ui_tests/pages/helper.py new file mode 100644 index 000000000..779fab440 --- /dev/null +++ b/ui_tests/pages/helper.py @@ -0,0 +1,11 @@ +from logger.logger import Logger +from selenium.webdriver.common.keys import Keys + +logger = Logger().get_logger() + + +def clear_input(input_element): + logger.info("Clearing input") + text = input_element.get_attribute("value") + for num in range(len(text)): + input_element.send_keys(Keys.BACKSPACE) diff --git a/ui_tests/pages/inventory_page.py b/ui_tests/pages/inventory_page.py new file mode 100644 index 000000000..d36281360 --- /dev/null +++ b/ui_tests/pages/inventory_page.py @@ -0,0 +1,320 @@ +import time + +import pages.helper as helper +import selenium.common.exceptions +from logger.logger import Logger +from selenium.webdriver.common.by import By +from selenium.webdriver.common.keys import Keys +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory.get_driver() + + +class InventoryPage: + def check_if_inventory_table_is_displayed(self): + logger.info("Check if inventory page is displayed") + inventory_table_xpath = "//div[@data-test='sc4snmp:inventory-table']" + inventory_container = driver.find_element(By.XPATH, inventory_table_xpath) + return inventory_container.is_displayed() + + def check_if_entry_is_on_list(self, host_ip): + logger.info(f"Checking if host/group entry is configured (is on list)") + entries_on_list_xpath = "//td[@data-test='sc4snmp:inventory-address']" + entries = driver.find_elements(By.XPATH, entries_on_list_xpath) + logger.info(f"list length: {len(entries)}") + for el in entries: + # logger.info(f"entry name > |{el.text}|") # debug + if host_ip == el.text: + return True + logger.info("Entry has not been found on list") + return False + + def click_add_new_device_group_button(self): + logger.info(f"Click add new device/group entry button") + add_group_device_button_xpath = ( + "//button[@data-test='sc4snmp:new-item-button']//span//span" + ) + add_grp_device_btn = driver.find_element( + By.XPATH, add_group_device_button_xpath + ) + add_grp_device_btn.click() + time.sleep(3) + + def click_submit_button_for_add_entry(self): + self._click_submit_button() + + def _click_submit_button(self): + logger.info(f"Click submit button") + add_group_device_item_button_xpath = ( + "//button[@data-test='sc4snmp:form:submit-form-button']" + ) + add_grp_device_btn = driver.find_element( + By.XPATH, add_group_device_item_button_xpath + ) + add_grp_device_btn.click() + time.sleep(5) # wait for group to be shown on the list + + def 
click_submit_button_for_edit_entry(self): + self._click_submit_button() + + def delete_entry_from_list(self, host_ip): + logger.info(f"Removing entry from inventory list: {host_ip}") + delete_btn_for_inventory_with_host_ip_xpath = f"//button[@data-test='sc4snmp:inventory-row-delete' and ancestor::tr//td[text()='{host_ip}']]" + delete_btn = driver.find_element( + By.XPATH, delete_btn_for_inventory_with_host_ip_xpath + ) + delete_btn.click() + time.sleep(1) + self.confirm_delete() + self.close_delete_popup() + + def close_delete_popup(self): + logger.info(f"Closing inventory delete popup") + self._close_notification_popup() + + def _close_notification_popup(self): + close_inventory_delete_popup_btn_xpath = ( + "//button[@data-test='sc4snmp:errors-modal:cancel-button']" + ) + close_btn = driver.find_element( + By.XPATH, close_inventory_delete_popup_btn_xpath + ) + close_btn.click() + time.sleep(1) + + def close_edit_inventory_entry(self): + logger.info(f"Closing inventory edit popup") + self._close_notification_popup() + + def confirm_delete(self): + logger.info(f"Confirm delete entry") + confirm_delete_xpath = ( + "//button[@data-test='sc4snmp:delete-modal:delete-button']" + ) + confirm_btn = driver.find_element(By.XPATH, confirm_delete_xpath) + confirm_btn.click() + time.sleep(1) + + def set_community_string(self, community_string, edit=False): + logger.info(f"Set community string: {community_string}") + community_input_field_xpath = ( + "//div[@data-test='sc4snmp:form:community-input']//span//input" + ) + community_input_field = driver.find_element( + By.XPATH, community_input_field_xpath + ) + if edit: + helper.clear_input(community_input_field) + community_input_field.send_keys(community_string) + + def click_edit_inventory_entry(self, host_ip): + logger.info(f"Edit entry from inventory list with: {host_ip}") + edit_inventory_entry_btn_xpath = f"//button[@data-test='sc4snmp:inventory-row-edit' and ancestor::tr//td[text()='{host_ip}']]" + edit_inventory_entry_btn = driver.find_element( + By.XPATH, edit_inventory_entry_btn_xpath + ) + edit_inventory_entry_btn.click() + + def get_edit_inventory_notice(self): + logger.info(f"Get edited inventory popup text") + edited_inventory_popup_text_xpath = f"//div[@data-test='modal']//div//p" + edited_inventory_popup_text = driver.find_element( + By.XPATH, edited_inventory_popup_text_xpath + ) + return edited_inventory_popup_text.text + + def select_group_inventory_type(self): + logger.info(f"Select group inventory type") + group_inventory_type_btn_xpath = ( + f"//button[@data-test='sc4snmp:form:inventory-type-group']" + ) + group_inventory_type_btn = driver.find_element( + By.XPATH, group_inventory_type_btn_xpath + ) + group_inventory_type_btn.click() + + def get_host_missing_error(self): + logger.info(f"Get host missing error") + return self._get_error_for_missing_or_invalid_inventory_field("host_missing") + + def get_community_string_missing_error(self): + logger.info(f"Get community string missing error") + return self._get_error_for_missing_or_invalid_inventory_field( + "community_string_missing" + ) + + def get_walk_invalid_value_error(self): + logger.info(f"Get walk interval invalid value error") + return self._get_error_for_missing_or_invalid_inventory_field( + "walk_invalid_value" + ) + + def _get_error_for_missing_or_invalid_inventory_field(self, field): + xpath = { + "host_missing": f"//p[@data-test='sc4snmp:ip-group-error']", + "community_string_missing": f"//p[@data-test='sc4snmp:community-error']", + "walk_invalid_value": 
f"//p[@data-test='sc4snmp:walk-interval-error']", + } + try: + error_msg = driver.find_element(By.XPATH, xpath[field]) + return error_msg.text + except selenium.common.exceptions.NoSuchElementException: + return None + + def edit_device_port(self, port): + logger.info(f"set/edit inventory device port: {port}") + device_port_field_xpath = ( + "//div[@data-test='sc4snmp:form:port-input']//span//input" + ) + port_field = driver.find_element(By.XPATH, device_port_field_xpath) + helper.clear_input(port_field) + port_field.send_keys(port) + + def select_snmp_version(self, snmp_version): + logger.info(f"set device snmp version: {snmp_version}") + options = { + "1": "//button[@data-test='sc4snmp:form:version-1']", + "2c": "//button[@data-test='sc4snmp:form:version-2c']", + "3": "//button[@data-test='sc4snmp:form:version-3']", + } + snmp_version_expander_xpath = ( + "//button[@data-test='sc4snmp:form:select-version']" + ) + expander = driver.find_element(By.XPATH, snmp_version_expander_xpath) + expander.click() + time.sleep(1) + option = driver.find_element(By.XPATH, options[snmp_version]) + option.click() + + def set_host_or_group_name(self, host_ip, edit=False): + logger.info(f"Set host/group item name: {host_ip}") + self._set_inventory_field("host_group_name", host_ip, edit) + + def set_secret(self, secret, edit=False): + logger.info(f"set inventory device secret: {secret}") + self._set_inventory_field("secret", secret, edit) + + def set_security_engine(self, security_engine, edit=False): + logger.info(f"set inventory device security engine: {security_engine}") + self._set_inventory_field("security_engine", security_engine, edit) + + def _set_inventory_field(self, field, value, edit=False): + xpath = { + "host_group_name": "//div[@data-test='sc4snmp:form:group-ip-input']//span//input", + "secret": "//div[@data-test='sc4snmp:form:secret-input']//span//input", + "security_engine": "//div[@data-test='sc4snmp:form:security-engine-input']//span//input", + } + field_input = driver.find_element(By.XPATH, xpath[field]) + if edit: + helper.clear_input(field_input) + field_input.send_keys(value) + + def set_walk_interval(self, walk_interval): + logger.info(f"set/edit inventory device walk interval: {walk_interval}") + sec_engine_field_xpath = ( + "//div[@data-test='sc4snmp:form:walk-interval-input']//span//input" + ) + sec_engine = driver.find_element(By.XPATH, sec_engine_field_xpath) + helper.clear_input(sec_engine) + sec_engine.send_keys(walk_interval) + time.sleep(1) + + def set_smart_profiles(self, param): + logger.info(f"set inventory device smart profiles enabled to: {param}") + if param == "true" or param == "false": + smart_profile_true_xpath = ( + f"//button[@data-test='sc4snmp:form:smart-profile-{param}']" + ) + option = driver.find_element(By.XPATH, smart_profile_true_xpath) + option.click() + else: + logger.error( + f"Wrong parameter specified. 
Expected: true or false, received: {param}" + ) + + def select_profiles(self, profiles, edit=False): + logger.info(f"select profiles: {profiles}") + profiles_input_xpath = ( + "//div[@data-test='sc4snmp:form:profiles-multiselect']//div//input" + ) + profile_input = driver.find_element(By.XPATH, profiles_input_xpath) + if edit: + profile_options_xpath = "//button[@data-test='selected-option']" + options = driver.find_elements(By.XPATH, profile_options_xpath) + for option in options: + option.click() + time.sleep(0.5) + time.sleep(1) + for profile in profiles: + profile_input.send_keys(profile) + profile_input.send_keys(Keys.ENTER) + time.sleep(2) + # we need to hide profile list, + # otherwise it can break test execution and popup can intercept clicking on smart profiles + profile_input.send_keys(Keys.ESCAPE) + + def _get_inventory_data(self, host, field): + field_xpath = { + "snmp_version": f"//td[@data-test='sc4snmp:inventory-version' and ancestor::tr//td[text()='{host}']]", + "port": f"//td[@data-test='sc4snmp:inventory-port' and ancestor::tr//td[text()='{host}']]", + "community_string": f"//td[@data-test='sc4snmp:inventory-community' and ancestor::tr//td[text()='{host}']]", + "secret": f"//td[@data-test='sc4snmp:inventory-secret' and ancestor::tr//td[text()='{host}']]", + "security_engine": f"//td[@data-test='sc4snmp:inventory-security-engine' and ancestor::tr//td[text()='{host}']]", + "walk_interval": f"//td[@data-test='sc4snmp:inventory-walk-interval' and ancestor::tr//td[text()='{host}']]", + "profiles": f"//td[@data-test='sc4snmp:inventory-profiles' and ancestor::tr//td[text()='{host}']]", + "smart_profiles": f"//td[@data-test='sc4snmp:inventory-smart-profiles' and ancestor::tr//td[text()='{host}']]", + } + field = driver.find_element(By.XPATH, field_xpath[field]) + return field.text + + def get_snmp_version_for_entry(self, host): + logger.info(f"get {host} inventory -> snmp_version") + return self._get_inventory_data(host, "snmp_version") + + def get_port_for_entry(self, host): + logger.info(f"get {host} inventory -> port") + return self._get_inventory_data(host, "port") + + def get_community_string_for_entry(self, host): + logger.info(f"get {host} inventory -> community_string") + return self._get_inventory_data(host, "community_string") + + def get_secret_for_entry(self, host): + logger.info(f"get {host} inventory -> secret") + return self._get_inventory_data(host, "secret") + + def get_security_engine_for_entry(self, host): + logger.info(f"get {host} inventory -> security_engine") + return self._get_inventory_data(host, "security_engine") + + def get_walk_interval_for_entry(self, host): + logger.info(f"get {host} inventory -> walk_interval") + return self._get_inventory_data(host, "walk_interval") + + def get_profiles_for_entry(self, host): + logger.info(f"get {host} inventory -> profiles") + return self._get_inventory_data(host, "profiles") + + def get_smart_profiles_for_entry(self, host): + logger.info(f"get {host} inventory -> smart_profiles") + return self._get_inventory_data(host, "smart_profiles") + + def clear_inventory(self): + logger.info(f"remove all inventory entries") + delete_btn_for_inventory_with_host_ip_xpath = ( + f"//button[@data-test='sc4snmp:inventory-row-delete']" + ) + delete_btns = driver.find_elements( + By.XPATH, delete_btn_for_inventory_with_host_ip_xpath + ) + logger.info(f"Need to remove {len(delete_btns)} items") + while len(delete_btns) > 0: + delete_btns[0].click() + time.sleep(1) + self.confirm_delete() + self.close_delete_popup() + delete_btns = 
driver.find_elements( + By.XPATH, delete_btn_for_inventory_with_host_ip_xpath + ) + logger.info(f" {len(delete_btns)} more items for removal") diff --git a/ui_tests/pages/profiles_page.py b/ui_tests/pages/profiles_page.py new file mode 100644 index 000000000..8c29eca0c --- /dev/null +++ b/ui_tests/pages/profiles_page.py @@ -0,0 +1,299 @@ +import time + +import pages.helper as helper +from logger.logger import Logger +from selenium.webdriver.common.by import By +from selenium.webdriver.common.keys import Keys +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory.get_driver() + + +class ProfilesPage: + def check_if_profiles_table_is_displayed(self): + logger.info("Check if profiles page is displayed") + profiles_table_xpath = "//div[@data-test='sc4snmp:profiles-table']" + profiles_container = driver.find_element(By.XPATH, profiles_table_xpath) + return profiles_container.is_displayed() + + def click_add_profile_button(self): + logger.info("Click Add New Profile button") + xpath = "//button[@data-test='sc4snmp:new-item-button']" + btn = driver.find_element(By.XPATH, xpath) + btn.click() + + def click_submit_button(self): + logger.info("Click Submit button") + xpath = "//button[@data-test='sc4snmp:form:submit-form-button']" + btn = driver.find_element(By.XPATH, xpath) + btn.click() + time.sleep(5) # wait for profile to be shown on the list + + def select_profile_type(self, profile_type): + logger.info(f"Set profile type: {profile_type}") + profiles = { + "standard": "//button[@data-test='sc4snmp:form:condition-standard']", + "base": "//button[@data-test='sc4snmp:form:condition-base']", + "smart": "//button[@data-test='sc4snmp:form:condition-smart']", + "walk": "//button[@data-test='sc4snmp:form:condition-walk']", + "conditional": "//button[@data-test='sc4snmp:form:condition-conditional']", + } + profile_type_expander_xpath = ( + "//button[@data-test='sc4snmp:form:select-condition']" + ) + expander = driver.find_element(By.XPATH, profile_type_expander_xpath) + expander.click() + option = driver.find_element(By.XPATH, profiles[profile_type]) + option.click() + + def set_frequency(self, freq_value): + logger.info(f"Setting profile frequency: {freq_value}") + xpath = "//div[@data-test='sc4snmp:form:frequency-input']//span//input" + freq_field = driver.find_element(By.XPATH, xpath) + helper.clear_input(freq_field) + # freq_field.send_keys(Keys.BACKSPACE) # clear() is not working here + freq_field.send_keys(freq_value) + + def set_profile_name(self, name): + logger.info(f"Setting profile name: {name}") + xpath = "//div[@data-test='sc4snmp:form:profile-name-input']//span//input" + name_input = driver.find_element(By.XPATH, xpath) + helper.clear_input(name_input) # this is useful when editing profile name + name_input.send_keys(name) + + def add_varBind(self, mcomponent, mobject=None, mindex=None): + logger.info(f"Adding varBind: {mcomponent, mobject, mindex}") + add_varBind_button_xpath = "//div[@data-test='sc4snmp:form:add-varbinds']//span[contains(text(),'Add varBind')]" + add_varBind_btn = driver.find_element(By.XPATH, add_varBind_button_xpath) + add_varBind_btn.click() + varbind_row_xpath = "//div[@data-test='sc4snmp:form:varbind-row']" + varBinds_rows = driver.find_elements(By.XPATH, varbind_row_xpath) + component_xpath = ( + "//div[@data-test='sc4snmp:form:varbind-mib-component-input']/span/input" + ) + component_input = varBinds_rows[-1].find_element(By.XPATH, component_xpath) + component_input.send_keys(mcomponent) + if 
mobject is not None: + object_xpath = ( + "//div[@data-test='sc4snmp:form:varbind-mib-object-input']/span/input" + ) + object_input = varBinds_rows[-1].find_element(By.XPATH, object_xpath) + object_input.send_keys(mobject) + if mindex is not None: + index_xpath = ( + "//div[@data-test='sc4snmp:form:varbind-mib-index-input']/span/input" + ) + index_input = varBinds_rows[-1].find_element(By.XPATH, index_xpath) + index_input.send_keys(mindex) + + def edit_varBind(self, new_mcomponent, new_mobject, new_mindex): + logger.info( + f"Editing varBind new values: {new_mcomponent}, {new_mobject}, {new_mindex}" + ) + varbind_row_xpath = "//div[@data-test='sc4snmp:form:varbind-row']" + varBinds_row = driver.find_element(By.XPATH, varbind_row_xpath) + component_xpath = ( + "//div[@data-test='sc4snmp:form:varbind-mib-component-input']/span/input" + ) + component_input = varBinds_row.find_element(By.XPATH, component_xpath) + helper.clear_input(component_input) + component_input.send_keys(new_mcomponent) + + object_xpath = ( + "//div[@data-test='sc4snmp:form:varbind-mib-object-input']/span/input" + ) + object_input = varBinds_row.find_element(By.XPATH, object_xpath) + helper.clear_input(object_input) + object_input.send_keys(new_mobject) + + index_xpath = ( + "//div[@data-test='sc4snmp:form:varbind-mib-index-input']/span/input" + ) + index_input = varBinds_row.find_element(By.XPATH, index_xpath) + helper.clear_input(index_input) + index_input.send_keys(new_mindex) + + def check_if_profile_is_configured(self, profile_name): + logger.info(f"Checking if profile is on profiles list: {profile_name}") + profiles_name_xpath = "//td[@data-test='sc4snmp:profile-name']" + profile_names = driver.find_elements(By.XPATH, profiles_name_xpath) + for element in profile_names: + # logger.info(f"profile name > |{element.text}|") # debug + if profile_name == element.text: + return True + logger.info("Profile has not been found on list") + return False + + def delete_profile_from_list(self, profile_name): + logger.info(f"Removing profile from profiles list: {profile_name}") + self.click_delete_profile_button(profile_name) + self._confirm_delete_profile() + self.close_profile_delete_popup() + + def click_delete_profile_button(self, profile_name): + logger.info(f"click delete profile button -> {profile_name}") + delete_btn_for_profile_with_name_xpath = f"//button[@data-test='sc4snmp:profile-row-delete' and ancestor::tr//td[text()='{profile_name}']]" + delete_btn = driver.find_element( + By.XPATH, delete_btn_for_profile_with_name_xpath + ) + delete_btn.click() + time.sleep(1) + + def _confirm_delete_profile(self): + confirm_delete_xpath = ( + "//button[@data-test='sc4snmp:delete-modal:delete-button']" + ) + confirm_btn = driver.find_element(By.XPATH, confirm_delete_xpath) + confirm_btn.click() + time.sleep(1) + + def close_profile_delete_popup(self): + logger.info(f"Closing profile delete popup") + close_profile_delete_popup_btn_xpath = ( + "//button[@data-test='sc4snmp:errors-modal:cancel-button']" + ) + close_btn = driver.find_element(By.XPATH, close_profile_delete_popup_btn_xpath) + close_btn.click() + time.sleep(1) + + def get_profile_type_for_profile_entry(self, profile_name): + logger.info(f"getting profile type for profile {profile_name}") + profile_type_for_profile_with_name_xpath = f"//td[@data-test='sc4snmp:profile-type' and ancestor::tr//td[text()='{profile_name}']]" + profile_type = driver.find_element( + By.XPATH, profile_type_for_profile_with_name_xpath + ) + return profile_type.text + + def 
set_smart_profile_field(self, field_value): + logger.info(f"Setting smart profile field {field_value}") + smart_profile_field_xpath = ( + "//div[@data-test='sc4snmp:form:condition-field-input']//span//input" + ) + field = driver.find_element(By.XPATH, smart_profile_field_xpath) + field.send_keys(field_value) + + def add_smart_profile_pattern(self, pattern): + logger.info(f"Add smart profile pattern {pattern}") + add_pattern_button_xpath = "//span[contains(text(),'Add pattern')]" + add_pattern_button = driver.find_element(By.XPATH, add_pattern_button_xpath) + add_pattern_button.click() + time.sleep(1) + pattern_row_xpath = ( + "//div[@data-test='sc4snmp:form:field-pattern']//span//input" + ) + pattern_rows = driver.find_elements(By.XPATH, pattern_row_xpath) + pattern_rows[-1].send_keys(pattern) + + def check_if_frequency_setting_field_is_visible(self): + logger.info(f"Checking if frequency setting field is visible") + xpath = "//div[@data-test='sc4snmp:form:frequency-input']//span//input" + try: + freq_field = driver.find_element(By.XPATH, xpath) + return freq_field.is_displayed() + except Exception as e: + return False + + def add_condition(self, field_value, operation, value): + logger.info(f"Adding condition: {field_value}, {operation}, {value}") + add_condition_button_xpath = ( + "//div[@data-test='sc4snmp:form:add-conditional-profile']//button" + ) + add_condition_btn = driver.find_element(By.XPATH, add_condition_button_xpath) + add_condition_btn.click() + time.sleep(1) + # set field + set_field_xpath = ( + "//div[@data-test='sc4snmp:form:conditional-field']//span//input" + ) + field = driver.find_element(By.XPATH, set_field_xpath) + field.send_keys(field_value) + # select operation + operation_expander_xpath = ( + "//button[@data-test='sc4snmp:form:conditional-select-operation']" + ) + operation_expander = driver.find_element(By.XPATH, operation_expander_xpath) + operation_expander.click() + operation_option_xpath = ( + f"//button[@data-test='sc4snmp:form:conditional-{operation}']" + ) + operation_option = driver.find_element(By.XPATH, operation_option_xpath) + operation_option.click() + # set value + value_field_xpath = ( + "//div[@data-test='sc4snmp:form:conditional-condition']//span//input" + ) + value_field = driver.find_element(By.XPATH, value_field_xpath) + value_field.send_keys(value) + + def click_edit_profile(self, profile_name): + logger.info(f"Edit profile: {profile_name}") + edit_btn_for_profile_with_name_xpath = f"//button[@data-test='sc4snmp:profile-row-edit' and ancestor::tr//td[text()='{profile_name}']]" + edit_btn = driver.find_element(By.XPATH, edit_btn_for_profile_with_name_xpath) + edit_btn.click() + time.sleep(1) + + def close_edited_profile_popup(self): + logger.info(f"Closing edited profile popup") + close_popup_btn_xpath = ( + f"//button[@data-test='sc4snmp:errors-modal:cancel-button']" + ) + close_popup_btn = driver.find_element(By.XPATH, close_popup_btn_xpath) + close_popup_btn.click() + time.sleep(2) + + def get_submit_edited_profile_text(self): + logger.info(f"Get submit edited profile popup text") + edited_profile_popup_text_xpath = f"//div[@data-test='modal']//div//p" + edited_profile_popup_text = driver.find_element( + By.XPATH, edited_profile_popup_text_xpath + ) + return edited_profile_popup_text.text + + def get_profile_freq(self, profile_name): + logger.info(f"Get profile frequency {profile_name}") + profile_freq_xpath = f"//td[@data-test='sc4snmp:profile-frequency' and ancestor::tr//td[text()='{profile_name}']]" + profile_freq = 
driver.find_element(By.XPATH, profile_freq_xpath) + return profile_freq.text + + def expand_profile(self, profile_name): + logger.info(f"Click profile expand button: {profile_name}") + profile_expand_btn_xpath = f"//tr[@data-test='sc4snmp:profile-row' and child::td[text()='{profile_name}']]//td[@data-test='expand']" + profile_expand_btn = driver.find_element(By.XPATH, profile_expand_btn_xpath) + profile_expand_btn.click() + time.sleep(1) + + def get_profile_varbind(self, profile_name): + logger.info(f"Get profile varBind {profile_name}") + profile_mcomponent_xpath = ( + f"//td[@data-test='sc4snmp:profile-mib-component-expanded']//p" + ) + mcomponent = driver.find_element(By.XPATH, profile_mcomponent_xpath) + profile_mobject_xpath = ( + f"//td[@data-test='sc4snmp:profile-mib-object_expanded']//p" + ) + mobject = driver.find_element(By.XPATH, profile_mobject_xpath) + profile_mindex_xpath = ( + f"//td[@data-test='sc4snmp:profile-mib-index-expanded']//p" + ) + mindex = driver.find_element(By.XPATH, profile_mindex_xpath) + varBind = { + "mcomponent": mcomponent.text, + "mobject": mobject.text, + "mindex": int(mindex.text), + } + return varBind + + def clear_profiles(self): + logger.info(f"remove all profiles") + profile_delete_btn_xpath = f"//button[@data-test='sc4snmp:profile-row-delete']" + delete_btns = driver.find_elements(By.XPATH, profile_delete_btn_xpath) + logger.info(f"Need to remove {len(delete_btns)} items") + while len(delete_btns) > 0: + delete_btns[0].click() + time.sleep(1) + self._confirm_delete_profile() + self.close_profile_delete_popup() + time.sleep(1) + delete_btns = driver.find_elements(By.XPATH, profile_delete_btn_xpath) + logger.info(f" {len(delete_btns)} more items for removal") diff --git a/ui_tests/pages/yaml_values_reader.py b/ui_tests/pages/yaml_values_reader.py new file mode 100644 index 000000000..aa6c25c04 --- /dev/null +++ b/ui_tests/pages/yaml_values_reader.py @@ -0,0 +1,51 @@ +import yaml +from config import config + + +class YamlValuesReader: + _yaml_file_path = config.YAML_FILE_PATH + + def _open_and_read_yaml_file(self): + with open(self._yaml_file_path) as yaml_file: + try: + # try to load YAML data into a Python dictionary + data = yaml.safe_load(yaml_file) + + # print(data) + return data + + except yaml.YAMLError as e: + print(f"Error reading YAML file: {e}") + + def get_scheduler_profiles(self): + data = self._open_and_read_yaml_file() + profiles = data["scheduler"]["profiles"] + return profiles + + def get_scheduler_groups(self): + data = self._open_and_read_yaml_file() + groups = data["scheduler"]["groups"] + return groups + + def get_inventory_entries(self): + data = self._open_and_read_yaml_file() + entries = data["poller"]["inventory"] + return entries + + def get_field_value(self, field): + return field + + +## DEBUG -> +# if __name__ == "__main__": +# helper = YamlValuesReader() +# print(helper.get_scheduler_profiles()) +# data = helper.get_scheduler_profiles() +# +# print("-") +# print(helper.get_scheduler_groups()) +# groups = helper.get_scheduler_groups() +# print("-") +# inventory =helper.get_inventory_entries() +# print(inventory) +# print("-") diff --git a/ui_tests/pytest.ini b/ui_tests/pytest.ini new file mode 100644 index 000000000..2eebd2cc5 --- /dev/null +++ b/ui_tests/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +log_level = INFO +addopts = -rfps --durations=10 --disable-pytest-warnings --continue-on-collection-errors + +;log_level = DEBUG \ No newline at end of file diff --git a/ui_tests/requirements.txt new 
file mode 100644 index 000000000..d23ff5c49 --- /dev/null +++ b/ui_tests/requirements.txt @@ -0,0 +1,4 @@ +pytest-splunk-addon +selenium +webdriver_manager +pyyaml \ No newline at end of file diff --git a/ui_tests/splunk_search.py b/ui_tests/splunk_search.py new file mode 100644 index 000000000..0a4c303c6 --- /dev/null +++ b/ui_tests/splunk_search.py @@ -0,0 +1,214 @@ +""" +Copyright 2018-2019 Splunk, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import json +import logging +import os +import sys +import time + +import requests +from exceptions_tests import UiTestsException +from requests.adapters import HTTPAdapter +from requests.packages.urllib3.util.retry import Retry + +TIMEOUT = 500 + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +formatter = logging.Formatter( + "%(asctime)s - %(name)s -" + " %(levelname)s - %(message)s" +) +handler = logging.StreamHandler(sys.stdout) +handler.setFormatter(formatter) +logger.addHandler(handler) + + +def check_events_from_splunk( + start_time="-1h@h", + end_time="now", + url="", + user="", + query="", + password="", +): + """ + send a search request to splunk and return the events from the result + """ + logger.info( + f"search query = {query}, start_time: {start_time}, end_time: {end_time}" + ) + events = _collect_events(query, start_time, end_time, url, user, password) + + return events + + +def _collect_events(query, start_time, end_time, url="", user="", password=""): + """ + Collect events by running the given search query + @param: query (search query) + @param: start_time (search start time) + @param: end_time (search end time) + returns events + """ + + search_url = "{}/services/search/jobs?output_mode=json".format(url) + logger.debug("requesting: %s", search_url) + data = { + "search": query, + "earliest_time": start_time, + "latest_time": end_time, + } + logger.debug("SEARCH DATA: {}".format(data)) + create_job = _requests_retry_session().post( + search_url, auth=(user, password), verify=False, data=data + ) + _check_request_status(create_job) + + json_res = create_job.json() + job_id = json_res["sid"] + events = _wait_for_job_and_get_events(job_id, url, user, password) + + return events + + +def _collect_metrics( + start_time, end_time, url="", user="", password="", index="", metric_name="" +): + """ + Verify metrics by running the given api query + @param: index (metrics index) + @param: metric_name (metric name) + @param: start_time (search start time) + @param: end_time (search end time) + returns events + """ + api_url = ( + url + + "/services/catalog/metricstore/dimensions/host/values?filter=index%3d" + + index + + "&metric_name=" + + metric_name + + "&earliest=" + + start_time + + "&latest=" + + end_time + + "&output_mode=json" + ) + logger.debug("requesting: %s", api_url) + + create_job = _requests_retry_session().get( + api_url, auth=(user, password), verify=False + ) + + _check_request_status(create_job) + + json_res = create_job.json() + + events = json_res["entry"] + # logger.info('events: %s', events) 
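+ # per the dimensions/host/values endpoint queried above, each entry describes one 'host' dimension value that reported metric_name in the time window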
+ + return events + + +def _wait_for_job_and_get_events(job_id, url="", user="", password=""): + """ + Wait for the search job to finish and collect the result events + @param: job_id + returns events + """ + events = [] + job_url = "{}/services/search/jobs/{}?output_mode=json".format(url, str(job_id)) + logger.debug("requesting: %s", job_url) + + for _ in range(TIMEOUT): + res = _requests_retry_session().get( + job_url, auth=(user, password), verify=False + ) + _check_request_status(res) + + job_res = res.json() + dispatch_state = job_res["entry"][0]["content"]["dispatchState"] + + if dispatch_state == "DONE": + events = _get_events(job_id, url, user, password) + break + if dispatch_state == "FAILED": + raise UiTestsException("Search job: {} failed".format(job_url)) + time.sleep(1) + + return events + + +def _get_events(job_id, url="", user="", password=""): + """ + collect the result events from a search job + @param: job_id + returns events + """ + event_url = "{}/services/search/jobs/{}/events?output_mode=json".format( + url, str(job_id) + ) + logger.debug("requesting: %s", event_url) + + event_job = _requests_retry_session().get( + event_url, auth=(user, password), verify=False + ) + _check_request_status(event_job) + + event_job_json = event_job.json() + events = event_job_json["results"] + logger.debug("Events from get_events method returned %s events", len(events)) + + return events + + +def _check_request_status(req_obj): + """ + check if a request was successful + @param: req_obj + raises UiTestsException on failure + """ + if not req_obj.ok: + raise UiTestsException( + "status code: {} \n details: {}".format( + str(req_obj.status_code), req_obj.text + ) + ) + + +def _requests_retry_session( + retries=10, backoff_factor=0.1, status_forcelist=(500, 502, 504) +): + """ + create a retry session for HTTP/HTTPS requests + @param: retries (num of retry time) + @param: backoff_factor + @param: status_forcelist (list of error status code to trigger retry) + returns: session + """ + session = requests.Session() + retry = Retry( + total=int(retries), + backoff_factor=backoff_factor, + status_forcelist=status_forcelist, + ) + adapter = HTTPAdapter(max_retries=retry) + session.mount("http://", adapter) + session.mount("https://", adapter) + + return session diff --git a/ui_tests/tests/test_basic.py b/ui_tests/tests/test_basic.py new file mode 100644 index 000000000..5b4f8b717 --- /dev/null +++ b/ui_tests/tests/test_basic.py @@ -0,0 +1,56 @@ +import pytest +from logger.logger import Logger +from pages.groups_page import GroupsPage +from pages.header_page import HeaderPage +from pages.inventory_page import InventoryPage +from pages.profiles_page import ProfilesPage +from webdriver.webriver_factory import WebDriverFactory + +driver = WebDriverFactory().get_driver() +logger = Logger().get_logger() +p_header = HeaderPage() +p_profiles = ProfilesPage() +p_groups = GroupsPage() +p_inventory = InventoryPage() + + +@pytest.mark.basic +def test_check_page_title_is_correct(): + """ + Test that the SC4SNMP UI page title is correct + """ + page_title = driver.title + + logger.info(f"Page Title: {page_title}") + assert "SC4SNMP Manager" == page_title + + +@pytest.mark.basic +def test_check_selected_tab_behaviour(): + """ + Test that the selected tab stays selected upon refreshing the page; + check if the corresponding tables are displayed + """ + p_header.switch_to_profiles() + url = driver.current_url + assert "/?tab=Profiles" in url + assert p_profiles.check_if_profiles_table_is_displayed() + driver.refresh() + url = 
driver.current_url + assert "/?tab=Profiles" in url + + p_header.switch_to_groups() + url = driver.current_url + assert "/?tab=Groups" in url + assert p_groups.check_if_groups_table_is_displayed() + driver.refresh() + url = driver.current_url + assert "/?tab=Groups" in url + + p_header.switch_to_inventory() + url = driver.current_url + assert "/?tab=Inventory" in url + assert p_inventory.check_if_inventory_table_is_displayed() + driver.refresh() + url = driver.current_url + assert "/?tab=Inventory" in url diff --git a/ui_tests/tests/test_error_handling_and_complex_scenarios.py b/ui_tests/tests/test_error_handling_and_complex_scenarios.py new file mode 100644 index 000000000..edbc2da2b --- /dev/null +++ b/ui_tests/tests/test_error_handling_and_complex_scenarios.py @@ -0,0 +1,524 @@ +import time + +import pytest +from config import config +from logger.logger import Logger +from pages.groups_page import GroupsPage +from pages.header_page import HeaderPage +from pages.inventory_page import InventoryPage +from pages.profiles_page import ProfilesPage +from pages.yaml_values_reader import YamlValuesReader +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory().get_driver() +p_header = HeaderPage() +p_profiles = ProfilesPage() +p_groups = GroupsPage() +p_inventory = InventoryPage() +values_reader = YamlValuesReader() + + +@pytest.mark.basic +def test_trying_to_configure_profile_with_the_same_name(): + """ + Configure profile + try to configure a profile with the same name again + check error message + """ + profile_name = "same_profile" + profile_freq = 10 + + p_header.switch_to_profiles() + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name) + p_profiles.set_frequency(profile_freq) + p_profiles.add_varBind("IP-MIB", "ifDescr", 1) + p_profiles.click_submit_button() + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is True + + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name) + p_profiles.set_frequency(profile_freq) + p_profiles.add_varBind("IP-MIB", "ifDescr", 1) + p_profiles.click_submit_button() + + message = p_header.get_popup_error_message() + assert ( + message + == f"Profile with name {profile_name} already exists. Profile was not added." + ) + p_header.close_error_popup() + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is True + + p_profiles.delete_profile_from_list(profile_name) + + +@pytest.mark.basic +def test_trying_to_configure_group_with_the_same_name(): + """ + Configure group + try to configure a group with the same name again + check error message + """ + group_name = "same_group" + + p_header.switch_to_groups() + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is True + + # try to add same group again + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + # check error message + message = p_header.get_popup_error_message() + assert ( + message == f"Group with name {group_name} already exists. Group was not added." 
+ ) + p_header.close_error_popup() + + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is True + + p_groups.delete_group_from_list(group_name) + + +@pytest.mark.basic +def test_trying_to_add_group_device_which_already_exists(): + """ + Configure group with device + try to add the same device to the group + check error message + """ + group_name = "same_group_device" + device_ip = "10.20.20.10" + port = 324 + snmp_version = "2c" + community_string = "test-device" + + p_header.switch_to_groups() + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + # add device to grp + p_groups.click_add_device_to_group(group_name) + p_groups.set_device_ip(device_ip) + p_groups.set_device_port(port) + p_groups.set_snmp_version(snmp_version) + p_groups.set_community_string(community_string) + p_groups.click_submit_button_for_add_device() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is True + + # try to add same device again + p_groups.click_add_device_to_group(group_name) + p_groups.set_device_ip(device_ip) + p_groups.set_device_port(port) + p_groups.click_submit_button_for_add_device() + is_configured = p_groups.check_if_device_is_configured(device_ip) + assert is_configured is True + + # check error message + message = p_header.get_popup_error_message() + assert ( + message + == f"Host {device_ip}:{port} already exists in group {group_name}. Record was not added." + ) + p_header.close_error_popup() + + is_configured = p_groups.check_if_device_is_configured(device_ip) + assert is_configured is True + + p_groups.delete_group_from_list(group_name) + + +@pytest.mark.basic +def test_trying_to_add_inventory_with_host_which_already_exists(): + """ + Configure inventory with host + try to add the same host as another inventory entry + check error message + """ + host_ip = "100.200.100.200" + community_string = "test-device" + + p_header.switch_to_inventory() + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host_ip) + p_inventory.set_community_string(community_string) + p_inventory.click_submit_button_for_add_entry() + is_on_list = p_inventory.check_if_entry_is_on_list(host_ip) + assert is_on_list is True + + # try to add same device again + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host_ip) + p_inventory.set_community_string("different_string") + p_inventory.click_submit_button_for_add_entry() + + # check error message + message = p_header.get_popup_error_message() + assert ( + message + == f"Host {host_ip}:{config.DEFAULT_PORT} already exists in the inventory. Record was not added." 
+ ) + p_header.close_error_popup() + is_on_list = p_inventory.check_if_entry_is_on_list(host_ip) + assert is_on_list is True + + p_inventory.delete_entry_from_list(host_ip) + + +@pytest.mark.basic +def test_trying_to_add_inventory_with_group_which_is_already_added(): + """ + Configure inventory with group + try to add the same group as another inventory entry + check error message + """ + # add group + group_name = f"test-group-inventory" + p_header.switch_to_groups() + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + + community_string = "public" + p_header.switch_to_inventory() + p_inventory.click_add_new_device_group_button() + p_inventory.select_group_inventory_type() + p_inventory.set_host_or_group_name(group_name) + p_inventory.set_community_string(community_string) + p_inventory.click_submit_button_for_add_entry() + is_on_list = p_inventory.check_if_entry_is_on_list(group_name) + assert is_on_list is True + + # try to add same device again + p_inventory.click_add_new_device_group_button() + p_inventory.select_group_inventory_type() + p_inventory.set_host_or_group_name(group_name) + p_inventory.set_community_string("public_test_same_group") + p_inventory.click_submit_button_for_add_entry() + + # check error message + message = p_header.get_popup_error_message() + assert ( + message + == f"Group {group_name} has already been added to the inventory. Record was not added." + ) + p_header.close_error_popup() + is_on_list = p_inventory.check_if_entry_is_on_list(group_name) + assert is_on_list is True + + # delete + p_inventory.delete_entry_from_list(group_name) + p_header.switch_to_groups() + p_groups.delete_group_from_list(group_name) + + +@pytest.mark.basic +def test_trying_to_add_inventory_group_with_host_which_is_configured_as_host(): + """ + Configure inventory with group with host + try to add the inventory entry with the same host which is configured in group + check error message + """ + # add group + group_name = f"test-group-inventory" + device_ip = "40.50.60.70" + community_string = "public" + + p_header.switch_to_groups() + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + + p_groups.click_add_device_to_group(group_name) + p_groups.set_device_ip(device_ip) + p_groups.click_submit_button_for_add_device() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is True + + p_header.switch_to_inventory() + p_inventory.click_add_new_device_group_button() + p_inventory.select_group_inventory_type() + p_inventory.set_host_or_group_name(group_name) + p_inventory.set_community_string(community_string) + p_inventory.click_submit_button_for_add_entry() + is_on_list = p_inventory.check_if_entry_is_on_list(group_name) + assert is_on_list is True + + # try to add the same host as inventory entry again + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(device_ip) + p_inventory.set_community_string("public_test_same_host") + p_inventory.click_submit_button_for_add_entry() + + # check error message + message = p_header.get_popup_error_message() + assert ( + message + == f"Host {device_ip}:{config.DEFAULT_PORT} already exists in group {group_name}. Record was not added." 
+ )
+ p_header.close_error_popup()
+ is_on_list = p_inventory.check_if_entry_is_on_list(group_name)
+ assert is_on_list is True
+
+ # delete
+ p_inventory.delete_entry_from_list(group_name)
+ p_header.switch_to_groups()
+ p_groups.delete_group_from_list(group_name)
+
+
+@pytest.mark.basic
+def test_removing_group_which_is_configured_in_inventory():
+ """
+ Configure inventory -> add group as inventory entry
+ remove group which was added into inventory
+ check that upon removing the group, its inventory entry is also removed
+ """
+ # add group
+ group_name = f"test-group-inventory"
+ community_string = "public"
+
+ p_header.switch_to_groups()
+ p_groups.click_add_new_group_button()
+ p_groups.set_group_name(group_name)
+ p_groups.click_submit_button_for_add_group()
+
+ p_header.switch_to_inventory()
+ p_inventory.click_add_new_device_group_button()
+ p_inventory.select_group_inventory_type()
+ p_inventory.set_host_or_group_name(group_name)
+ p_inventory.set_community_string(community_string)
+ p_inventory.click_submit_button_for_add_entry()
+ is_on_list = p_inventory.check_if_entry_is_on_list(group_name)
+ assert is_on_list is True
+
+ # delete group
+ p_header.switch_to_groups()
+ p_groups.click_delete_group_button(group_name)
+ message = (
+ p_groups.get_warning_message_when_removing_group_which_is_configured_in_inventory()
+ )
+ assert message == "WARNING: This group is configured in the inventory"
+ p_groups.confirm_delete()
+ p_groups.close_delete_popup()
+
+ # check that the group and its inventory entry are removed
+ is_on_list = p_groups.check_if_groups_is_on_list(group_name)
+ assert is_on_list is False
+ p_header.switch_to_inventory()
+ is_on_list = p_inventory.check_if_entry_is_on_list(group_name)
+ assert is_on_list is False
+
+
+@pytest.mark.basic
+def test_removing_profile_which_is_configured_in_inventory():
+ """
+ Configure inventory with profile
+ remove profile which was added into inventory
+ check that upon removing the profile, it is also removed from the inventory entry
+ """
+ # add profile
+ profile_name = "removing_profile"
+ host = "99.99.99.99"
+ community_string = "public"
+
+ p_header.switch_to_profiles()
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_name)
+ p_profiles.add_varBind("IP-MIB", "ifDescr", 1)
+ p_profiles.click_submit_button()
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is True
+
+ p_header.switch_to_inventory()
+ p_inventory.click_add_new_device_group_button()
+ p_inventory.set_host_or_group_name(host)
+ p_inventory.select_profiles([profile_name])
+ p_inventory.set_community_string(community_string)
+ p_inventory.click_submit_button_for_add_entry()
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is True
+
+ # delete profile
+ p_header.switch_to_profiles()
+ p_profiles.click_delete_profile_button(profile_name)
+
+ message = (
+ p_groups.get_warning_message_when_removing_group_which_is_configured_in_inventory()
+ )
+ assert (
+ message
+ == "WARNING: This profile is configured in some records in the inventory"
+ )
+ p_profiles._confirm_delete_profile()
+ p_profiles.close_profile_delete_popup()
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is False
+
+ # check inventory - no profile
+ p_header.switch_to_inventory()
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is True
+ received_profiles = p_inventory.get_profiles_for_entry(host)
+ assert "" == received_profiles
+ # delete 
inventory entry + p_inventory.delete_entry_from_list(host) + + +@pytest.mark.basic +def test_try_to_add_to_inventory_group_which_does_not_exist(): + """ + Configure inventory with group which does not exist + check error message + """ + + group_name = "does_not_exist" + community_string = "abcd" + p_header.switch_to_inventory() + + p_inventory.click_add_new_device_group_button() + p_inventory.select_group_inventory_type() + p_inventory.set_host_or_group_name(group_name) + p_inventory.set_community_string(community_string) + p_inventory.click_submit_button_for_add_entry() + + # check error message + message = p_header.get_popup_error_message() + assert ( + message + == f"Group {group_name} doesn't exist in the configuration. Record was not added." + ) + p_header.close_error_popup() + is_on_list = p_inventory.check_if_entry_is_on_list(group_name) + assert is_on_list is False + + +@pytest.mark.basic +def test_trying_to_edit_profile_name_into_profile_name_that_exists(): + """ + Configure two profiles + try to change one profile to the second + check error message + """ + profile_name_1 = "profile_1" + profile_name_2 = "profile_2" + + p_header.switch_to_profiles() + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name_1) + p_profiles.add_varBind("IP-MIB", "ifDescr", 1) + p_profiles.click_submit_button() + + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name_2) + p_profiles.add_varBind("IP-MIB") + p_profiles.click_submit_button() + + # edit profile name + p_profiles.click_edit_profile(profile_name_1) + p_profiles.set_profile_name(profile_name_2) + p_profiles.click_submit_button() + + message = p_header.get_popup_error_message() + assert ( + message + == f"Profile with name {profile_name_2} already exists. Profile was not edited." + ) + p_header.close_error_popup() + exist = p_profiles.check_if_profile_is_configured(profile_name_1) + assert exist is True + exist = p_profiles.check_if_profile_is_configured(profile_name_2) + assert exist is True + + p_profiles.delete_profile_from_list(profile_name_1) + p_profiles.delete_profile_from_list(profile_name_2) + + +@pytest.mark.basic +def test_trying_to_edit_group_name_into_another_group_name(): + """ + Configure two groups + try to change one group to the second + check error message + """ + group_name_1 = "group_1" + group_name_2 = "group_2" + + p_header.switch_to_groups() + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name_1) + p_groups.click_submit_button_for_add_group() + + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name_2) + p_groups.click_submit_button_for_add_group() + + # edit group name + p_groups.edit_group_name(group_name_1, group_name_2) + + message = p_header.get_popup_error_message() + assert ( + message + == f"Group with name {group_name_2} already exists. Group was not edited." 
+ ) + p_header.close_error_popup() + is_on_list = p_groups.check_if_groups_is_on_list(group_name_1) + assert is_on_list is True + is_on_list = p_groups.check_if_groups_is_on_list(group_name_2) + assert is_on_list is True + + p_groups.delete_group_from_list(group_name_1) + p_groups.delete_group_from_list(group_name_2) + + +@pytest.mark.basic +def test_trying_to_edit_inventory_host_into_host_which_exists(): + """ + Configure two inventory hosts + try to change one host to the second + check error message + """ + host_1 = "11.11.11.11" + community_1 = "com1" + host_2 = "22.22.22.22" + community_2 = "abcs" + + p_header.switch_to_inventory() + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host_1) + p_inventory.set_community_string(community_1) + p_inventory.click_submit_button_for_add_entry() + + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host_2) + p_inventory.set_community_string(community_2) + p_inventory.click_submit_button_for_add_entry() + + # edit inventory host + p_inventory.click_edit_inventory_entry(host_1) + p_inventory.set_host_or_group_name(host_2, True) + p_inventory.click_submit_button_for_add_entry() + + message = p_header.get_popup_error_message() + assert ( + message + == f"Host {host_2}:{config.DEFAULT_PORT} already exists in the inventory. Record was not edited." + ) + p_header.close_error_popup() + is_on_list = p_inventory.check_if_entry_is_on_list(host_1) + assert is_on_list is True + is_on_list = p_inventory.check_if_entry_is_on_list(host_2) + assert is_on_list is True + + p_inventory.delete_entry_from_list(host_1) + p_inventory.delete_entry_from_list(host_2) diff --git a/ui_tests/tests/test_groups_basic.py b/ui_tests/tests/test_groups_basic.py new file mode 100644 index 000000000..7b4b3014a --- /dev/null +++ b/ui_tests/tests/test_groups_basic.py @@ -0,0 +1,249 @@ +import time + +import pytest +from logger.logger import Logger +from pages.groups_page import GroupsPage +from pages.header_page import HeaderPage +from pages.inventory_page import InventoryPage +from pages.profiles_page import ProfilesPage +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory().get_driver() +p_header = HeaderPage() +p_profiles = ProfilesPage() +p_groups = GroupsPage() +p_inventory = InventoryPage() + + +@pytest.mark.basic +def test_add_and_remove_group(): + """ + Test that user is able to add group, + check newly added group is displayed on groups list + remove group and check it is not on the list + """ + group_name = f"test-group" + p_header.switch_to_groups() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is True + p_groups.delete_group_from_list(group_name) + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False + + +@pytest.mark.basic +def test_change_group_name(): + """ + Test that user is able to add group, + check that user is able to change group name + """ + group_name = f"change-name" + new_group_name = "new-group-name" + p_header.switch_to_groups() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + 
p_groups.click_submit_button_for_add_group() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is True + is_on_list_new = p_groups.check_if_groups_is_on_list(new_group_name) + assert is_on_list_new is False + # edit name + p_groups.edit_group_name(group_name, new_group_name) + message = p_groups.get_submit_edited_group_name_popup_message() # common method? + expected_message = ( + f"{group_name} was also renamed to {new_group_name} in the inventory" + ) + assert expected_message == message + p_groups.close_edited_profile_popup() # common method? + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False + is_on_list_new = p_groups.check_if_groups_is_on_list(new_group_name) + assert is_on_list_new is True + + p_groups.delete_group_from_list(new_group_name) + is_on_list = p_groups.check_if_groups_is_on_list(new_group_name) + assert is_on_list is False + + +@pytest.mark.basic +def test_try_adding_device_to_group_with_no_data(): + """ + Test that user is not able to add device with no data + check error message + then click cancel + check no device on list + """ + group_name = f"device-with-no-data" + p_header.switch_to_groups() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + p_groups.click_add_device_to_group(group_name) + p_groups.click_submit_button_for_add_device() + message = p_groups.get_error_message_while_adding_device_with_no_data() + assert message == "Address or host name is required" + p_groups.click_cancel_button_for_add_device() + number_of_devices = p_groups.get_number_of_devices_for_group(group_name) + assert 0 == number_of_devices + p_groups.delete_group_from_list(group_name) + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False + + +@pytest.mark.basic +def test_add_and_remove_device_into_group(): + """ + Test that user is able to add device into group, + After adding device into group that group is auto selected + check added device displayed on devices list + remove device and check it is not on the list anymore + """ + group_name = f"test-add-one-device" + device_ip = "1.2.3.4" + p_header.switch_to_groups() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is True + + p_groups.click_add_device_to_group(group_name) + p_groups.set_device_ip(device_ip) + p_groups.click_submit_button_for_add_device() + number_of_devices = p_groups.get_number_of_devices_for_group(group_name) + assert 1 == number_of_devices + is_configured = p_groups.check_if_device_is_configured(device_ip) + assert is_configured is True + p_groups.delete_device_from_group(device_ip) + number_of_devices = p_groups.get_number_of_devices_for_group(group_name) + assert 0 == number_of_devices + is_configured = p_groups.check_if_device_is_configured(device_ip) + assert is_configured is False + + p_groups.delete_group_from_list(group_name) + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False + + +@pytest.mark.basic +def test_add_device_with_all_fields(): + """ + Test that user is able to add device into group, + After adding device into group that group is auto 
selected,
+ check added device is displayed on the devices list
+ remove the group and check it is not on the list anymore
+ """
+ group_name = f"test-add-one-device"
+ device_ip = "1.2.3.4"
+ port = 1234
+ snmp_version = "2c"
+ community_string = "public"
+ secret = "secret"
+ security_engine = "8000000903000AAAEF536715"
+
+ p_header.switch_to_groups()
+ is_on_list = p_groups.check_if_groups_is_on_list(group_name)
+ assert is_on_list is False
+ p_groups.click_add_new_group_button()
+ p_groups.set_group_name(group_name)
+ p_groups.click_submit_button_for_add_group()
+ is_on_list = p_groups.check_if_groups_is_on_list(group_name)
+ assert is_on_list is True
+ # add device to grp
+ p_groups.click_add_device_to_group(group_name)
+ p_groups.set_device_ip(device_ip)
+ p_groups.set_device_port(port)
+ p_groups.set_snmp_version(snmp_version)
+ p_groups.set_community_string(community_string)
+ p_groups.set_secret(secret)
+ p_groups.set_security_engine(security_engine)
+
+ p_groups.click_submit_button_for_add_device()
+ is_configured = p_groups.check_if_device_is_configured(device_ip)
+ assert is_configured is True
+
+ p_groups.delete_group_from_list(group_name)
+ is_on_list = p_groups.check_if_groups_is_on_list(group_name)
+ assert is_on_list is False
+
+
+@pytest.mark.basic
+def test_edit_device_with_all_fields():
+ """
+ Test that user is able to add device into group,
+ user is able to edit all device fields
+ remove the group and check it is not on the list anymore
+ """
+ group_name = f"test-edit-device"
+ device_ip = "1.2.3.4"
+ port = 1234
+ snmp_version = "2c"
+ community_string = "public"
+ secret = "secret"
+ security_engine = "8000000903000AAAEF536715"
+
+ p_header.switch_to_groups()
+ is_on_list = p_groups.check_if_groups_is_on_list(group_name)
+ assert is_on_list is False
+ p_groups.click_add_new_group_button()
+ p_groups.set_group_name(group_name)
+ p_groups.click_submit_button_for_add_group()
+ is_on_list = p_groups.check_if_groups_is_on_list(group_name)
+ assert is_on_list is True
+ # add device to grp
+ p_groups.click_add_device_to_group(group_name)
+ p_groups.set_device_ip(device_ip)
+ p_groups.set_device_port(port)
+ p_groups.set_snmp_version(snmp_version)
+ p_groups.set_community_string(community_string)
+ p_groups.set_secret(secret)
+ p_groups.set_security_engine(security_engine)
+ p_groups.click_submit_button_for_add_device()
+
+ # edit device data
+ new_device_ip = "4.3.2.1"
+ new_port = 4321
+ new_snmp_version = "1"
+ new_community_string = "community"
+ new_secret = "test"
+ new_security_engine = "8000000903000AAAEF511115"
+
+ p_groups.click_edit_device(device_ip)
+ p_groups.set_device_ip(new_device_ip, True)
+ p_groups.set_device_port(new_port, True)
+ p_groups.set_snmp_version(new_snmp_version)
+ p_groups.set_community_string(new_community_string, True)
+ p_groups.set_secret(new_secret, True)
+ p_groups.set_security_engine(new_security_engine, True)
+ p_groups.click_submit_button_for_add_device()
+ # verify
+ is_configured = p_groups.check_if_device_is_configured(device_ip)
+ assert is_configured is False
+ is_configured = p_groups.check_if_device_is_configured(new_device_ip)
+ assert is_configured is True
+ port = p_groups.get_device_port(new_device_ip)
+ assert int(port) == new_port
+ snmp_version_received = p_groups.get_device_snmp_version(new_device_ip)
+ assert snmp_version_received == new_snmp_version
+ community_string_received = p_groups.get_device_community_string(new_device_ip)
+ assert community_string_received == new_community_string
+ secret_received = 
p_groups.get_device_secret(new_device_ip) + assert secret_received == new_secret + security_engine_received = p_groups.get_device_security_engine(new_device_ip) + assert security_engine_received == new_security_engine + + p_groups.delete_group_from_list(group_name) + is_on_list = p_groups.check_if_groups_is_on_list(group_name) + assert is_on_list is False diff --git a/ui_tests/tests/test_inventory_basic.py b/ui_tests/tests/test_inventory_basic.py new file mode 100644 index 000000000..b8cb71b33 --- /dev/null +++ b/ui_tests/tests/test_inventory_basic.py @@ -0,0 +1,407 @@ +import time + +import pytest +from logger.logger import Logger +from pages.groups_page import GroupsPage +from pages.header_page import HeaderPage +from pages.inventory_page import InventoryPage +from pages.profiles_page import ProfilesPage +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory().get_driver() +p_header = HeaderPage() +p_profiles = ProfilesPage() +p_groups = GroupsPage() +p_inventory = InventoryPage() + + +@pytest.mark.basic +def test_add_and_remove_inventory_entry(): + """ + Test that user is able to add inventory entry, + check newly added inventory is displayed on inventory list + remove inventory entry and check it is not on the list + """ + host_ip = "1.2.3.4" + community_string = "public" + p_header.switch_to_inventory() + is_on_list = p_inventory.check_if_entry_is_on_list(host_ip) + assert is_on_list is False + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host_ip) + p_inventory.set_community_string(community_string) + p_inventory.click_submit_button_for_add_entry() + is_on_list = p_inventory.check_if_entry_is_on_list(host_ip) + assert is_on_list is True + p_inventory.delete_entry_from_list(host_ip) + is_on_list = p_inventory.check_if_entry_is_on_list(host_ip) + assert is_on_list is False + + +@pytest.mark.basic +def test_add_device_into_inventory_then_change_it(): + """ + Test that user is able to add inventory entry, + check newly added inventory is displayed on inventory list + user is able to edit host + changed host is visible in inventory + remove inventory entry and check it is not on the list + """ + host_ip = "1.2.3.4" + community_string = "public" + p_header.switch_to_inventory() + is_on_list = p_inventory.check_if_entry_is_on_list(host_ip) + assert is_on_list is False + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host_ip) + p_inventory.set_community_string(community_string) + p_inventory.click_submit_button_for_add_entry() + is_on_list = p_inventory.check_if_entry_is_on_list(host_ip) + assert is_on_list is True + # change + new_host_ip = "10.20.30.40" + p_inventory.click_edit_inventory_entry(host_ip) + p_inventory.set_host_or_group_name(new_host_ip, True) + p_inventory.click_submit_button_for_edit_entry() + + expected_notice = "Address or port was edited which resulted in deleting the old device and creating the new one at the end of the list." 
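test_inventory_basic.py above opens with the same module-level page objects (HeaderPage, ProfilesPage, GroupsPage, InventoryPage) used by every suite in this change. A minimal sketch of that page-object pattern, assuming a hypothetical class and locator (the real implementations live under ui_tests/pages/):

```python
# Minimal page-object sketch; the class name and locator are illustrative only,
# not the implementations shipped in ui_tests/pages/.
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver


class InventoryPageSketch:
    """Wraps inventory-tab interactions behind intent-revealing methods."""

    def __init__(self, driver: WebDriver):
        self.driver = driver

    def check_if_entry_is_on_list(self, host: str) -> bool:
        # Hypothetical XPath; the shipped page objects define their own locators.
        cells = self.driver.find_elements(By.XPATH, f"//td[text()='{host}']")
        return len(cells) > 0
```

Keeping locators inside the page objects is what lets the test bodies read as a sequence of user intents rather than raw Selenium calls.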
+ received_notice = p_inventory.get_edit_inventory_notice()
+ assert expected_notice == received_notice
+ p_inventory.close_edit_inventory_entry()
+
+ is_on_list = p_inventory.check_if_entry_is_on_list(host_ip)
+ assert is_on_list is False
+ is_on_list = p_inventory.check_if_entry_is_on_list(new_host_ip)
+ assert is_on_list is True
+ # delete
+ p_inventory.delete_entry_from_list(new_host_ip)
+ is_on_list = p_inventory.check_if_entry_is_on_list(new_host_ip)
+ assert is_on_list is False
+
+
+@pytest.mark.basic
+def test_add_group_into_inventory_entry():
+ """
+ Test that user is able to add a group as an inventory entry,
+ check newly added group entry is displayed on inventory list
+ remove the inventory entry and the group, and check they are not on the lists
+ """
+ # add group
+ group_name = f"test-group-inventory"
+ p_header.switch_to_groups()
+ p_groups.click_add_new_group_button()
+ p_groups.set_group_name(group_name)
+ p_groups.click_submit_button_for_add_group()
+ is_on_list = p_groups.check_if_groups_is_on_list(group_name)
+ assert is_on_list is True
+
+ community_string = "public"
+ p_header.switch_to_inventory()
+ is_on_list = p_inventory.check_if_entry_is_on_list(group_name)
+ assert is_on_list is False
+ p_inventory.click_add_new_device_group_button()
+ p_inventory.select_group_inventory_type()
+ p_inventory.set_host_or_group_name(group_name)
+ p_inventory.set_community_string(community_string)
+ p_inventory.click_submit_button_for_add_entry()
+ is_on_list = p_inventory.check_if_entry_is_on_list(group_name)
+ assert is_on_list is True
+
+ # delete
+ p_inventory.delete_entry_from_list(group_name)
+ is_on_list = p_inventory.check_if_entry_is_on_list(group_name)
+ assert is_on_list is False
+ p_header.switch_to_groups()
+ p_groups.delete_group_from_list(group_name)
+ is_on_list = p_groups.check_if_groups_is_on_list(group_name)
+ assert is_on_list is False
+
+
+@pytest.mark.basic
+def test_try_to_add_device_with_no_data_into_inventory():
+ """
+ Test that user is not able to add inventory entry with no data
+ set host, check that community string is still required
+ set community string
+ check inventory entry is added
+ remove inventory entry and check it is not on the list
+ """
+ host = "1.2.2.1"
+ community = "teststring"
+
+ p_header.switch_to_inventory()
+ p_inventory.click_add_new_device_group_button()
+ p_inventory.click_submit_button_for_add_entry()
+ error = p_inventory.get_host_missing_error()
+ assert error == "Address or host name is required"
+ error = p_inventory.get_community_string_missing_error()
+ assert (
+ error == "When using SNMP version 1 or 2c, community string must be specified"
+ )
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is False
+
+ p_inventory.set_host_or_group_name(host)
+ p_inventory.click_submit_button_for_add_entry()
+ error = p_inventory.get_host_missing_error()
+ assert error is None
+ error = p_inventory.get_community_string_missing_error()
+ assert (
+ error == "When using SNMP version 1 or 2c, community string must be specified"
+ )
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is False
+
+ p_inventory.set_community_string(community)
+ error = p_inventory.get_community_string_missing_error()
+ assert (
+ error == "When using SNMP version 1 or 2c, community string must be specified"
+ )
+ p_inventory.click_submit_button_for_add_entry()
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is True
+
+ # delete
+ p_inventory.delete_entry_from_list(host)
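The no-data test above steps the add-entry form through its validation states; the rule it exercises ties the community-string requirement to the selected SNMP version. A compact sketch of that rule, with a hypothetical helper name (the real validation lives in the SC4SNMP UI backend, not in ui_tests):

```python
# Sketch of the version/community validation asserted above; illustrative only,
# the helper name is an assumption rather than backend code.
def community_string_missing(snmp_version: str, community: str) -> bool:
    """SNMP v1 and v2c require a non-empty community string."""
    return snmp_version in ("1", "2c") and not community


assert community_string_missing("2c", "")            # error shown in the UI
assert not community_string_missing("2c", "public")  # accepted
assert not community_string_missing("3", "")         # v3 does not need one
```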
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is False
+
+
+@pytest.mark.basic
+def test_setting_min_walk_interval_value_in_inventory():
+ """
+ Test that user is able to set walk interval value
+ acceptable values are in the range 1800-604800
+ test min boundary
+ remove inventory entry and check it is not on the list
+ """
+ host = "3.3.3.3"
+ community = "public"
+
+ # min
+ p_header.switch_to_inventory()
+ p_inventory.click_add_new_device_group_button()
+
+ p_inventory.set_walk_interval("1799")
+ p_inventory.click_submit_button_for_add_entry()
+ error = p_inventory.get_walk_invalid_value_error()
+ assert error == "Walk Interval number must be an integer in range 1800-604800."
+ p_inventory.set_walk_interval("1800")
+
+ p_inventory.click_submit_button_for_add_entry()
+ error = p_inventory.get_walk_invalid_value_error()
+ assert error is None
+
+ # these two fields are set at the end to validate the behavior of setting walk interval
+ p_inventory.set_host_or_group_name(host)
+ p_inventory.set_community_string(community)
+ p_inventory.click_submit_button_for_add_entry()
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is True
+
+ # delete
+ p_inventory.delete_entry_from_list(host)
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is False
+
+
+@pytest.mark.basic
+def test_setting_max_walk_interval_value_in_inventory():
+ """
+ Test that user is able to set walk interval value
+ acceptable values are in the range 1800-604800
+ test max boundary
+ remove inventory entry and check it is not on the list
+ """
+ host = "4.4.4.4"
+ community = "pub_test"
+
+ # max
+ p_header.switch_to_inventory()
+ p_inventory.click_add_new_device_group_button()
+
+ p_inventory.set_walk_interval("604801")
+ p_inventory.click_submit_button_for_add_entry()
+ error = p_inventory.get_walk_invalid_value_error()
+ assert error == "Walk Interval number must be an integer in range 1800-604800."
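The min and max boundary tests both assert the same message whenever the value falls outside 1800-604800. A minimal sketch of the range check they exercise, assuming a standalone helper (not the backend's actual code):

```python
# Sketch of the walk-interval validation; the helper name is an assumption.
WALK_INTERVAL_MIN = 1800
WALK_INTERVAL_MAX = 604800


def is_valid_walk_interval(value: str) -> bool:
    """True when value parses to an integer within 1800-604800 inclusive."""
    try:
        interval = int(value)
    except ValueError:
        return False
    return WALK_INTERVAL_MIN <= interval <= WALK_INTERVAL_MAX


assert is_valid_walk_interval("1800")        # min boundary accepted
assert is_valid_walk_interval("604800")      # max boundary accepted
assert not is_valid_walk_interval("1799")    # below range rejected
assert not is_valid_walk_interval("604801")  # above range rejected
```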
+ p_inventory.set_walk_interval("604800")
+ p_inventory.click_submit_button_for_add_entry()
+ error = p_inventory.get_walk_invalid_value_error()
+ assert error is None
+
+ # these two fields are set at the end to validate the behavior of setting walk interval
+ p_inventory.set_host_or_group_name(host)
+ p_inventory.set_community_string(community)
+ p_inventory.click_submit_button_for_add_entry()
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is True
+
+ # delete
+ p_inventory.delete_entry_from_list(host)
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is False
+
+
+@pytest.mark.basic
+def test_try_to_add_device_with_all_available_fields_into_inventory():
+ """
+ Test that user is able to add an inventory entry with all available fields
+ then remove inventory entry and check it is not on the list
+ """
+ host = "1.2.2.1"
+ port = "1234"
+ snmp_version = "3"
+ community = "teststring"
+ secret = "test_secret"
+ security_engine = "8000000903000AAAEF536715"
+ walk_interval = "3600"
+ profile_1 = "profile_1"
+ profile_2 = "profile_2"
+ profiles = [profile_1, profile_2]
+
+ p_header.switch_to_profiles()
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_1)
+ p_profiles.add_varBind("IP-MIB", "ifDescr")
+ p_profiles.click_submit_button()
+ time.sleep(1) # wait for profile to be shown on the list
+
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_2)
+ p_profiles.add_varBind("IP-MIB", "ifError")
+ p_profiles.click_submit_button()
+ time.sleep(1) # wait for profile to be shown on the list
+
+ p_header.switch_to_inventory()
+ p_inventory.click_add_new_device_group_button()
+ p_inventory.set_host_or_group_name(host)
+ p_inventory.edit_device_port(port)
+ p_inventory.select_snmp_version(snmp_version)
+ p_inventory.set_community_string(community)
+ p_inventory.set_secret(secret)
+ p_inventory.set_security_engine(security_engine)
+ p_inventory.set_walk_interval(walk_interval)
+ p_inventory.select_profiles(profiles)
+ p_inventory.set_smart_profiles("true")
+ p_inventory.click_submit_button_for_add_entry()
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is True
+
+ # delete
+ p_inventory.delete_entry_from_list(host)
+ is_on_list = p_inventory.check_if_entry_is_on_list(host)
+ assert is_on_list is False
+ time.sleep(10)
+
+ p_header.switch_to_profiles()
+ p_profiles.delete_profile_from_list(profile_1)
+ p_profiles.delete_profile_from_list(profile_2)
+
+
+@pytest.mark.basic
+def test_edit_inventory_entry_with_all_available_fields():
+ """
+ Test that user is able to add an inventory entry with all available fields
+ check that user can edit all fields
+ check values of edited fields
+ then remove inventory entry and check it is not on the list
+ """
+ host = "99.20.10.10"
+ port = "1234"
+ snmp_version = "3"
+ community = "teststring"
+ secret = "test_secret"
+ security_engine = "8000000903000AAAEF536715"
+ walk_interval = "3600"
+ smart_profiles = "false"
+ profile_1 = "profile_1_edit"
+ profile_2 = "profile_2_edit"
+
+ p_header.switch_to_profiles()
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_1)
+ p_profiles.add_varBind("IP-MIB", "ifDescr")
+ p_profiles.click_submit_button()
+ time.sleep(1) # wait for profile to be shown on the list
+
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_2)
+ p_profiles.add_varBind("IP-MIB", "ifError")
+ p_profiles.click_submit_button()
+ time.sleep(1) # wait for profile to be shown 
on the list + + p_header.switch_to_inventory() + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host) + p_inventory.edit_device_port(port) + p_inventory.select_snmp_version(snmp_version) + p_inventory.set_community_string(community) + p_inventory.set_secret(secret) + p_inventory.set_security_engine(security_engine) + p_inventory.set_walk_interval(walk_interval) + p_inventory.select_profiles([profile_1]) + p_inventory.set_smart_profiles(smart_profiles) + p_inventory.click_submit_button_for_add_entry() + is_on_list = p_inventory.check_if_entry_is_on_list(host) + assert is_on_list is True + + # edit + new_host = "10.20.30.40" + new_port = "55555" + new_snmp_version = "2c" + new_community = "test_new_community" + new_secret = "changed_secret" + new_security_engine = "800000090BC0DD111101" + new_walk_interval = "10000" + new_smart_profiles = "true" + + p_inventory.click_edit_inventory_entry(host) + p_inventory.set_host_or_group_name(new_host, True) + p_inventory.edit_device_port(new_port) + p_inventory.select_snmp_version(new_snmp_version) + p_inventory.set_community_string(new_community, True) + p_inventory.set_secret(new_secret, True) + p_inventory.set_security_engine(new_security_engine, True) + p_inventory.set_walk_interval(new_walk_interval) + p_inventory.select_profiles([profile_2], True) + p_inventory.set_smart_profiles(new_smart_profiles) + p_inventory.click_submit_button_for_add_entry() + + expected_notice = "Address or port was edited which resulted in deleting the old device and creating the new one at the end of the list." + received_notice = p_inventory.get_edit_inventory_notice() + assert expected_notice == received_notice + p_inventory.close_edit_inventory_entry() + + # check + is_on_list = p_inventory.check_if_entry_is_on_list(host) + assert is_on_list is False + is_on_list = p_inventory.check_if_entry_is_on_list(new_host) + assert is_on_list is True + received_port = p_inventory.get_port_for_entry(new_host) + assert new_port == received_port + received_snmp_version = p_inventory.get_snmp_version_for_entry(new_host) + assert new_snmp_version == received_snmp_version + received_community_string = p_inventory.get_community_string_for_entry(new_host) + assert new_community == received_community_string + received_secret = p_inventory.get_secret_for_entry(new_host) + assert new_secret == received_secret + received_sec_engine = p_inventory.get_security_engine_for_entry(new_host) + assert new_security_engine == received_sec_engine + received_walk_interval = p_inventory.get_walk_interval_for_entry(new_host) + assert new_walk_interval == received_walk_interval + received_profiles = p_inventory.get_profiles_for_entry(new_host) + assert profile_2 == received_profiles + received_smart_profiles = p_inventory.get_smart_profiles_for_entry(new_host) + assert new_smart_profiles == received_smart_profiles + + # delete + p_inventory.delete_entry_from_list(new_host) + is_on_list = p_inventory.check_if_entry_is_on_list(new_host) + assert is_on_list is False + + p_header.switch_to_profiles() + p_profiles.delete_profile_from_list(profile_1) + p_profiles.delete_profile_from_list(profile_2) diff --git a/ui_tests/tests/test_profiles_basic.py b/ui_tests/tests/test_profiles_basic.py new file mode 100644 index 000000000..22eafce6a --- /dev/null +++ b/ui_tests/tests/test_profiles_basic.py @@ -0,0 +1,199 @@ +import time + +import pytest +from logger.logger import Logger +from pages.groups_page import GroupsPage +from pages.header_page import HeaderPage +from 
pages.inventory_page import InventoryPage +from pages.profiles_page import ProfilesPage +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory().get_driver() +p_header = HeaderPage() +p_profiles = ProfilesPage() +p_groups = GroupsPage() +p_inventory = InventoryPage() + + +@pytest.mark.basic +@pytest.mark.parametrize("profile_type", ["standard", "base"]) +def test_add_profile(profile_type): + """ + Test that user is able to add profile, + check newly added profile is displayed on profiles list + remove profile and check it is not on the list + """ + profile_name = f"test-profile-{profile_type}" + p_header.switch_to_profiles() + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is False + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name) + p_profiles.set_frequency(100) + p_profiles.select_profile_type(profile_type) + p_profiles.add_varBind("IP-MIB", "ifDescr", 1) + p_profiles.click_submit_button() + time.sleep(5) # wait for profile to be shown on the list + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is True + profile_type_of_profile = p_profiles.get_profile_type_for_profile_entry( + profile_name + ) + assert profile_type == profile_type_of_profile + p_profiles.delete_profile_from_list(profile_name) + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is False + + +@pytest.mark.basic +def test_add_smart_profile(): + """ + Test that user is able to add smart profile, + check newly added profile is displayed on profiles list + remove profile and check it is not on the list + """ + profile_type = "smart" + profile_name = f"test-profile-{profile_type}" + p_header.switch_to_profiles() + time.sleep(5) + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is False + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name) + p_profiles.set_frequency(3600) + p_profiles.select_profile_type(profile_type) + p_profiles.set_smart_profile_field("SNMPv2-MIB.sysDescr") + p_profiles.add_smart_profile_pattern(".*linux.*") + p_profiles.add_varBind("IP-MIB", "ifDescr", 1) + p_profiles.click_submit_button() + time.sleep(5) # wait for profile to be shown on the list + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is True + profile_type_of_profile = p_profiles.get_profile_type_for_profile_entry( + profile_name + ) + assert profile_type == profile_type_of_profile + p_profiles.delete_profile_from_list(profile_name) + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is False + + +@pytest.mark.basic +def test_add_walk_profile(): + """ + Test that user is able to add walk profile, + check newly added profile is displayed on profiles list + remove profile and check it is not on the list + """ + profile_type = "walk" + profile_name = f"test-profile-{profile_type}" + p_header.switch_to_profiles() + time.sleep(5) + exist = p_profiles.check_if_profile_is_configured(profile_name) + assert exist is False + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name) + p_profiles.select_profile_type(profile_type) + visible = p_profiles.check_if_frequency_setting_field_is_visible() + assert visible is False + p_profiles.add_varBind("IP-MIB", "ifDescr", 1) + p_profiles.click_submit_button() + time.sleep(5) # wait for profile to be shown on the list + exist = p_profiles.check_if_profile_is_configured(profile_name) 
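The profile suites in test_profiles_basic.py cover five profile types, and each type needs different extra inputs in the add-profile dialog. A purely illustrative summary of those extras, with values taken from the tests themselves (this dict is not part of ui_tests):

```python
# Per-type dialog inputs used by the profile tests; illustrative summary only.
PROFILE_TYPE_EXTRAS = {
    "standard": {"frequency": 100},
    "base": {"frequency": 100},
    "smart": {
        "frequency": 3600,
        "field": "SNMPv2-MIB.sysDescr",
        "patterns": [".*linux.*"],
    },
    "walk": {},  # the frequency field is hidden for walk profiles
    "conditional": {"conditions": [("IF-MIB.ifAdminStatus", "equals", "up")]},
}
```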
+ assert exist is True
+ profile_type_of_profile = p_profiles.get_profile_type_for_profile_entry(
+ profile_name
+ )
+ assert profile_type == profile_type_of_profile
+ p_profiles.delete_profile_from_list(profile_name)
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is False
+
+
+@pytest.mark.basic
+def test_add_conditional_profile():
+ """
+ Test that user is able to add conditional profile,
+ check newly added profile is displayed on profiles list
+ remove profile and check it is not on the list
+ """
+ profile_type = "conditional"
+ profile_name = f"test-profile-{profile_type}"
+ p_header.switch_to_profiles()
+ time.sleep(5)
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is False
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_name)
+ p_profiles.select_profile_type(profile_type)
+ p_profiles.add_condition("IF-MIB.ifAdminStatus", "equals", "up")
+ p_profiles.add_varBind("IP-MIB", "ifDescr", 1)
+ p_profiles.click_submit_button()
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is True
+ profile_type_of_profile = p_profiles.get_profile_type_for_profile_entry(
+ profile_name
+ )
+ assert profile_type == profile_type_of_profile
+ p_profiles.delete_profile_from_list(profile_name)
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is False
+
+
+@pytest.mark.basic
+def test_edit_profile():
+ """
+ Test that user is able to edit profile,
+ editing profile name works
+ editing frequency works
+ editing varBinds works
+ """
+ profile_type = "standard"
+ profile_name = f"test-profile-{profile_type}"
+ p_header.switch_to_profiles()
+ time.sleep(5)
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is False
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_name)
+ p_profiles.set_frequency(100)
+ p_profiles.select_profile_type(profile_type)
+ p_profiles.add_varBind("IP-MIB", "ifDescr", 1)
+ p_profiles.click_submit_button()
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is True
+ # edit profile
+ new_freq = 45
+ new_profile_name = "new_name"
+ new_varBind = {"mcomponent": "IP-MIBv2", "mobject": "ifDescr_v2", "mindex": 2}
+
+ p_profiles.click_edit_profile(profile_name)
+ p_profiles.set_profile_name(new_profile_name)
+ p_profiles.set_frequency(new_freq)
+ p_profiles.edit_varBind(
+ new_varBind["mcomponent"], new_varBind["mobject"], new_varBind["mindex"]
+ )
+ p_profiles.click_submit_button()
+
+ # verify notice : If {pname} was used in some records in the inventory, it was updated to {new_pname}
+ received = p_profiles.get_submit_edited_profile_text()
+ expected = f"If {profile_name} was used in some records in the inventory, it was updated to {new_profile_name}"
+ assert expected == received
+ p_profiles.close_edited_profile_popup()
+ # check edited fields
+ # name
+ exist = p_profiles.check_if_profile_is_configured(profile_name)
+ assert exist is False
+ exist = p_profiles.check_if_profile_is_configured(new_profile_name)
+ assert exist is True
+ # freq
+ received_freq = p_profiles.get_profile_freq(new_profile_name)
+ assert new_freq == int(received_freq)
+ # varBinds - this verification is very case specific as the profile row and the expanded row do not share the same Web element container
+ p_profiles.expand_profile(new_profile_name)
+ varBind = p_profiles.get_profile_varbind(new_profile_name)
+ assert new_varBind == varBind
+ 
p_profiles.delete_profile_from_list(new_profile_name)
+ exist = p_profiles.check_if_profile_is_configured(new_profile_name)
+ assert exist is False
diff --git a/ui_tests/tests/test_save_update_configuration.py b/ui_tests/tests/test_save_update_configuration.py
new file mode 100644
index 000000000..ac8a2eda2
--- /dev/null
+++ b/ui_tests/tests/test_save_update_configuration.py
@@ -0,0 +1,259 @@
+import time
+
+import pytest
+from logger.logger import Logger
+from pages.groups_page import GroupsPage
+from pages.header_page import HeaderPage
+from pages.inventory_page import InventoryPage
+from pages.profiles_page import ProfilesPage
+from pages.yaml_values_reader import YamlValuesReader
+from webdriver.webriver_factory import WebDriverFactory
+
+logger = Logger().get_logger()
+driver = WebDriverFactory().get_driver()
+p_header = HeaderPage()
+p_profiles = ProfilesPage()
+p_groups = GroupsPage()
+p_inventory = InventoryPage()
+values_reader = YamlValuesReader()
+
+
+@pytest.fixture(autouse=True, scope="module")
+def setup_and_teardown():
+ # clear profiles
+ p_header.switch_to_profiles()
+ p_profiles.clear_profiles()
+
+ # clear groups
+ p_header.switch_to_groups()
+ p_groups.clear_groups()
+
+ # clear inventory
+ # wait for the timer to expire so the inventory has no temporary records with the delete flag set to true, as these would cause test failures
+ p_header.switch_to_inventory()
+ p_inventory.clear_inventory()
+ p_header.apply_changes()
+ time_to_upgrade = p_header.get_time_to_upgrade()
+ p_header.close_configuration_applied_notification_popup()
+ time.sleep(time_to_upgrade + 30) # wait for upgrade + walk time + polling
+ yield
+ # teardown here if needed
+
+
+@pytest.mark.extended
+def test_check_that_profile_config_is_stored_upon_applying_configuration():
+ """
+ Configure profile
+ check that profile is stored in yaml file
+ edit profile, change freq
+ add new profile
+ click apply changes once again
+ changes stored even when the timer has not yet expired
+ """
+ profile_name_1 = "store_profile"
+ profile_freq_1 = 10
+ profile_freq_1_new = 20
+ profile_name_2 = "profile_next"
+ profile_freq_2 = 77
+
+ p_header.switch_to_profiles()
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_name_1)
+ p_profiles.set_frequency(profile_freq_1)
+ p_profiles.add_varBind("IF-MIB", "ifInErrors", "1")
+ p_profiles.click_submit_button()
+ time.sleep(1) # wait for profile to be shown on the list
+
+ # check no profile
+ profiles = values_reader.get_scheduler_profiles()
+ assert "{}\n" == profiles # profiles should be empty
+
+ # apply changes
+ p_header.apply_changes()
+ p_header.close_configuration_applied_notification_popup()
+
+ # check that configuration is stored
+ expected_profile_output = f"{profile_name_1}:\n frequency: {profile_freq_1}\n varBinds:\n - ['IF-MIB', 'ifInErrors', '1']\n"
+ profiles = values_reader.get_scheduler_profiles()
+ assert expected_profile_output == profiles
+
+ # edit profile
+ p_profiles.click_edit_profile(profile_name_1)
+ p_profiles.set_frequency(profile_freq_1_new)
+ p_profiles.click_submit_button()
+ # add another profile
+ p_profiles.click_add_profile_button()
+ p_profiles.set_profile_name(profile_name_2)
+ p_profiles.set_frequency(profile_freq_2)
+ p_profiles.add_varBind("SNMPv2-MIB", "sysDescr")
+ p_profiles.click_submit_button()
+ time.sleep(1) # wait for profile to be shown on the list
+
+ # check that configuration is not changed because it has not been applied
+ profiles = values_reader.get_scheduler_profiles()
+ assert 
expected_profile_output == profiles
+
+ # apply changes
+ p_header.apply_changes()
+ p_header.close_configuration_applied_notification_popup()
+
+ # check that configuration is stored
+ expected_profile_output_2 = f"{profile_name_1}:\n frequency: {profile_freq_1_new}\n varBinds:\n - ['IF-MIB', 'ifInErrors', '1']\n{profile_name_2}:\n frequency: {profile_freq_2}\n varBinds:\n - ['SNMPv2-MIB', 'sysDescr']\n"
+ profiles = values_reader.get_scheduler_profiles()
+ assert expected_profile_output_2 == profiles
+
+ # finalize - clear
+ p_profiles.delete_profile_from_list(profile_name_1)
+ p_profiles.delete_profile_from_list(profile_name_2)
+
+ # apply changes
+ p_header.apply_changes()
+ p_header.close_configuration_applied_notification_popup()
+
+ # check no profile
+ profiles = values_reader.get_scheduler_profiles()
+ assert "{}\n" == profiles # profiles should be empty
+
+
+@pytest.mark.extended
+def test_check_that_group_config_is_stored_upon_applying_configuration():
+ """
+ Configure group
+ check that group is stored in yaml file
+ add device to group
+ click apply changes once again
+ changes stored even when the timer has not yet expired
+ """
+
+ group_name = f"test-group-store"
+ device_ip = "11.22.33.44"
+ port = 1234
+ snmp_version = "2c"
+ community_string = "public"
+ secret = "secret"
+ security_engine = "8000000903000AAAEF536715"
+ p_header.switch_to_groups()
+ p_groups.click_add_new_group_button()
+ p_groups.set_group_name(group_name)
+ p_groups.click_submit_button_for_add_group()
+
+ # check no group
+ groups = values_reader.get_scheduler_groups()
+ assert "{}\n" == groups # groups should be empty
+
+ # apply changes
+ p_header.apply_changes()
+ p_header.close_configuration_applied_notification_popup()
+
+ # check that configuration is stored
+ expected_group_output = f"{group_name}: []\n"
+ groups = values_reader.get_scheduler_groups()
+ assert expected_group_output == groups
+
+ # edit group
+ p_groups.click_add_device_to_group(group_name)
+ p_groups.set_device_ip(device_ip)
+ p_groups.set_device_port(port)
+ p_groups.set_snmp_version(snmp_version)
+ p_groups.set_community_string(community_string)
+ p_groups.set_secret(secret)
+ p_groups.set_security_engine(security_engine)
+ p_groups.click_submit_button_for_add_device()
+
+ # check that configuration is not changed because it has not been applied
+ groups = values_reader.get_scheduler_groups()
+ assert expected_group_output == groups
+
+ # apply changes
+ p_header.apply_changes()
+ p_header.close_configuration_applied_notification_popup()
+
+ # check that configuration is stored
+ expected_groups_output_2 = f"{group_name}:\n- address: {device_ip}\n port: {port}\n version: '{snmp_version}'\n community: '{community_string}'\n secret: '{secret}'\n security_engine: {security_engine}\n"
+ groups = values_reader.get_scheduler_groups()
+ assert expected_groups_output_2 == groups
+
+ # finalize - clear
+ p_groups.delete_group_from_list(group_name)
+
+ # apply changes
+ p_header.apply_changes()
+ p_header.close_configuration_applied_notification_popup()
+
+ # check no group
+ groups = values_reader.get_scheduler_groups()
+ assert "{}\n" == groups # groups should be empty
+
+
+@pytest.mark.extended
+def test_check_that_inventory_config_is_stored_upon_applying_configuration():
+ """
+ add inventory entry
+ check that inventory is stored in yaml file
+ remove inventory
+ click apply changes once again
+ changes stored even when the timer has not yet expired
+ """
+ inventory_first_row = 
"address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete" + host = "88.77.66.55" + port = "612" + snmp_version = "2c" + community = "green" + walk_interval = "3600" + smart_profiles = "false" + profile = "test_profile_1" + + p_header.switch_to_profiles() + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile) + p_profiles.add_varBind("IP-MIB", "ifDescr") + p_profiles.click_submit_button() + time.sleep(1) # wait for profile to be shown on the list + + p_header.switch_to_inventory() + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host) + p_inventory.edit_device_port(port) + p_inventory.select_snmp_version(snmp_version) + p_inventory.set_community_string(community) + p_inventory.set_walk_interval(walk_interval) + p_inventory.select_profiles([profile]) + p_inventory.set_smart_profiles(smart_profiles) + p_inventory.click_submit_button_for_add_entry() + + # check no inventory entry + inventory = values_reader.get_inventory_entries() + assert inventory_first_row == inventory # groups should be empty + + # apply changes + p_header.apply_changes() + p_header.close_configuration_applied_notification_popup() + + # check that configuration is stored + expected_inventory_output = f"{inventory_first_row}\n{host},{port},{snmp_version},{community},,,{walk_interval},{profile},f,f" + inventory = values_reader.get_inventory_entries() + assert expected_inventory_output == inventory + + # remove inventory + p_inventory.delete_entry_from_list(host) + + # check that configuration is not changed because it has been not applied + inventory = values_reader.get_inventory_entries() + assert expected_inventory_output == inventory + + # apply changes + p_header.apply_changes() + p_header.close_configuration_applied_notification_popup() + + # check that configuration is stored + expected_inventory_output_2 = f"{inventory_first_row}\n{host},{port},{snmp_version},{community},,,{walk_interval},{profile},f,t" + inventory = values_reader.get_inventory_entries() + assert expected_inventory_output_2 == inventory + + # finalize - clear + p_header.switch_to_profiles() + p_profiles.delete_profile_from_list(profile) + + # apply changes + p_header.apply_changes() + p_header.close_configuration_applied_notification_popup() diff --git a/ui_tests/tests/test_splunk_integration.py b/ui_tests/tests/test_splunk_integration.py new file mode 100644 index 000000000..2738ce1d7 --- /dev/null +++ b/ui_tests/tests/test_splunk_integration.py @@ -0,0 +1,505 @@ +import time + +import pytest +from config import config +from logger.logger import Logger +from pages.groups_page import GroupsPage +from pages.header_page import HeaderPage +from pages.inventory_page import InventoryPage +from pages.profiles_page import ProfilesPage +from splunk_search import check_events_from_splunk +from webdriver.webriver_factory import WebDriverFactory + +logger = Logger().get_logger() +driver = WebDriverFactory().get_driver() +p_header = HeaderPage() +p_profiles = ProfilesPage() +p_groups = GroupsPage() +p_inventory = InventoryPage() + + +@pytest.fixture(autouse=True, scope="module") +def setup_and_teardown(): + # clear profiles + p_header.switch_to_profiles() + p_profiles.clear_profiles() + + # clear groups + p_header.switch_to_groups() + p_groups.clear_groups() + + # clear inventory + p_header.switch_to_inventory() + p_inventory.clear_inventory() + p_header.apply_changes() + p_header.close_configuration_applied_notification_popup() + yield + # teardown here if 
needed + + +@pytest.mark.extended +def test_applying_changes_for_device_that_does_not_exists(setup): + """ + Configure device which does not exist + walk checking, and no polling + Test that after applying changes: + walk is scheduled + no polling is scheduled + no events received on netops index + """ + host = "1.2.3.4" + community = "public" + profile_name = "splunk_profile_1" + profile_freq = 10 + + p_header.switch_to_profiles() + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name) + p_profiles.set_frequency(profile_freq) + p_profiles.add_varBind("IF-MIB", "ifInErrors") + p_profiles.click_submit_button() + time.sleep(1) # wait for profile to be shown on the list + + p_header.switch_to_inventory() + p_inventory.click_add_new_device_group_button() + p_inventory.set_host_or_group_name(host) + p_inventory.select_profiles([profile_name]) + p_inventory.set_community_string(community) + p_inventory.click_submit_button_for_add_entry() + is_on_list = p_inventory.check_if_entry_is_on_list(host) + assert is_on_list is True + + # apply changes + p_header.apply_changes() + time_to_upgrade = p_header.get_time_to_upgrade() + p_header.close_configuration_applied_notification_popup() + time.sleep(time_to_upgrade + 60) # wait for upgrade + + # check data in Splunk + # check walk scheduled + search_query = ( + "index=" + config.LOGS_INDEX + ' "Sending due task sc4snmp;' + host + ';walk"' + ) + events = check_events_from_splunk( + start_time="-3m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 1 + + # check no profiles polling + search_query = ( + "index=" + config.LOGS_INDEX + ' "Sending due task sc4snmp;' + host + ';*;poll"' + ) + events = check_events_from_splunk( + start_time="-3m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 0 + + # check no events + search_query = "index=" + config.EVENT_INDEX + " *" + events = check_events_from_splunk( + start_time="-1m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 0 + + # delete + p_inventory.delete_entry_from_list(host) + is_on_list = p_inventory.check_if_entry_is_on_list(host) + assert is_on_list is False + # clear inventory + p_header.apply_changes() + p_header.close_configuration_applied_notification_popup() + + # clear profiles + p_header.switch_to_profiles() + p_profiles.delete_profile_from_list(profile_name) + + +@pytest.mark.extended +def test_setting_group_in_inventory(setup): + """ + Configure group with device, + configure smart profiles - disabled, + configure one standard profile + apply changes + check no polling on smart profiles + check standard profile is working + """ + group_name = "splk-interaction-grp" + host = setup["device_simulator"] + community = "public" + profile_name = "standard_profile_12s" + profile_freq = 12 + + p_header.switch_to_profiles() + p_profiles.click_add_profile_button() + p_profiles.set_profile_name(profile_name) + p_profiles.set_frequency(profile_freq) + p_profiles.add_varBind("IF-MIB", "ifDescr") + 
p_profiles.click_submit_button() + time.sleep(1) # wait for profile to be shown on the list + + p_header.switch_to_groups() + p_groups.click_add_new_group_button() + p_groups.set_group_name(group_name) + p_groups.click_submit_button_for_add_group() + p_groups.click_add_device_to_group(group_name) + p_groups.set_device_ip(host) + p_groups.click_submit_button_for_add_device() + + p_header.switch_to_inventory() + p_inventory.click_add_new_device_group_button() + p_inventory.select_group_inventory_type() + p_inventory.set_host_or_group_name(group_name) + p_inventory.select_profiles([profile_name]) + p_inventory.set_community_string(community) + p_inventory.click_submit_button_for_add_entry() + is_on_list = p_inventory.check_if_entry_is_on_list(group_name) + assert is_on_list is True + + # apply changes + p_header.apply_changes() + time_to_upgrade = p_header.get_time_to_upgrade() + p_header.close_configuration_applied_notification_popup() + time.sleep(time_to_upgrade + 60) # wait for upgrade + walk time + polling + + # check data in Splunk + # check walk scheduled + search_query = ( + "index=" + config.LOGS_INDEX + ' "Sending due task sc4snmp;' + host + ';walk"' + ) + events = check_events_from_splunk( + start_time="-2m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 1 + + # check profiles polling + search_query = ( + "index=" + + config.LOGS_INDEX + + ' "Sending due task sc4snmp;' + + host + + ';12;poll"' + ) + events = check_events_from_splunk( + start_time="-1m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) > 1 + + # checking smart profiles not working + search_query = ( + "index=" + + config.LOGS_INDEX + + ' "Sending due task sc4snmp;' + + host + + ';600;poll"' + ) + events = check_events_from_splunk( + start_time="-2m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 0 + + # checking polling for mandatory profile - 1200 - this should be visible even when smart profiles are disabled + search_query = ( + "index=" + + config.LOGS_INDEX + + ' "Sending due task sc4snmp;' + + host + + ';1200;poll"' + ) + events = check_events_from_splunk( + start_time="-2m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 1 + + # check events received + search_query = "index=" + config.EVENT_INDEX + " *" + events = check_events_from_splunk( + start_time="-2m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) > 1 + + # delete + p_inventory.delete_entry_from_list(group_name) + is_on_list = p_inventory.check_if_entry_is_on_list(group_name) + assert is_on_list is False + # clear inventory + p_header.apply_changes() + 
+    p_header.switch_to_profiles()
+    p_profiles.click_add_profile_button()
+    p_profiles.set_profile_name(base_profile_name)
+    p_profiles.select_profile_type("base")
+    p_profiles.set_frequency(base_profile_freq)
+    p_profiles.add_varBind("IF-MIB", "ifDescr")
+    p_profiles.click_submit_button()
+
+    p_header.switch_to_inventory()
+    p_inventory.click_add_new_device_group_button()
+    p_inventory.set_host_or_group_name(host)
+    p_inventory.select_profiles([profile_1_name, profile_2_name])
+    p_inventory.set_community_string(community)
+    p_inventory.set_smart_profiles("true")
+    p_inventory.click_submit_button_for_add_entry()
+    is_on_list = p_inventory.check_if_entry_is_on_list(host)
+    assert is_on_list is True
+
+    # apply changes
+    p_header.apply_changes()
+    time_to_upgrade = p_header.get_time_to_upgrade()
+    p_header.close_configuration_applied_notification_popup()
+    time.sleep(time_to_upgrade + 30)  # wait for upgrade + walk + polling
+
+    # check data in Splunk
+    # check walk scheduled
+    search_query = f'index={config.LOGS_INDEX} "Sending due task sc4snmp;{host};walk"'
+    events = check_events_from_splunk(
+        start_time="-1m@m",
+        url=setup["splunkd_url"],
+        user=setup["splunk_user"],
+        query=[f"search {search_query}"],
+        password=setup["splunk_password"],
+    )
+    logger.info("Splunk received %s events in the search window", len(events))
+    assert len(events) == 1
+
+    # check profile polling
+    time.sleep(60)  # wait to make sure the profiles are being polled
+    search_query = f'index={config.LOGS_INDEX} "Sending due task sc4snmp;{host};10;poll"'
+    events = check_events_from_splunk(
+        start_time="-1m@m",
+        url=setup["splunkd_url"],
+        user=setup["splunk_user"],
+        query=[f"search {search_query}"],
+        password=setup["splunk_password"],
+    )
+    logger.info("Splunk received %s events in the search window", len(events))
+    assert len(events) > 1
+
+    search_query = f'index={config.LOGS_INDEX} "Sending due task sc4snmp;{host};7;poll"'
+    events = check_events_from_splunk(
+        start_time="-1m@m",
+        url=setup["splunkd_url"],
+        user=setup["splunk_user"],
+        query=[f"search {search_query}"],
+        password=setup["splunk_password"],
+    )
+    logger.info("Splunk received %s events in the search window", len(events))
+    assert len(events) > 1
+
+    # check smart/base profile polling
+    search_query = f'index={config.LOGS_INDEX} "Sending due task sc4snmp;{host};5;poll"'
+    events = check_events_from_splunk(
+        start_time="-1m@m",
+        url=setup["splunkd_url"],
+        user=setup["splunk_user"],
+        query=[f"search {search_query}"],
+        password=setup["splunk_password"],
+    )
+    logger.info("Splunk received %s events in the search window", len(events))
+    assert len(events) > 1
+
+    # check events received
+    search_query = f"index={config.EVENT_INDEX} *"
+    events = check_events_from_splunk(
+        start_time="-1m@m",
+        url=setup["splunkd_url"],
+        user=setup["splunk_user"],
+        query=[f"search {search_query}"],
+        password=setup["splunk_password"],
+    )
+    logger.info("Splunk received %s events in the search window", len(events))
+    assert len(events) > 1
+
+    # remove one standard profile and disable smart profiles
+    p_inventory.click_edit_inventory_entry(host)
+    p_inventory.select_profiles([profile_2_name], True)
+    p_inventory.set_smart_profiles("false")
+    p_inventory.click_submit_button_for_add_entry()
+    # apply changes
+    p_header.apply_changes()
+    time_to_upgrade = p_header.get_time_to_upgrade()
+    p_header.close_configuration_applied_notification_popup()
+    time.sleep(time_to_upgrade + 90)  # wait for upgrade + walk + polling
+
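+    # Editing the inventory entry reschedules the device, so exactly one
+    # fresh walk is expected here even though the host itself did not change.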
password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) > 1 + + # checking smart/base profiles + search_query = ( + "index=" + config.LOGS_INDEX + ' "Sending due task sc4snmp;' + host + ';5;poll"' + ) + events = check_events_from_splunk( + start_time="-1m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) > 1 + + # check events received + search_query = "index=" + config.EVENT_INDEX + " *" + events = check_events_from_splunk( + start_time="-1m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) > 1 + + # remove profiles + p_inventory.click_edit_inventory_entry(host) + p_inventory.select_profiles([profile_2_name], True) + p_inventory.set_smart_profiles("false") + # p_inventory.set_community_string(new_community, True) + p_inventory.click_submit_button_for_add_entry() + # apply changes + p_header.apply_changes() + time_to_upgrade = p_header.get_time_to_upgrade() + p_header.close_configuration_applied_notification_popup() + time.sleep(time_to_upgrade + 90) # wait for upgrade + walk time + polling + + # check walk scheduled + search_query = ( + "index=" + config.LOGS_INDEX + ' "Sending due task sc4snmp;' + host + ';walk"' + ) + events = check_events_from_splunk( + start_time="-2m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 1 + + # check profiles polling + time.sleep(60) # wait to be sure that disabled profile is no more polled + search_query = ( + "index=" + + config.LOGS_INDEX + + ' "Sending due task sc4snmp;' + + host + + ';10;poll"' + ) + events = check_events_from_splunk( + start_time="-1m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 0 + + search_query = ( + "index=" + config.LOGS_INDEX + ' "Sending due task sc4snmp;' + host + ';7;poll"' + ) + events = check_events_from_splunk( + start_time="-1m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) > 1 + + # checking smart/base profiles - no polling + search_query = ( + "index=" + config.LOGS_INDEX + ' "Sending due task sc4snmp;' + host + ';5;poll"' + ) + events = check_events_from_splunk( + start_time="-1m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) == 0 + + # check events received + search_query = "index=" + config.EVENT_INDEX + " *" + events = check_events_from_splunk( + start_time="-1m@m", + url=setup["splunkd_url"], + user=setup["splunk_user"], + query=["search {}".format(search_query)], + 
password=setup["splunk_password"], + ) + logger.info("Splunk received %s events in the last minute", len(events)) + assert len(events) > 1 + + # delete + p_inventory.delete_entry_from_list(host) + is_on_list = p_inventory.check_if_entry_is_on_list(host) + assert is_on_list is False + # clear inventory + p_header.apply_changes() + p_header.close_configuration_applied_notification_popup() + + # clear profiles + p_header.switch_to_profiles() + p_profiles.delete_profile_from_list(profile_1_name) + p_profiles.delete_profile_from_list(profile_2_name) + p_profiles.delete_profile_from_list(base_profile_name) diff --git a/ui_tests/webdriver/webriver_factory.py b/ui_tests/webdriver/webriver_factory.py new file mode 100644 index 000000000..638c7be69 --- /dev/null +++ b/ui_tests/webdriver/webriver_factory.py @@ -0,0 +1,52 @@ +import time + +import config.config as config +import pytest +from logger.logger import Logger +from selenium import webdriver +from selenium.webdriver.chrome.options import Options +from selenium.webdriver.chrome.service import Service as ChromeService +from webdriver_manager.chrome import ChromeDriverManager + +logger = Logger().get_logger() + + +class WebDriverFactory: + _driver = None + + @classmethod + def get_driver(cls): + if cls._driver is None: + logger.info(f"Execution type: {config.EXECUTION_TYPE}") + logger.info(f"UI URL: {config.UI_URL}") + chrome_options = Options() + if config.EXECUTION_TYPE != "local": + logger.info(f"Enable headless execution") + chrome_options.add_argument("--headless") + chrome_options.add_argument("--disable-gpu") + chrome_options.add_argument("--window-size=1920x1080") + # web_driver = webdriver.Chrome(options=chrome_options) + + cls._driver = webdriver.Chrome( + service=ChromeService(ChromeDriverManager().install()), + options=chrome_options, + ) + + cls._driver.maximize_window() + cls._driver.implicitly_wait(config.IMPLICIT_WAIT_TIMER) + cls._driver.get(config.UI_URL) + return cls._driver + + @classmethod + def close_driver(cls): + if cls._driver is not None: + logger.info("Killing webdriver and closing browser") + cls._driver.quit() + cls._driver = None + else: + logger.warn("Unable to kill driver it does not exist") + + @classmethod + def restart_driver(cls): + cls.close_driver() + return cls.get_driver()