diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 000000000..6e85c81d8 --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,36 @@ +# .github/release.yml + +changelog: + exclude: + labels: + - release-note/ignore-for-release + authors: + - octocat + categories: + - title: Exciting New Features πŸŽ‰ + labels: + - release-note/new-feature + - title: Enhancement πŸš€ + labels: + - release-note/enhancement + - title: Component updates ⬆️ + labels: + - release-note/update + - title: Docs update πŸ—„οΈ + labels: + - release-note/docs + - title: Community update πŸ§‘πŸ»β€πŸ€β€πŸ§‘πŸΎ + labels: + - release-note/community + + - title: Breaking Changes πŸ›  + labels: + - release-note/breaking-change + + - title: Deprecations ❌ + labels: + - release-note/deprecation + + - title: Other Changes + labels: + - "*" diff --git a/.github/workflows/housekeeping-stale-issues-prs.yaml b/.github/workflows/housekeeping-stale-issues-prs.yaml new file mode 100644 index 000000000..57a2b0d9f --- /dev/null +++ b/.github/workflows/housekeeping-stale-issues-prs.yaml @@ -0,0 +1,26 @@ +name: Housekeeping - Close stale issues and PRs +on: + schedule: + - cron: '0 9 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9.0.0 + with: + stale-issue-message: 'This issue is being marked stale due to a period of inactivity. If this issue is still relevant, please comment or remove the stale label. Otherwise, this issue will close in 30 days.' + stale-pr-message: 'This PR is being marked stale due to a period of inactivity. If this PR is still relevant, please comment or remove the stale label. Otherwise, this PR will close in 30 days.' + close-issue-message: 'This issue was closed because it has been stalled for 30 days with no activity. If this issue is still relevant, please open a new issue.' + close-pr-message: 'This PR was closed because it has been stalled for 30 days with no activity. If this PR is still relevant, please open a new PR against main.' + days-before-issue-stale: 60 + days-before-pr-stale: 60 + days-before-issue-close: 30 + days-before-pr-close: 30 + # Don't add stale label to PRs / issues with milestones "upcoming" attached. 
+ exempt-milestones: "upcoming" + # Don't add stale label to PRs / issues with these labels + exempt-issue-labels: 'never-stale, kind/requirement' + exempt-pr-labels: 'never-stale, kind/requirement' + # Make it 1000 to clean up a bit, then we can lower it + operations-per-run: 1000 diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index 7a43e82b2..c72b9431c 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -24,7 +24,7 @@ jobs: - name: Install Nginx ingress controller run: | - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.3/deploy/static/provider/kind/deploy.yaml + kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.0/deploy/static/provider/kind/deploy.yaml kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=120s - name: Set up Go 1.19 @@ -43,7 +43,6 @@ jobs: - name: Set /etc/hosts run: | sudo -- sh -c "echo '127.0.0.1 harbor.local' >> /etc/hosts" - sudo -- sh -c "echo '127.0.0.1 notary.harbor.local' >> /etc/hosts" - name: Run integration tests working-directory: ./test @@ -56,7 +55,7 @@ df -h free -m mkdir -p /tmp/harbor - for name in core jobservice registry registryctl trivy notaryserver notarysigner portal redis database; do \ + for name in core jobservice registry registryctl trivy portal redis database; do \ kubectl -n default logs -l "component=$name" --all-containers > /tmp/harbor/$name.log ; \ done diff --git a/.github/workflows/publish_release.yaml b/.github/workflows/publish_release.yaml new file mode 100644 index 000000000..604b5beaa --- /dev/null +++ b/.github/workflows/publish_release.yaml @@ -0,0 +1,47 @@ +name: Publish Release + +on: + push: + tags: + - 'v*.*.*' + +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + ref: ${{ github.ref }} + - name: Setup Helm + uses: azure/setup-helm@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + id: install + - name: Helm Package + run: echo "PACKAGE_PATH=$(helm package . | awk '{print $NF}')" >> $GITHUB_ENV + - name: Publish Helm Chart + run: | + helm registry login registry-1.docker.io -u ${{ secrets.DOCKER_HUB_USERNAME }} -p ${{ secrets.DOCKER_HUB_PASSWORD }} + helm push ${{ env.PACKAGE_PATH }} oci://registry-1.docker.io/${{ secrets.DOCKER_HUB_USERNAME }} + helm registry login ghcr.io -u ${{ github.actor }} -p ${{ secrets.GITHUB_TOKEN }} + helm push ${{ env.PACKAGE_PATH }} oci://ghcr.io/${{ github.actor }} + - name: Upload to chart repository + run: | + git config --global user.email "${{ github.event.repository.name }}@users.noreply.github.com" + git config --global user.name "${{ github.event.repository.name }} CI" + git fetch origin chart-repository + git checkout chart-repository + mkdir -p ../helm-temp + cd ../helm-temp + cp ${{ env.PACKAGE_PATH }} . + helm repo index --merge ../harbor-helm/index.yaml . + mv ./* ../harbor-helm + cd ../harbor-helm + git add . 
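+ # Commit the packaged chart and the regenerated index, then push them to the chart-repository branch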
+ git commit -s -m "feat: Upload Harbor ${{ github.ref }} to chart repository" + git push origin chart-repository + - name: Release + uses: softprops/action-gh-release@v1 + with: + files: ${{ env.PACKAGE_PATH }} diff --git a/Chart.yaml b/Chart.yaml index b7b601cba..59edf1fcd 100644 --- a/Chart.yaml +++ b/Chart.yaml @@ -8,7 +8,7 @@ keywords: - registry - harbor home: https://goharbor.io -icon: https://raw.githubusercontent.com/goharbor/website/master/static/img/logos/harbor-icon-color.png +icon: https://raw.githubusercontent.com/goharbor/website/main/static/img/logos/harbor-icon-color.png sources: - https://github.com/goharbor/harbor - https://github.com/goharbor/harbor-helm diff --git a/README.md b/README.md index 38c108952..79cd21351 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Helm Chart for Harbor -**Notes:** The master branch is in heavy development, please use the other stable versions instead. A highly available solution for Harbor based on chart can be find [here](docs/High%20Availability.md). And refer to the [guide](docs/Upgrade.md) to upgrade the existing deployment. +**Notes:** The master branch is in heavy development, please use the other stable versions instead. A highly available solution for Harbor based on chart can be found [here](docs/High%20Availability.md). And refer to the [guide](docs/Upgrade.md) to upgrade the existing deployment. This repository, including the issues, focuses on deploying Harbor chart via helm. For functionality issues or Harbor questions, please open issues on [goharbor/harbor](https://github.com/goharbor/harbor) @@ -38,7 +38,7 @@ The following items can be set via `--set` flag during installation or configure The external URL for Harbor core service is used to: 1. populate the docker/helm commands showed on portal -2. populate the token service URL returned to docker/notary client +2. populate the token service URL returned to docker client Format: `protocol://domain[:port]`. Usually: @@ -75,345 +75,346 @@ helm uninstall my-release The following table lists the configurable parameters of the Harbor chart and the default values. -| Parameter | Description | Default | -| -------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------- | -| **Expose** | | | -| `expose.type` | How to expose the service: `ingress`, `clusterIP`, `nodePort` or `loadBalancer`, other values will be ignored and the creation of service will be skipped. | `ingress` | -| `expose.tls.enabled` | Enable TLS or not. Delete the `ssl-redirect` annotations in `expose.ingress.annotations` when TLS is disabled and `expose.type` is `ingress`. Note: if the `expose.type` is `ingress` and TLS is disabled, the port must be included in the command when pulling/pushing images. Refer to https://github.com/goharbor/harbor/issues/5291 for details. | `true` | -| `expose.tls.certSource` | The source of the TLS certificate. 
Set as `auto`, `secret` or `none` and fill the information in the corresponding section: 1) auto: generate the TLS certificate automatically 2) secret: read the TLS certificate from the specified secret. The TLS certificate can be generated manually or by cert manager 3) none: configure no TLS certificate for the ingress. If the default TLS certificate is configured in the ingress controller, choose this option | `auto` | -| `expose.tls.auto.commonName` | The common name used to generate the certificate, it's necessary when the type isn't `ingress` | | -| `expose.tls.secret.secretName` | The name of secret which contains keys named: `tls.crt` - the certificate; `tls.key` - the private key | | -| `expose.tls.secret.notarySecretName` | The name of secret which contains keys named: `tls.crt` - the certificate; `tls.key` - the private key. Only needed when the `expose.type` is `ingress` | | -| `expose.ingress.hosts.core` | The host of Harbor core service in ingress rule | `core.harbor.domain` | -| `expose.ingress.hosts.notary` | The host of Harbor Notary service in ingress rule | `notary.harbor.domain` | -| `expose.ingress.controller` | The ingress controller type. Currently supports `default`, `gce`, `alb`, `f5-bigip` and `ncp` | `default` | -| `expose.ingress.kubeVersionOverride` | Allows the ability to override the kubernetes version used while templating the ingress | | -| `expose.ingress.annotations` | The annotations used commonly for ingresses | | -| `expose.ingress.harbor.annotations` | The annotations specific to harbor ingress | {} | -| `expose.ingress.harbor.labels` | The labels specific to harbor ingress | {} | -| `expose.ingress.notary.annotations` | The annotations specific to notary ingress | {} | -| `expose.ingress.notary.labels` | The labels specific to notary ingress | {} | -| `expose.clusterIP.name` | The name of ClusterIP service | `harbor` | -| `expose.clusterIP.annotations` | The annotations attached to the ClusterIP service | {} | -| `expose.clusterIP.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | -| `expose.clusterIP.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `443` | -| `expose.clusterIP.ports.notaryPort` | The service port Notary listens on. Only needed when `notary.enabled` is set to `true` | `4443` | -| `expose.nodePort.name` | The name of NodePort service | `harbor` | -| `expose.nodePort.ports.http.port` | The service port Harbor listens on when serving HTTP | `80` | -| `expose.nodePort.ports.http.nodePort` | The node port Harbor listens on when serving HTTP | `30002` | -| `expose.nodePort.ports.https.port` | The service port Harbor listens on when serving HTTPS | `443` | -| `expose.nodePort.ports.https.nodePort` | The node port Harbor listens on when serving HTTPS | `30003` | -| `expose.nodePort.ports.notary.port` | The service port Notary listens on. Only needed when `notary.enabled` is set to `true` | `4443` | -| `expose.nodePort.ports.notary.nodePort` | The node port Notary listens on. Only needed when `notary.enabled` is set to `true` | `30004` | -| `expose.loadBalancer.name` | The name of service | `harbor` | -| `expose.loadBalancer.IP` | The IP of the loadBalancer. 
It only works when loadBalancer supports assigning IP | `""` | -| `expose.loadBalancer.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | -| `expose.loadBalancer.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `30002` | -| `expose.loadBalancer.ports.notaryPort` | The service port Notary listens on. Only needed when `notary.enabled` is set to `true` | | -| `expose.loadBalancer.annotations` | The annotations attached to the loadBalancer service | {} | -| `expose.loadBalancer.sourceRanges` | List of IP address ranges to assign to loadBalancerSourceRanges | [] | -| **Internal TLS** | | | -| `internalTLS.enabled` | Enable TLS for the components (core, jobservice, portal, registry, trivy) | `false` | -| `internalTLS.certSource` | Method to provide TLS for the components, options are `auto`, `manual`, `secret`. | `auto` | -| `internalTLS.trustCa` | The content of trust CA, only available when `certSource` is `manual`. **Note**: all the internal certificates of the components must be issued by this CA | | -| `internalTLS.core.secretName` | The secret name for core component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.core.crt` | Content of core's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.core.key` | Content of core's TLS key file, only available when `certSource` is `manual` | | -| `internalTLS.jobservice.secretName` | The secret name for jobservice component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.jobservice.crt` | Content of jobservice's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.jobservice.key` | Content of jobservice's TLS key file, only available when `certSource` is `manual` | | -| `internalTLS.registry.secretName` | The secret name for registry component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.registry.crt` | Content of registry's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.registry.key` | Content of registry's TLS key file, only available when `certSource` is `manual` | | -| `internalTLS.portal.secretName` | The secret name for portal component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. 
| | -| `internalTLS.portal.crt` | Content of portal's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.portal.key` | Content of portal's TLS key file, only available when `certSource` is `manual` | | -| `internalTLS.trivy.secretName` | The secret name for trivy component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.trivy.crt` | Content of trivy's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.trivy.key` | Content of trivy's TLS key file, only available when `certSource` is `manual` | | -| **IPFamily** | | | -| `ipFamily.ipv4.enabled` | if cluster is ipv4 enabled, all ipv4 related configs will set correspondingly, but currently it only affects the nginx related components | `true` | -| `ipFamily.ipv6.enabled` | if cluster is ipv6 enabled, all ipv6 related configs will set correspondingly, but currently it only affects the nginx related components | `true` | -| **Persistence** | | | -| `persistence.enabled` | Enable the data persistence or not | `true` | -| `persistence.resourcePolicy` | Setting it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted. Does not affect PVCs created for internal database and redis components. | `keep` | -| `persistence.persistentVolumeClaim.registry.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | -| `persistence.persistentVolumeClaim.registry.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | -| `persistence.persistentVolumeClaim.registry.subPath` | The sub path used in the volume | | -| `persistence.persistentVolumeClaim.registry.accessMode` | The access mode of the volume | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.registry.size` | The size of the volume | `5Gi` | -| `persistence.persistentVolumeClaim.registry.annotations` | The annotations of the volume | | -|`persistence.persistentVolumeClaim.jobservice.jobLog.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. | | -| `persistence.persistentVolumeClaim.jobservice.jobLog.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | -| `persistence.persistentVolumeClaim.jobservice.jobLog.subPath` | The sub path used in the volume | | -| `persistence.persistentVolumeClaim.jobservice.jobLog.accessMode` | The access mode of the volume | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.jobservice.jobLog.size` | The size of the volume | `1Gi` | -| `persistence.persistentVolumeClaim.jobservice.jobLog.annotations` | The annotations of the volume | | -| `persistence.persistentVolumeClaim.database.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. 
If external database is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.database.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning. If external database is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.database.subPath` | The sub path used in the volume. If external database is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.database.accessMode` | The access mode of the volume. If external database is used, the setting will be ignored | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.database.size` | The size of the volume. If external database is used, the setting will be ignored | `1Gi` | -| `persistence.persistentVolumeClaim.database.annotations` | The annotations of the volume | | -| `persistence.persistentVolumeClaim.redis.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. If external Redis is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.redis.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning. If external Redis is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.redis.subPath` | The sub path used in the volume. If external Redis is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.redis.accessMode` | The access mode of the volume. If external Redis is used, the setting will be ignored | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.redis.size` | The size of the volume. If external Redis is used, the setting will be ignored | `1Gi` | -| `persistence.persistentVolumeClaim.redis.annotations` | The annotations of the volume | | -| `persistence.persistentVolumeClaim.trivy.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | -| `persistence.persistentVolumeClaim.trivy.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | -| `persistence.persistentVolumeClaim.trivy.subPath` | The sub path used in the volume | | -| `persistence.persistentVolumeClaim.trivy.accessMode` | The access mode of the volume | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.trivy.size` | The size of the volume | `1Gi` | -| `persistence.persistentVolumeClaim.trivy.annotations` | The annotations of the volume | | -| `persistence.imageChartStorage.disableredirect` | The configuration for managing redirects from content backends. For backends which not supported it (such as using minio for `s3` storage type), please set it to `true` to disable redirects. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect) for more details | `false` | -| `persistence.imageChartStorage.caBundleSecretName` | Specify the `caBundleSecretName` if the storage service uses a self-signed certificate. The secret must contain keys named `ca.crt` which will be injected into the trust store of registry's and containers. 
| | -| `persistence.imageChartStorage.type` | The type of storage for images and charts: `filesystem`, `azure`, `gcs`, `s3`, `swift` or `oss`. The type must be `filesystem` if you want to use persistent volumes for registry. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) for more details | `filesystem` | -| `persistence.imageChartStorage.gcs.existingSecret` | An existing secret containing the gcs service account json key. The key must be gcs-key.json. | `""` | -| `persistence.imageChartStorage.gcs.useWorkloadIdentity` | A boolean to allow the use of workloadidentity in a GKE cluster. To use it, create a kubernetes service account and set the name in the key `serviceAccountName` of each component, then allow automounting the service account. | `false` | -| **General** | | | -| `externalURL` | The external URL for Harbor core service | `https://core.harbor.domain` | -| `caBundleSecretName` | The custom CA bundle secret name, the secret must contain key named "ca.crt" which will be injected into the trust store for core, jobservice, registry, trivy components. | | -| `uaaSecretName` | If using external UAA auth which has a self signed cert, you can provide a pre-created secret containing it under the key `ca.crt`. | | -| `imagePullPolicy` | The image pull policy | | -| `imagePullSecrets` | The imagePullSecrets names for all deployments | | -| `updateStrategy.type` | The update strategy for deployments with persistent volumes(jobservice, registry): `RollingUpdate` or `Recreate`. Set it as `Recreate` when `RWM` for volumes isn't supported | `RollingUpdate` | -| `logLevel` | The log level: `debug`, `info`, `warning`, `error` or `fatal` | `info` | -| `harborAdminPassword` | The initial password of Harbor admin. Change it from portal after launching Harbor | `Harbor12345` | -| `existingSecretAdminPassword` | The name of secret where admin password can be found. | | -| `existingSecretAdminPasswordKey` | The name of the key in the secret where to find harbor admin password Harbor | `HARBOR_ADMIN_PASSWORD` | -| `caSecretName` | The name of the secret which contains key named `ca.crt`. Setting this enables the download link on portal to download the CA certificate when the certificate isn't generated automatically | | -| `secretKey` | The key used for encryption. Must be a string of 16 chars | `not-a-secure-key` | -| `existingSecretSecretKey` | An existing secret containing the encoding secretKey | `""` | -| `proxy.httpProxy` | The URL of the HTTP proxy server | | -| `proxy.httpsProxy` | The URL of the HTTPS proxy server | | -| `proxy.noProxy` | The URLs that the proxy settings not apply to | 127.0.0.1,localhost,.local,.internal | -| `proxy.components` | The component list that the proxy settings apply to | core, jobservice, trivy | -| `enableMigrateHelmHook` | Run the migration job via helm hook, if it is true, the database migration will be separated from harbor-core, run with a preupgrade job migration-job | `false` | -| **Nginx** (if service exposed via `ingress`, Nginx will not be used) | | | -| `nginx.image.repository` | Image repository | `goharbor/nginx-photon` | -| `nginx.image.tag` | Image tag | `dev` | -| `nginx.replicas` | The replica count | `1` | -| `nginx.revisionHistoryLimit` | The revision history limit | `10` | -| `nginx.resources` | The [resources] to allocate for container | undefined | -| `nginx.automountServiceAccountToken` | Mount serviceAccountToken? 
| `false` | -| `nginx.nodeSelector` | Node labels for pod assignment | `{}` | -| `nginx.tolerations` | Tolerations for pod assignment | `[]` | -| `nginx.affinity` | Node/Pod affinities | `{}` | -| `nginx.podAnnotations` | Annotations to add to the nginx pod | `{}` | -| `nginx.priorityClassName` | The priority class to run the pod as | | -| **Portal** | | | -| `portal.image.repository` | Repository for portal image | `goharbor/harbor-portal` | -| `portal.image.tag` | Tag for portal image | `dev` | -| `portal.replicas` | The replica count | `1` | -| `portal.revisionHistoryLimit` | The revision history limit | `10` | -| `portal.resources` | The [resources] to allocate for container | undefined | -| `portal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `portal.nodeSelector` | Node labels for pod assignment | `{}` | -| `portal.tolerations` | Tolerations for pod assignment | `[]` | -| `portal.affinity` | Node/Pod affinities | `{}` | -| `portal.podAnnotations` | Annotations to add to the portal pod | `{}` | -| `portal.priorityClassName` | The priority class to run the pod as | | -| `portal.initContainers` | Init containers to be run before the controller's container starts. | `[]` | -| **Core** | | | -| `core.image.repository` | Repository for Harbor core image | `goharbor/harbor-core` | -| `core.image.tag` | Tag for Harbor core image | `dev` | -| `core.replicas` | The replica count | `1` | -| `core.revisionHistoryLimit` | The revision history limit | `10` | -| `core.startupProbe.initialDelaySeconds` | The initial delay in seconds for the startup probe | `10` | -| `core.resources` | The [resources] to allocate for container | undefined | -| `core.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `core.nodeSelector` | Node labels for pod assignment | `{}` | -| `core.tolerations` | Tolerations for pod assignment | `[]` | -| `core.affinity` | Node/Pod affinities | `{}` | -| `core.initContainers` | Init containers to be run before the controller's container starts. | `[]` | -| `core.podAnnotations` | Annotations to add to the core pod | `{}` | -| `core.serviceAnnotations` | Annotations to add to the core service | `{}` | -| `core.secret` | Secret is used when core server communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | -| `core.secretName` | Fill the name of a kubernetes secret if you want to use your own TLS certificate and private key for token encryption/decryption. The secret must contain keys named: `tls.crt` - the certificate and `tls.key` - the private key. The default key pair will be used if it isn't set | | -| `core.tokenKey` | PEM-formatted RSA private key used to sign service tokens. Only used if `core.secretName` is unset. If set, `core.tokenCert` MUST also be set. | | -| `core.tokenCert` | PEM-formatted certificate signed by `core.tokenKey` used to validate service tokens. Only used if `core.secretName` is unset. If set, `core.tokenKey` MUST also be set. | | -| `core.xsrfKey` | The XSRF key. 
Will be generated automatically if it isn't specified | | -| `core.priorityClassName` | The priority class to run the pod as | | -| `core.artifactPullAsyncFlushDuration` | The time duration for async update artifact pull_time and repository pull_count | | -| `core.gdpr.deleteUser` | Enable GDPR compliant user delete | `false` | -| **Jobservice** | | | -| `jobservice.image.repository` | Repository for jobservice image | `goharbor/harbor-jobservice` | -| `jobservice.image.tag` | Tag for jobservice image | `dev` | -| `jobservice.replicas` | The replica count | `1` | -| `jobservice.revisionHistoryLimit` | The revision history limit | `10` | -| `jobservice.maxJobWorkers` | The max job workers | `10` | -| `jobservice.jobLoggers` | The loggers for jobs: `file`, `database` or `stdout` | `[file]` | -| `jobservice.loggerSweeperDuration` | The jobLogger sweeper duration in days (ignored if `jobLoggers` is set to `stdout`) | `14` | -| `jobservice.resources` | The [resources] to allocate for container | undefined | -| `jobservice.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `jobservice.nodeSelector` | Node labels for pod assignment | `{}` | -| `jobservice.tolerations` | Tolerations for pod assignment | `[]` | -| `jobservice.affinity` | Node/Pod affinities | `{}` | -| `jobservice.podAnnotations` | Annotations to add to the jobservice pod | `{}` | -| `jobservice.priorityClassName` | The priority class to run the pod as | | -| `jobservice.initContainers` | Init containers to be run before the controller's container starts. | `[]` | -| `jobservice.secret` | Secret is used when job service communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | -| **Registry** | | | -| `registry.registry.image.repository` | Repository for registry image | `goharbor/registry-photon` | -| `registry.registry.image.tag` | Tag for registry image | `dev` | -| `registry.registry.resources` | The [resources] to allocate for container | undefined | -| `registry.controller.image.repository` | Repository for registry controller image | `goharbor/harbor-registryctl` | -| `registry.controller.image.tag` | Tag for registry controller image | `dev` | -| `registry.controller.resources` | The [resources] to allocate for container | undefined | -| `registry.replicas` | The replica count | `1` | -| `registry.revisionHistoryLimit` | The revision history limit | `10` | -| `registry.nodeSelector` | Node labels for pod assignment | `{}` | -| `registry.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `registry.tolerations` | Tolerations for pod assignment | `[]` | -| `registry.affinity` | Node/Pod affinities | `{}` | -| `registry.middleware` | Middleware is used to add support for a CDN between backend storage and `docker pull` recipient. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#middleware). | | -| `registry.podAnnotations` | Annotations to add to the registry pod | `{}` | -| `registry.priorityClassName` | The priority class to run the pod as | | -| `registry.secret` | Secret is used to secure the upload state from client and registry storage backend. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#http). If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | -| `registry.credentials.username` | The username for accessing the registry instance, which is hosted by htpasswd auth mode. 
More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). | `harbor_registry_user` | -| `registry.credentials.password` | The password for accessing the registry instance, which is hosted by htpasswd auth mode. More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation. | `harbor_registry_password` | -| `registry.credentials.existingSecret` | An existing secret containing the password for accessing the registry instance, which is hosted by htpasswd auth mode. More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). The key must be `REGISTRY_PASSWD` | `""` | -| `registry.credentials.htpasswdString` | Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt. | undefined | -| `registry.relativeurls` | If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. Needed if harbor is behind a reverse proxy | `false` | -| `registry.upload_purging.enabled` | If true, enable purge _upload directories | `true` | -| `registry.upload_purging.age` | Remove files in _upload directories which exist for a period of time, default is one week. | `168h` | -| `registry.upload_purging.interval` | The interval of the purge operations | `24h` | -| `registry.upload_purging.dryrun` | If true, enable dryrun for purging _upload, default false | `false` | -| `registry.initContainers` | Init containers to be run before the controller's container starts. | `[]` | -| **[Trivy][trivy]** | | | -| `trivy.enabled` | The flag to enable Trivy scanner | `true` | -| `trivy.image.repository` | Repository for Trivy adapter image | `goharbor/trivy-adapter-photon` | -| `trivy.image.tag` | Tag for Trivy adapter image | `dev` | -| `trivy.resources` | The [resources] to allocate for Trivy adapter container | | -| `trivy.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `trivy.replicas` | The number of Pod replicas | `1` | -| `trivy.debugMode` | The flag to enable Trivy debug mode | `false` | -| `trivy.vulnType` | Comma-separated list of vulnerability types. Possible values `os` and `library`. | `os,library` | -| `trivy.severity` | Comma-separated list of severities to be checked | `UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL` | -| `trivy.ignoreUnfixed` | The flag to display only fixed vulnerabilities | `false` | -| `trivy.insecure` | The flag to skip verifying registry certificate | `false` | -| `trivy.skipUpdate` | The flag to disable [Trivy DB][trivy-db] downloads from GitHub | `false` | -| `trivy.offlineScan` | The flag prevents Trivy from sending API requests to identify dependencies. | `false` | -| `trivy.securityCheck` | Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. 
| `vuln` | -| `trivy.timeout` | The duration to wait for scan completion | `5m0s` | -| `trivy.gitHubToken` | The GitHub access token to download [Trivy DB][trivy-db] (see [GitHub rate limiting][trivy-rate-limiting]) | | -| `trivy.priorityClassName` | The priority class to run the pod as | | -| **Notary** | | | -| `notary.enabled` | Enable Notary? | `true` | -| `notary.server.image.repository` | Repository for notary server image | `goharbor/notary-server-photon` | -| `notary.server.image.tag` | Tag for notary server image | `dev` | -| `notary.server.replicas` | The replica count | `1` | -| `notary.server.resources` | The [resources] to allocate for container | undefined | -| `notary.server.priorityClassName` | The priority class to run the pod as | | -| `notary.server.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `notary.signer.image.repository` | Repository for notary signer image | `goharbor/notary-signer-photon` | -| `notary.signer.image.tag` | Tag for notary signer image | `dev` | -| `notary.signer.replicas` | The replica count | `1` | -| `notary.signer.resources` | The [resources] to allocate for container | undefined | -| `notary.signer.priorityClassName` | The priority class to run the pod as | | -| `notary.signer.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `notary.nodeSelector` | Node labels for pod assignment | `{}` | -| `notary.tolerations` | Tolerations for pod assignment | `[]` | -| `notary.affinity` | Node/Pod affinities | `{}` | -| `notary.podAnnotations` | Annotations to add to the notary pod | `{}` | -| `notary.serviceAnnotations` | Annotations to add to the notary service | `{}` | -| `notary.secretName` | Fill the name of a kubernetes secret if you want to use your own TLS certificate authority, certificate and private key for notary communications. The secret must contain keys named `ca.crt`, `tls.crt` and `tls.key` that contain the CA, certificate and private key. They will be generated if not set. | | -| **Database** | | | -| `database.type` | If external database is used, set it to `external` | `internal` | -| `database.internal.image.repository` | Repository for database image | `goharbor/harbor-db` | -| `database.internal.image.tag` | Tag for database image | `dev` | -| `database.internal.password` | The password for database | `changeit` | -| `database.internal.shmSizeLimit` | The limit for the size of shared memory for internal PostgreSQL, conventionally it's around 50% of the memory limit of the container | `512Mi` | -| `database.internal.resources` | The [resources] to allocate for container | undefined | -| `database.internal.automountServiceAccountToken` | Mount serviceAccountToken? 
| `false` | -| `database.internal.initContainer.migrator.resources` | The [resources] to allocate for the database migrator initContainer | undefined | -| `database.internal.initContainer.permissions.resources` | The [resources] to allocate for the database permissions initContainer | undefined | -| `database.internal.nodeSelector` | Node labels for pod assignment | `{}` | -| `database.internal.tolerations` | Tolerations for pod assignment | `[]` | -| `database.internal.affinity` | Node/Pod affinities | `{}` | -| `database.internal.priorityClassName` | The priority class to run the pod as | | -| `database.internal.livenessProbe.timeoutSeconds` | The timeout used in liveness probe; 1 to 5 seconds | 1 | -| `database.internal.readinessProbe.timeoutSeconds` | The timeout used in readiness probe; 1 to 5 seconds | 1 | -| `database.external.host` | The hostname of external database | `192.168.0.1` | -| `database.external.port` | The port of external database | `5432` | -| `database.external.username` | The username of external database | `user` | -| `database.external.password` | The password of external database | `password` | -| `database.external.coreDatabase` | The database used by core service | `registry` | -| `database.external.notaryServerDatabase` | The database used by Notary server | `notary_server` | -| `database.external.notarySignerDatabase` | The database used by Notary signer | `notary_signer` | -| `database.external.existingSecret` | An existing password containing the database password. the key must be `password`. | `""` | -| `database.external.sslmode` | Connection method of external database (require, verify-full, verify-ca, disable) | `disable` | -| `database.maxIdleConns` | The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained. | `50` | -| `database.maxOpenConns` | The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections. | `100` | -| `database.podAnnotations` | Annotations to add to the database pod | `{}` | -| **Redis** | | | -| `redis.type` | If external redis is used, set it to `external` | `internal` | -| `redis.internal.image.repository` | Repository for redis image | `goharbor/redis-photon` | -| `redis.internal.image.tag` | Tag for redis image | `dev` | -| `redis.internal.resources` | The [resources] to allocate for container | undefined | -| `redis.internal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `redis.internal.nodeSelector` | Node labels for pod assignment | `{}` | -| `redis.internal.tolerations` | Tolerations for pod assignment | `[]` | -| `redis.internal.affinity` | Node/Pod affinities | `{}` | -| `redis.internal.priorityClassName` | The priority class to run the pod as | | -| `redis.external.addr` | The addr of external Redis: :. When using sentinel, it should be :,:,: | `192.168.0.2:6379` | -| `redis.external.sentinelMasterSet` | The name of the set of Redis instances to monitor | | -| `redis.external.coreDatabaseIndex` | The database index for core | `0` | -| `redis.external.jobserviceDatabaseIndex` | The database index for jobservice | `1` | -| `redis.external.registryDatabaseIndex` | The database index for registry | `2` | -| `redis.external.trivyAdapterIndex` | The database index for trivy adapter | `5` | -| `redis.external.password` | The password of external Redis | | -| `redis.external.existingSecret` | Use an existing secret to connect to redis. The key must be `REDIS_PASSWORD`. 
| `""` | -| `redis.podAnnotations` | Annotations to add to the redis pod | `{}` | -| **Exporter** | | | -| `exporter.replicas` | The replica count | `1` | -| `exporter.revisionHistoryLimit` | The revision history limit | `10` | -| `exporter.podAnnotations` | Annotations to add to the exporter pod | `{}` | -| `exporter.image.repository` | Repository for redis image | `goharbor/harbor-exporter` | -| `exporter.image.tag` | Tag for exporter image | `dev` | -| `exporter.nodeSelector` | Node labels for pod assignment | `{}` | -| `exporter.tolerations` | Tolerations for pod assignment | `[]` | -| `exporter.affinity` | Node/Pod affinities | `{}` | -| `exporter.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `exporter.cacheDuration` | the cache duration for information that exporter collected from Harbor | `30` | -| `exporter.cacheCleanInterval` | cache clean interval for information that exporter collected from Harbor | `14400` | -| `exporter.priorityClassName` | The priority class to run the pod as | | -| **Metrics** | | | -| `metrics.enabled` | if enable harbor metrics | `false` | -| `metrics.core.path` | the url path for core metrics | `/metrics` | -| `metrics.core.port` | the port for core metrics | `8001` | -| `metrics.registry.path` | the url path for registry metrics | `/metrics` | -| `metrics.registry.port` | the port for registry metrics | `8001` | -| `metrics.exporter.path` | the url path for exporter metrics | `/metrics` | -| `metrics.exporter.port` | the port for exporter metrics | `8001` | -| `metrics.serviceMonitor.enabled` | create prometheus serviceMonitor. Requires prometheus CRD's | `false` | -| `metrics.serviceMonitor.additionalLabels` | additional labels to upsert to the manifest | `""` | -| `metrics.serviceMonitor.interval` | scrape period for harbor metrics | `""` | -| `metrics.serviceMonitor.metricRelabelings` | metrics relabel to add/mod/del before ingestion | `[]` | -| `metrics.serviceMonitor.relabelings` | relabels to add/mod/del to sample before scrape | `[]` | -| **Trace** | | | -| `trace.enabled` | Enable tracing or not | `false` | -| `trace.provider` | The tracing provider: `jaeger` or `otel`. 
`jaeger` should be 1.26+ | `jaeger` | -| `trace.sample_rate` | Set `sample_rate` to 1 if you want sampling 100% of trace data; set 0.5 if you want sampling 50% of trace data, and so forth | `1` | -| `trace.namespace` | Namespace used to differentiate different harbor services | | -| `trace.attributes` | `attributes` is a key value dict contains user defined attributes used to initialize trace provider | | -| `trace.jaeger.endpoint` | The endpoint of jaeger | `http://hostname:14268/api/traces` | -| `trace.jaeger.username` | The username of jaeger | | -| `trace.jaeger.password` | The password of jaeger | | -| `trace.jaeger.agent_host` | The agent host of jaeger | | -| `trace.jaeger.agent_port` | The agent port of jaeger | `6831` | -| `trace.otel.endpoint` | The endpoint of otel | `hostname:4318` | -| `trace.otel.url_path` | The URL path of otel | `/v1/traces` | -| `trace.otel.compression` | Whether enable compression or not for otel | `false` | -| `trace.otel.insecure` | Whether establish insecure connection or not for otel | `true` | -| `trace.otel.timeout` | The timeout in seconds of otel | `10` | -| **Cache** | | | -| `cache.enabled` | Enable cache layer or not | `false` | -| `cache.expireHours` | The expire hours of cache layer | `24` | +| Parameter | Description | Default | +|-----------------------------------------------------------------------| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------- | +| **Expose** | | | +| `expose.type` | How to expose the service: `ingress`, `clusterIP`, `nodePort` or `loadBalancer`, other values will be ignored and the creation of service will be skipped. | `ingress` | +| `expose.tls.enabled` | Enable TLS or not. Delete the `ssl-redirect` annotations in `expose.ingress.annotations` when TLS is disabled and `expose.type` is `ingress`. Note: if the `expose.type` is `ingress` and TLS is disabled, the port must be included in the command when pulling/pushing images. Refer to https://github.com/goharbor/harbor/issues/5291 for details. | `true` | +| `expose.tls.certSource` | The source of the TLS certificate. Set as `auto`, `secret` or `none` and fill the information in the corresponding section: 1) auto: generate the TLS certificate automatically 2) secret: read the TLS certificate from the specified secret. The TLS certificate can be generated manually or by cert manager 3) none: configure no TLS certificate for the ingress. If the default TLS certificate is configured in the ingress controller, choose this option | `auto` | +| `expose.tls.auto.commonName` | The common name used to generate the certificate, it's necessary when the type isn't `ingress` | | +| `expose.tls.secret.secretName` | The name of secret which contains keys named: `tls.crt` - the certificate; `tls.key` - the private key | | +| `expose.ingress.hosts.core` | The host of Harbor core service in ingress rule | `core.harbor.domain` | +| `expose.ingress.controller` | The ingress controller type. 
Currently supports `default`, `gce`, `alb`, `f5-bigip` and `ncp` | `default` | +| `expose.ingress.kubeVersionOverride` | Allows the ability to override the kubernetes version used while templating the ingress | | +| `expose.ingress.annotations` | The annotations used commonly for ingresses | | +| `expose.ingress.labels` | The labels specific to ingress | {} | +| `expose.clusterIP.name` | The name of ClusterIP service | `harbor` | +| `expose.clusterIP.annotations` | The annotations attached to the ClusterIP service | {} | +| `expose.clusterIP.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | +| `expose.clusterIP.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `443` | +| `expose.clusterIP.annotations` | The annotations used commonly for clusterIP | | +| `expose.clusterIP.labels` | The labels specific to clusterIP | {} | +| `expose.nodePort.name` | The name of NodePort service | `harbor` | +| `expose.nodePort.ports.http.port` | The service port Harbor listens on when serving HTTP | `80` | +| `expose.nodePort.ports.http.nodePort` | The node port Harbor listens on when serving HTTP | `30002` | +| `expose.nodePort.ports.https.port` | The service port Harbor listens on when serving HTTPS | `443` | +| `expose.nodePort.ports.https.nodePort` | The node port Harbor listens on when serving HTTPS | `30003` | +| `expose.nodePort.annotations` | The annotations used commonly for nodePort | | +| `expose.nodePort.labels` | The labels specific to nodePort | {} | +| `expose.loadBalancer.name` | The name of service | `harbor` | +| `expose.loadBalancer.IP` | The IP of the loadBalancer. It only works when loadBalancer supports assigning IP | `""` | +| `expose.loadBalancer.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | +| `expose.loadBalancer.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `30002` | +| `expose.loadBalancer.annotations` | The annotations attached to the loadBalancer service | {} | +| `expose.loadBalancer.labels` | The labels specific to loadBalancer | {} | +| `expose.loadBalancer.sourceRanges` | List of IP address ranges to assign to loadBalancerSourceRanges | [] | +| **Internal TLS** | | | +| `internalTLS.enabled` | Enable TLS for the components (core, jobservice, portal, registry, trivy) | `false` | +| `internalTLS.strong_ssl_ciphers` | Enable strong ssl ciphers for nginx and portal | `false` +| `internalTLS.certSource` | Method to provide TLS for the components, options are `auto`, `manual`, `secret`. | `auto` | +| `internalTLS.trustCa` | The content of trust CA, only available when `certSource` is `manual`. **Note**: all the internal certificates of the components must be issued by this CA | | +| `internalTLS.core.secretName` | The secret name for core component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.core.crt` | Content of core's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.core.key` | Content of core's TLS key file, only available when `certSource` is `manual` | | +| `internalTLS.jobservice.secretName` | The secret name for jobservice component, only available when `certSource` is `secret`. 
The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.jobservice.crt` | Content of jobservice's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.jobservice.key` | Content of jobservice's TLS key file, only available when `certSource` is `manual` | | +| `internalTLS.registry.secretName` | The secret name for registry component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.registry.crt` | Content of registry's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.registry.key` | Content of registry's TLS key file, only available when `certSource` is `manual` | | +| `internalTLS.portal.secretName` | The secret name for portal component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.portal.crt` | Content of portal's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.portal.key` | Content of portal's TLS key file, only available when `certSource` is `manual` | | +| `internalTLS.trivy.secretName` | The secret name for trivy component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.trivy.crt` | Content of trivy's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.trivy.key` | Content of trivy's TLS key file, only available when `certSource` is `manual` | | +| **IPFamily** | | | +| `ipFamily.ipv4.enabled` | if cluster is ipv4 enabled, all ipv4 related configs will set correspondingly, but currently it only affects the nginx related components | `true` | +| `ipFamily.ipv6.enabled` | if cluster is ipv6 enabled, all ipv6 related configs will set correspondingly, but currently it only affects the nginx related components | `true` | +| **Persistence** | | | +| `persistence.enabled` | Enable the data persistence or not | `true` | +| `persistence.resourcePolicy` | Setting it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted. Does not affect PVCs created for internal database and redis components. | `keep` | +| `persistence.persistentVolumeClaim.registry.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | +| `persistence.persistentVolumeClaim.registry.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). 
Set it to `-` to disable dynamic provisioning | | +| `persistence.persistentVolumeClaim.registry.subPath` | The sub path used in the volume | | +| `persistence.persistentVolumeClaim.registry.accessMode` | The access mode of the volume | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.registry.size` | The size of the volume | `5Gi` | +| `persistence.persistentVolumeClaim.registry.annotations` | The annotations of the volume | | +| `persistence.persistentVolumeClaim.jobservice.jobLog.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. | | +| `persistence.persistentVolumeClaim.jobservice.jobLog.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | +| `persistence.persistentVolumeClaim.jobservice.jobLog.subPath` | The sub path used in the volume | | +| `persistence.persistentVolumeClaim.jobservice.jobLog.accessMode` | The access mode of the volume | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.jobservice.jobLog.size` | The size of the volume | `1Gi` | +| `persistence.persistentVolumeClaim.jobservice.jobLog.annotations` | The annotations of the volume | | +| `persistence.persistentVolumeClaim.database.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. If external database is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.database.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning. If external database is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.database.subPath` | The sub path used in the volume. If external database is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.database.accessMode` | The access mode of the volume. If external database is used, the setting will be ignored | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.database.size` | The size of the volume. If external database is used, the setting will be ignored | `1Gi` | +| `persistence.persistentVolumeClaim.database.annotations` | The annotations of the volume | | +| `persistence.persistentVolumeClaim.redis.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. If external Redis is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.redis.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning. If external Redis is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.redis.subPath` | The sub path used in the volume. If external Redis is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.redis.accessMode` | The access mode of the volume. If external Redis is used, the setting will be ignored | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.redis.size` | The size of the volume. 
If external Redis is used, the setting will be ignored | `1Gi` | +| `persistence.persistentVolumeClaim.redis.annotations` | The annotations of the volume | | +| `persistence.persistentVolumeClaim.trivy.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | +| `persistence.persistentVolumeClaim.trivy.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | +| `persistence.persistentVolumeClaim.trivy.subPath` | The sub path used in the volume | | +| `persistence.persistentVolumeClaim.trivy.accessMode` | The access mode of the volume | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.trivy.size` | The size of the volume | `1Gi` | +| `persistence.persistentVolumeClaim.trivy.annotations` | The annotations of the volume | | +| `persistence.imageChartStorage.disableredirect` | The configuration for managing redirects from content backends. For backends that do not support it (such as using MinIO for the `s3` storage type), set it to `true` to disable redirects. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect) for more details | `false` | +| `persistence.imageChartStorage.caBundleSecretName` | Specify the `caBundleSecretName` if the storage service uses a self-signed certificate. The secret must contain a key named `ca.crt`, which will be injected into the trust store of the registry containers. | | +| `persistence.imageChartStorage.type` | The type of storage for images and charts: `filesystem`, `azure`, `gcs`, `s3`, `swift` or `oss`. The type must be `filesystem` if you want to use persistent volumes for the registry. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) for more details | `filesystem` | +| `persistence.imageChartStorage.gcs.existingSecret` | An existing secret containing the GCS service account JSON key. The key must be `gcs-key.json`. | `""` | +| `persistence.imageChartStorage.gcs.useWorkloadIdentity` | A boolean to allow the use of workload identity in a GKE cluster. To use it, create a Kubernetes service account and set its name in the key `serviceAccountName` of each component, then allow automounting the service account. | `false` | +| **General** | | | +| `externalURL` | The external URL for Harbor core service | `https://core.harbor.domain` | +| `caBundleSecretName` | The custom CA bundle secret name; the secret must contain a key named `ca.crt`, which will be injected into the trust store for the core, jobservice, registry and trivy components. | | +| `uaaSecretName` | If using external UAA auth which has a self-signed cert, you can provide a pre-created secret containing it under the key `ca.crt`. | | +| `imagePullPolicy` | The image pull policy | | +| `imagePullSecrets` | The imagePullSecrets names for all deployments | | +| `updateStrategy.type` | The update strategy for deployments with persistent volumes (jobservice, registry): `RollingUpdate` or `Recreate`. Set it to `Recreate` when `RWM` for volumes isn't supported | `RollingUpdate` | +| `logLevel` | The log level: `debug`, `info`, `warning`, `error` or `fatal` | `info` | +| `harborAdminPassword` | The initial password of the Harbor admin. Change it from the portal after launching Harbor | `Harbor12345` | +| `existingSecretAdminPassword` | The name of the secret where the admin password can be found.
| | +| `existingSecretAdminPasswordKey` | The name of the key in the secret containing the Harbor admin password | `HARBOR_ADMIN_PASSWORD` | +| `caSecretName` | The name of the secret which contains a key named `ca.crt`. Setting this enables the download link on the portal to download the CA certificate when the certificate isn't generated automatically | | +| `secretKey` | The key used for encryption. Must be a string of 16 chars | `not-a-secure-key` | +| `existingSecretSecretKey` | An existing secret containing the encryption secretKey | `""` | +| `proxy.httpProxy` | The URL of the HTTP proxy server | | +| `proxy.httpsProxy` | The URL of the HTTPS proxy server | | +| `proxy.noProxy` | The URLs that the proxy settings do not apply to | 127.0.0.1,localhost,.local,.internal | +| `proxy.components` | The component list that the proxy settings apply to | core, jobservice, trivy | +| `enableMigrateHelmHook` | Run the migration job via a helm hook. If `true`, the database migration is separated from harbor-core and runs in a pre-upgrade job `migration-job` | `false` | +| **Nginx** (if the service is exposed via `ingress`, Nginx will not be used) | | | +| `nginx.image.repository` | Image repository | `goharbor/nginx-photon` | +| `nginx.image.tag` | Image tag | `dev` | +| `nginx.replicas` | The replica count | `1` | +| `nginx.revisionHistoryLimit` | The revision history limit | `10` | +| `nginx.resources` | The [resources] to allocate for container | undefined | +| `nginx.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `nginx.nodeSelector` | Node labels for pod assignment | `{}` | +| `nginx.tolerations` | Tolerations for pod assignment | `[]` | +| `nginx.affinity` | Node/Pod affinities | `{}` | +| `nginx.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `nginx.podAnnotations` | Annotations to add to the nginx pod | `{}` | +| `nginx.priorityClassName` | The priority class to run the pod as | | +| **Portal** | | | +| `portal.image.repository` | Repository for portal image | `goharbor/harbor-portal` | +| `portal.image.tag` | Tag for portal image | `dev` | +| `portal.replicas` | The replica count | `1` | +| `portal.revisionHistoryLimit` | The revision history limit | `10` | +| `portal.resources` | The [resources] to allocate for container | undefined | +| `portal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `portal.nodeSelector` | Node labels for pod assignment | `{}` | +| `portal.tolerations` | Tolerations for pod assignment | `[]` | +| `portal.affinity` | Node/Pod affinities | `{}` | +| `portal.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `portal.podAnnotations` | Annotations to add to the portal pod | `{}` | +| `portal.serviceAnnotations` | Annotations to add to the portal service | `{}` | +| `portal.priorityClassName` | The priority class to run the pod as | | +| `portal.initContainers` | Init containers to be run before the controller's container starts.
| `[]` | +| **Core** | | | +| `core.image.repository` | Repository for Harbor core image | `goharbor/harbor-core` | +| `core.image.tag` | Tag for Harbor core image | `dev` | +| `core.replicas` | The replica count | `1` | +| `core.revisionHistoryLimit` | The revision history limit | `10` | +| `core.startupProbe.initialDelaySeconds` | The initial delay in seconds for the startup probe | `10` | +| `core.resources` | The [resources] to allocate for container | undefined | +| `core.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `core.nodeSelector` | Node labels for pod assignment | `{}` | +| `core.tolerations` | Tolerations for pod assignment | `[]` | +| `core.affinity` | Node/Pod affinities | `{}` | +| `core.initContainers` | Init containers to be run before the controller's container starts. | `[]` | +| `core.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `core.podAnnotations` | Annotations to add to the core pod | `{}` | +| `core.serviceAnnotations` | Annotations to add to the core service | `{}` | +| `core.configureUserSettings` | A JSON string to set in the environment variable `CONFIG_OVERWRITE_JSON` to configure user settings. See the [official docs](https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#configure-users-settings-using-an-environment-variable). | | +| `core.quotaUpdateProvider` | The provider for updating project quota (usage); the options are `redis` or `db`. By default it is implemented by `db`, but you can switch it to `redis`, which can improve the performance of highly concurrent pushes to the same project and reduce database connection spikes and usage. Using `redis` adds some delay before the displayed quota usage is updated, so only switch the provider to `redis` if you run into database connection spikes caused by highly concurrent pushes to the same project; there is no improvement for other scenarios. | `db` | +| `core.secret` | Secret used when the core server communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | +| `core.secretName` | Fill in the name of a Kubernetes secret if you want to use your own TLS certificate and private key for token encryption/decryption. The secret must contain keys named: `tls.crt` - the certificate and `tls.key` - the private key. The default key pair will be used if it isn't set | | +| `core.tokenKey` | PEM-formatted RSA private key used to sign service tokens. Only used if `core.secretName` is unset. If set, `core.tokenCert` MUST also be set. | | +| `core.tokenCert` | PEM-formatted certificate signed by `core.tokenKey` used to validate service tokens. Only used if `core.secretName` is unset. If set, `core.tokenKey` MUST also be set. | | +| `core.xsrfKey` | The XSRF key.
Will be generated automatically if it isn't specified | | +| `core.priorityClassName` | The priority class to run the pod as | | +| `core.artifactPullAsyncFlushDuration` | The time duration for asynchronously updating artifact pull_time and repository pull_count | | +| `core.gdpr.deleteUser` | Enable GDPR-compliant user delete | `false` | +| `core.gdpr.auditLogsCompliant` | Enable GDPR compliance for audit logs by changing the username to its CRC32 value if that user was deleted from the system | `false` | +| **Jobservice** | | | +| `jobservice.image.repository` | Repository for jobservice image | `goharbor/harbor-jobservice` | +| `jobservice.image.tag` | Tag for jobservice image | `dev` | +| `jobservice.replicas` | The replica count | `1` | +| `jobservice.revisionHistoryLimit` | The revision history limit | `10` | +| `jobservice.maxJobWorkers` | The max job workers | `10` | +| `jobservice.jobLoggers` | The loggers for jobs: `file`, `database` or `stdout` | `[file]` | +| `jobservice.loggerSweeperDuration` | The jobLogger sweeper duration in days (ignored if `jobLoggers` is set to `stdout`) | `14` | +| `jobservice.notification.webhook_job_max_retry` | The maximum number of retries for sending webhook notifications | `3` | +| `jobservice.notification.webhook_job_http_client_timeout` | The HTTP client timeout for sending webhook notifications | `3` | +| `jobservice.reaper.max_update_hours` | The max time in hours to wait for a task to finish; if still unfinished after `max_update_hours`, the task will be marked as error, but will continue to run | `24` | +| `jobservice.reaper.max_dangling_hours` | The max time in hours an execution can stay in the running state without a new task being created | `168` | +| `jobservice.resources` | The [resources] to allocate for container | undefined | +| `jobservice.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `jobservice.nodeSelector` | Node labels for pod assignment | `{}` | +| `jobservice.tolerations` | Tolerations for pod assignment | `[]` | +| `jobservice.affinity` | Node/Pod affinities | `{}` | +| `jobservice.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `jobservice.podAnnotations` | Annotations to add to the jobservice pod | `{}` | +| `jobservice.priorityClassName` | The priority class to run the pod as | | +| `jobservice.initContainers` | Init containers to be run before the controller's container starts. | `[]` | +| `jobservice.secret` | Secret used when the job service communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | +| **Registry** | | | +| `registry.registry.image.repository` | Repository for registry image | `goharbor/registry-photon` | +| `registry.registry.image.tag` | Tag for registry image | `dev` | +| `registry.registry.resources` | The [resources] to allocate for container | undefined | +| `registry.controller.image.repository` | Repository for registry controller image | `goharbor/harbor-registryctl` | +| `registry.controller.image.tag` | Tag for registry controller image | `dev` | +| `registry.controller.resources` | The [resources] to allocate for container | undefined | +| `registry.replicas` | The replica count | `1` | +| `registry.revisionHistoryLimit` | The revision history limit | `10` | +| `registry.nodeSelector` | Node labels for pod assignment | `{}` | +| `registry.automountServiceAccountToken` | Mount serviceAccountToken?
| `false` | +| `registry.tolerations` | Tolerations for pod assignment | `[]` | +| `registry.affinity` | Node/Pod affinities | `{}` | +| `registry.initContainers` | Init containers to be run before the controller's container starts. | `[]` | +| `registry.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `registry.middleware` | Middleware is used to add support for a CDN between backend storage and `docker pull` recipient. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#middleware). | | +| `registry.podAnnotations` | Annotations to add to the registry pod | `{}` | +| `registry.priorityClassName` | The priority class to run the pod as | | +| `registry.secret` | Secret is used to secure the upload state from client and registry storage backend. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#http). If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | +| `registry.credentials.username` | The username that harbor core uses internally to access the registry instance. Together with the `registry.credentials.password`, a htpasswd is created. This is an alternative to providing `registry.credentials.htpasswdString`. For more details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). | `harbor_registry_user` | +| `registry.credentials.password` | The password that harbor core uses internally to access the registry instance. Together with the `registry.credentials.username`, a htpasswd is created. This is an alternative to providing `registry.credentials.htpasswdString`. For more details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation. | `harbor_registry_password` | +| `registry.credentials.existingSecret` | An existing secret containing the password for accessing the registry instance, which is hosted by htpasswd auth mode. More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). The key must be `REGISTRY_PASSWD` | `""` | +| `registry.credentials.htpasswdString` | Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt. | undefined | +| `registry.relativeurls` | If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. Needed if harbor is behind a reverse proxy | `false` | +| `registry.upload_purging.enabled` | If true, enable purge _upload directories | `true` | +| `registry.upload_purging.age` | Remove files in _upload directories which exist for a period of time, default is one week. 
| `168h` | +| `registry.upload_purging.interval` | The interval of the purge operations | `24h` | +| `registry.upload_purging.dryrun` | If true, enable dry run for purging `_upload` directories | `false` | +| **[Trivy][trivy]** | | | +| `trivy.enabled` | The flag to enable Trivy scanner | `true` | +| `trivy.image.repository` | Repository for Trivy adapter image | `goharbor/trivy-adapter-photon` | +| `trivy.image.tag` | Tag for Trivy adapter image | `dev` | +| `trivy.resources` | The [resources] to allocate for Trivy adapter container | | +| `trivy.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `trivy.replicas` | The number of Pod replicas | `1` | +| `trivy.debugMode` | The flag to enable Trivy debug mode | `false` | +| `trivy.vulnType` | Comma-separated list of vulnerability types. Possible values are `os` and `library`. | `os,library` | +| `trivy.severity` | Comma-separated list of severities to be checked | `UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL` | +| `trivy.ignoreUnfixed` | The flag to display only fixed vulnerabilities | `false` | +| `trivy.insecure` | The flag to skip verifying the registry certificate | `false` | +| `trivy.skipUpdate` | The flag to disable [Trivy DB][trivy-db] downloads from GitHub | `false` | +| `trivy.skipJavaDBUpdate` | If the flag is enabled, you have to manually download the `trivy-java.db` file from [Trivy Java DB][trivy-java-db] and mount it at the `/home/scanner/.cache/trivy/java-db/trivy-java.db` path | `false` | +| `trivy.offlineScan` | The flag prevents Trivy from sending API requests to identify dependencies. | `false` | +| `trivy.securityCheck` | Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. | `vuln` | +| `trivy.timeout` | The duration to wait for scan completion | `5m0s` | +| `trivy.gitHubToken` | The GitHub access token to download [Trivy DB][trivy-db] (see [GitHub rate limiting][trivy-rate-limiting]) | | +| `trivy.priorityClassName` | The priority class to run the pod as | | +| `trivy.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | | +| `trivy.initContainers` | Init containers to be run before the controller's container starts. | `[]` | +| **Database** | | | +| `database.type` | If an external database is used, set it to `external` | `internal` | +| `database.internal.image.repository` | Repository for database image | `goharbor/harbor-db` | +| `database.internal.image.tag` | Tag for database image | `dev` | +| `database.internal.password` | The password for the database | `changeit` | +| `database.internal.shmSizeLimit` | The limit for the size of shared memory for internal PostgreSQL; conventionally it's around 50% of the memory limit of the container | `512Mi` | +| `database.internal.resources` | The [resources] to allocate for container | undefined | +| `database.internal.automountServiceAccountToken` | Mount serviceAccountToken?
| `false` | +| `database.internal.initContainer.migrator.resources` | The [resources] to allocate for the database migrator initContainer | undefined | +| `database.internal.initContainer.permissions.resources` | The [resources] to allocate for the database permissions initContainer | undefined | +| `database.internal.nodeSelector` | Node labels for pod assignment | `{}` | +| `database.internal.tolerations` | Tolerations for pod assignment | `[]` | +| `database.internal.affinity` | Node/Pod affinities | `{}` | +| `database.internal.priorityClassName` | The priority class to run the pod as | | +| `database.internal.livenessProbe.timeoutSeconds` | The timeout used in the liveness probe; 1 to 5 seconds | 1 | +| `database.internal.readinessProbe.timeoutSeconds` | The timeout used in the readiness probe; 1 to 5 seconds | 1 | +| `database.external.host` | The hostname of the external database | `192.168.0.1` | +| `database.external.port` | The port of the external database | `5432` | +| `database.external.username` | The username of the external database | `user` | +| `database.external.password` | The password of the external database | `password` | +| `database.external.coreDatabase` | The database used by the core service | `registry` | +| `database.external.existingSecret` | An existing secret containing the database password. The key must be `password`. | `""` | +| `database.external.sslmode` | Connection method of the external database (require, verify-full, verify-ca, disable) | `disable` | +| `database.maxIdleConns` | The maximum number of connections in the idle connection pool. If it is <= 0, no idle connections are retained. | `50` | +| `database.maxOpenConns` | The maximum number of open connections to the database. If it is <= 0, there is no limit on the number of open connections. | `100` | +| `database.podAnnotations` | Annotations to add to the database pod | `{}` | +| **Redis** | | | +| `redis.type` | If an external Redis is used, set it to `external` | `internal` | +| `redis.internal.image.repository` | Repository for redis image | `goharbor/redis-photon` | +| `redis.internal.image.tag` | Tag for redis image | `dev` | +| `redis.internal.resources` | The [resources] to allocate for container | undefined | +| `redis.internal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `redis.internal.nodeSelector` | Node labels for pod assignment | `{}` | +| `redis.internal.tolerations` | Tolerations for pod assignment | `[]` | +| `redis.internal.affinity` | Node/Pod affinities | `{}` | +| `redis.internal.priorityClassName` | The priority class to run the pod as | | +| `redis.internal.jobserviceDatabaseIndex` | The database index for jobservice | `1` | +| `redis.internal.registryDatabaseIndex` | The database index for registry | `2` | +| `redis.internal.trivyAdapterIndex` | The database index for trivy adapter | `5` | +| `redis.internal.harborDatabaseIndex` | The database index for harbor miscellaneous business logic | `0` | +| `redis.internal.cacheLayerDatabaseIndex` | The database index for the harbor cache layer | `0` | +| `redis.external.addr` | The addr of external Redis: `<host_redis>:<port_redis>`.
When using sentinel, it should be `<host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>` | `192.168.0.2:6379` | +| `redis.external.sentinelMasterSet` | The name of the set of Redis instances to monitor | | +| `redis.external.coreDatabaseIndex` | The database index for core | `0` | +| `redis.external.jobserviceDatabaseIndex` | The database index for jobservice | `1` | +| `redis.external.registryDatabaseIndex` | The database index for registry | `2` | +| `redis.external.trivyAdapterIndex` | The database index for trivy adapter | `5` | +| `redis.external.harborDatabaseIndex` | The database index for harbor miscellaneous business logic | `0` | +| `redis.external.cacheLayerDatabaseIndex` | The database index for the harbor cache layer | `0` | +| `redis.external.username` | The username of external Redis | | +| `redis.external.password` | The password of external Redis | | +| `redis.external.existingSecret` | Use an existing secret to connect to redis. The key must be `REDIS_PASSWORD`. | `""` | +| `redis.podAnnotations` | Annotations to add to the redis pod | `{}` | +| **Exporter** | | | +| `exporter.replicas` | The replica count | `1` | +| `exporter.revisionHistoryLimit` | The revision history limit | `10` | +| `exporter.podAnnotations` | Annotations to add to the exporter pod | `{}` | +| `exporter.image.repository` | Repository for exporter image | `goharbor/harbor-exporter` | +| `exporter.image.tag` | Tag for exporter image | `dev` | +| `exporter.nodeSelector` | Node labels for pod assignment | `{}` | +| `exporter.tolerations` | Tolerations for pod assignment | `[]` | +| `exporter.affinity` | Node/Pod affinities | `{}` | +| `exporter.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `exporter.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `exporter.cacheDuration` | The cache duration for information that the exporter collects from Harbor | `30` | +| `exporter.cacheCleanInterval` | The cache clean interval for information that the exporter collects from Harbor | `14400` | +| `exporter.priorityClassName` | The priority class to run the pod as | | +| **Metrics** | | | +| `metrics.enabled` | Whether to enable harbor metrics | `false` | +| `metrics.core.path` | The URL path for core metrics | `/metrics` | +| `metrics.core.port` | The port for core metrics | `8001` | +| `metrics.registry.path` | The URL path for registry metrics | `/metrics` | +| `metrics.registry.port` | The port for registry metrics | `8001` | +| `metrics.exporter.path` | The URL path for exporter metrics | `/metrics` | +| `metrics.exporter.port` | The port for exporter metrics | `8001` | +| `metrics.serviceMonitor.enabled` | Create a Prometheus ServiceMonitor. Requires the Prometheus CRDs | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels to upsert to the manifest | `""` | +| `metrics.serviceMonitor.interval` | Scrape interval for harbor metrics | `""` | +| `metrics.serviceMonitor.metricRelabelings` | Metric relabelings to add/modify/delete before ingestion | `[]` | +| `metrics.serviceMonitor.relabelings` | Relabelings to add/modify/delete on samples before scraping | `[]` | +| **Trace** | | | +| `trace.enabled` | Enable tracing or not | `false` | +| `trace.provider` | The tracing provider: `jaeger` or `otel`.
`jaeger` should be 1.26+ | `jaeger` | +| `trace.sample_rate` | Set `sample_rate` to 1 if you want sampling 100% of trace data; set 0.5 if you want sampling 50% of trace data, and so forth | `1` | +| `trace.namespace` | Namespace used to differentiate different harbor services | | +| `trace.attributes` | `attributes` is a key value dict contains user defined attributes used to initialize trace provider | | +| `trace.jaeger.endpoint` | The endpoint of jaeger | `http://hostname:14268/api/traces` | +| `trace.jaeger.username` | The username of jaeger | | +| `trace.jaeger.password` | The password of jaeger | | +| `trace.jaeger.agent_host` | The agent host of jaeger | | +| `trace.jaeger.agent_port` | The agent port of jaeger | `6831` | +| `trace.otel.endpoint` | The endpoint of otel | `hostname:4318` | +| `trace.otel.url_path` | The URL path of otel | `/v1/traces` | +| `trace.otel.compression` | Whether enable compression or not for otel | `false` | +| `trace.otel.insecure` | Whether establish insecure connection or not for otel | `true` | +| `trace.otel.timeout` | The timeout in seconds of otel | `10` | +| **Cache** | | | +| `cache.enabled` | Enable cache layer or not | `false` | +| `cache.expireHours` | The expire hours of cache layer | `24` | [resources]: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ [trivy]: https://github.com/aquasecurity/trivy [trivy-db]: https://github.com/aquasecurity/trivy-db +[trivy-java-db]: https://github.com/aquasecurity/trivy-java-db [trivy-rate-limiting]: https://github.com/aquasecurity/trivy#github-rate-limiting diff --git a/conf/notary-server.json b/conf/notary-server.json deleted file mode 100644 index b3c262413..000000000 --- a/conf/notary-server.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "server": { - "http_addr": ":4443" - }, - "trust_service": { - "type": "remote", - "hostname": "{{ template "harbor.notary-signer" . }}", - "port": "7899", - "tls_ca_file": "/etc/ssl/notary/ca.crt", - "key_algorithm": "ecdsa" - }, - "logging": { - "level": "{{ .Values.logLevel }}" - }, - "storage": { - "backend": "postgres", - "db_url": "{{ template "harbor.database.notaryServer" . }}" - }, - "auth": { - "type": "token", - "options": { - "realm": "{{ .Values.externalURL }}/service/token", - "service": "harbor-notary", - "issuer": "harbor-token-issuer", - "rootcertbundle": "/root.crt" - } - } -} \ No newline at end of file diff --git a/conf/notary-signer.json b/conf/notary-signer.json deleted file mode 100644 index 75a4d68bd..000000000 --- a/conf/notary-signer.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "server": { - "grpc_addr": ":7899", - "tls_cert_file": "/etc/ssl/notary/tls.crt", - "tls_key_file": "/etc/ssl/notary/tls.key" - }, - "logging": { - "level": "{{ .Values.logLevel }}" - }, - "storage": { - "backend": "postgres", - "db_url": "{{ template "harbor.database.notarySigner" . }}", - "default_alias": "defaultalias" - } -} \ No newline at end of file diff --git a/docs/High Availability.md b/docs/High Availability.md index e4a2c0217..17a190ae8 100644 --- a/docs/High Availability.md +++ b/docs/High Availability.md @@ -39,13 +39,13 @@ helm fetch harbor/harbor --untar Configure the following items in `values.yaml`, you can also set them as parameters via `--set` flag during running `helm install`: - **Ingress rule** - Configure the `expose.ingress.hosts.core` and `expose.ingress.hosts.notary`. + Configure the `expose.ingress.hosts.core`. - **External URL** Configure the `externalURL`. 
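For illustration only, a minimal `values.yaml` sketch covering the two items above; the hostname is a placeholder and should be replaced with your own domain:

```yaml
expose:
  type: ingress
  ingress:
    hosts:
      core: core.harbor.example.com   # placeholder hostname for the Harbor core ingress rule
externalURL: https://core.harbor.example.com   # should match the URL clients use to reach Harbor
```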
- **External PostgreSQL** Set the `database.type` to `external` and fill the information in `database.external` section. - Four empty databases should be created manually for `Harbor core`, `Notary server` and `Notary signer` and configure them in the section. Harbor will create tables automatically when starting up. + An empty database should be created manually for `Harbor core` and configured in the section. Harbor will create tables automatically when starting up. - **External Redis** Set the `redis.type` to `external` and fill the information in `redis.external` section. Redis sentinel is supported after v1.9.0, configure the `redis.external.sentinelMasterSet` and `redis.external.addr` to enable it. @@ -61,7 +61,7 @@ Configure the following items in `values.yaml`, you can also set them as paramet If you have no PVCs that can be shared across nodes, you can use external object storage to store images and charts and store the job logs in database. Set the `persistence.imageChartStorage.type` to the value you want to use and fill the corresponding section and set `jobservice.jobLoggers` to `database`. - **Replica** - Set `portal.replicas`, `core.replicas`, `jobservice.replicas`, `registry.replicas`, `notary.server.replicas` and `notary.signer.replicas` to `n`(`n`>=2). + Set `portal.replicas`, `core.replicas`, `jobservice.replicas` and `registry.replicas` to `n` (`n`>=2). ### Installation diff --git a/templates/_helpers.tpl b/templates/_helpers.tpl index 7274bc1b9..0914d5684 100644 --- a/templates/_helpers.tpl +++ b/templates/_helpers.tpl @@ -31,6 +31,14 @@ heritage: {{ .Release.Service }} release: {{ .Release.Name }} chart: {{ .Chart.Name }} app: "{{ template "harbor.name" .
}}" {{- end -}} {{- end -}} -{{- define "harbor.database.notaryServerDatabase" -}} - {{- if eq .Values.database.type "internal" -}} - {{- printf "%s" "notaryserver" -}} - {{- else -}} - {{- .Values.database.external.notaryServerDatabase -}} - {{- end -}} -{{- end -}} - -{{- define "harbor.database.notarySignerDatabase" -}} - {{- if eq .Values.database.type "internal" -}} - {{- printf "%s" "notarysigner" -}} - {{- else -}} - {{- .Values.database.external.notarySignerDatabase -}} - {{- end -}} -{{- end -}} - {{- define "harbor.database.sslmode" -}} {{- if eq .Values.database.type "internal" -}} {{- printf "%s" "disable" -}} @@ -135,14 +139,6 @@ app: "{{ template "harbor.name" . }}" {{- end -}} {{- end -}} -{{- define "harbor.database.notaryServer" -}} -postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.database.escapedRawPassword" . }}@{{ template "harbor.database.host" . }}:{{ template "harbor.database.port" . }}/{{ template "harbor.database.notaryServerDatabase" . }}?sslmode={{ template "harbor.database.sslmode" . }} -{{- end -}} - -{{- define "harbor.database.notarySigner" -}} -postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.database.escapedRawPassword" . }}@{{ template "harbor.database.host" . }}:{{ template "harbor.database.port" . }}/{{ template "harbor.database.notarySignerDatabase" . }}?sslmode={{ template "harbor.database.sslmode" . }} -{{- end -}} - {{- define "harbor.redis.scheme" -}} {{- with .Values.redis }} {{- ternary "redis+sentinel" "redis" (and (eq .type "external" ) (not (not .external.sentinelMasterSet))) }} @@ -168,12 +164,26 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab {{- end }} {{- end -}} + +{{- define "harbor.redis.pwdfromsecret" -}} + {{- (lookup "v1" "Secret" .Release.Namespace (.Values.redis.external.existingSecret)).data.REDIS_PASSWORD | b64dec }} +{{- end -}} + +{{- define "harbor.redis.cred" -}} + {{- with .Values.redis }} + {{- if (and (eq .type "external" ) (.external.existingSecret)) }} + {{- printf ":%s@" (include "harbor.redis.pwdfromsecret" $) }} + {{- else }} + {{- ternary (printf "%s:%s@" (.external.username | urlquery) (.external.password | urlquery)) "" (and (eq .type "external" ) (not (not .external.password))) }} + {{- end }} + {{- end }} +{{- end -}} + /*scheme://[:password@]host:port[/master_set]*/ {{- define "harbor.redis.url" -}} {{- with .Values.redis }} {{- $path := ternary "" (printf "/%s" (include "harbor.redis.masterSet" $)) (not (include "harbor.redis.masterSet" $)) }} - {{- $cred := ternary (printf ":%s@" (.external.password | urlquery)) "" (and (eq .type "external" ) (not (not .external.password))) }} - {{- printf "%s://%s%s%s" (include "harbor.redis.scheme" $) $cred (include "harbor.redis.addr" $) $path -}} + {{- printf "%s://%s%s%s" (include "harbor.redis.scheme" $) (include "harbor.redis.cred" $) (include "harbor.redis.addr" $) $path -}} {{- end }} {{- end -}} @@ -188,7 +198,7 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab /*scheme://[:password@]addr/db_index*/ {{- define "harbor.redis.urlForJobservice" -}} {{- with .Values.redis }} - {{- $index := ternary "1" .external.jobserviceDatabaseIndex (eq .type "internal") }} + {{- $index := ternary .internal.jobserviceDatabaseIndex .external.jobserviceDatabaseIndex (eq .type "internal") }} {{- printf "%s/%s" (include "harbor.redis.url" $) $index -}} {{- end }} {{- end -}} @@ -196,7 +206,7 @@ postgres://{{ template "harbor.database.username" . 
}}:{{ template "harbor.datab /*scheme://[:password@]addr/db_index?idle_timeout_seconds=30*/ {{- define "harbor.redis.urlForRegistry" -}} {{- with .Values.redis }} - {{- $index := ternary "2" .external.registryDatabaseIndex (eq .type "internal") }} + {{- $index := ternary .internal.registryDatabaseIndex .external.registryDatabaseIndex (eq .type "internal") }} {{- printf "%s/%s?idle_timeout_seconds=30" (include "harbor.redis.url" $) $index -}} {{- end }} {{- end -}} @@ -204,14 +214,30 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab /*scheme://[:password@]addr/db_index?idle_timeout_seconds=30*/ {{- define "harbor.redis.urlForTrivy" -}} {{- with .Values.redis }} - {{- $index := ternary "5" .external.trivyAdapterIndex (eq .type "internal") }} + {{- $index := ternary .internal.trivyAdapterIndex .external.trivyAdapterIndex (eq .type "internal") }} + {{- printf "%s/%s?idle_timeout_seconds=30" (include "harbor.redis.url" $) $index -}} + {{- end }} +{{- end -}} + +/*scheme://[:password@]addr/db_index?idle_timeout_seconds=30*/ +{{- define "harbor.redis.urlForHarbor" -}} + {{- with .Values.redis }} + {{- $index := ternary .internal.harborDatabaseIndex .external.harborDatabaseIndex (eq .type "internal") }} + {{- printf "%s/%s?idle_timeout_seconds=30" (include "harbor.redis.url" $) $index -}} + {{- end }} +{{- end -}} + +/*scheme://[:password@]addr/db_index?idle_timeout_seconds=30*/ +{{- define "harbor.redis.urlForCache" -}} + {{- with .Values.redis }} + {{- $index := ternary .internal.cacheLayerDatabaseIndex .external.cacheLayerDatabaseIndex (eq .type "internal") }} {{- printf "%s/%s?idle_timeout_seconds=30" (include "harbor.redis.url" $) $index -}} {{- end }} {{- end -}} {{- define "harbor.redis.dbForRegistry" -}} {{- with .Values.redis }} - {{- ternary "2" .external.registryDatabaseIndex (eq .type "internal") }} + {{- ternary .internal.registryDatabaseIndex .external.registryDatabaseIndex (eq .type "internal") }} {{- end }} {{- end -}} @@ -247,14 +273,6 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab {{- printf "%s-trivy" (include "harbor.fullname" .) -}} {{- end -}} -{{- define "harbor.notary-server" -}} - {{- printf "%s-notary-server" (include "harbor.fullname" .) -}} -{{- end -}} - -{{- define "harbor.notary-signer" -}} - {{- printf "%s-notary-signer" (include "harbor.fullname" .) -}} -{{- end -}} - {{- define "harbor.nginx" -}} {{- printf "%s-nginx" (include "harbor.fullname" .) -}} {{- end -}} @@ -267,12 +285,8 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab {{- printf "%s-ingress" (include "harbor.fullname" .) -}} {{- end -}} -{{- define "harbor.ingress-notary" -}} - {{- printf "%s-ingress-notary" (include "harbor.fullname" .) -}} -{{- end -}} - {{- define "harbor.noProxy" -}} - {{- printf "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" (include "harbor.core" .) (include "harbor.jobservice" .) (include "harbor.database" .) (include "harbor.notary-server" .) (include "harbor.notary-signer" .) (include "harbor.registry" .) (include "harbor.portal" .) (include "harbor.trivy" .) (include "harbor.exporter" .) .Values.proxy.noProxy -}} + {{- printf "%s,%s,%s,%s,%s,%s,%s,%s" (include "harbor.core" .) (include "harbor.jobservice" .) (include "harbor.database" .) (include "harbor.registry" .) (include "harbor.portal" .) (include "harbor.trivy" .) (include "harbor.exporter" .) 
.Values.proxy.noProxy -}} {{- end -}} {{- define "harbor.caBundleVolume" -}} @@ -287,7 +301,7 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab subPath: ca.crt {{- end -}} -{{/* scheme for all components except notary because it only support http mode */}} +{{/* scheme for all components because it only support http mode */}} {{- define "harbor.component.scheme" -}} {{- if .Values.internalTLS.enabled -}} {{- printf "https" -}} @@ -490,16 +504,6 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab {{- end -}} {{- end -}} -{{- define "harbor.tlsNotarySecretForIngress" -}} - {{- if eq .Values.expose.tls.certSource "none" -}} - {{- printf "" -}} - {{- else if eq .Values.expose.tls.certSource "secret" -}} - {{- .Values.expose.tls.secret.notarySecretName -}} - {{- else -}} - {{- include "harbor.ingress" . -}} - {{- end -}} -{{- end -}} - {{- define "harbor.tlsSecretForNginx" -}} {{- if eq .Values.expose.tls.certSource "secret" -}} {{- .Values.expose.tls.secret.secretName -}} @@ -521,7 +525,7 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab TRACE_SAMPLE_RATE: "{{ .Values.trace.sample_rate }}" TRACE_NAMESPACE: "{{ .Values.trace.namespace }}" {{- if .Values.trace.attributes }} - TRACE_ATTRIBUTES: "{{ .Values.trace.attributes | toJson }}" + TRACE_ATTRIBUTES: {{ .Values.trace.attributes | toJson | squote }} {{- end }} {{- if eq .Values.trace.provider "jaeger" }} TRACE_JAEGER_ENDPOINT: "{{ .Values.trace.jaeger.endpoint }}" diff --git a/templates/core/core-cm.yaml b/templates/core/core-cm.yaml index 307074752..93cab01b4 100644 --- a/templates/core/core-cm.yaml +++ b/templates/core/core-cm.yaml @@ -26,8 +26,6 @@ data: JOBSERVICE_URL: "{{ template "harbor.jobserviceURL" . }}" REGISTRY_URL: "{{ template "harbor.registryURL" . }}" TOKEN_SERVICE_URL: "{{ template "harbor.tokenServiceURL" . }}" - WITH_NOTARY: "{{ .Values.notary.enabled }}" - NOTARY_URL: "http://{{ template "harbor.notary-server" . }}:4443" CORE_LOCAL_URL: "{{ ternary "https://127.0.0.1:8443" "http://127.0.0.1:8080" .Values.internalTLS.enabled }}" WITH_TRIVY: {{ .Values.trivy.enabled | quote }} TRIVY_ADAPTER_URL: "{{ template "harbor.trivyAdapterURL" . }}" @@ -37,6 +35,12 @@ data: CHART_CACHE_DRIVER: "redis" _REDIS_URL_CORE: "{{ template "harbor.redis.urlForCore" . }}" _REDIS_URL_REG: "{{ template "harbor.redis.urlForRegistry" . }}" + {{- if or (and (eq .Values.redis.type "internal") .Values.redis.internal.harborDatabaseIndex) (and (eq .Values.redis.type "external") .Values.redis.external.harborDatabaseIndex) }} + _REDIS_URL_HARBOR: "{{ template "harbor.redis.urlForHarbor" . }}" + {{- end }} + {{- if or (and (eq .Values.redis.type "internal") .Values.redis.internal.cacheLayerDatabaseIndex) (and (eq .Values.redis.type "external") .Values.redis.external.cacheLayerDatabaseIndex) }} + _REDIS_URL_CACHE_LAYER: "{{ template "harbor.redis.urlForCache" . }}" + {{- end }} PORTAL_URL: "{{ template "harbor.portalURL" . }}" REGISTRY_CONTROLLER_URL: "{{ template "harbor.registryControllerURL" . }}" REGISTRY_CREDENTIAL_USERNAME: "{{ .Values.registry.credentials.username }}" @@ -48,7 +52,7 @@ data: HTTPS_PROXY: "{{ .Values.proxy.httpsProxy }}" NO_PROXY: "{{ template "harbor.noProxy" . 
}}" {{- end }} - PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry,jfrog-artifactory" + PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory" {{- if .Values.metrics.enabled}} METRIC_ENABLE: "true" METRIC_PATH: "{{ .Values.metrics.core.path }}" @@ -63,17 +67,24 @@ data: {{- end }} {{- template "harbor.traceEnvsForCore" . }} - {{- if .Values.core.artifactPullAsyncFlushDuration | quote }} - ARTIFACT_PULL_ASYNC_FLUSH_DURATION: {{ .Values.core.artifactPullAsyncFlushDuration }} + {{- if .Values.core.artifactPullAsyncFlushDuration }} + ARTIFACT_PULL_ASYNC_FLUSH_DURATION: {{ .Values.core.artifactPullAsyncFlushDuration | quote }} {{- end }} {{- if .Values.core.gdpr}} {{- if .Values.core.gdpr.deleteUser}} GDPR_DELETE_USER: "true" {{- end }} + {{- if .Values.core.gdpr.auditLogsCompliant}} + GDPR_AUDIT_LOGS: "true" + {{- end }} {{- end }} {{- if .Values.cache.enabled }} CACHE_ENABLED: "true" CACHE_EXPIRE_HOURS: "{{ .Values.cache.expireHours }}" {{- end }} + + {{- if .Values.core.quotaUpdateProvider }} + QUOTA_UPDATE_PROVIDER: "{{ .Values.core.quotaUpdateProvider }}" + {{- end }} \ No newline at end of file diff --git a/templates/core/core-dpl.yaml b/templates/core/core-dpl.yaml index fb630c09f..2ee8fd59c 100644 --- a/templates/core/core-dpl.yaml +++ b/templates/core/core-dpl.yaml @@ -5,6 +5,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: core + app.kubernetes.io/component: core spec: replicas: {{ .Values.core.replicas }} revisionHistoryLimit: {{ .Values.core.revisionHistoryLimit }} @@ -15,8 +16,12 @@ spec: template: metadata: labels: -{{ include "harbor.matchLabels" . | indent 8 }} +{{ include "harbor.labels" . | indent 8 }} component: core + app.kubernetes.io/component: core +{{- if .Values.core.podLabels }} +{{ toYaml .Values.core.podLabels | indent 8 }} +{{- end }} annotations: checksum/configmap: {{ include (print $.Template.BasePath "/core/core-cm.yaml") . | sha256sum }} checksum/secret: {{ include (print $.Template.BasePath "/core/core-secret.yaml") . | sha256sum }} @@ -42,6 +47,16 @@ spec: {{- end }} automountServiceAccountToken: {{ .Values.core.automountServiceAccountToken | default false }} terminationGracePeriodSeconds: 120 +{{- with .Values.core.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: core +{{- end }} +{{- end }} {{- with .Values.core.initContainers }} initContainers: {{- toYaml . | nindent 8 }} @@ -83,13 +98,17 @@ spec: - name: CORE_SECRET valueFrom: secretKeyRef: - name: {{ template "harbor.core" . }} + name: {{ default (include "harbor.core" .) .Values.core.existingSecret }} key: secret - name: JOBSERVICE_SECRET valueFrom: secretKeyRef: - name: "{{ template "harbor.jobservice" . }}" + name: {{ default (include "harbor.jobservice" .) 
.Values.jobservice.existingSecret }} + {{- if .Values.jobservice.existingSecret }} + key: {{ .Values.jobservice.existingSecretKey }} + {{- else }} key: JOBSERVICE_SECRET + {{- end }} {{- if .Values.existingSecretAdminPassword }} - name: HARBOR_ADMIN_PASSWORD valueFrom: @@ -121,6 +140,19 @@ spec: name: {{ .Values.registry.credentials.existingSecret }} key: REGISTRY_PASSWD {{- end }} + {{- if .Values.core.existingXsrfSecret }} + - name: CSRF_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.core.existingXsrfSecret }} + key: {{ .Values.core.existingXsrfSecretKey }} + {{- end }} +{{- with .Values.core.extraEnvVars }} +{{- toYaml . | nindent 10 }} +{{- end }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} ports: - containerPort: {{ template "harbor.core.containerPort" . }} volumeMounts: diff --git a/templates/core/core-pre-upgrade-job.yaml b/templates/core/core-pre-upgrade-job.yaml index 43c9d3596..ce0b13134 100644 --- a/templates/core/core-pre-upgrade-job.yaml +++ b/templates/core/core-pre-upgrade-job.yaml @@ -47,6 +47,9 @@ spec: secretKeyRef: name: {{ .Values.database.external.existingSecret }} key: password + {{- end }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} {{- end }} volumeMounts: - name: config diff --git a/templates/core/core-secret.yaml b/templates/core/core-secret.yaml index 20f835b1d..62a41fce8 100644 --- a/templates/core/core-secret.yaml +++ b/templates/core/core-secret.yaml @@ -1,3 +1,4 @@ +{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (include "harbor.core" .) }} apiVersion: v1 kind: Secret metadata: @@ -9,7 +10,9 @@ data: {{- if not .Values.existingSecretSecretKey }} secretKey: {{ .Values.secretKey | b64enc | quote }} {{- end }} - secret: {{ .Values.core.secret | default (randAlphaNum 16) | b64enc | quote }} + {{- if not .Values.core.existingSecret }} + secret: {{ .Values.core.secret | default (include "harbor.secretKeyHelper" (dict "key" "secret" "data" $existingSecret.data)) | default (randAlphaNum 16) | b64enc | quote }} + {{- end }} {{- if not .Values.core.secretName }} {{- $ca := genCA "harbor-token-ca" 365 }} tls.key: {{ .Values.core.tokenKey | default $ca.Key | b64enc | quote }} @@ -24,5 +27,10 @@ data: {{- if not .Values.registry.credentials.existingSecret }} REGISTRY_CREDENTIAL_PASSWORD: {{ .Values.registry.credentials.password | b64enc | quote }} {{- end }} - CSRF_KEY: {{ .Values.core.xsrfKey | default (randAlphaNum 32) | b64enc | quote }} + {{- if not .Values.core.existingXsrfSecret }} + CSRF_KEY: {{ .Values.core.xsrfKey | default (include "harbor.secretKeyHelper" (dict "key" "CSRF_KEY" "data" $existingSecret.data)) | default (randAlphaNum 32) | b64enc | quote }} + {{- end }} +{{- if .Values.core.configureUserSettings }} + CONFIG_OVERWRITE_JSON: {{ .Values.core.configureUserSettings | b64enc | quote }} +{{- end }} {{- template "harbor.traceJaegerPassword" . }} diff --git a/templates/database/database-ss.yaml b/templates/database/database-ss.yaml index 733243c71..dc6f962e1 100644 --- a/templates/database/database-ss.yaml +++ b/templates/database/database-ss.yaml @@ -7,6 +7,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: database + app.kubernetes.io/component: database spec: replicas: 1 serviceName: "{{ template "harbor.database" . }}" @@ -19,6 +20,10 @@ spec: labels: {{ include "harbor.labels" . 
| indent 8 }} component: database + app.kubernetes.io/component: database +{{- if .Values.database.podLabels }} +{{ toYaml .Values.database.podLabels | indent 8 }} +{{- end }} annotations: checksum/secret: {{ include (print $.Template.BasePath "/database/database-secret.yaml") . | sha256sum }} {{- if .Values.database.podAnnotations }} @@ -38,23 +43,6 @@ spec: automountServiceAccountToken: {{ .Values.database.internal.automountServiceAccountToken | default false }} terminationGracePeriodSeconds: 120 initContainers: - # as we change the data directory to a sub folder to support psp, the init container here - # is used to migrate the existing data. See https://github.com/goharbor/harbor-helm/issues/756 - # for more detail. - # we may remove it after several releases - - name: "data-migrator" - image: {{ .Values.database.internal.image.repository }}:{{ .Values.database.internal.image.tag }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - command: ["/bin/sh"] - args: ["-c", "[ -e /var/lib/postgresql/data/postgresql.conf ] && [ ! -d /var/lib/postgresql/data/pgdata ] && mkdir -m 0700 /var/lib/postgresql/data/pgdata && mv /var/lib/postgresql/data/* /var/lib/postgresql/data/pgdata/ || true"] -{{- if .Values.database.internal.initContainer.migrator.resources }} - resources: -{{ toYaml .Values.database.internal.initContainer.migrator.resources | indent 10 }} -{{- end }} - volumeMounts: - - name: database-data - mountPath: /var/lib/postgresql/data - subPath: {{ $database.subPath }} # with "fsGroup" set, each time a volume is mounted, Kubernetes must recursively chown() and chmod() all the files and directories inside the volume # this causes the postgresql reports the "data directory /var/lib/postgresql/data/pgdata has group or world access" issue when using some CSIs e.g. Ceph # use this init container to correct the permission @@ -62,6 +50,9 @@ spec: - name: "data-permissions-ensurer" image: {{ .Values.database.internal.image.repository }}:{{ .Values.database.internal.image.tag }} imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} command: ["/bin/sh"] args: ["-c", "chmod -R 700 /var/lib/postgresql/data/pgdata || true"] {{- if .Values.database.internal.initContainer.permissions.resources }} @@ -76,6 +67,9 @@ spec: - name: database image: {{ .Values.database.internal.image.repository }}:{{ .Values.database.internal.image.tag }} imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} livenessProbe: exec: command: @@ -102,6 +96,9 @@ spec: # more detail refer to https://github.com/goharbor/harbor-helm/issues/756 - name: PGDATA value: "/var/lib/postgresql/data/pgdata" +{{- with .Values.database.internal.extraEnvVars }} +{{- toYaml . | nindent 10 }} +{{- end }} volumeMounts: - name: database-data mountPath: /var/lib/postgresql/data diff --git a/templates/exporter/exporter-dpl.yaml b/templates/exporter/exporter-dpl.yaml index 5ff36f48a..01e9258ea 100644 --- a/templates/exporter/exporter-dpl.yaml +++ b/templates/exporter/exporter-dpl.yaml @@ -6,6 +6,7 @@ metadata: labels: {{ include "harbor.labels" . 
| indent 4 }} component: exporter + app.kubernetes.io/component: exporter spec: replicas: {{ .Values.exporter.replicas }} revisionHistoryLimit: {{ .Values.exporter.revisionHistoryLimit }} @@ -18,7 +19,18 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: exporter + app.kubernetes.io/component: exporter +{{- if .Values.exporter.podLabels }} +{{ toYaml .Values.exporter.podLabels | indent 8 }} +{{- end }} annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/exporter/exporter-cm-env.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/exporter/exporter-secret.yaml") . | sha256sum }} +{{- if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "auto") }} + checksum/tls: {{ include (print $.Template.BasePath "/internal/auto-tls.yaml") . | sha256sum }} +{{- else if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "manual") }} + checksum/tls: {{ include (print $.Template.BasePath "/core/core-tls.yaml") . | sha256sum }} +{{- end }} {{- if .Values.exporter.podAnnotations }} {{ toYaml .Values.exporter.podAnnotations | indent 8 }} {{- end }} @@ -34,6 +46,16 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} automountServiceAccountToken: {{ .Values.exporter.automountServiceAccountToken | default false }} +{{- with .Values.exporter.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: exporter +{{- end }} +{{- end }} containers: - name: exporter image: {{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag }} @@ -75,8 +97,15 @@ spec: resources: {{ toYaml .Values.exporter.resources | indent 10 }} {{- end }} +{{- with .Values.exporter.extraEnvVars }} + env: +{{- toYaml . | nindent 10 }} +{{- end }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} ports: - - containerPort: {{ template "harbor.core.containerPort" . }} + - containerPort: {{ .Values.metrics.exporter.port }} volumeMounts: {{- if .Values.caBundleSecretName }} {{ include "harbor.caBundleVolumeMount" . | indent 8 }} diff --git a/templates/ingress/ingress.yaml b/templates/ingress/ingress.yaml index eedd13604..73472c605 100644 --- a/templates/ingress/ingress.yaml +++ b/templates/ingress/ingress.yaml @@ -8,7 +8,6 @@ {{- $_ := set . "v2_path" "/v2/*" -}} {{- $_ := set . "chartrepo_path" "/chartrepo/*" -}} {{- $_ := set . "controller_path" "/c/*" -}} - {{- $_ := set . "notary_path" "/" -}} {{- else if eq .Values.expose.ingress.controller "ncp" }} {{- $_ := set . "portal_path" "/.*" -}} {{- $_ := set . "api_path" "/api/.*" -}} @@ -16,7 +15,6 @@ {{- $_ := set . "v2_path" "/v2/.*" -}} {{- $_ := set . "chartrepo_path" "/chartrepo/.*" -}} {{- $_ := set . "controller_path" "/c/.*" -}} - {{- $_ := set . "notary_path" "/.*" -}} {{- else }} {{- $_ := set . "portal_path" "/" -}} {{- $_ := set . "api_path" "/api/" -}} @@ -24,7 +22,6 @@ {{- $_ := set . "v2_path" "/v2/" -}} {{- $_ := set . "chartrepo_path" "/chartrepo/" -}} {{- $_ := set . "controller_path" "/c/" -}} - {{- $_ := set . "notary_path" "/" -}} {{- end }} --- @@ -40,8 +37,8 @@ metadata: name: "{{ template "harbor.ingress" . }}" labels: {{ include "harbor.labels" . 
| indent 4 }} -{{- if $ingress.harbor.labels }} -{{ toYaml $ingress.harbor.labels | indent 4 }} +{{- if $ingress.labels }} +{{ toYaml $ingress.labels | indent 4 }} {{- end }} annotations: {{ toYaml $ingress.annotations | indent 4 }} @@ -54,9 +51,6 @@ metadata: ncp/http-redirect: "true" {{- end }} {{- end }} -{{- if $ingress.harbor.annotations }} -{{ toYaml $ingress.harbor.annotations | indent 4 }} -{{- end }} spec: {{- if $ingress.className }} ingressClassName: {{ $ingress.className }} @@ -145,65 +139,4 @@ spec: host: {{ $ingress.hosts.core }} {{- end }} -{{- if .Values.notary.enabled }} ---- -{{- if semverCompare "<1.14-0" (include "harbor.ingress.kubeVersion" .) }} -apiVersion: extensions/v1beta1 -{{- else if semverCompare "<1.19-0" (include "harbor.ingress.kubeVersion" .) }} -apiVersion: networking.k8s.io/v1beta1 -{{- else }} -apiVersion: networking.k8s.io/v1 -{{- end }} -kind: Ingress -metadata: - name: "{{ template "harbor.ingress-notary" . }}" - labels: -{{ include "harbor.labels" . | indent 4 }} -{{- if $ingress.notary.labels }} -{{ toYaml $ingress.notary.labels | indent 4 }} -{{- end }} - annotations: -{{ toYaml $ingress.annotations | indent 4 }} -{{- if eq .Values.expose.ingress.controller "ncp" }} - ncp/use-regex: "true" - {{- if $tls.enabled }} - ncp/http-redirect: "true" - {{- end }} -{{- end }} -{{- if $ingress.notary.annotations }} -{{ toYaml $ingress.notary.annotations | indent 4 }} -{{- end }} -spec: - {{- if $ingress.className }} - ingressClassName: {{ $ingress.className }} - {{- end }} - {{- if $tls.enabled }} - tls: - - secretName: {{ template "harbor.tlsNotarySecretForIngress" . }} - {{- if $ingress.hosts.notary }} - hosts: - - {{ $ingress.hosts.notary }} - {{- end }} - {{- end }} - rules: - - http: - paths: - - path: {{ .notary_path }} -{{- if semverCompare "<1.19-0" (include "harbor.ingress.kubeVersion" .) }} - backend: - serviceName: {{ template "harbor.notary-server" . }} - servicePort: 4443 -{{- else }} - pathType: Prefix - backend: - service: - name: {{ template "harbor.notary-server" . }} - port: - number: 4443 -{{- end -}} - {{- if $ingress.hosts.notary }} - host: {{ $ingress.hosts.notary }} - {{- end }} -{{- end }} - {{- end }} diff --git a/templates/ingress/secret.yaml b/templates/ingress/secret.yaml index 0d89af99a..41507b3dd 100644 --- a/templates/ingress/secret.yaml +++ b/templates/ingress/secret.yaml @@ -1,6 +1,6 @@ {{- if eq (include "harbor.autoGenCertForIngress" .) "true" }} {{- $ca := genCA "harbor-ca" 365 }} -{{- $cert := genSignedCert .Values.expose.ingress.hosts.core nil (list .Values.expose.ingress.hosts.core .Values.expose.ingress.hosts.notary) 365 $ca }} +{{- $cert := genSignedCert .Values.expose.ingress.hosts.core nil (list .Values.expose.ingress.hosts.core) 365 $ca }} apiVersion: v1 kind: Secret metadata: diff --git a/templates/jobservice/jobservice-cm-env.yaml b/templates/jobservice/jobservice-cm-env.yaml index 28b8f753f..8411c7a47 100644 --- a/templates/jobservice/jobservice-cm-env.yaml +++ b/templates/jobservice/jobservice-cm-env.yaml @@ -10,6 +10,10 @@ data: REGISTRY_URL: "{{ template "harbor.registryURL" . }}" REGISTRY_CONTROLLER_URL: "{{ template "harbor.registryControllerURL" . 
}}" REGISTRY_CREDENTIAL_USERNAME: "{{ .Values.registry.credentials.username }}" + + JOBSERVICE_WEBHOOK_JOB_MAX_RETRY: "{{ .Values.jobservice.notification.webhook_job_max_retry }}" + JOBSERVICE_WEBHOOK_JOB_HTTP_CLIENT_TIMEOUT: "{{ .Values.jobservice.notification.webhook_job_http_client_timeout }}" + {{- if has "jobservice" .Values.proxy.components }} HTTP_PROXY: "{{ .Values.proxy.httpProxy }}" HTTPS_PROXY: "{{ .Values.proxy.httpsProxy }}" @@ -25,3 +29,6 @@ data: CACHE_ENABLED: "true" CACHE_EXPIRE_HOURS: "{{ .Values.cache.expireHours }}" {{- end }} + {{- if or (and (eq .Values.redis.type "internal") .Values.redis.internal.cacheLayerDatabaseIndex) (and (eq .Values.redis.type "external") .Values.redis.external.cacheLayerDatabaseIndex) }} + _REDIS_URL_CACHE_LAYER: "{{ template "harbor.redis.urlForCache" . }}" + {{- end }} diff --git a/templates/jobservice/jobservice-cm.yaml b/templates/jobservice/jobservice-cm.yaml index 6500475e5..8211c6220 100644 --- a/templates/jobservice/jobservice-cm.yaml +++ b/templates/jobservice/jobservice-cm.yaml @@ -49,4 +49,9 @@ data: #Loggers for the job service loggers: - name: "STD_OUTPUT" - level: {{ .Values.logLevel | upper }} \ No newline at end of file + level: {{ .Values.logLevel | upper }} + reaper: + # the max time to wait for a task to finish, if unfinished after max_update_hours, the task will be mark as error, but the task will continue to run, default value is 24 + max_update_hours: {{ .Values.jobservice.reaper.max_update_hours }} + # the max time for execution in running state without new task created + max_dangling_hours: {{ .Values.jobservice.reaper.max_dangling_hours }} diff --git a/templates/jobservice/jobservice-dpl.yaml b/templates/jobservice/jobservice-dpl.yaml index 8c461a82c..1bb669082 100644 --- a/templates/jobservice/jobservice-dpl.yaml +++ b/templates/jobservice/jobservice-dpl.yaml @@ -5,6 +5,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: jobservice + app.kubernetes.io/component: jobservice spec: replicas: {{ .Values.jobservice.replicas }} revisionHistoryLimit: {{ .Values.jobservice.revisionHistoryLimit }} @@ -22,6 +23,10 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: jobservice + app.kubernetes.io/component: jobservice +{{- if .Values.jobservice.podLabels }} +{{ toYaml .Values.jobservice.podLabels | indent 8 }} +{{- end }} annotations: checksum/configmap: {{ include (print $.Template.BasePath "/jobservice/jobservice-cm.yaml") . | sha256sum }} checksum/configmap-env: {{ include (print $.Template.BasePath "/jobservice/jobservice-cm-env.yaml") . | sha256sum }} @@ -48,6 +53,16 @@ spec: {{- end }} automountServiceAccountToken: {{ .Values.jobservice.automountServiceAccountToken | default false }} terminationGracePeriodSeconds: 120 +{{- with .Values.jobservice.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: jobservice +{{- end }} +{{- end }} {{- with .Values.jobservice.initContainers }} initContainers: {{- toYaml . | nindent 8 }} @@ -78,8 +93,15 @@ spec: - name: CORE_SECRET valueFrom: secretKeyRef: - name: {{ template "harbor.core" . }} + name: {{ default (include "harbor.core" .) 
.Values.core.existingSecret }} key: secret + {{- if .Values.jobservice.existingSecret }} + - name: JOBSERVICE_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.jobservice.existingSecret }} + key: {{ .Values.jobservice.existingSecretKey }} + {{- end }} {{- if .Values.internalTLS.enabled }} - name: INTERNAL_TLS_ENABLED value: "true" @@ -97,6 +119,12 @@ spec: name: {{ .Values.registry.credentials.existingSecret }} key: REGISTRY_PASSWD {{- end }} +{{- with .Values.jobservice.extraEnvVars }} +{{- toYaml . | nindent 10 }} +{{- end }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} envFrom: - configMapRef: name: "{{ template "harbor.jobservice" . }}-env" diff --git a/templates/jobservice/jobservice-pvc.yaml b/templates/jobservice/jobservice-pvc.yaml index a6b8b8bd3..3f7d00b67 100644 --- a/templates/jobservice/jobservice-pvc.yaml +++ b/templates/jobservice/jobservice-pvc.yaml @@ -14,6 +14,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: jobservice + app.kubernetes.io/component: jobservice spec: accessModes: - {{ $jobLog.accessMode }} diff --git a/templates/jobservice/jobservice-secrets.yaml b/templates/jobservice/jobservice-secrets.yaml index 3dfa6bd5e..eeb00bde0 100644 --- a/templates/jobservice/jobservice-secrets.yaml +++ b/templates/jobservice/jobservice-secrets.yaml @@ -1,3 +1,4 @@ +{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (include "harbor.jobservice" .) }} apiVersion: v1 kind: Secret metadata: @@ -6,7 +7,9 @@ metadata: {{ include "harbor.labels" . | indent 4 }} type: Opaque data: - JOBSERVICE_SECRET: {{ .Values.jobservice.secret | default (randAlphaNum 16) | b64enc | quote }} + {{- if not .Values.jobservice.existingSecret }} + JOBSERVICE_SECRET: {{ .Values.jobservice.secret | default (include "harbor.secretKeyHelper" (dict "key" "JOBSERVICE_SECRET" "data" $existingSecret.data)) | default (randAlphaNum 16) | b64enc | quote }} + {{- end }} {{- if not .Values.registry.credentials.existingSecret }} REGISTRY_CREDENTIAL_PASSWORD: {{ .Values.registry.credentials.password | b64enc | quote }} {{- end }} diff --git a/templates/metrics/metrics-svcmon.yaml b/templates/metrics/metrics-svcmon.yaml index ad8522974..1122ef01e 100644 --- a/templates/metrics/metrics-svcmon.yaml +++ b/templates/metrics/metrics-svcmon.yaml @@ -1,4 +1,4 @@ -{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: diff --git a/templates/nginx/configmap-http.yaml b/templates/nginx/configmap-http.yaml index 3aa426356..c4b8354d0 100644 --- a/templates/nginx/configmap-http.yaml +++ b/templates/nginx/configmap-http.yaml @@ -127,6 +127,8 @@ data: proxy_set_header X-Forwarded-Proto $x_forwarded_proto; proxy_buffering off; proxy_request_buffering off; + proxy_send_timeout 900; + proxy_read_timeout 900; } location /service/ { diff --git a/templates/nginx/configmap-https.yaml b/templates/nginx/configmap-https.yaml index 045c576a8..56c943a61 100644 --- a/templates/nginx/configmap-https.yaml +++ b/templates/nginx/configmap-https.yaml @@ -36,12 +36,6 @@ data: server "{{ template "harbor.portal" . }}:{{ template "harbor.portal.servicePort" . }}"; } - {{- if .Values.notary.enabled }} - upstream notary-server { - server {{ template "harbor.notary-server" . 
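JOBSERVICE_SECRET can now come from a pre-created Secret instead of being generated or looked up by the chart. A sketch under the assumption that you create a Secret named `my-jobservice-secret` yourself; per the values comments the value should be a 16-character string:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-jobservice-secret              # hypothetical name
type: Opaque
stringData:
  JOBSERVICE_SECRET: "0123456789abcdef"   # example 16-char value
---
# corresponding values override (shown as comments)
# jobservice:
#   existingSecret: my-jobservice-secret
#   existingSecretKey: JOBSERVICE_SECRET  # default key name
```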
}}:4443; - } - {{- end }} - log_format timed_combined '[$time_local]:$remote_addr - ' '"$request" $status $body_bytes_sent ' '"$http_referer" "$http_user_agent" ' @@ -54,44 +48,6 @@ data: "" $scheme; } - {{- if .Values.notary.enabled }} - server { - {{- if .Values.ipFamily.ipv4.enabled }} - listen 4443 ssl; - {{- end}} - {{- if .Values.ipFamily.ipv6.enabled}} - listen [::]:4443 ssl; - {{- end }} - server_tokens off; - # ssl - ssl_certificate /etc/nginx/cert/tls.crt; - ssl_certificate_key /etc/nginx/cert/tls.key; - - # recommendations from https://raymii.org/s/tutorials/strong_ssl_security_on_nginx.html - ssl_protocols tlsv1.2; - ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; - ssl_prefer_server_ciphers on; - ssl_session_cache shared:ssl:10m; - - # disable any limits to avoid http 413 for large image uploads - client_max_body_size 0; - - # required to avoid http 411: see issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - proxy_pass http://notary-server/v2/; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $x_forwarded_proto; - - proxy_buffering off; - proxy_request_buffering off; - } - } - {{- end }} - server { {{- if .Values.ipFamily.ipv4.enabled }} listen 8443 ssl; @@ -106,8 +62,12 @@ data: ssl_certificate_key /etc/nginx/cert/tls.key; # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html - ssl_protocols TLSv1.2; + ssl_protocols TLSv1.2 TLSv1.3; + {{- if .Values.internalTLS.strong_ssl_ciphers }} + ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128; + {{ else }} ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; + {{- end }} ssl_prefer_server_ciphers on; ssl_session_cache shared:SSL:10m; diff --git a/templates/nginx/deployment.yaml b/templates/nginx/deployment.yaml index bc1de0abf..3abc94198 100644 --- a/templates/nginx/deployment.yaml +++ b/templates/nginx/deployment.yaml @@ -6,6 +6,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: nginx + app.kubernetes.io/component: nginx spec: replicas: {{ .Values.nginx.replicas }} revisionHistoryLimit: {{ .Values.nginx.revisionHistoryLimit }} @@ -18,6 +19,10 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: nginx + app.kubernetes.io/component: nginx +{{- if .Values.nginx.podLabels }} +{{ toYaml .Values.nginx.podLabels | indent 8 }} +{{- end }} annotations: {{- if not .Values.expose.tls.enabled }} checksum/configmap: {{ include (print $.Template.BasePath "/nginx/configmap-http.yaml") . | sha256sum }} @@ -42,6 +47,16 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} automountServiceAccountToken: {{ .Values.nginx.automountServiceAccountToken | default false }} +{{- with .Values.nginx.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: nginx +{{- end }} +{{- end }} containers: - name: nginx image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" @@ -70,10 +85,18 @@ spec: resources: {{ toYaml .Values.nginx.resources | indent 10 }} {{- end }} +{{- with .Values.nginx.extraEnvVars }} + env: +{{- toYaml . 
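The nginx server block above now negotiates TLS 1.2/1.3 and can switch to the stricter cipher list via `internalTLS.strong_ssl_ciphers`; note that, as written in this template, the flag under `internalTLS` also drives the externally exposed nginx and portal configs. A hedged toggle example:

```yaml
expose:
  tls:
    enabled: true
internalTLS:
  strong_ssl_ciphers: true   # selects the ECDHE+AESGCM:DHE+AESGCM:... list instead of the legacy ciphers
```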
| nindent 10 }} +{{- end }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} ports: - containerPort: 8080 + {{- if .Values.expose.tls.enabled }} - containerPort: 8443 - - containerPort: 4443 + {{- end }} volumeMounts: - name: config mountPath: /etc/nginx/nginx.conf diff --git a/templates/nginx/service.yaml b/templates/nginx/service.yaml index df4da0944..691584ce0 100644 --- a/templates/nginx/service.yaml +++ b/templates/nginx/service.yaml @@ -7,12 +7,18 @@ metadata: name: {{ $clusterIP.name }} labels: {{ include "harbor.labels" . | indent 4 }} +{{- if .Values.expose.clusterIP.labels }} +{{ toYaml $clusterIP.labels | indent 4 }} +{{- end }} {{- with $clusterIP.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} spec: type: ClusterIP + {{- if .Values.expose.clusterIP.staticClusterIP }} + clusterIP: {{ .Values.expose.clusterIP.staticClusterIP }} + {{- end }} ports: - name: http port: {{ $clusterIP.ports.httpPort }} @@ -22,16 +28,18 @@ spec: port: {{ $clusterIP.ports.httpsPort }} targetPort: 8443 {{- end }} - {{- if .Values.notary.enabled }} - - name: notary - port: {{ $clusterIP.ports.notaryPort }} - targetPort: 4443 - {{- end }} {{- else if eq .Values.expose.type "nodePort" }} {{- $nodePort := .Values.expose.nodePort }} name: {{ $nodePort.name }} labels: {{ include "harbor.labels" . | indent 4 }} +{{- if .Values.expose.nodePort.labels }} +{{ toYaml $nodePort.labels | indent 4 }} +{{- end }} +{{- with $nodePort.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} spec: type: NodePort ports: @@ -49,19 +57,14 @@ spec: nodePort: {{ $nodePort.ports.https.nodePort }} {{- end }} {{- end }} - {{- if .Values.notary.enabled }} - - name: notary - port: {{ $nodePort.ports.notary.port }} - targetPort: 4443 - {{- if $nodePort.ports.notary.nodePort }} - nodePort: {{ $nodePort.ports.notary.nodePort }} - {{- end }} - {{- end }} {{- else if eq .Values.expose.type "loadBalancer" }} {{- $loadBalancer := .Values.expose.loadBalancer }} name: {{ $loadBalancer.name }} labels: {{ include "harbor.labels" . | indent 4 }} +{{- if .Values.expose.loadBalancer.labels }} +{{ toYaml $loadBalancer.labels | indent 4 }} +{{- end }} {{- with $loadBalancer.annotations }} annotations: {{- toYaml . | nindent 4 }} @@ -84,11 +87,6 @@ spec: port: {{ $loadBalancer.ports.httpsPort }} targetPort: 8443 {{- end }} - {{- if .Values.notary.enabled }} - - name: notary - port: {{ $loadBalancer.ports.notaryPort }} - targetPort: 4443 - {{- end }} {{- end }} selector: {{ include "harbor.matchLabels" . | indent 4 }} diff --git a/templates/notary/notary-secret.yaml b/templates/notary/notary-secret.yaml deleted file mode 100644 index 6de63dd8c..000000000 --- a/templates/notary/notary-secret.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if and .Values.notary.enabled }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "harbor.notary-server" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} - component: notary -type: Opaque -data: - {{- if not .Values.notary.secretName }} - {{- $ca := genCA "harbor-notary-ca" 365 }} - {{- $cert := genSignedCert (include "harbor.notary-signer" .) nil (list (include "harbor.notary-signer" .)) 365 $ca }} - ca.crt: {{ $ca.Cert | b64enc | quote }} - tls.crt: {{ $cert.Cert | b64enc | quote }} - tls.key: {{ $cert.Key | b64enc | quote }} - {{- end }} - server.json: {{ tpl (.Files.Get "conf/notary-server.json") . 
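The ClusterIP, NodePort, and LoadBalancer services gain label/annotation hooks, and the ClusterIP flavor can pin a static address. Sketch for ClusterIP; the address is an assumption and must fall inside your cluster's service CIDR:

```yaml
expose:
  type: clusterIP
  clusterIP:
    name: harbor
    staticClusterIP: 10.96.0.100   # hypothetical IP inside the service CIDR
    labels:
      expose: internal             # hypothetical label
    annotations: {}
    ports:
      httpPort: 80
      httpsPort: 443
```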
| b64enc }} - signer.json: {{ tpl (.Files.Get "conf/notary-signer.json") . | b64enc }} - NOTARY_SERVER_DB_URL: {{ include "harbor.database.notaryServer" . | b64enc }} - NOTARY_SIGNER_DB_URL: {{ include "harbor.database.notarySigner" . | b64enc }} -{{- end }} diff --git a/templates/notary/notary-server.yaml b/templates/notary/notary-server.yaml deleted file mode 100644 index 64cfd293f..000000000 --- a/templates/notary/notary-server.yaml +++ /dev/null @@ -1,111 +0,0 @@ -{{ if .Values.notary.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "harbor.notary-server" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} - component: notary-server -spec: - replicas: {{ .Values.notary.server.replicas }} - selector: - matchLabels: -{{ include "harbor.matchLabels" . | indent 6 }} - component: notary-server - template: - metadata: - labels: -{{ include "harbor.labels" . | indent 8 }} - component: notary-server - annotations: - checksum/secret: {{ include (print $.Template.BasePath "/notary/notary-secret.yaml") . | sha256sum }} - checksum/secret-core: {{ include (print $.Template.BasePath "/core/core-secret.yaml") . | sha256sum }} -{{- if .Values.notary.server.podAnnotations }} -{{ toYaml .Values.notary.server.podAnnotations | indent 8 }} -{{- end }} - spec: - securityContext: - runAsUser: 10000 - fsGroup: 10000 -{{- if .Values.notary.server.serviceAccountName }} - serviceAccountName: {{ .Values.notary.server.serviceAccountName }} -{{- end -}} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - automountServiceAccountToken: {{ .Values.notary.server.automountServiceAccountToken | default false }} - containers: - - name: notary-server - image: {{ .Values.notary.server.image.repository }}:{{ .Values.notary.server.image.tag }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - livenessProbe: - httpGet: - path: /_notary_server/health - scheme: "HTTP" - port: 4443 - initialDelaySeconds: 300 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /_notary_server/health - scheme: "HTTP" - port: 4443 - initialDelaySeconds: 20 - periodSeconds: 10 -{{- if .Values.notary.server.resources }} - resources: -{{ toYaml .Values.notary.server.resources | indent 10 }} -{{- end }} - env: - - name: MIGRATIONS_PATH - value: migrations/server/postgresql - - name: DB_URL - valueFrom: - secretKeyRef: - name: {{ template "harbor.notary-server" . }} - key: NOTARY_SERVER_DB_URL - volumeMounts: - - name: config - mountPath: /etc/notary/server-config.postgres.json - subPath: server.json - - name: token-service-certificate - mountPath: /root.crt - subPath: tls.crt - - name: signer-certificate - mountPath: /etc/ssl/notary/ca.crt - subPath: ca.crt - volumes: - - name: config - secret: - secretName: "{{ template "harbor.notary-server" . }}" - - name: token-service-certificate - secret: - {{- if .Values.core.secretName }} - secretName: {{ .Values.core.secretName }} - {{- else }} - secretName: {{ template "harbor.core" . }} - {{- end }} - - name: signer-certificate - secret: - {{- if .Values.notary.secretName }} - secretName: {{ .Values.notary.secretName }} - {{- else }} - secretName: {{ template "harbor.notary-server" . }} - {{- end }} - {{- with .Values.notary.server.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.notary.server.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.notary.server.tolerations }} - tolerations: -{{ toYaml . 
| indent 8 }} - {{- end }} - {{- if .Values.notary.server.priorityClassName }} - priorityClassName: {{ .Values.notary.server.priorityClassName }} - {{- end }} -{{ end }} diff --git a/templates/notary/notary-signer.yaml b/templates/notary/notary-signer.yaml deleted file mode 100644 index d94e4909b..000000000 --- a/templates/notary/notary-signer.yaml +++ /dev/null @@ -1,105 +0,0 @@ -{{ if .Values.notary.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "harbor.notary-signer" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} - component: notary-signer -spec: - replicas: {{ .Values.notary.signer.replicas }} - selector: - matchLabels: -{{ include "harbor.matchLabels" . | indent 6 }} - component: notary-signer - template: - metadata: - labels: -{{ include "harbor.labels" . | indent 8 }} - component: notary-signer - annotations: - checksum/secret: {{ include (print $.Template.BasePath "/notary/notary-secret.yaml") . | sha256sum }} -{{- if .Values.notary.signer.podAnnotations }} -{{ toYaml .Values.notary.signer.podAnnotations | indent 8 }} -{{- end }} - spec: - securityContext: - runAsUser: 10000 - fsGroup: 10000 -{{- if .Values.notary.signer.serviceAccountName }} - serviceAccountName: {{ .Values.notary.signer.serviceAccountName }} -{{- end -}} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - automountServiceAccountToken: {{ .Values.notary.signer.automountServiceAccountToken | default false }} - containers: - - name: notary-signer - image: {{ .Values.notary.signer.image.repository }}:{{ .Values.notary.signer.image.tag }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - livenessProbe: - httpGet: - path: / - scheme: "HTTPS" - port: 7899 - initialDelaySeconds: 300 - periodSeconds: 10 - readinessProbe: - httpGet: - path: / - scheme: "HTTPS" - port: 7899 - initialDelaySeconds: 20 - periodSeconds: 10 -{{- if .Values.notary.signer.resources }} - resources: -{{ toYaml .Values.notary.signer.resources | indent 10 }} -{{- end }} - env: - - name: MIGRATIONS_PATH - value: migrations/signer/postgresql - - name: DB_URL - valueFrom: - secretKeyRef: - name: {{ template "harbor.notary-server" . }} - key: NOTARY_SIGNER_DB_URL - - name: NOTARY_SIGNER_DEFAULTALIAS - value: defaultalias - volumeMounts: - - name: config - mountPath: /etc/notary/signer-config.postgres.json - subPath: signer.json - - name: signer-certificate - mountPath: /etc/ssl/notary/tls.crt - subPath: tls.crt - - name: signer-certificate - mountPath: /etc/ssl/notary/tls.key - subPath: tls.key - volumes: - - name: config - secret: - secretName: "{{ template "harbor.notary-server" . }}" - - name: signer-certificate - secret: - {{- if .Values.notary.secretName }} - secretName: {{ .Values.notary.secretName }} - {{- else }} - secretName: {{ template "harbor.notary-server" . }} - {{- end }} - {{- with .Values.notary.signer.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.notary.signer.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.notary.signer.tolerations }} - tolerations: -{{ toYaml . 
| indent 8 }} - {{- end }} - {{- if .Values.notary.signer.priorityClassName }} - priorityClassName: {{ .Values.notary.signer.priorityClassName }} - {{- end }} -{{ end }} diff --git a/templates/notary/notary-svc.yaml b/templates/notary/notary-svc.yaml deleted file mode 100644 index b6aa42d89..000000000 --- a/templates/notary/notary-svc.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{ if .Values.notary.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "harbor.notary-server" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} -{{- with .Values.notary.serviceAnnotations }} - annotations: - {{- toYaml . | nindent 4 }} -{{- end }} -spec: -{{- if or (eq .Values.expose.ingress.controller "gce") (eq .Values.expose.ingress.controller "alb") (eq .Values.expose.ingress.controller "f5-bigip") }} - type: NodePort -{{- end }} - ports: - - port: 4443 - selector: -{{ include "harbor.matchLabels" . | indent 4 }} - component: notary-server - ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "harbor.notary-signer" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} -spec: - ports: - - port: 7899 - selector: -{{ include "harbor.matchLabels" . | indent 4 }} - component: notary-signer -{{ end }} diff --git a/templates/portal/configmap.yaml b/templates/portal/configmap.yaml index 1cea8ab63..7b2118e72 100644 --- a/templates/portal/configmap.yaml +++ b/templates/portal/configmap.yaml @@ -30,8 +30,12 @@ data: ssl_certificate_key /etc/harbor/ssl/portal/tls.key; # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html - ssl_protocols TLSv1.2; + ssl_protocols TLSv1.2 TLSv1.3; + {{- if .Values.internalTLS.strong_ssl_ciphers }} + ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128; + {{ else }} ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; + {{- end }} ssl_prefer_server_ciphers on; ssl_session_cache shared:SSL:10m; {{- else }} diff --git a/templates/portal/deployment.yaml b/templates/portal/deployment.yaml index 7f1e62f59..4dea94438 100644 --- a/templates/portal/deployment.yaml +++ b/templates/portal/deployment.yaml @@ -5,6 +5,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: portal + app.kubernetes.io/component: portal spec: replicas: {{ .Values.portal.replicas }} revisionHistoryLimit: {{ .Values.portal.revisionHistoryLimit }} @@ -15,14 +16,19 @@ spec: template: metadata: labels: -{{ include "harbor.matchLabels" . | indent 8 }} +{{ include "harbor.labels" . | indent 8 }} component: portal + app.kubernetes.io/component: portal +{{- if .Values.portal.podLabels }} +{{ toYaml .Values.portal.podLabels | indent 8 }} +{{- end }} annotations: {{- if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "auto") }} checksum/tls: {{ include (print $.Template.BasePath "/internal/auto-tls.yaml") . | sha256sum }} {{- else if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "manual") }} checksum/tls: {{ include (print $.Template.BasePath "/portal/tls.yaml") . | sha256sum }} {{- end }} + checksum/configmap: {{ include (print $.Template.BasePath "/portal/configmap.yaml") . 
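The portal pod template now carries the full `harbor.labels` set plus optional `portal.podLabels`, and its Service accepts `portal.serviceAnnotations`. Illustrative values only; both entries are hypothetical:

```yaml
portal:
  podLabels:
    sidecar.istio.io/inject: "false"   # hypothetical label
  serviceAnnotations:
    prometheus.io/scrape: "false"      # hypothetical annotation
```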
| sha256sum }} {{- if .Values.portal.podAnnotations }} {{ toYaml .Values.portal.podAnnotations | indent 8 }} {{- end }} @@ -38,6 +44,16 @@ spec: serviceAccountName: {{ .Values.portal.serviceAccountName }} {{- end }} automountServiceAccountToken: {{ .Values.portal.automountServiceAccountToken | default false }} +{{- with .Values.portal.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: portal +{{- end }} +{{- end }} {{- with .Values.portal.initContainers }} initContainers: {{- toYaml . | nindent 8 }} @@ -50,6 +66,13 @@ spec: resources: {{ toYaml .Values.portal.resources | indent 10 }} {{- end }} +{{- with .Values.portal.extraEnvVars }} + env: +{{- toYaml . | nindent 10 }} +{{- end }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} livenessProbe: httpGet: path: / diff --git a/templates/portal/service.yaml b/templates/portal/service.yaml index ff4eda435..d00026da4 100644 --- a/templates/portal/service.yaml +++ b/templates/portal/service.yaml @@ -4,6 +4,10 @@ metadata: name: "{{ template "harbor.portal" . }}" labels: {{ include "harbor.labels" . | indent 4 }} +{{- with .Values.portal.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} spec: {{- if or (eq .Values.expose.ingress.controller "gce") (eq .Values.expose.ingress.controller "alb") (eq .Values.expose.ingress.controller "f5-bigip") }} type: NodePort diff --git a/templates/redis/statefulset.yaml b/templates/redis/statefulset.yaml index 74b7581fd..9573f9ad6 100644 --- a/templates/redis/statefulset.yaml +++ b/templates/redis/statefulset.yaml @@ -7,6 +7,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: redis + app.kubernetes.io/component: redis spec: replicas: 1 serviceName: {{ template "harbor.redis" . }} @@ -19,6 +20,10 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: redis + app.kubernetes.io/component: redis +{{- if .Values.redis.podLabels }} +{{ toYaml .Values.redis.podLabels | indent 8 }} +{{- end }} {{- if .Values.redis.podAnnotations }} annotations: {{ toYaml .Values.redis.podAnnotations | indent 8 }} @@ -40,6 +45,9 @@ spec: - name: redis image: {{ .Values.redis.internal.image.repository }}:{{ .Values.redis.internal.image.tag }} imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} livenessProbe: tcpSocket: port: 6379 @@ -53,6 +61,10 @@ spec: {{- if .Values.redis.internal.resources }} resources: {{ toYaml .Values.redis.internal.resources | indent 10 }} +{{- end }} +{{- with .Values.redis.internal.extraEnvVars }} + env: +{{- toYaml . | nindent 10 }} {{- end }} volumeMounts: - name: data diff --git a/templates/registry/registry-dpl.yaml b/templates/registry/registry-dpl.yaml index 09224b545..0965cf2e2 100644 --- a/templates/registry/registry-dpl.yaml +++ b/templates/registry/registry-dpl.yaml @@ -7,6 +7,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: registry + app.kubernetes.io/component: registry spec: replicas: {{ .Values.registry.replicas }} revisionHistoryLimit: {{ .Values.registry.revisionHistoryLimit }} @@ -24,6 +25,10 @@ spec: labels: {{ include "harbor.labels" . 
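The internal Redis statefulset above picks up `redis.podLabels`, the shared `containerSecurityContext`, and `redis.internal.extraEnvVars`. A small sketch with placeholder values:

```yaml
redis:
  type: internal
  podLabels:
    backup: enabled            # hypothetical label
  internal:
    extraEnvVars:
      - name: TZ               # hypothetical variable
        value: "UTC"
```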
| indent 8 }} component: registry + app.kubernetes.io/component: registry +{{- if .Values.registry.podLabels }} +{{ toYaml .Values.registry.podLabels | indent 8 }} +{{- end }} annotations: checksum/configmap: {{ include (print $.Template.BasePath "/registry/registry-cm.yaml") . | sha256sum }} checksum/secret: {{ include (print $.Template.BasePath "/registry/registry-secret.yaml") . | sha256sum }} @@ -51,6 +56,16 @@ spec: {{- end }} automountServiceAccountToken: {{ .Values.registry.automountServiceAccountToken | default false }} terminationGracePeriodSeconds: 120 +{{- with .Values.registry.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: registry +{{- end }} +{{- end }} {{- with .Values.registry.initContainers }} initContainers: {{- toYaml . | nindent 8 }} @@ -77,6 +92,9 @@ spec: resources: {{ toYaml .Values.registry.registry.resources | indent 10 }} {{- end }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} args: ["serve", "/etc/registry/config.yml"] envFrom: - secretRef: @@ -86,6 +104,13 @@ spec: name: {{ .Values.persistence.imageChartStorage.s3.existingSecret }} {{- end }} env: + {{- if .Values.registry.existingSecret }} + - name: REGISTRY_HTTP_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.registry.existingSecret }} + key: {{ .Values.registry.existingSecretKey }} + {{- end }} {{- if has "registry" .Values.proxy.components }} - name: HTTP_PROXY value: "{{ .Values.proxy.httpProxy }}" @@ -104,6 +129,13 @@ spec: - name: INTERNAL_TLS_TRUST_CA_PATH value: /etc/harbor/ssl/registry/ca.crt {{- end }} + {{- if .Values.redis.external.existingSecret }} + - name: REGISTRY_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.redis.external.existingSecret }} + key: REDIS_PASSWORD + {{- end }} {{- if .Values.persistence.imageChartStorage.azure.existingSecret }} - name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY valueFrom: @@ -111,9 +143,39 @@ spec: name: {{ .Values.persistence.imageChartStorage.azure.existingSecret }} key: AZURE_STORAGE_ACCESS_KEY {{- end }} + {{- if .Values.persistence.imageChartStorage.swift.existingSecret }} + - name: REGISTRY_STORAGE_SWIFT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.persistence.imageChartStorage.swift.existingSecret }} + key: REGISTRY_STORAGE_SWIFT_PASSWORD + - name: REGISTRY_STORAGE_SWIFT_SECRETKEY + valueFrom: + secretKeyRef: + name: {{ .Values.persistence.imageChartStorage.swift.existingSecret }} + key: REGISTRY_STORAGE_SWIFT_SECRETKEY + optional: true + - name: REGISTRY_STORAGE_SWIFT_ACCESSKEY + valueFrom: + secretKeyRef: + name: {{ .Values.persistence.imageChartStorage.swift.existingSecret }} + key: REGISTRY_STORAGE_SWIFT_ACCESSKEY + optional: true + {{- end }} + {{- if .Values.persistence.imageChartStorage.oss.existingSecret }} + - name: REGISTRY_STORAGE_OSS_ACCESSKEYSECRET + valueFrom: + secretKeyRef: + name: {{ .Values.persistence.imageChartStorage.oss.existingSecret }} + key: REGISTRY_STORAGE_OSS_ACCESSKEYSECRET + optional: true + {{- end}} +{{- with .Values.registry.registry.extraEnvVars }} +{{- toYaml . | nindent 8 }} +{{- end }} ports: - containerPort: {{ template "harbor.registry.containerPort" . 
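Swift and OSS storage credentials can now be read from existing Secrets, with the key names fixed by the templates above (REGISTRY_STORAGE_SWIFT_PASSWORD, REGISTRY_STORAGE_SWIFT_SECRETKEY, REGISTRY_STORAGE_SWIFT_ACCESSKEY, REGISTRY_STORAGE_OSS_ACCESSKEYSECRET). A sketch for Swift with a hypothetical secret name and placeholder values:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: harbor-swift-creds                        # hypothetical name
type: Opaque
stringData:
  REGISTRY_STORAGE_SWIFT_PASSWORD: "changeme"     # placeholder
  REGISTRY_STORAGE_SWIFT_SECRETKEY: "changeme"    # optional key
  REGISTRY_STORAGE_SWIFT_ACCESSKEY: "changeme"    # optional key
---
# corresponding values override (shown as comments)
# persistence:
#   imageChartStorage:
#     type: swift
#     swift:
#       existingSecret: harbor-swift-creds
```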
}} - - containerPort: 5001 + - containerPort: {{ ternary .Values.metrics.registry.port 5001 .Values.metrics.enabled }} volumeMounts: - name: registry-data mountPath: {{ .Values.persistence.imageChartStorage.filesystem.rootdirectory }} @@ -169,6 +231,9 @@ spec: resources: {{ toYaml .Values.registry.controller.resources | indent 10 }} {{- end }} + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 10 }} + {{- end }} envFrom: - configMapRef: name: "{{ template "harbor.registryCtl" . }}" @@ -176,17 +241,32 @@ spec: name: "{{ template "harbor.registry" . }}" - secretRef: name: "{{ template "harbor.registryCtl" . }}" + {{- if .Values.persistence.imageChartStorage.s3.existingSecret }} + - secretRef: + name: {{ .Values.persistence.imageChartStorage.s3.existingSecret }} + {{- end }} env: + {{- if .Values.registry.existingSecret }} + - name: REGISTRY_HTTP_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.registry.existingSecret }} + key: {{ .Values.registry.existingSecretKey }} + {{- end }} - name: CORE_SECRET valueFrom: secretKeyRef: - name: {{ template "harbor.core" . }} + name: {{ default (include "harbor.core" .) .Values.core.existingSecret }} key: secret - name: JOBSERVICE_SECRET valueFrom: secretKeyRef: - name: {{ template "harbor.jobservice" . }} + name: {{ default (include "harbor.jobservice" .) .Values.jobservice.existingSecret }} + {{- if .Values.jobservice.existingSecret }} + key: {{ .Values.jobservice.existingSecretKey }} + {{- else }} key: JOBSERVICE_SECRET + {{- end }} {{- if has "registry" .Values.proxy.components }} - name: HTTP_PROXY value: "{{ .Values.proxy.httpProxy }}" @@ -219,6 +299,36 @@ spec: name: {{ .Values.persistence.imageChartStorage.azure.existingSecret }} key: AZURE_STORAGE_ACCESS_KEY {{- end }} + {{- if .Values.persistence.imageChartStorage.swift.existingSecret }} + - name: REGISTRY_STORAGE_SWIFT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.persistence.imageChartStorage.swift.existingSecret }} + key: REGISTRY_STORAGE_SWIFT_PASSWORD + - name: REGISTRY_STORAGE_SWIFT_SECRETKEY + valueFrom: + secretKeyRef: + name: {{ .Values.persistence.imageChartStorage.swift.existingSecret }} + key: REGISTRY_STORAGE_SWIFT_SECRETKEY + optional: true + - name: REGISTRY_STORAGE_SWIFT_ACCESSKEY + valueFrom: + secretKeyRef: + name: {{ .Values.persistence.imageChartStorage.swift.existingSecret }} + key: REGISTRY_STORAGE_SWIFT_ACCESSKEY + optional: true + {{- end }} + {{- if .Values.persistence.imageChartStorage.oss.existingSecret }} + - name: REGISTRY_STORAGE_OSS_ACCESSKEYSECRET + valueFrom: + secretKeyRef: + name: {{ .Values.persistence.imageChartStorage.oss.existingSecret }} + key: REGISTRY_STORAGE_OSS_ACCESSKEYSECRET + optional: true + {{- end}} +{{- with .Values.registry.controller.extraEnvVars }} +{{- toYaml . | nindent 8 }} +{{- end }} ports: - containerPort: {{ template "harbor.registryctl.containerPort" . }} volumeMounts: diff --git a/templates/registry/registry-pvc.yaml b/templates/registry/registry-pvc.yaml index 2112e2287..5d6d4d3dd 100644 --- a/templates/registry/registry-pvc.yaml +++ b/templates/registry/registry-pvc.yaml @@ -15,6 +15,7 @@ metadata: labels: {{ include "harbor.labels" . 
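REGISTRY_HTTP_SECRET can likewise come from an existing Secret shared by the registry and registryctl containers. Minimal sketch; the secret name is hypothetical and REGISTRY_HTTP_SECRET is the default key per values.yaml:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: harbor-registry-http                  # hypothetical name
type: Opaque
stringData:
  REGISTRY_HTTP_SECRET: "fedcba9876543210"    # example 16-char value
---
# corresponding values override (shown as comments)
# registry:
#   existingSecret: harbor-registry-http
#   existingSecretKey: REGISTRY_HTTP_SECRET
```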
| indent 4 }} component: registry + app.kubernetes.io/component: registry spec: accessModes: - {{ $registry.accessMode }} diff --git a/templates/registry/registry-secret.yaml b/templates/registry/registry-secret.yaml index 529462906..e853a9cbe 100644 --- a/templates/registry/registry-secret.yaml +++ b/templates/registry/registry-secret.yaml @@ -1,3 +1,4 @@ +{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (include "harbor.registry" .) }} apiVersion: v1 kind: Secret metadata: @@ -6,7 +7,9 @@ metadata: {{ include "harbor.labels" . | indent 4 }} type: Opaque data: - REGISTRY_HTTP_SECRET: {{ .Values.registry.secret | default (randAlphaNum 16) | b64enc | quote }} + {{- if not .Values.registry.existingSecret }} + REGISTRY_HTTP_SECRET: {{ .Values.registry.secret | default (include "harbor.secretKeyHelper" (dict "key" "REGISTRY_HTTP_SECRET" "data" $existingSecret.data)) | default (randAlphaNum 16) | b64enc | quote }} + {{- end }} {{- if not .Values.redis.external.existingSecret }} REGISTRY_REDIS_PASSWORD: {{ include "harbor.redis.password" . | b64enc | quote }} {{- end }} @@ -23,7 +26,7 @@ data: {{- if and (not $storage.s3.existingSecret) ($storage.s3.secretkey) }} REGISTRY_STORAGE_S3_SECRETKEY: {{ $storage.s3.secretkey | b64enc | quote }} {{- end }} - {{- else if eq $type "swift" }} + {{- else if and (eq $type "swift") (not ($storage.swift.existingSecret)) }} REGISTRY_STORAGE_SWIFT_PASSWORD: {{ $storage.swift.password | b64enc | quote }} {{- if $storage.swift.secretkey }} REGISTRY_STORAGE_SWIFT_SECRETKEY: {{ $storage.swift.secretkey | b64enc | quote }} @@ -31,7 +34,7 @@ data: {{- if $storage.swift.accesskey }} REGISTRY_STORAGE_SWIFT_ACCESSKEY: {{ $storage.swift.accesskey | b64enc | quote }} {{- end }} - {{- else if eq $type "oss" }} + {{- else if and (eq $type "oss") ((not ($storage.oss.existingSecret))) }} REGISTRY_STORAGE_OSS_ACCESSKEYSECRET: {{ $storage.oss.accesskeysecret | b64enc | quote }} {{- end }} {{- if not .Values.registry.credentials.existingSecret }} diff --git a/templates/trivy/trivy-sts.yaml b/templates/trivy/trivy-sts.yaml index 37b19ac2d..90bd8db4e 100644 --- a/templates/trivy/trivy-sts.yaml +++ b/templates/trivy/trivy-sts.yaml @@ -7,6 +7,7 @@ metadata: labels: {{ include "harbor.labels" . | indent 4 }} component: trivy + app.kubernetes.io/component: trivy spec: replicas: {{ .Values.trivy.replicas }} serviceName: {{ template "harbor.trivy" . }} @@ -19,6 +20,10 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: trivy + app.kubernetes.io/component: trivy +{{- if .Values.trivy.podLabels }} +{{ toYaml .Values.trivy.podLabels | indent 8 }} +{{- end }} annotations: checksum/secret: {{ include (print $.Template.BasePath "/trivy/trivy-secret.yaml") . | sha256sum }} {{- if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "auto") }} @@ -41,13 +46,27 @@ spec: runAsUser: 10000 fsGroup: 10000 automountServiceAccountToken: {{ .Values.trivy.automountServiceAccountToken | default false }} +{{- with .Values.trivy.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: trivy +{{- end }} +{{- end }} + {{- with .Values.trivy.initContainers }} + initContainers: + {{- toYaml . 
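Trivy gains `skipJavaDBUpdate`, `extraEnvVars`, `initContainers`, and the shared `containerSecurityContext`. The initContainer below mirrors the busybox example added to values.yaml; treat the whole snippet as a sketch rather than a recommended setting:

```yaml
trivy:
  skipJavaDBUpdate: true
  initContainers:
    - name: wait                        # example carried over from values.yaml
      image: busybox
      command: ['sh', '-c', 'sleep 20']
```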
| nindent 8 }} + {{- end }} containers: - name: trivy image: {{ .Values.trivy.image.repository }}:{{ .Values.trivy.image.tag }} imagePullPolicy: {{ .Values.imagePullPolicy }} - securityContext: - privileged: false - allowPrivilegeEscalation: false + {{- if not (empty .Values.containerSecurityContext) }} + securityContext: {{ .Values.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} env: {{- if has "trivy" .Values.proxy.components }} - name: HTTP_PROXY @@ -80,6 +99,8 @@ spec: value: {{ .Values.trivy.ignoreUnfixed | default false | quote }} - name: "SCANNER_TRIVY_SKIP_UPDATE" value: {{ .Values.trivy.skipUpdate | default false | quote }} + - name: "SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE" + value: {{ .Values.trivy.skipJavaDBUpdate | default false | quote }} - name: "SCANNER_TRIVY_OFFLINE_SCAN" value: {{ .Values.trivy.offlineScan | default false | quote }} - name: "SCANNER_TRIVY_SECURITY_CHECKS" @@ -111,6 +132,9 @@ spec: secretKeyRef: name: {{ template "harbor.trivy" . }} key: redisURL +{{- with .Values.trivy.extraEnvVars }} +{{- toYaml . | nindent 12 }} +{{- end }} ports: - name: api-server containerPort: {{ template "harbor.trivy.containerPort" . }} diff --git a/test/e2e/Dockerfile b/test/e2e/Dockerfile index 19747113a..680139695 100644 --- a/test/e2e/Dockerfile +++ b/test/e2e/Dockerfile @@ -1,7 +1,7 @@ FROM alpine:3 -ARG KUBECTL_VERSION="v1.21.1" -ARG HELM_VERSION="v3.9.2" -RUN apk add bash curl bind-tools \ - && curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl \ - && curl https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz -o - | tar -xzO linux-amd64/helm > /usr/local/bin/helm && chmod +x /usr/local/bin/helm +ARG KUBECTL_VERSION="v1.27.1" +ARG HELM_VERSION="v3.13.3" +RUN apk add bash bind-tools \ + && wget https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl -P /usr/local/bin/ && chmod +x /usr/local/bin/kubectl \ + && wget -O - https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -xzO linux-amd64/helm > /usr/local/bin/helm && chmod +x /usr/local/bin/helm diff --git a/test/e2e/Jenkinsfile b/test/e2e/Jenkinsfile index a40751fbe..2b34f9320 100644 --- a/test/e2e/Jenkinsfile +++ b/test/e2e/Jenkinsfile @@ -6,6 +6,7 @@ class HarborChartFreshInstallPipelineExecutor extends FreshInstallPipelineExecut String context String namespace String coreHostname + String ingressControllerServiceType String ingressControllerIP HarborChartFreshInstallPipelineExecutor(Script script) { @@ -36,13 +37,29 @@ class HarborChartFreshInstallPipelineExecutor extends FreshInstallPipelineExecut HarborInstance install(){ // the scope of the credential is just inside the "withCredentials" block, so we need to call "withCredentials" again script.withCredentials([script.file(credentialsId: "kubeconfig", variable: "KUBE_CONFIG_FILE_PATH")]) { - // get the IP address of the ingress controller - ingressControllerIP = script.sh( + // get the service type of the ingress controller + ingressControllerServiceType = script.sh( returnStdout: true, - script:""" + script: """ docker run -i --rm -v \${KUBE_CONFIG_FILE_PATH}:/root/.kube/config deployer:dev \ - sh -c 'host \$(kubectl get svc nginx-ingress-controller-controller --context ${context} -n ingress-controller -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") | awk "/has address/ { print \\\$4; exit }"' + sh -c 'kubectl get svc ingress-nginx-controller --context ${context} -n 
ingress-nginx -o jsonpath="{.spec.type}"' """).trim() + // get the IP address of the ingress controller + if (ingressControllerServiceType == 'LoadBalancer') { + ingressControllerIP = script.sh( + returnStdout: true, + script: """ + docker run -i --rm -v \${KUBE_CONFIG_FILE_PATH}:/root/.kube/config deployer:dev \ + sh -c 'host \$(kubectl get svc ingress-nginx-controller --context ${context} -n ingress-nginx -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") | awk "/has address/ { print \\\$4; exit }"' + """).trim() + } else if (ingressControllerServiceType == 'NodePort') { + ingressControllerIP = script.sh( + returnStdout: true, + script: """ + docker run -i --rm -v \${KUBE_CONFIG_FILE_PATH}:/root/.kube/config deployer:dev \ + sh -c 'kubectl get svc ingress-nginx-controller --context ${context} -n ingress-nginx -o jsonpath="{.spec.externalIPs[0]}"' + """).trim() + } // install harbor chart script.sh """ # insert the hostAliases to run the replication test @@ -50,23 +67,21 @@ class HarborChartFreshInstallPipelineExecutor extends FreshInstallPipelineExecut # install harbor chart docker run -i --rm -w /workspace -v \${KUBE_CONFIG_FILE_PATH}:/root/.kube/config -v \$(pwd):/workspace deployer:dev \ helm install harbor --kube-context ${context} -n ${namespace} --create-namespace \ - --set "expose.ingress.hosts.core=${coreHostname},expose.ingress.hosts.notary=notary.${coreHostname},externalURL=https://${coreHostname},internalTLS.enabled=true,imagePullPolicy=Always,trivy.skipUpdate=true,core.gcTimeWindowHours=0" . + --set "expose.ingress.hosts.core=${coreHostname},externalURL=https://${coreHostname},internalTLS.enabled=true,imagePullPolicy=Always,trivy.skipUpdate=true,core.gcTimeWindowHours=0" . """ } HarborInstance instance = new HarborInstance() instance.coreServiceURL = "https://" + coreHostname - instance.notaryServiceURL = "https://notary." 
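For readers reproducing the CI install outside Jenkins, the `--set` string used in the pipeline translates to roughly this values fragment. The hostname is a placeholder that CI injects, and `core.gcTimeWindowHours=0` plus `trivy.skipUpdate=true` are test-only settings taken verbatim from the command:

```yaml
expose:
  ingress:
    hosts:
      core: harbor.example.com          # placeholder, CI supplies its own hostname
externalURL: https://harbor.example.com
internalTLS:
  enabled: true
imagePullPolicy: Always
trivy:
  skipUpdate: true
core:
  gcTimeWindowHours: 0
```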
+ coreHostname instance.adminPassword = "Harbor12345" instance.authMode = "database" - instance.components = "trivy,notary" - instance.hostIPMappings = "${coreHostname}:${ingressControllerIP},notary.${coreHostname}:${ingressControllerIP}" + instance.components = "trivy" + instance.hostIPMappings = "${coreHostname}:${ingressControllerIP}" script.currentBuild.description = """ Kubernetes: ${context} Namespace: ${namespace} Core Service: $instance.coreServiceURL - Notary Service: $instance.notaryServiceURL Ingress Controller IP: ${ingressControllerIP} """ @@ -91,7 +106,7 @@ def properties = { } return [ parameters([ - string(name: 'branch', defaultValue: 'master', description: 'The branch/tag to run for'), + string(name: 'branch', defaultValue: 'main', description: 'The branch/tag to run for'), choice(name: "cluster", choices: names, description: 'The Kubernetes cluster that the Harbor is deployed on') ]), buildDiscarder(strategy: logRotator(numToKeepStr: "15")), @@ -101,7 +116,7 @@ def properties = { def caseSettings = { CaseSettings settings = new CaseSettings() - settings.cases = "gc,common,database,trivy,notary" + settings.cases = "gc,trivy,common,database" return settings } diff --git a/test/integration/ingress_test.go b/test/integration/ingress_test.go index 4e7364973..107a4f8c9 100644 --- a/test/integration/ingress_test.go +++ b/test/integration/ingress_test.go @@ -14,15 +14,13 @@ type IngressTestSuite struct { func (i *IngressTestSuite) TestIngress() { k8s.GetIngress(i.T(), i.Options.KubectlOptions, fmt.Sprintf("%s-ingress", i.ReleaseName)) - k8s.GetIngress(i.T(), i.Options.KubectlOptions, fmt.Sprintf("%s-ingress-notary", i.ReleaseName)) } func TestIngressTestSuite(t *testing.T) { suite.Run(t, &IngressTestSuite{ BaseTestSuite: NewBaseTestSuite(map[string]string{ - "expose.ingress.hosts.core": "harbor.local", - "expose.ingress.hosts.notary": "notary.harbor.local", - "externalURL": "https://harbor.local", + "expose.ingress.hosts.core": "harbor.local", + "externalURL": "https://harbor.local", }), }) } diff --git a/values.yaml b/values.yaml index 86655c3a5..aae90514e 100644 --- a/values.yaml +++ b/values.yaml @@ -26,15 +26,9 @@ expose: # "tls.crt" - the certificate # "tls.key" - the private key secretName: "" - # The name of secret which contains keys named: - # "tls.crt" - the certificate - # "tls.key" - the private key - # Only needed when the "expose.type" is "ingress". - notarySecretName: "" ingress: hosts: core: core.harbor.domain - notary: notary.harbor.domain # set to the type of ingress controller if it has specific requirements. # leave as `default` for most ingress controllers. # set to `gce` if using the GCE ingress controller @@ -52,29 +46,22 @@ expose: ingress.kubernetes.io/proxy-body-size: "0" nginx.ingress.kubernetes.io/ssl-redirect: "true" nginx.ingress.kubernetes.io/proxy-body-size: "0" - notary: - # notary ingress-specific annotations - annotations: {} - # notary ingress-specific labels - labels: {} - harbor: - # harbor ingress-specific annotations - annotations: {} - # harbor ingress-specific labels - labels: {} + # ingress-specific labels + labels: {} clusterIP: # The name of ClusterIP service name: harbor - # Annotations on the ClusterIP service - annotations: {} + # The ip address of the ClusterIP service (leave empty for acquiring dynamic ip) + staticClusterIP: "" ports: # The service port Harbor listens on when serving HTTP httpPort: 80 # The service port Harbor listens on when serving HTTPS httpsPort: 443 - # The service port Notary listens on. 
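The nodePort exposure also drops the notary port and gains annotations/labels. Example below; the HTTP node port is left to the chart/cluster default and only the HTTPS node port visible in this diff is pinned:

```yaml
expose:
  type: nodePort
  nodePort:
    name: harbor
    labels:
      expose: nodeport        # hypothetical label
    annotations: {}
    ports:
      http:
        port: 80
      https:
        port: 443
        nodePort: 30003
```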
Only needed when notary.enabled - # is set to true - notaryPort: 4443 + # Annotations on the ClusterIP service + annotations: {} + # ClusterIP-specific labels + labels: {} nodePort: # The name of NodePort service name: harbor @@ -89,12 +76,10 @@ expose: port: 443 # The node port Harbor listens on when serving HTTPS nodePort: 30003 - # Only needed when notary.enabled is set to true - notary: - # The service port Notary listens on - port: 4443 - # The node port Notary listens on - nodePort: 30004 + # Annotations on the nodePort service + annotations: {} + # nodePort-specific labels + labels: {} loadBalancer: # The name of LoadBalancer service name: harbor @@ -105,15 +90,15 @@ expose: httpPort: 80 # The service port Harbor listens on when serving HTTPS httpsPort: 443 - # The service port Notary listens on. Only needed when notary.enabled - # is set to true - notaryPort: 4443 + # Annotations on the loadBalancer service annotations: {} + # loadBalancer-specific labels + labels: {} sourceRanges: [] # The external URL for Harbor core service. It is used to # 1) populate the docker/helm commands showed on portal -# 2) populate the token service URL returned to docker/notary client +# 2) populate the token service URL returned to docker client # # Format: protocol://domain[:port]. Usually: # 1) if "expose.type" is "ingress", the "domain" should be @@ -126,67 +111,6 @@ expose: # If Harbor is deployed behind the proxy, set it as the URL of proxy externalURL: https://core.harbor.domain -# The internal TLS used for harbor components secure communicating. In order to enable https -# in each components tls cert files need to provided in advance. -internalTLS: - # If internal TLS enabled - enabled: false - # There are three ways to provide tls - # 1) "auto" will generate cert automatically - # 2) "manual" need provide cert file manually in following value - # 3) "secret" internal certificates from secret - certSource: "auto" - # The content of trust ca, only available when `certSource` is "manual" - trustCa: "" - # core related cert configuration - core: - # secret name for core's tls certs - secretName: "" - # Content of core's TLS cert file, only available when `certSource` is "manual" - crt: "" - # Content of core's TLS key file, only available when `certSource` is "manual" - key: "" - # jobservice related cert configuration - jobservice: - # secret name for jobservice's tls certs - secretName: "" - # Content of jobservice's TLS key file, only available when `certSource` is "manual" - crt: "" - # Content of jobservice's TLS key file, only available when `certSource` is "manual" - key: "" - # registry related cert configuration - registry: - # secret name for registry's tls certs - secretName: "" - # Content of registry's TLS key file, only available when `certSource` is "manual" - crt: "" - # Content of registry's TLS key file, only available when `certSource` is "manual" - key: "" - # portal related cert configuration - portal: - # secret name for portal's tls certs - secretName: "" - # Content of portal's TLS key file, only available when `certSource` is "manual" - crt: "" - # Content of portal's TLS key file, only available when `certSource` is "manual" - key: "" - # trivy related cert configuration - trivy: - # secret name for trivy's tls certs - secretName: "" - # Content of trivy's TLS key file, only available when `certSource` is "manual" - crt: "" - # Content of trivy's TLS key file, only available when `certSource` is "manual" - key: "" - -ipFamily: - # ipv6Enabled set to true if ipv6 is 
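The loadBalancer flavor follows the same pattern (labels plus annotations), and `sourceRanges` can restrict client CIDRs. Illustrative values with a placeholder domain and a hypothetical allow-list:

```yaml
expose:
  type: loadBalancer
  loadBalancer:
    name: harbor
    labels:
      expose: public          # hypothetical label
    annotations: {}
    sourceRanges:
      - 10.0.0.0/8            # hypothetical CIDR allow-list
externalURL: https://harbor.example.com   # placeholder domain
```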
enabled in cluster, currently it affected the nginx related component - ipv6: - enabled: true - # ipv4Enabled set to true if ipv4 is enabled in cluster, currently it affected the nginx related component - ipv4: - enabled: true - # The persistence is enabled by default and a default StorageClass # is needed in the k8s cluster to provision volumes dynamically. # Specify another StorageClass in the "storageClass" or set "existingClaim" @@ -249,14 +173,14 @@ persistence: annotations: {} # Define which storage backend is used for registry to store # images and charts. Refer to - # https://github.com/docker/distribution/blob/master/docs/configuration.md#storage + # https://github.com/distribution/distribution/blob/main/docs/configuration.md#storage # for the detail. imageChartStorage: # Specify whether to disable `redirect` for images and chart storage, for # backends which not supported it (such as using minio for `s3` storage type), please disable # it. To disable redirects, simply set `disableredirect` to `true` instead. # Refer to - # https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect + # https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect # for the detail. disableredirect: false # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate. @@ -284,7 +208,7 @@ persistence: encodedkey: base64-encoded-json-key-file #rootdirectory: /gcs/object/name/prefix #chunksize: "5242880" - # To use existing secret, the key must be gcs-key.json + # To use existing secret, the key must be GCS_KEY_DATA existingSecret: "" useWorkloadIdentity: false s3: @@ -312,6 +236,8 @@ persistence: username: username password: password container: containername + # keys in existing secret must be REGISTRY_STORAGE_SWIFT_PASSWORD, REGISTRY_STORAGE_SWIFT_SECRETKEY, REGISTRY_STORAGE_SWIFT_ACCESSKEY + existingSecret: "" #region: fr #tenant: tenantname #tenantid: tenantid @@ -332,6 +258,8 @@ persistence: accesskeysecret: accesskeysecret region: regionname bucket: bucketname + # key in existingSecret must be REGISTRY_STORAGE_OSS_ACCESSKEYSECRET + existingSecret: "" #endpoint: endpoint #internal: false #encrypt: false @@ -339,6 +267,76 @@ persistence: #chunksize: 10M #rootdirectory: rootdirectory +# The initial password of Harbor admin. Change it from portal after launching Harbor +# or give an existing secret for it +# key in secret is given via (default to HARBOR_ADMIN_PASSWORD) +# existingSecretAdminPassword: +existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD +harborAdminPassword: "Harbor12345" + +# The internal TLS used for harbor components secure communicating. In order to enable https +# in each component tls cert files need to provided in advance. 
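The admin bootstrap password can now be sourced from an existing Secret; HARBOR_ADMIN_PASSWORD is the default lookup key. Sketch with a hypothetical Secret name:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: harbor-admin                       # hypothetical name
type: Opaque
stringData:
  HARBOR_ADMIN_PASSWORD: "Harbor12345"     # change after first login
---
# corresponding values override (shown as comments)
# existingSecretAdminPassword: harbor-admin
# existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD
```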
+internalTLS: + # If internal TLS enabled + enabled: false + # enable strong ssl ciphers (default: false) + strong_ssl_ciphers: false + # There are three ways to provide tls + # 1) "auto" will generate cert automatically + # 2) "manual" need provide cert file manually in following value + # 3) "secret" internal certificates from secret + certSource: "auto" + # The content of trust ca, only available when `certSource` is "manual" + trustCa: "" + # core related cert configuration + core: + # secret name for core's tls certs + secretName: "" + # Content of core's TLS cert file, only available when `certSource` is "manual" + crt: "" + # Content of core's TLS key file, only available when `certSource` is "manual" + key: "" + # jobservice related cert configuration + jobservice: + # secret name for jobservice's tls certs + secretName: "" + # Content of jobservice's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of jobservice's TLS key file, only available when `certSource` is "manual" + key: "" + # registry related cert configuration + registry: + # secret name for registry's tls certs + secretName: "" + # Content of registry's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of registry's TLS key file, only available when `certSource` is "manual" + key: "" + # portal related cert configuration + portal: + # secret name for portal's tls certs + secretName: "" + # Content of portal's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of portal's TLS key file, only available when `certSource` is "manual" + key: "" + # trivy related cert configuration + trivy: + # secret name for trivy's tls certs + secretName: "" + # Content of trivy's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of trivy's TLS key file, only available when `certSource` is "manual" + key: "" + +ipFamily: + # ipv6Enabled set to true if ipv6 is enabled in cluster, currently it affected the nginx related component + ipv6: + enabled: true + # ipv4Enabled set to true if ipv4 is enabled in cluster, currently it affected the nginx related component + ipv4: + enabled: true + imagePullPolicy: IfNotPresent # Use this set to assign a list of default pullSecrets @@ -354,13 +352,6 @@ updateStrategy: # debug, info, warning, error or fatal logLevel: info -# The initial password of Harbor admin. Change it from portal after launching Harbor -# or give an existing secret for it -# key in secret is given via (default to HARBOR_ADMIN_PASSWORD) -# existingSecretAdminPassword: -existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD -harborAdminPassword: "Harbor12345" - # The name of the secret which contains key named "ca.crt". Setting this enables the # download link on portal to download the CA certificate when the certificate isn't # generated automatically @@ -695,6 +686,13 @@ trivy: podAnnotations: {} ## The priority class to run the pod as priorityClassName: + # containers to be run before the controller's container starts. + initContainers: [] + # Example: + # + # - name: wait + # image: busybox + # command: [ 'sh', '-c', "sleep 20" ] notary: enabled: true @@ -898,7 +896,7 @@ metrics: port: 8001 ## Create prometheus serviceMonitor to scrape harbor metrics. ## This requires the monitoring.coreos.com/v1 CRD. 
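If auto-generated internal certificates are not wanted, `certSource: "secret"` lets each component point at its own TLS Secret. Hedged example; the secret names are placeholders, and each Secret is assumed to carry the component's certificate and key under the chart's expected key names (the usual tls.crt/tls.key/ca.crt convention, not spelled out in this diff):

```yaml
internalTLS:
  enabled: true
  certSource: "secret"
  core:
    secretName: core-internal-tls          # hypothetical secret names
  jobservice:
    secretName: jobservice-internal-tls
  registry:
    secretName: registry-internal-tls
  portal:
    secretName: portal-internal-tls
  trivy:
    secretName: trivy-internal-tls
```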
Please see - ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md ## serviceMonitor: enabled: false @@ -909,8 +907,8 @@ metrics: metricRelabelings: [] # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] # Relabel configs to apply to samples before ingestion. relabelings: [] @@ -918,8 +916,8 @@ metrics: # separator: ; # regex: ^(.*)$ # targetLabel: nodename - # replacement: $1 - # action: replace + # replacement: $1 + # action: replace trace: enabled: false @@ -960,3 +958,545 @@ cache: enabled: false # default keep cache for one day. expireHours: 24 + +## set Container Security Context to comply with PSP restricted policy if necessary +## each of the conatiner will apply the same security context +## containerSecurityContext:{} is initially an empty yaml that you could edit it on demand, we just filled with a common template for convenience +containerSecurityContext: + privileged: false + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + runAsNonRoot: true + capabilities: + drop: + - ALL + +# If service exposed via "ingress", the Nginx will not be used +nginx: + image: + repository: goharbor/nginx-photon + tag: dev + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + ## The priority class to run the pod as + priorityClassName: + +portal: + image: + repository: goharbor/harbor-portal + tag: dev + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + ## Additional service annotations + serviceAnnotations: {} + ## The priority class to run the pod as + priorityClassName: + +core: + image: + repository: goharbor/harbor-core + tag: dev + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + ## Startup probe values + startupProbe: + enabled: true + initialDelaySeconds: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + 
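`containerSecurityContext` is applied verbatim to every component container (the templates skip it only when the map is empty), so cluster-specific hardening goes in one place. A sketch that adds an explicit runAsUser on top of the shipped restricted defaults; 10000 matches the pod-level user already used elsewhere in the templates, but treat it as an assumption for your environment:

```yaml
containerSecurityContext:
  privileged: false
  allowPrivilegeEscalation: false
  runAsNonRoot: true
  runAsUser: 10000            # assumed to match the pod securityContext in the templates
  seccompProfile:
    type: RuntimeDefault
  capabilities:
    drop:
      - ALL
```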
+  extraEnvVars: []
+  nodeSelector: {}
+  tolerations: []
+  affinity: {}
+  # Spread Pods across failure-domains like regions, availability zones or nodes
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   nodeTaintsPolicy: Honor
+  #   whenUnsatisfiable: DoNotSchedule
+  ## Additional deployment annotations
+  podAnnotations: {}
+  ## Additional deployment labels
+  podLabels: {}
+  ## Additional service annotations
+  serviceAnnotations: {}
+  ## The priority class to run the pod as
+  priorityClassName:
+  ## User settings configuration json string
+  configureUserSettings:
+  # The provider for updating project quota (usage); there are 2 options, redis or db.
+  # By default it is implemented by db, but you can switch it to redis, which
+  # can improve the performance of highly concurrent pushes to the same project
+  # and reduce spikes in database connections and usage.
+  # Using redis adds a small delay before the displayed quota usage is updated, so only
+  # switch the provider to redis if you run into database connection spikes caused by
+  # highly concurrent pushes to the same project; it brings no improvement for other scenarios.
+  quotaUpdateProvider: db # Or redis
+  # Secret is used when core server communicates with other components.
+  # If a secret key is not specified, Helm will generate one. Alternatively set existingSecret to use an existing secret
+  # Must be a string of 16 chars.
+  secret: ""
+  # Fill in the name of a kubernetes secret if you want to use your own
+  # If using existingSecret, the key must be secret
+  existingSecret: ""
+  # Fill in the name of a kubernetes secret if you want to use your own
+  # TLS certificate and private key for token encryption/decryption.
+  # The secret must contain keys named:
+  # "tls.key" - the private key
+  # "tls.crt" - the certificate
+  secretName: ""
+  # If not specifying a preexisting secret, a secret can be created from tokenKey and tokenCert and used instead.
+  # If none of secretName, tokenKey, and tokenCert are specified, an ephemeral key and certificate will be autogenerated.
+  # tokenKey and tokenCert must BOTH be set or BOTH unset.
+  # The tokenKey value is formatted as a multiline string containing a PEM-encoded RSA key, indented one more than tokenKey on the following line.
+  tokenKey: |
+  # If tokenKey is set, the value of tokenCert must be set as a PEM-encoded certificate signed by tokenKey, and supplied as a multiline string, indented one more than tokenCert on the following line.
+  tokenCert: |
+  # The XSRF key. Will be generated automatically if it isn't specified
+  xsrfKey: ""
+  # If using existingSecret, the key is defined by core.existingXsrfSecretKey
+  existingXsrfSecret: ""
+  # If using existingXsrfSecret, the key within that secret
+  existingXsrfSecretKey: CSRF_KEY
+  # The time duration for asynchronously updating artifact pull_time and repository
+  # pull_count, in seconds. Defaults to 10 seconds if it isn't set.
+  # e.g. artifactPullAsyncFlushDuration: 10
+  artifactPullAsyncFlushDuration:
+  gdpr:
+    deleteUser: false
+    auditLogsCompliant: false
+
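To make the multiline format that the `core.tokenKey` / `core.tokenCert` comments describe concrete, here is a minimal illustrative override; the PEM bodies are placeholders, not real key material, and it would typically be applied with Helm's `-f` flag (release and file names are assumptions, not part of the chart).

# core-token.yaml (illustrative only; replace the placeholders with real PEM content)
core:
  tokenKey: |
    -----BEGIN RSA PRIVATE KEY-----
    <PEM-encoded RSA private key goes here>
    -----END RSA PRIVATE KEY-----
  tokenCert: |
    -----BEGIN CERTIFICATE-----
    <certificate signed by the key above goes here>
    -----END CERTIFICATE-----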
+jobservice:
+  image:
+    repository: goharbor/harbor-jobservice
+    tag: dev
+  # set the service account to be used, default if left empty
+  serviceAccountName: ""
+  # mount the service account token
+  automountServiceAccountToken: false
+  replicas: 1
+  revisionHistoryLimit: 10
+  # resources:
+  #   requests:
+  #     memory: 256Mi
+  #     cpu: 100m
+  extraEnvVars: []
+  nodeSelector: {}
+  tolerations: []
+  affinity: {}
+  # Spread Pods across failure-domains like regions, availability zones or nodes
+  topologySpreadConstraints:
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   nodeTaintsPolicy: Honor
+  #   whenUnsatisfiable: DoNotSchedule
+  ## Additional deployment annotations
+  podAnnotations: {}
+  ## Additional deployment labels
+  podLabels: {}
+  ## The priority class to run the pod as
+  priorityClassName:
+  maxJobWorkers: 10
+  # The logger for jobs: "file", "database" or "stdout"
+  jobLoggers:
+    - file
+    # - database
+    # - stdout
+  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
+  loggerSweeperDuration: 14 #days
+  notification:
+    webhook_job_max_retry: 3
+    webhook_job_http_client_timeout: 3 # in seconds
+  reaper:
+    # the max time to wait for a task to finish; if it is still unfinished after max_update_hours, the task will be marked as error, but it will continue to run; default value is 24
+    max_update_hours: 24
+    # the max time an execution may stay in the running state without any new task being created
+    max_dangling_hours: 168
+  # Secret is used when job service communicates with other components.
+  # If a secret key is not specified, Helm will generate one.
+  # Must be a string of 16 chars.
+  secret: ""
+  # Use an existing secret resource
+  existingSecret: ""
+  # Key within the existing secret for the job service secret
+  existingSecretKey: JOBSERVICE_SECRET
+
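As a quick illustration of the logger settings above, an override like the following sends job logs to both the file logger and stdout and shortens the sweeper window; the numbers are arbitrary examples, not recommendations.

jobservice:
  maxJobWorkers: 20
  jobLoggers:
    - file
    - stdout
  loggerSweeperDuration: 7 # days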
+registry:
+  registry:
+    image:
+      repository: goharbor/registry-photon
+      tag: dev
+    # resources:
+    #   requests:
+    #     memory: 256Mi
+    #     cpu: 100m
+    extraEnvVars: []
+  controller:
+    image:
+      repository: goharbor/harbor-registryctl
+      tag: dev
+    # resources:
+    #   requests:
+    #     memory: 256Mi
+    #     cpu: 100m
+    extraEnvVars: []
+  # set the service account to be used, default if left empty
+  serviceAccountName: ""
+  # mount the service account token
+  automountServiceAccountToken: false
+  replicas: 1
+  revisionHistoryLimit: 10
+  nodeSelector: {}
+  tolerations: []
+  affinity: {}
+  # Spread Pods across failure-domains like regions, availability zones or nodes
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   nodeTaintsPolicy: Honor
+  #   whenUnsatisfiable: DoNotSchedule
+  ## Additional deployment annotations
+  podAnnotations: {}
+  ## Additional deployment labels
+  podLabels: {}
+  ## The priority class to run the pod as
+  priorityClassName:
+  # Secret is used to secure the upload state from client
+  # and registry storage backend.
+  # See: https://github.com/distribution/distribution/blob/main/docs/configuration.md#http
+  # If a secret key is not specified, Helm will generate one.
+  # Must be a string of 16 chars.
+  secret: ""
+  # Use an existing secret resource
+  existingSecret: ""
+  # Key within the existing secret for the registry service secret
+  existingSecretKey: REGISTRY_HTTP_SECRET
+  # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
+  relativeurls: false
+  credentials:
+    username: "harbor_registry_user"
+    password: "harbor_registry_password"
+    # If using existingSecret, the key must be REGISTRY_PASSWD and REGISTRY_HTPASSWD
+    existingSecret: ""
+    # Login and password in htpasswd string format. Mutually exclusive with `registry.credentials.username` and `registry.credentials.password`.
+    # May come in handy when integrating with tools like argocd or flux. It lets the same line be rendered on every templating run,
+    # unlike the `htpasswd` function from helm, which generates a different line each time because of the salt.
+    # htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string
+    htpasswdString: ""
+  middleware:
+    enabled: false
+    type: cloudFront
+    cloudFront:
+      baseurl: example.cloudfront.net
+      keypairid: KEYPAIRID
+      duration: 3000s
+      ipfilteredby: none
+      # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
+      # that allows access to CloudFront
+      privateKeySecret: "my-secret"
+  # enable purging of _upload directories
+  upload_purging:
+    enabled: true
+    # remove files in _upload directories which exist for a period of time, default is one week.
+    age: 168h
+    # the interval of the purge operations
+    interval: 24h
+    dryrun: false
+
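A hedged sketch of the `htpasswdString` option above: the value is a single htpasswd line for the registry user, which you would generate once yourself, for example with a tool such as `htpasswd -nbB <user> <password>` (bcrypt). The hash below is a placeholder, not a working credential, and the exact expected format should be checked against the chart's templates.

registry:
  credentials:
    # Placeholder only; generate your own line once, e.g. htpasswd -nbB harbor_registry_user <password>
    htpasswdString: "harbor_registry_user:$2y$10$REPLACE_WITH_A_REAL_BCRYPT_HASH"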
+trivy:
+  # enabled the flag to enable Trivy scanner
+  enabled: true
+  image:
+    # repository the repository for Trivy adapter image
+    repository: goharbor/trivy-adapter-photon
+    # tag the tag for Trivy adapter image
+    tag: dev
+  # set the service account to be used, default if left empty
+  serviceAccountName: ""
+  # mount the service account token
+  automountServiceAccountToken: false
+  # replicas the number of Pod replicas
+  replicas: 1
+  resources:
+    requests:
+      cpu: 200m
+      memory: 512Mi
+    limits:
+      cpu: 1
+      memory: 1Gi
+  extraEnvVars: []
+  nodeSelector: {}
+  tolerations: []
+  affinity: {}
+  # Spread Pods across failure-domains like regions, availability zones or nodes
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   nodeTaintsPolicy: Honor
+  #   whenUnsatisfiable: DoNotSchedule
+  ## Additional deployment annotations
+  podAnnotations: {}
+  ## Additional deployment labels
+  podLabels: {}
+  ## The priority class to run the pod as
+  priorityClassName:
+  # debugMode the flag to enable Trivy debug mode with more verbose scanning log
+  debugMode: false
+  # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
+  vulnType: "os,library"
+  # severity a comma-separated list of severities to be checked
+  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
+  # ignoreUnfixed the flag to display only fixed vulnerabilities
+  ignoreUnfixed: false
+  # insecure the flag to skip verifying registry certificate
+  insecure: false
+  # gitHubToken the GitHub access token to download Trivy DB
+  #
+  # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
+  # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
+  # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
+  # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
+  # Currently, the database is updated every 12 hours and published as a new release to GitHub.
+  #
+  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such a rate limit is enough
+  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
+  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
+  # https://developer.github.com/v3/#rate-limiting
+  #
+  # You can create a GitHub token by following the instructions in
+  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
+  gitHubToken: ""
+  # skipUpdate the flag to disable Trivy DB downloads from GitHub
+  #
+  # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
+  # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
+  # `/home/scanner/.cache/trivy/db/trivy.db` path.
+  skipUpdate: false
+  # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
+  # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
+  #
+  skipJavaDBUpdate: false
+  # The offlineScan option prevents Trivy from sending API requests to identify dependencies.
+  #
+  # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
+  # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
+  # exist in the local repositories. This means the number of detected vulnerabilities might be lower in offline mode.
+  # It works best when all the dependencies are available locally.
+  # This option doesn't affect DB download. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
+  offlineScan: false
+  # Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
+  securityCheck: "vuln"
+  # The duration to wait for scan completion
+  timeout: 5m0s
+
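Tying the flags above together, an air-gapped deployment would typically look something like the sketch below; per the comments above, the `trivy.db` and `trivy-java.db` files then have to be provided manually at the cache paths mentioned earlier.

trivy:
  skipUpdate: true
  skipJavaDBUpdate: true
  offlineScan: true
  gitHubToken: ""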
+database:
+  # if external database is used, set "type" to "external"
+  # and fill the connection information in "external" section
+  type: internal
+  internal:
+    image:
+      repository: goharbor/harbor-db
+      tag: dev
+    # set the service account to be used, default if left empty
+    serviceAccountName: ""
+    # mount the service account token
+    automountServiceAccountToken: false
+    # resources:
+    #   requests:
+    #     memory: 256Mi
+    #     cpu: 100m
+    # The timeout used in livenessProbe; 1 to 5 seconds
+    livenessProbe:
+      timeoutSeconds: 1
+    # The timeout used in readinessProbe; 1 to 5 seconds
+    readinessProbe:
+      timeoutSeconds: 1
+    extraEnvVars: []
+    nodeSelector: {}
+    tolerations: []
+    affinity: {}
+    ## The priority class to run the pod as
+    priorityClassName:
+    # The initial superuser password for internal database
+    password: "changeit"
+    # The size limit for shared memory; pgSQL uses it for shared_buffer
+    # For more details see:
+    # https://github.com/goharbor/harbor/issues/15034
+    shmSizeLimit: 512Mi
+    initContainer:
+      migrator: {}
+      # resources:
+      #   requests:
+      #     memory: 128Mi
+      #     cpu: 100m
+      permissions: {}
+      # resources:
+      #   requests:
+      #     memory: 128Mi
+      #     cpu: 100m
+  external:
+    host: "192.168.0.1"
+    port: "5432"
+    username: "user"
+    password: "password"
+    coreDatabase: "registry"
+    # if using existing secret, the key must be "password"
+    existingSecret: ""
+    # "disable" - No SSL
+    # "require" - Always SSL (skip verification)
+    # "verify-ca" - Always SSL (verify that the certificate presented by the
+    # server was signed by a trusted CA)
+    # "verify-full" - Always SSL (verify that the certificate presented by the
+    # server was signed by a trusted CA and the server host name matches the one
+    # in the certificate)
+    sslmode: "disable"
+  # The maximum number of connections in the idle connection pool per pod (core+exporter).
+  # If it <= 0, no idle connections are retained.
+  maxIdleConns: 100
+  # The maximum number of open connections to the database per pod (core+exporter).
+  # If it <= 0, then there is no limit on the number of open connections.
+  # Note: the default number of connections is 1024 for Harbor's postgres.
+  maxOpenConns: 900
+  ## Additional deployment annotations
+  podAnnotations: {}
+  ## Additional deployment labels
+  podLabels: {}
+
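For reference, a minimal sketch of switching to an external database; the host and secret name are hypothetical, and the secret is expected to carry the password under the key "password" as noted above.

database:
  type: external
  external:
    host: "postgres.example.internal"
    port: "5432"
    username: "harbor"
    coreDatabase: "registry"
    existingSecret: "harbor-db-credentials"
    sslmode: "verify-full"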
+redis:
+  # if external Redis is used, set "type" to "external"
+  # and fill the connection information in "external" section
+  type: internal
+  internal:
+    image:
+      repository: goharbor/redis-photon
+      tag: dev
+    # set the service account to be used, default if left empty
+    serviceAccountName: ""
+    # mount the service account token
+    automountServiceAccountToken: false
+    # resources:
+    #   requests:
+    #     memory: 256Mi
+    #     cpu: 100m
+    extraEnvVars: []
+    nodeSelector: {}
+    tolerations: []
+    affinity: {}
+    ## The priority class to run the pod as
+    priorityClassName:
+    # # jobserviceDatabaseIndex defaults to "1"
+    # # registryDatabaseIndex defaults to "2"
+    # # trivyAdapterIndex defaults to "5"
+    # # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
+    # # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
+    jobserviceDatabaseIndex: "1"
+    registryDatabaseIndex: "2"
+    trivyAdapterIndex: "5"
+    # harborDatabaseIndex: "6"
+    # cacheLayerDatabaseIndex: "7"
+  external:
+    # support redis, redis+sentinel
+    # addr for redis: <host_redis>:<port_redis>
+    # addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
+    addr: "192.168.0.2:6379"
+    # The name of the set of Redis instances to monitor; it must be set to support redis+sentinel
+    sentinelMasterSet: ""
+    # The "coreDatabaseIndex" must be "0" as the library Harbor
+    # used doesn't support configuring it
+    # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
+    # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
+    coreDatabaseIndex: "0"
+    jobserviceDatabaseIndex: "1"
+    registryDatabaseIndex: "2"
+    trivyAdapterIndex: "5"
+    # harborDatabaseIndex: "6"
+    # cacheLayerDatabaseIndex: "7"
+    # username field can be an empty string, and it will be authenticated against the default user
+    username: ""
+    password: ""
+    # If using existingSecret, the key must be REDIS_PASSWORD
+    existingSecret: ""
+  ## Additional deployment annotations
+  podAnnotations: {}
+  ## Additional deployment labels
+  podLabels: {}
+
+exporter:
+  image:
+    repository: goharbor/harbor-exporter
+    tag: dev
+  serviceAccountName: ""
+  # mount the service account token
+  automountServiceAccountToken: false
+  replicas: 1
+  revisionHistoryLimit: 10
+  # resources:
+  #   requests:
+  #     memory: 256Mi
+  #     cpu: 100m
+  extraEnvVars: []
+  podAnnotations: {}
+  ## Additional deployment labels
+  podLabels: {}
+  nodeSelector: {}
+  tolerations: []
+  affinity: {}
+  # Spread Pods across failure-domains like regions, availability zones or nodes
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   nodeTaintsPolicy: Honor
+  #   whenUnsatisfiable: DoNotSchedule
+  ## The priority class to run the pod as
+  priorityClassName:
+  cacheDuration: 23
+  cacheCleanInterval: 14400
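Finally, a hedged sketch of the redis+sentinel form of `external.addr` described above; the hostnames, the master set name, and the secret name are placeholders.

redis:
  type: external
  external:
    addr: "sentinel-0.example.internal:26379,sentinel-1.example.internal:26379,sentinel-2.example.internal:26379"
    sentinelMasterSet: "mymaster"
    # the password is read from this secret under the key REDIS_PASSWORD
    existingSecret: "harbor-redis-credentials"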