diff --git a/base/apps/account-api.yaml b/base/apps/account-api.yaml
index cd82fa3c..5b9dc277 100644
--- a/base/apps/account-api.yaml
+++ b/base/apps/account-api.yaml
@@ -18,6 +18,13 @@ spec:
         - name: account-api
           image: ghcr.io/biosimulations/account-api
           imagePullPolicy: "Always"
+          resources:
+            requests:
+              memory: "500Mi"
+              cpu: "200m"
+            limits:
+              memory: "1Gi"
+              cpu: "500m"
           volumeMounts:
             - name: config
               mountPath: "/app/config"
diff --git a/base/apps/api.yaml b/base/apps/api.yaml
index 45883a9f..e54e2f63 100644
--- a/base/apps/api.yaml
+++ b/base/apps/api.yaml
@@ -43,11 +43,11 @@ spec:
           imagePullPolicy: "Always"
           resources:
             requests:
-              memory: "500Mi"
+              memory: "1500Mi"
               cpu: "500m"
             limits:
-              memory: "4Gi"
-              cpu: "2000m"
+              memory: "2500Mi"
+              cpu: "1000m"
           livenessProbe:
             httpGet:
               path: /health
diff --git a/base/apps/combine-api.yaml b/base/apps/combine-api.yaml
index e9b31743..12326977 100644
--- a/base/apps/combine-api.yaml
+++ b/base/apps/combine-api.yaml
@@ -21,7 +21,7 @@ spec:
           resources:
             requests:
               memory: "1Gi"
-              cpu: "25m"
+              cpu: "500m"
             limits:
               memory: "2Gi"
               cpu: "1000m"
diff --git a/base/apps/hsds-service.yaml b/base/apps/hsds-service.yaml
index 246e0d7a..a762b1aa 100644
--- a/base/apps/hsds-service.yaml
+++ b/base/apps/hsds-service.yaml
@@ -56,6 +56,13 @@ spec:
         - name: sn
           image: hdfgroup/hsds:v0.7beta8
           imagePullPolicy: Always
+          resources:
+            requests:
+              memory: "1Gi"
+              cpu: "500m"
+            limits:
+              memory: "1Gi"
+              cpu: "500m"
           ports:
             - containerPort: 5101
           env:
@@ -97,6 +104,13 @@ spec:

                 command: ["/sbin/killall5", "-15"]
           imagePullPolicy: Always
+          resources:
+            requests:
+              memory: "1Gi"
+              cpu: "500m"
+            limits:
+              memory: "1Gi"
+              cpu: "500m"
           livenessProbe:
             httpGet:
               path: /info
diff --git a/base/apps/simulators-api.yaml b/base/apps/simulators-api.yaml
index 476b7147..8a819930 100644
--- a/base/apps/simulators-api.yaml
+++ b/base/apps/simulators-api.yaml
@@ -19,6 +19,13 @@ spec:
         - name: simulators-api
           image: ghcr.io/biosimulations/simulators-api
           imagePullPolicy: "Always"
+          resources:
+            requests:
+              memory: "500Mi"
+              cpu: "50m"
+            limits:
+              memory: "1Gi"
+              cpu: "200m"
           volumeMounts:
             - name: config
               mountPath: "/app/config"
diff --git a/base/resources/redis-ha.yaml b/base/resources/redis-ha.yaml
index 1dd70e5c..4b794bad 100644
--- a/base/resources/redis-ha.yaml
+++ b/base/resources/redis-ha.yaml
@@ -53,6 +53,13 @@ spec:
       containers:
         - name: redis
           image: redis
+          resources:
+            requests:
+              memory: "1Gi"
+              cpu: "100m"
+            limits:
+              memory: "2Gi"
+              cpu: "200m"
           command:
             - "redis-server"
           args:
diff --git a/config/dev/hsds/config.yaml b/config/dev/hsds/config.yaml
index f5b7213d..13e9a2d7 100644
--- a/config/dev/hsds/config.yaml
+++ b/config/dev/hsds/config.yaml
@@ -10,6 +10,7 @@ hsds_endpoint: https://data.biosimulations.dev # used for hateos links in respon
 aws_s3_gateway: https://storage.googleapis.com # use endpoint for the region HSDS is running in, e.g. 'https://s3.amazonaws.com' for us-east-1
 bucket_name: biosim-hsds-dev # set to use a default bucket, otherwise bucket param is needed for all requests
 #aws_s3_gateway: http://s3low.scality.uchc.edu
+#aws_s3_no_sign_request: false # do not use credentials for S3 requests, equivalent of --no-sign-request for AWS CLI
 aws_dynamodb_gateway: null # use for dynamodb endpint, e.g. 'https://dynamodb.us-east-1.amazonaws.com',
 aws_dynamodb_users_table: null # set to table name if dynamodb is used to store usernames and passwords
 azure_connection_string: null # use for connecting to Azure blob storage
@@ -17,11 +18,11 @@ azure_resource_id: null # resource id for use with Azure Active Directory
 azure_storage_account: null # storage account to use on Azure
 azure_resource_group: null # Azure resource group the container (BUCKET_NAME) belongs to
 root_dir: null # base directory to use for Posix storage
-password_salt: null # salt value if dynamically generated passwords are used
+password_salt: null # salt value to generate password based on username. Not recommended for public deployments
 head_port: 5100 # port to use for head node
 head_ram: 512m # memory for head container
 dn_port: 6101 # Start dn ports at 6101
-dn_ram: 3g # memory for DN container (per container)
+dn_ram: 1g # memory for DN container (per container)
 sn_port: 5101 # Start sn ports at 5101
 sn_ram: 1g # memory for SN container
 rangeget_port: 6900 # singleton proxy at port 6900
@@ -78,7 +79,7 @@ standalone_app: false # True when run as a single application
 blosc_nthreads: 2 # number of threads to use for blosc compression. Set to 0 to have blosc auto-determine thread count
 http_compression: false # Use HTTP compression
 http_max_url_length: 512 # Limit http request url + params to be less than this
-k8s_app_label: hsds # The app label for k8s deployments
+k8s_dn_label_selector: app=hsds # Selector for getting data node pods from a k8s deployment (https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors)
 k8s_namespace: dev # Specifies if a the client should be limited to a specific namespace. Useful for some RBAC configurations.
 restart_policy: on-failure # Docker restart policy
 domain_req_max_objects_limit: 500 # maximum number of objects to return in GET domain request with use_cache
@@ -91,5 +92,6 @@ aws_lambda_chunkread_function: null # name of aws lambda function for chunk rea
 aws_lambda_threshold: 4 # number of chunks per node per request to reach before using lambda
 aws_lambda_max_invoke: 1000 # max number of lambda functions to invoke simultaneously
 aws_lambda_gateway: null # use lambda endpoint for region HSDS is running in
+k8s_app_label: hsds # The app label for k8s deployments (use k8s_dn_label_selector instead)
 write_zero_chunks: False # write chunk to storage even when it's all zeros (or in general equial to the fill value)
 max_chunks_per_request: 1000 # maximum number of chunks to be serviced by one request
\ No newline at end of file
diff --git a/config/prod/hsds/config.yaml b/config/prod/hsds/config.yaml
index 8fb1e83c..3f2cb818 100644
--- a/config/prod/hsds/config.yaml
+++ b/config/prod/hsds/config.yaml
@@ -10,6 +10,7 @@ hsds_endpoint: https://data.biosimulations.org # used for hateos links in respon
 aws_s3_gateway: https://storage.googleapis.com # use endpoint for the region HSDS is running in, e.g. 'https://s3.amazonaws.com' for us-east-1
 bucket_name: biosim-hsds-prod # set to use a default bucket, otherwise bucket param is needed for all requests
 #aws_s3_gateway: http://s3low.scality.uchc.edu
+#aws_s3_no_sign_request: false # do not use credentials for S3 requests, equivalent of --no-sign-request for AWS CLI
 aws_dynamodb_gateway: null # use for dynamodb endpint, e.g. 'https://dynamodb.us-east-1.amazonaws.com',
 aws_dynamodb_users_table: null # set to table name if dynamodb is used to store usernames and passwords
 azure_connection_string: null # use for connecting to Azure blob storage
@@ -17,11 +18,11 @@ azure_resource_id: null # resource id for use with Azure Active Directory
 azure_storage_account: null # storage account to use on Azure
 azure_resource_group: null # Azure resource group the container (BUCKET_NAME) belongs to
 root_dir: null # base directory to use for Posix storage
-password_salt: null # salt value if dynamically generated passwords are used
+password_salt: null # salt value to generate password based on username. Not recommended for public deployments
 head_port: 5100 # port to use for head node
 head_ram: 512m # memory for head container
 dn_port: 6101 # Start dn ports at 6101
-dn_ram: 3g # memory for DN container (per container)
+dn_ram: 1g # memory for DN container (per container)
 sn_port: 5101 # Start sn ports at 5101
 sn_ram: 1g # memory for SN container
 rangeget_port: 6900 # singleton proxy at port 6900
@@ -78,17 +79,19 @@ standalone_app: false # True when run as a single application
 blosc_nthreads: 2 # number of threads to use for blosc compression. Set to 0 to have blosc auto-determine thread count
 http_compression: false # Use HTTP compression
 http_max_url_length: 512 # Limit http request url + params to be less than this
-k8s_app_label: hsds # The app label for k8s deployments
+k8s_dn_label_selector: app=hsds # Selector for getting data node pods from a k8s deployment (https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors)
 k8s_namespace: prod # Specifies if a the client should be limited to a specific namespace. Useful for some RBAC configurations.
 restart_policy: on-failure # Docker restart policy
 domain_req_max_objects_limit: 500 # maximum number of objects to return in GET domain request with use_cache
 # the following two values with give backof times of approx: 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8
 dn_max_retries: 7 # number of time to retry DN requests
 dn_retry_backoff_exp: 0.1 # backoff factor for retries

+ # DEPRECATED - the remaining config values are not used in currently but kept for backward compatibility with older container images
 aws_lambda_chunkread_function: null # name of aws lambda function for chunk reading
 aws_lambda_threshold: 4 # number of chunks per node per request to reach before using lambda
 aws_lambda_max_invoke: 1000 # max number of lambda functions to invoke simultaneously
 aws_lambda_gateway: null # use lambda endpoint for region HSDS is running in
+k8s_app_label: hsds # The app label for k8s deployments (use k8s_dn_label_selector instead)
 write_zero_chunks: False # write chunk to storage even when it's all zeros (or in general equial to the fill value)
 max_chunks_per_request: 1000 # maximum number of chunks to be serviced by one request
\ No newline at end of file