## Mirror of https://github.com/specklesystems/helm.git (values.yaml, 1328 lines, 84 KiB)
## @section Namespace
##

## @param namespace The name of the namespace in which Speckle will be deployed.
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
##
namespace: speckle-test

## @param create_namespace Enabling this will create a new namespace into which Speckle will be deployed
## The name of the namespace to create should be provided in the `namespace` parameter.
##
create_namespace: false
## @section SSL
##

## @param domain The DNS host name at which this Speckle deployment will be reachable
##
domain: localhost

## @param ssl_canonical_url HTTPS protocol will be the preferred protocol for serving this Speckle deployment
##
ssl_canonical_url: true

## @param tlsRejectUnauthorized If '1' (true), Speckle will reject any SSL certificates that are not signed by a trusted Certificate Authority. Should only be disabled in a trusted local development environment. https://nodejs.org/api/cli.html#node_tls_reject_unauthorizedvalue
##
tlsRejectUnauthorized: '1'

## @param cert_manager_issuer The name of the ClusterIssuer kubernetes resource that provides the SSL Certificate
##
cert_manager_issuer: letsencrypt-staging
## @section Feature flags
## @descriptionStart
## This object is a central location to define feature flags for the whole chart.
## @descriptionEnd
featureFlags:
  ## @param featureFlags.automateModuleEnabled High level flag fully toggles the integrated automate module
  automateModuleEnabled: false

  ## @param featureFlags.gendoAIModuleEnabled High level flag that toggles the Gendo AI render module
  gendoAIModuleEnabled: false

  ## @param featureFlags.noClosureWrites Toggles whether to stop writing to the closure table
  noClosureWrites: false

  ## @param featureFlags.workspaceModuleEnabled High level flag fully toggles the workspaces module
  workspaceModuleEnabled: false

  ## @param featureFlags.workspaceSsoEnabled High level flag fully toggles the workspaces dynamic sso
  workspaceSsoEnabled: false

  ## @param featureFlags.multipleEmailsModuleEnabled High level flag fully toggles multiple emails
  multipleEmailsModuleEnabled: false
analytics:
  ## @param analytics.enabled Enable or disable analytics
  enabled: true

  ## @param analytics.mixpanel_token_id The Mixpanel token ID used to identify this Speckle deployment in MixPanel
  mixpanel_token_id: 'acd87c5a50b56df91a795e999812a3a4'

  ## @param analytics.mixpanel_api_host The Mixpanel API host to which analytics data will be sent
  mixpanel_api_host: 'https://analytics.speckle.systems'
## @section Network Plugin configuration
## @descriptionStart
## This is used to define the type of network policy that is deployed.
## Different Kubernetes Network Plugins or Container Network Interfaces (CNIs) can make use of different types of
## Network Policy. Some of these provide more features than the standard Kubernetes Network Policy.
## @descriptionEnd
networkPlugin:
  ## @param networkPlugin.type (Optional) Used to configure which type of NetworkPolicy is deployed. Options are 'kubernetes' or 'cilium'.
  type: 'kubernetes'
## @section Ingress metadata for NetworkPolicy
## @descriptionStart
## This section is ignored unless networkPolicy is enabled for frontend or server.
## The NetworkPolicy uses this value to enable connections from the ingress controller pod in this namespace to reach Speckle.
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
## @descriptionEnd
##
ingress:
  ## @param ingress.namespace The namespace in which the ingress controller is deployed.
  namespace: ingress-nginx

  ## @param ingress.controllerName The name of the Kubernetes pod in which the ingress controller is deployed.
  controllerName: ingress-nginx
## @section Common parameters
##

## @param docker_image_tag Speckle is published as a Docker Image. The version of the image which will be deployed is specified by this tag.
##
docker_image_tag: '2.21.3-branch.testing2.211223-a8fd768'

## @param imagePullPolicy Determines the conditions when the Docker Images for Speckle should be pulled from the Image registry.
## ref: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
##
imagePullPolicy: IfNotPresent

## @param secretName Deprecated, please use individual secret parameters. This is the name of the Kubernetes Secret resource in which secrets for Speckle are stored. Secrets within this Secret resource may include Postgres and Redis connection strings, S3 secret values, email server passwords, etc..
## The expected key within the Secret resource is indicated elsewhere in this values.yaml file.
## This is expected to be an opaque Secret resource type.
## If component secrets are provided, they will take precedence.
## ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
##
secretName: server-vars

## @param file_size_limit_mb The maximum size of any single file (unit is Megabytes) that can be uploaded to Speckle
##
file_size_limit_mb: 100
## @section Monitoring
## @descriptionStart
## This enables metrics generated by Speckle to be ingested by Prometheus: https://prometheus.io/
## Enabling this requires Prometheus to have been deployed prior, as this resource expects the Prometheus Custom Resource Definition
## for the ServiceMonitor to already be existing within the cluster.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#related-resources
## @descriptionEnd
##

## @param enable_prometheus_monitoring If enabled, Speckle deploys a Prometheus ServiceMonitor resource
##
enable_prometheus_monitoring: false
prometheusMonitoring:
  ## @param prometheusMonitoring.namespace If provided, deploys Speckle's Prometheus resources (e.g. ServiceMonitor) in the given namespace
  ## Prometheus prior to v0.19.0, or any version when deployed with default parameters, expects ServiceMonitors to be deployed within the same namespace.
  ## This parameter allows the Prometheus resources provided by Speckle to be deployed in another namespace.
  ## This allows Prometheus (< v0.19.0 or any version with default configuration) to be deployed in a separate namespace from Speckle.
  ## Note that Speckle expects the namespace to exist prior to deployment.
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#related-resources
  ##
  namespace: ''

  ## @param prometheusMonitoring.release If provided, adds the value to a `release` label on all the Prometheus resources deployed by Speckle
  ## Prometheus prior to v0.19.0, or any version when deployed with default parameters, expects ServiceMonitors to be selectable on the release label.
  ## This parameter allows Prometheus to be deployed with a non-default release name.
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#related-resources
  ##
  release: ''

  networkPolicy:
    ## @extra prometheusMonitoring.networkPolicy.inCluster Parameters for allowing ingress from the Prometheus pod that will scrape this Speckle release. It is assumed that Prometheus is deployed within the Kubernetes cluster.
    ##
    inCluster:
      kubernetes:
        ## @param prometheusMonitoring.networkPolicy.inCluster.kubernetes.podSelector (Kubernetes Network Policy only) The pod Selector yaml object used to uniquely select the prometheus pods within the cluster and given namespace
        ## For Kubernetes Network Policies this is a podSelector object.
        ## For Cilium Network Policies this is ignored.
        ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
        ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
        ##
        podSelector: {}

        ## @param prometheusMonitoring.networkPolicy.inCluster.kubernetes.namespaceSelector (Kubernetes Network Policy only) The namespace selector yaml object used to uniquely select the namespace in which the prometheus pods are deployed
        ## This is a Kubernetes namespaceSelector object.
        ## For Cilium Network Policies this is ignored
        ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
        ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
        ##
        namespaceSelector: {}

      cilium:
        ## @param prometheusMonitoring.networkPolicy.inCluster.cilium.fromEndpoints (Cilium Network Policy only) The endpoint selector yaml object used to uniquely select the in-cluster endpoint in which the prometheus pods are deployed
        ## For Kubernetes Network Policies this is ignored.
        ## ref: https://docs.cilium.io/en/v1.9/policy/language/#ingress
        ## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/selector.go
        ##
        fromEndpoints: []
## @section Postgres Database
## @descriptionStart
## Defines parameters related to connections to the Postgres database.
## @descriptionEnd
##
db:
  ## @param db.useCertificate If enabled, the certificate defined in db.certificate is used to verify TLS connections to the Postgres database
  ##
  useCertificate: false

  ## @param db.maxConnectionsServer The number of connections to the Postgres database to provide in the connection pool
  ##
  maxConnectionsServer: 4

  ## @param db.certificate The x509 public certificate for SSL connections to the Postgres database. Use of this certificate requires db.useCertificate to be enabled and an appropriate value for db.PGSSLMODE provided.
  ## The value must be formatted as a multi-line string. We recommend using the pipe-symbol and taking care to
  ## indent all lines of the value correctly.
  ## ref: https://helm.sh/docs/chart_template_guide/yaml_techniques/#strings-in-yaml
  ##
  certificate: '' # Multi-line string with the contents of `ca-certificate.crt`

  ## @param db.PGSSLMODE This defines the level of security used when connecting to the Postgres database
  ## Postgres provides different forms of protection from different types of threat when communicating between the client (Speckle) and the Postgres database.
  ## ref: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-PROTECTION
  ##
  PGSSLMODE: require

  connectionString:
    ## @param db.connectionString.secretName Required. A secret containing the full connection string to the Postgres database (e.g. in format of `protocol://username:password@host:port/database`) stored within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
    ##
    secretName: ''

    ## @param db.connectionString.secretKey Required. The key within the Kubernetes Secret holding the connection string.
    ##
    secretKey: ''

  ## @extra db.networkPolicy If networkPolicy is enabled for any service, this provides the NetworkPolicy with the necessary details to allow egress connections to the Postgres database
  ##
  networkPolicy:
    ## @extra db.networkPolicy.externalToCluster Only required if the Postgres database is not hosted within the Kubernetes cluster in which Speckle will be deployed.
    ##
    externalToCluster:
      ## @param db.networkPolicy.externalToCluster.enabled If enabled, indicates that the Postgres database is hosted externally to the Kubernetes cluster
      ## Only one of externalToCluster or inCluster should be enabled. If both are enabled then inCluster takes precedence and is the only one deployed
      ##
      enabled: true

    ## @extra db.networkPolicy.inCluster Only required if the Postgres database is hosted within the Kubernetes cluster in which Speckle will be deployed.
    ##
    inCluster:
      ## @param db.networkPolicy.inCluster.enabled If enabled, indicates that the Postgres database is hosted within the same Kubernetes cluster in which Speckle will be deployed
      ## Only one of externalToCluster or inCluster should be enabled. If both are enabled then inCluster takes precedence and is the only set of egress network policy rules deployed.
      ##
      enabled: false

      ## @param db.networkPolicy.inCluster.port the port on the server providing the Postgres database (default: "5432")
      ##
      port: ''

      kubernetes:
        ## @param db.networkPolicy.inCluster.kubernetes.podSelector (Kubernetes Network Policy only) The pod Selector yaml object used to uniquely select the postgres compatible database pods within the cluster and given namespace
        ## For Kubernetes Network Policies this is a podSelector object.
        ## For Cilium Network Policies this is ignored.
        ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
        ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
        ##
        podSelector: {}

        ## @param db.networkPolicy.inCluster.kubernetes.namespaceSelector (Kubernetes Network Policy only) The namespace selector yaml object used to uniquely select the namespace in which the postgres compatible database pods are deployed
        ## This is a Kubernetes namespaceSelector object.
        ## For Cilium Network Policies this is ignored
        ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
        ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
        ##
        namespaceSelector: {}

      cilium:
        ## @param db.networkPolicy.inCluster.cilium.endpointSelector (Cilium Network Policy only) The endpoint selector yaml object used to uniquely select the in-cluster endpoint in which the postgres compatible database pods are deployed
        ## For Kubernetes Network Policies this is ignored.
        ## ref: https://docs.cilium.io/en/v1.9/policy/language/#egress
        ## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/selector.go
        endpointSelector: {}

        ## @param db.networkPolicy.inCluster.cilium.serviceSelector (Cilium Network Policy only) The service selector yaml object used to uniquely select the in-cluster service providing the postgres compatible database service
        ## For Kubernetes Network Policies this is ignored.
        ## ref: https://docs.cilium.io/en/v1.9/policy/language/#egress
        ## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/service.go
        serviceSelector: {}
## @section S3 Compatible Storage
## @descriptionStart
## Defines parameters related to connecting to the S3 compatible storage.
## @descriptionEnd
##
s3:
  configMap:
    ## @param s3.configMap.enabled If enabled, the s3.endpoint, s3.bucket, and s3.access_key values will be determined from a configMap and the values provided in this helm release ignored.
    ## This allows these values to be generated by external tooling, e.g. Terraform or CloudFormation, and then inserted into the Kubernetes cluster by these tools.
    ##
    enabled: false

    ## @param s3.configMap.name The name of the ConfigMap in which values for the keys (`endpoint`, `bucket`, and `access_key`) are provided. Expected to be in the namespace defined by the `namespace` parameter of this Helm Chart.
    ##
    name: ''

  ## @param s3.endpoint The URL at which the s3 compatible storage is hosted. If `s3.configMap.enabled` is true, this value is ignored.
  ## The url should be prefixed by the protocol (e.g. `https://`)
  ## The url may need to include the port if it is not the default (e.g. `443` for `https` protocol)
  ##
  endpoint: ''

  ## @param s3.bucket The s3 compatible bucket in which Speckle data will be stored. If `s3.configMap.enabled` is true, this value is ignored.
  ## The access key should be granted write permissions to this bucket
  ##
  bucket: ''

  ## @param s3.access_key The key of the access key used to authenticate with the s3 compatible storage. If `s3.configMap.enabled` is true, this value is ignored.
  ##
  access_key: ''

  secret_key:
    ## @param s3.secret_key.secretName Required. A Kubernetes secret containing the s3 secret_key. This is expected to be the name of an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
    ##
    secretName: ''

    ## @param s3.secret_key.secretKey Required. The key within the Kubernetes Secret, the value of which is the s3 secret.
    ##
    secretKey: ''

  ## @param s3.create_bucket If enabled, will create a bucket with the given bucket name at this endpoint
  ## If enabled, the access_key must be granted the appropriate bucket creation privileges
  ##
  create_bucket: 'false'

  ## @param s3.region The region in which the bucket resides (or will be created in).
  ## If not provided, defaults to `us-east-1`. For many providers of s3 compatible storage, such as minio, this value may be ignored.
  ##
  region: ''

  ## @extra s3.networkPolicy If networkPolicy is enabled for any service, this provides the NetworkPolicy with the necessary details to allow egress connections to the s3 compatible storage
  ##
  networkPolicy:
    ## @param s3.networkPolicy.port the port on the server providing the s3 compatible storage (default: "443")
    ##
    port: ''

    ## @extra s3.networkPolicy.externalToCluster Only required if the s3 compatible storage is not hosted within the Kubernetes cluster in which Speckle will be deployed.
    ##
    externalToCluster:
      ## @param s3.networkPolicy.externalToCluster.enabled If enabled, indicates that the s3 compatible storage is hosted externally to the Kubernetes cluster
      ## Only one of externalToCluster or inCluster should be enabled. If both are enabled then inCluster takes precedence and is the only one deployed
      ##
      enabled: true

    ## @extra s3.networkPolicy.inCluster Only required if the s3 compatible storage is hosted within the Kubernetes cluster in which Speckle will be deployed.
    ##
    inCluster:
      ## @param s3.networkPolicy.inCluster.enabled If enabled, indicates that the s3 compatible storage is hosted within the same Kubernetes cluster in which Speckle will be deployed
      ## Only one of externalToCluster or inCluster should be enabled. If both are enabled then inCluster takes precedence and is the only set of egress network policy rules deployed.
      ##
      enabled: false

      kubernetes:
        ## @param s3.networkPolicy.inCluster.kubernetes.podSelector (Kubernetes Network Policy only) The pod Selector yaml object used to uniquely select the s3 compatible storage pods within the cluster and given namespace
        ## For Kubernetes Network Policies this is a podSelector object.
        ## For Cilium Network Policies this is ignored.
        ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
        ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
        ##
        podSelector: {}

        ## @param s3.networkPolicy.inCluster.kubernetes.namespaceSelector (Kubernetes Network Policy only) The namespace selector yaml object used to uniquely select the namespace in which the s3 compatible storage pods are deployed
        ## This is a Kubernetes namespaceSelector object.
        ## For Cilium Network Policies, this is ignored
        ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
        ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
        ##
        namespaceSelector: {}

      cilium:
        ## @param s3.networkPolicy.inCluster.cilium.endpointSelector (Cilium Network Policy only) The endpoint selector yaml object used to uniquely select the in-cluster endpoint in which the s3 compatible storage pods are deployed
        ## For Kubernetes Network Policies, this is ignored.
        ## ref: https://docs.cilium.io/en/v1.9/policy/language/#egress
        ## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/selector.go
        endpointSelector: {}

        ## @param s3.networkPolicy.inCluster.cilium.serviceSelector (Cilium Network Policy only) The service selector yaml object used to uniquely select the in-cluster service providing the s3 compatible storage service
        ## For Kubernetes Network Policies this is ignored.
        ## ref: https://docs.cilium.io/en/v1.9/policy/language/#egress
        ## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/service.go
        serviceSelector: {}
## @section Redis Store
## @descriptionStart
## Defines parameters related to connecting to the Redis Store.
## @descriptionEnd
##
redis:
  connectionString:
    ## @param redis.connectionString.secretName Required. A secret containing the full connection string to the Redis store (e.g. in format of `protocol://username:password@host:port/database`) stored within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
    ##
    secretName: ''

    ## @param redis.connectionString.secretKey Required. The key within the Kubernetes Secret holding the Redis connection string.
    ##
    secretKey: ''

  ## @extra redis.networkPolicy If networkPolicy is enabled for Speckle server, this provides the NetworkPolicy with the necessary details to allow egress connections to the Redis store
  ##
  networkPolicy:
    ## @extra redis.networkPolicy.externalToCluster Only required if the Redis store is not hosted within the Kubernetes cluster in which Speckle will be deployed.
    ##
    externalToCluster:
      ## @param redis.networkPolicy.externalToCluster.enabled If enabled, indicates that the Redis store is hosted externally to the Kubernetes cluster
      ## Only one of externalToCluster or inCluster should be enabled. If both are enabled then inCluster takes precedence and is the only one deployed
      ##
      enabled: true

    ## @extra redis.networkPolicy.inCluster is only required if the Redis store is hosted within the Kubernetes cluster in which Speckle will be deployed.
    ##
    inCluster:
      ## @param redis.networkPolicy.inCluster.enabled If enabled, indicates that the Redis store is hosted within the same Kubernetes cluster in which Speckle will be deployed
      ## Only one of externalToCluster or inCluster should be enabled. If both are enabled then inCluster takes precedence and is the only set of egress network policy rules deployed.
      ##
      enabled: false

      ## @param redis.networkPolicy.inCluster.port the port on the server providing the Redis store (default: "6379")
      ##
      port: ''

      kubernetes:
        ## @param redis.networkPolicy.inCluster.kubernetes.podSelector (Kubernetes Network Policy only) The pod Selector yaml object used to uniquely select the redis store pods within the cluster and given namespace
        ## For Kubernetes Network Policies this is a podSelector object.
        ## For Cilium Network Policies this is ignored.
        ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
        ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
        ##
        podSelector: {}

        ## @param redis.networkPolicy.inCluster.kubernetes.namespaceSelector (Kubernetes Network Policy only) The namespace selector yaml object used to uniquely select the namespace in which the redis store pods are deployed
        ## This is a Kubernetes namespaceSelector object.
        ## For Cilium Network Policies, this is ignored
        ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
        ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
        ##
        namespaceSelector: {}

      cilium:
        ## @param redis.networkPolicy.inCluster.cilium.endpointSelector (Cilium Network Policy only) The endpoint selector yaml object used to uniquely select the in-cluster endpoint in which the redis pods are deployed
        ## For Kubernetes Network Policies, this is ignored.
        ## ref: https://docs.cilium.io/en/v1.9/policy/language/#egress
        ## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/selector.go
        endpointSelector: {}

        ## @param redis.networkPolicy.inCluster.cilium.serviceSelector (Cilium Network Policy only) The service selector yaml object used to uniquely select the in-cluster service providing the redis store service
        ## For Kubernetes Network Policies this is ignored.
        ## ref: https://docs.cilium.io/en/v1.9/policy/language/#egress
        ## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/service.go
        serviceSelector: {}
## @section Server
## @descriptionStart
## Defines parameters related to the backend server component of Speckle.
## @descriptionEnd
##
server:
  ## @param server.replicas The number of instances of the Server pod to be deployed within the cluster.
  ##
  replicas: 1

  ## @param server.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'

  ## @param server.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false

  ## @param server.image The Docker image to be used for the Speckle Server component. If blank, defaults to speckle/speckle-server:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''

  ## @param server.enableFe2Messaging If enabled, the related FE1 deployment will show banners/messages about the new frontend
  ##
  enableFe2Messaging: false

  onboarding:
    ## @param server.onboarding.stream_url The (cross-server) URL to the project/stream that should be used as the onboarding project base.
    ##
    stream_url: 'https://latest.speckle.systems/projects/843d07eb10'

    ## @param server.onboarding.stream_cache_bust_number Increase this number to trigger the re-pulling of the base stream
    ##
    stream_cache_bust_number: 1

  inspect:
    ## @param server.inspect.enabled If enabled, indicates that the Speckle server should be deployed with the nodejs inspect feature enabled
    enabled: false

    ## @param server.inspect.port The port on which the nodejs inspect feature should be exposed
    port: '7000'

  ## @param server.adminOverrideEnabled Enables the server side admin authz override
  adminOverrideEnabled: false

  ## @param server.weeklyDigestEnabled Enables sending out the server weekly digest emails
  weeklyDigestEnabled: false

  ## @param server.max_object_size_mb The maximum size of an individual object which can be uploaded to the server
  max_object_size_mb: 100

  ## @param server.max_object_upload_file_size_mb Objects are batched together and uploaded to the /objects endpoint as http POST form data. This determines the maximum size of that form data which can be uploaded to the server. It should be greater than or equal to max_object_size_mb.
  max_object_upload_file_size_mb: 100

  ## @param server.max_project_models_per_page The maximum number of models that can be returned in a single page of a query for all models of a project
  max_project_models_per_page: 500

  ## @param server.speckleAutomateUrl The url of the Speckle Automate instance
  speckleAutomateUrl: 'https://automate.speckle.systems'

  gendoAI:
    ## @param server.gendoAI.apiUrl The url of the Gendo AI application, including protocol.
    apiUrl: 'https://api.gendo.ai/external/generate'

    key:
      ## @param server.gendoAI.key.secretName The name of the Kubernetes Secret containing the Gendo AI key. If left blank, will default to the `secretName` parameter.
      secretName: ''

      ## @param server.gendoAI.key.secretKey The key within the Kubernetes Secret holding the Gendo AI key as its value.
      secretKey: 'gendoai_key'

    keyResponse:
      ## @param server.gendoAI.keyResponse.secretName The name of the Kubernetes Secret containing the Gendo AI key response. If left blank, will default to the `secretName` parameter.
      secretName: ''

      ## @param server.gendoAI.keyResponse.secretKey The key within the Kubernetes Secret holding the Gendo AI key response as its value.
      secretKey: 'gendoai_key_response'

    ratelimiting:
      ## @param server.gendoAI.ratelimiting.renderRequest The number of render requests allowed per period
      renderRequest: 1

      ## @param server.gendoAI.ratelimiting.renderRequestPeriodSeconds The period in seconds for the render request limit
      renderRequestPeriodSeconds: 20

      ## @param server.gendoAI.ratelimiting.burstRenderRequest The number of render requests allowed in 'burst' mode when the other limit is reached.
      burstRenderRequest: 3

      ## @param server.gendoAI.ratelimiting.burstRenderRequestPeriodSeconds The period in seconds for the burst render request limit.
      burstRenderRequestPeriodSeconds: 60

  ## @param server.encryptionKeys.path The path where the encryption keys should be loaded from
  encryptionKeys:
    path: '/encryption-keys/keys.json'

  licenseTokenSecret:
    ## @param server.licenseTokenSecret.secretName The name of the Kubernetes Secret containing the License token. This is a unique value (can be generated randomly). This is expected to be provided within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
    ##
    secretName: ''

    ## @param server.licenseTokenSecret.secretKey The key within the Kubernetes Secret holding the License token as its value.
    ##
    secretKey: ''

  sessionSecret:
    ## @param server.sessionSecret.secretName The name of the Kubernetes Secret containing the Session secret. This is a unique value (can be generated randomly). This is expected to be provided within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
    ##
    secretName: ''

    ## @param server.sessionSecret.secretKey The key within the Kubernetes Secret holding the Session secret as its value.
    ##
    secretKey: ''

  ## @extra server.auth Speckle provides a number of different mechanisms for authenticating users. Each available option must be configured here.
  ##
  auth:
    local:
      ## @param server.auth.local.enabled If enabled, users can register and authenticate with an email address and password.
      ## The login details are stored in the Postgres database connected to Speckle and are encrypted.
      ##
      enabled: true

    google:
      ## @param server.auth.google.enabled If enabled, users can authenticate via Google with their Google account credentials. If enabling Google, the `server.auth.google.client_id` parameter is required, and a secret must be provided via the Kubernetes secret referenced in `server.auth.google.clientSecret`.
      ##
      enabled: false

      ## @param server.auth.google.client_id This is the ID for Speckle that you have registered with Google.
      ##
      client_id: ''

      clientSecret:
        ## @param server.auth.google.clientSecret.secretName The name of the Kubernetes Secret containing the Google client secret. This is expected to be provided within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
        ##
        secretName: ''

        ## @param server.auth.google.clientSecret.secretKey The key within the Kubernetes Secret holding the Google client secret as its value.
        ##
        secretKey: ''

    github:
      ## @param server.auth.github.enabled If enabled, users can authenticate via Github with their Github account credentials. If enabling Github authentication, the `server.auth.github.client_id` parameter is required.
      ##
      enabled: false

      ## @param server.auth.github.client_id This is the ID for Speckle that you have registered with Github
      ##
      client_id: ''

      clientSecret:
        ## @param server.auth.github.clientSecret.secretName The name of the Kubernetes Secret containing the GitHub client secret. This is expected to be provided within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
        ##
        secretName: ''

        ## @param server.auth.github.clientSecret.secretKey The key within the Kubernetes Secret holding the GitHub client secret as its value.
        ##
        secretKey: ''

    azure_ad:
      ## @param server.auth.azure_ad.enabled If enabled, users can authenticate via Azure Active Directory.
      ##
      enabled: false

      ## @param server.auth.azure_ad.org_name This is the Organisation Name that you have registered with Azure
      ##
      org_name: ''

      ## @param server.auth.azure_ad.identity_metadata This is the identity metadata for Speckle that you have registered with Azure
      ##
      identity_metadata: ''

      ## @param server.auth.azure_ad.issuer This is the issuer name for Speckle that you have registered with Azure
      ##
      issuer: ''

      ## @param server.auth.azure_ad.client_id This is the ID for Speckle that you have registered with Azure
      ##
      client_id: ''

      clientSecret:
        ## @param server.auth.azure_ad.clientSecret.secretName The name of the Kubernetes Secret containing the Azure AD client secret. This is expected to be provided within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
        ##
        secretName: ''

        ## @param server.auth.azure_ad.clientSecret.secretKey The key within the Kubernetes Secret holding the Azure AD client secret as its value.
|
|
##
|
|
secretKey: ''
|
|
## @param server.auth.azure_ad.additional_domains List of `matchName` or `matchPattern` maps for domains that should be allow-listed for egress in Network Policy. https://docs.microsoft.com/en-us/azure/azure-portal/azure-portal-safelist-urls?tabs=public-cloud are enabled by default.
|
|
##
|
|
additional_domains: []
|
|
## @param server.auth.azure_ad.port Port on server to connect to. Used to allow egress in Network Policy. Defaults to 443
|
|
##
|
|
port: 443
|
|
oidc:
|
|
## @param server.auth.oidc.enabled If enabled, users can authenticate via OpenID Connect identity provider
|
|
##
|
|
enabled: false
|
|
## @param server.auth.oidc.name This is the name that you want displayed on the login button
|
|
##
|
|
name: ''
|
|
## @param server.auth.oidc.discovery_url This is the OIDC discovery URL for the identity provider you want to use
|
|
##
|
|
discovery_url: ''
|
|
## @param server.auth.oidc.client_id This is the ID for Speckle that you have registered with the OIDC identity provider
|
|
##
|
|
client_id: ''
|
|
clientSecret:
|
|
## @param server.auth.oidc.clientSecret.secretName The name of the Kubernetes Secret containing the OIDC client secret. This is expected to be provided within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
|
|
##
|
|
secretName: ''
|
|
## @param server.auth.oidc.clientSecret.secretKey The key within the Kubernetes Secret holding the OIDC client secret as its value.
|
|
##
|
|
secretKey: ''
|
|
## @param server.auth.oidc.domains List of `matchName` or `matchPattern` maps for domains that should be allow-listed for egress in Network Policy.
|
|
##
|
|
domains: []
|
|
## @extra server.email Speckle can communicate with users via email, providing account verification and notification.
|
|
##
|
|
email:
|
|
## @param server.email.enabled If enabled, Speckle can send email to users - for example, email verification for account registration.
|
|
##
|
|
enabled: false
|
|
## @param server.email.host The domain name or IP address of the server hosting the email service.
|
|
##
|
|
host: ''
|
|
## @param server.email.port The port on the server for the email service.
|
|
##
|
|
port: ''
|
|
## @param server.email.from The email address from which Speckle will send emails. Defaults to 'no-reply@speckle.systems' if left blank.
|
|
##
|
|
from: ''
|
|
## @param server.email.username The username with which Speckle will authenticate with the email service.
|
|
## Note that the `email_password` is expected to be provided in the Kubernetes Secret with the name provided in the `secretName` parameter.
|
|
##
|
|
username: ''
|
|
password:
|
|
## @param server.email.password.secretName The name of the Kubernetes Secret containing the email password. This is expected to be provided within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
|
|
##
|
|
secretName: ''
|
|
## @param server.email.password.secretKey The key within the Kubernetes Secret holding the email password as its value.
|
|
##
|
|
secretKey: ''
|
|
## @extra server.email.networkPolicy If networkPolicy is enabled for Speckle server, this provides the Network Policy with the necessary details to allow egress connections to the email server
|
|
##
|
|
networkPolicy:
|
|
## @extra server.email.networkPolicy.externalToCluster Only required if the Redis store is not hosted within the Kubernetes cluster in which Speckle will be deployed.
|
|
##
|
|
externalToCluster:
|
|
## @param server.email.networkPolicy.externalToCluster.enabled If enabled, indicates that the email server is hosted externally to the Kubernetes cluster
|
|
## Only one of externalToCluster or inCluster should be enabled. If both are enabled then inCluster takes precedence and is the only one deployed
|
|
##
|
|
enabled: true
|
|
## @extra server.email.networkPolicy.inCluster is only required if the email server is hosted within the Kubernetes cluster in which Speckle will be deployed.
|
|
##
|
|
inCluster:
|
|
## @param server.email.networkPolicy.inCluster.enabled If enabled, indicates that the email server is hosted withing the same Kubernetes cluster in which Speckle will be deployed
|
|
## Only one of externalToCluster or inCluster should be enabled. If both are enabled then inCluster takes precedence and is the only set of egress network policy rules deployed.
|
|
##
|
|
enabled: false
|
|
kubernetes:
|
|
## @param server.email.networkPolicy.inCluster.kubernetes.podSelector (Kubernetes Network Policy only) The pod Selector yaml object used to uniquely select the email server pods within the cluster and given namespace
|
|
## For Kubernetes Network Policies this is a podSelector object.
|
|
## For Cilium Network Policies this is ignored.
|
|
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
|
##
|
|
podSelector: {}
|
|
## @param server.email.networkPolicy.inCluster.kubernetes.namespaceSelector (Kubernetes Network Policy only) The namespace selector yaml object used to uniquely select the namespace in which the email server pods are deployed
|
|
## This is a Kubernetes namespaceSelector object.
|
|
## For Cilium Network Policies, this is ignored
|
|
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
|
##
|
|
namespaceSelector: {}
|
|
cilium:
|
|
## @param server.email.networkPolicy.inCluster.cilium.endpointSelector (Cilium Network Policy only) The endpoint selector yaml object used to uniquely select the in-cluster endpoint in which the email server pods are deployed
|
|
## For Kubernetes Network Policies, this is ignored.
|
|
## ref: https://docs.cilium.io/en/v1.9/policy/language/#egress
|
|
## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/selector.go
|
|
endpointSelector: {}
|
|
## @param server.email.networkPolicy.inCluster.cilium.serviceSelector (Cilium Network Policy only) The service selector yaml object used to uniquely select the in-cluster service providing the email server
|
|
## For Kubernetes Network Policies this is ignored.
|
|
## ref: https://docs.cilium.io/en/v1.9/policy/language/#egress
|
|
## ref: https://github.com/cilium/cilium/blob/master/pkg/policy/api/service.go
|
|
serviceSelector: {}
|
|
requests:
|
|
## @param server.requests.cpu The CPU that should be available on a node when scheduling this pod.
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
##
|
|
cpu: 1000m
|
|
## @param server.requests.memory The Memory that should be available on a node when scheduling this pod.
|
|
## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
##
|
|
memory: 1Gi
|
|
limits:
|
|
## @param server.limits.cpu The maximum CPU that will be made available to the server Pod in a given period.
|
|
## If this limit is exceeded, execution of the Pod will be paused until the next period.
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
##
|
|
cpu: 1500m
|
|
## @param server.limits.memory The maximum Memory that will be made available to the server Pod.
|
|
## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
##
|
|
memory: 3Gi
|
|
ratelimiting:
|
|
## @param server.ratelimiting.all_requests The maximum number of requests that can be made to the Speckle server in a moving one second window.
|
|
all_requests: 500
|
|
## @param server.ratelimiting.burst_all_requests If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server in a moving one minute window.
|
|
burst_all_requests: 2000
|
|
## @param server.ratelimiting.user_create The maximum number of requests that can be made to the Speckle server to create a new user in a moving one second window.
|
|
user_create: 6
|
|
## @param server.ratelimiting.burst_user_create If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to create a new user in a moving one minute window.
|
|
burst_user_create: 1000
|
|
## @param server.ratelimiting.stream_create The maximum number of requests that can be made to the Speckle server to create a new stream in a moving one second window.
|
|
stream_create: 1
|
|
## @param server.ratelimiting.burst_stream_create If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to create a new stream in a moving one minute window.
|
|
burst_stream_create: 100
|
|
## @param server.ratelimiting.commit_create The maximum number of requests that can be made to the Speckle server to create a new commit in a moving one second window.
|
|
commit_create: 1
|
|
## @param server.ratelimiting.burst_commit_create If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to create a new commit in a moving one minute window.
|
|
burst_commit_create: 100
|
|
## @param server.ratelimiting.post_getobjects_streamid The maximum number of requests that can be made to the Speckle server to get a new object in a moving one second window.
|
|
post_getobjects_streamid: 3
|
|
## @param server.ratelimiting.burst_post_getobjects_streamid If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to get a new object in a moving one minute window.
|
|
burst_post_getobjects_streamid: 200
|
|
## @param server.ratelimiting.post_diff_streamid The maximum number of requests that can be made to the Speckle server to undertake a diff in a moving one second window.
|
|
post_diff_streamid: 10
|
|
## @param server.ratelimiting.burst_post_diff_streamid If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to undertake a diff in a moving one minute window.
|
|
burst_post_diff_streamid: 1000
|
|
## @param server.ratelimiting.post_objects_streamid The maximum number of requests that can be made to the Speckle server to post a new object in a moving one second window.
|
|
post_objects_streamid: 6
|
|
## @param server.ratelimiting.burst_post_objects_streamid If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to post a new object in a moving one minute window.
|
|
burst_post_objects_streamid: 400
|
|
## @param server.ratelimiting.get_objects_streamid_objectid The maximum number of requests that can be made to the Speckle server to get an object in a moving one second window.
|
|
get_objects_streamid_objectid: 3
|
|
## @param server.ratelimiting.burst_get_objects_streamid_objectid If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to get an object in a moving one minute window.
|
|
burst_get_objects_streamid_objectid: 200
|
|
## @param server.ratelimiting.get_objects_streamid_objectid_single The maximum number of requests that can be made to the Speckle server to get a single object in a moving one second window.
|
|
get_objects_streamid_objectid_single: 3
|
|
## @param server.ratelimiting.burst_get_objects_streamid_objectid_single If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to get a single object in a moving one minute window.
|
|
burst_get_objects_streamid_objectid_single: 200
|
|
## @param server.ratelimiting.post_graphql The maximum number of requests that can be made to the GraphQL API in a moving one second window.
|
|
post_graphql: 50
|
|
## @param server.ratelimiting.burst_post_graphql If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the GraphQL API in a moving one minute window.
|
|
burst_post_graphql: 200
|
|
## @param server.ratelimiting.get_auth The maximum number of requests that can be made to the Speckle server to authenticate in a moving 10 minute window.
|
|
get_auth: 4
|
|
## @param server.ratelimiting.burst_get_auth If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to authenticate in a moving thirty minute window.
|
|
burst_get_auth: 10
|
|
serviceAccount:
|
|
## @param server.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
|
|
## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
|
|
## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
|
|
## access to most secrets on the cluster and access to the Kubernetes API.
|
|
##
|
|
create: true
|
|
fileUploads:
|
|
## @param server.fileUploads.enabled If enabled, file uploads on the server will be flagged as enabled
|
|
enabled: true
|
|
mailchimp:
|
|
## @param server.mailchimp.enabled Mailchimp integration feature flag
|
|
enabled: false
|
|
apikey:
|
|
## @param server.mailchimp.apikey.secretName The name of the Kubernetes Secret containing the Mailchimp API key.
|
|
secretName: '' # defaults to .secretName
|
|
## @param server.mailchimp.apikey.secretKey The key within the Kubernetes Secret holding the Mailchimp API key as its value.
|
|
secretKey: 'mailchimp_apikey'
|
|
## @param server.mailchimp.serverPrefix Mailchimp api server prefix
|
|
serverPrefix: ''
|
|
## @param server.mailchimp.newsletterListId Audience id for the newsletter list
|
|
newsletterListId: ''
|
|
## @param server.mailchimp.onboardingListId Audience id for the onboarding list
|
|
onboardingListId: ''
|
|
## @param server.mailchimp.onboardingJourneyId Id of the onboarding journey
|
|
onboardingJourneyId: ''
|
|
## @param server.mailchimp.onboardingStepId Id of the onboarding journey step we trigger
|
|
onboardingStepId: ''
|
|
migration:
|
|
## @param server.migration.movedFrom Indicate the URL where the server moved from
|
|
movedFrom: ''
|
|
## @param server.migration.movedTo Indicate the URL where the server moved to
|
|
movedTo: ''
|
|
monitoring:
|
|
apollo:
|
|
## @param server.monitoring.apollo.enabled (Optional) If enabled, exports metrics from the GraphQL API to Apollo Graphql Studio.
|
|
## If enabling Apollo, a secret containing the key to the Apollo Graphql Studio API must stored within the Kubernetes cluster as an opaque Kubernetes Secret.
|
|
## The name of the Kubernetes Secret resource must match the `secretName` parameter, and the key within this Kubernetes Secret must be `apollo_key`.
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
|
|
##
|
|
enabled: false
|
|
## @param server.monitoring.apollo.graph_id The ID for Speckle that you registered in Apollo Graphql Studio.
|
|
##
|
|
graph_id: ''
|
|
key:
|
|
## @param server.monitoring.apollo.key.secretName The name of the Kubernetes Secret containing the Apollo monitoring key. This is expected to be provided within the Kubernetes cluster as an opaque Kubernetes Secret. Ref: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets
|
|
##
|
|
secretName: ''
|
|
## @param server.monitoring.apollo.key.secretKey The key within the Kubernetes Secret holding the Apollo monitoring key as its value.
|
|
##
|
|
secretKey: ''
|
|
## @param server.monitoring.mp (Optional) If server.monitoring.mp.enabled is set to false, metrics will not be collected by the Speckle server.
|
|
mp: {}
|
|
## @param server.disable_tracking If set to true, will prevent tracking metrics from being collected
|
|
##
|
|
disable_tracking: false
|
|
## @param server.disable_tracing If set to true, will prevent tracing metrics from being collected
|
|
##
|
|
disable_tracing: false
|
|
networkPolicy:
|
|
## @param server.networkPolicy.enabled If enabled, will provide additional security be limiting network traffic into and out of the pod to only the required endpoints and ports.
|
|
## If enabled, the `ingress`, `postgres.networkPolicy`, `redis.networkPolicy`, and `s3.networkPolicy` parameters need be configured.
|
|
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
|
##
|
|
enabled: false
|
|
## @param server.affinity Affinity for Speckle server pods scheduling
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
|
##
|
|
affinity: {}
|
|
## @param server.nodeSelector Node labels for Speckle server pods scheduling
|
|
## ref: https://kubernetes.io/docs/user-guide/node-selection/
|
|
##
|
|
nodeSelector: {}
|
|
## @param server.tolerations Tolerations for Speckle server pods scheduling
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
|
##
|
|
tolerations: []
|
|
## @param server.topologySpreadConstraints Spread Constraints for Speckle server pod scheduling
|
|
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
|
##
|
|
topologySpreadConstraints: []
|
|
## @section Server
## @descriptionStart
## Defines parameters related to the backend server component of Speckle.
## @descriptionEnd
##
objects:
  ## @param objects.replicas The number of instances of the Server pod to be deployed within the cluster.
  ##
  replicas: 1
  ## @param objects.image The Docker image to be used for the Speckle Objects component. If blank, defaults to speckle/speckle-server:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''
  ## @param objects.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'
  ## @param objects.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false
  inspect:
    ## @param objects.inspect.enabled If enabled, indicates that the Speckle server should be deployed with the nodejs inspect feature enabled
    enabled: false
    ## @param objects.inspect.port The port on which the nodejs inspect feature should be exposed
    port: '7000'
  requests:
    ## @param objects.requests.cpu The CPU that should be available on a node when scheduling this pod.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 1000m
    ## @param objects.requests.memory The Memory that should be available on a node when scheduling this pod.
    ## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 1Gi
  limits:
    ## @param objects.limits.cpu The maximum CPU that will be made available to the server Pod in a given period.
    ## If this limit is exceeded, execution of the Pod will be paused until the next period.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 1500m
    ## @param objects.limits.memory The maximum Memory that will be made available to the server Pod.
    ## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 3Gi
  ratelimiting:
    ## @param objects.ratelimiting.post_objects_streamid The maximum number of requests that can be made to the Speckle server to post a new object in a moving one second window.
    post_objects_streamid: 6
    ## @param objects.ratelimiting.burst_post_objects_streamid If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to post a new object in a moving one minute window.
    burst_post_objects_streamid: 400
    ## @param objects.ratelimiting.get_objects_streamid_objectid The maximum number of requests that can be made to the Speckle server to get an object in a moving one second window.
    get_objects_streamid_objectid: 3
    ## @param objects.ratelimiting.burst_get_objects_streamid_objectid If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to get an object in a moving one minute window.
    burst_get_objects_streamid_objectid: 200
    ## @param objects.ratelimiting.get_objects_streamid_objectid_single The maximum number of requests that can be made to the Speckle server to get a single object in a moving one second window.
    get_objects_streamid_objectid_single: 3
    ## @param objects.ratelimiting.burst_get_objects_streamid_objectid_single If the regular limit is exceeded, the limit is increased to the burst limit. This is the maximum number of requests that can be made to the Speckle server to get a single object in a moving one minute window.
    burst_get_objects_streamid_objectid_single: 200
  serviceAccount:
    ## @param objects.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
    ## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
    ## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
    ## access to most secrets on the cluster and access to the Kubernetes API.
    ##
    create: true
  networkPolicy:
    ## @param objects.networkPolicy.enabled If enabled, will provide additional security by limiting network traffic into and out of the pod to only the required endpoints and ports.
    ## If enabled, the `ingress`, `postgres.networkPolicy`, `redis.networkPolicy`, and `s3.networkPolicy` parameters need to be configured.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
    ##
    enabled: false
  ## @param objects.affinity Affinity for Speckle server pods scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ##
  affinity: {}
  ## @param objects.nodeSelector Node labels for Speckle server pods scheduling
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param objects.tolerations Tolerations for Speckle server pods scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param objects.topologySpreadConstraints Spread Constraints for Speckle server pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ##
  topologySpreadConstraints: []
|
|
## @section Frontend
## @descriptionStart
## Defines parameters related to the frontend server component of Speckle.
## @descriptionEnd
##
frontend:
  ## @param frontend.replicas The number of instances of the Frontend pod to be deployed within the cluster.
  ##
  replicas: 1
  ## @param frontend.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'
  ## @param frontend.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false
  ## @param frontend.image The Docker image to be used for the Speckle Frontend component. If blank, defaults to speckle/speckle-frontend:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''
  requests:
    ## @param frontend.requests.cpu The CPU that should be available on a node when scheduling this pod.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 250m
    ## @param frontend.requests.memory The Memory that should be available on a node when scheduling this pod.
    ## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 256Mi
  limits:
    ## @param frontend.limits.cpu The maximum CPU that will be made available to the frontend Pod in a given period.
    ## If this limit is exceeded, execution of the Pod will be paused until the next period.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 1000m
    ## @param frontend.limits.memory The maximum Memory that will be made available to the frontend Pod.
    ## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 512Mi
  networkPolicy:
    ## @param frontend.networkPolicy.enabled If enabled, will provide additional security by limiting network traffic into and out of the pod to only the required endpoints and ports.
    ## If enabled, the `ingress` parameters need to be configured.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
    ##
    enabled: false
  ## @param frontend.affinity Affinity for Speckle frontend pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ##
  affinity: {}
  ## @param frontend.nodeSelector Node labels for Speckle frontend pods scheduling
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param frontend.tolerations Tolerations for Speckle frontend pods scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param frontend.topologySpreadConstraints Spread Constraints for Speckle frontend pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ##
  topologySpreadConstraints: []
  serviceAccount:
    ## @param frontend.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
    ## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
    ## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
    ## access to most secrets on the cluster and access to the Kubernetes API.
    ##
    create: true
|
|
## @section frontend_2
## @descriptionStart
## Defines parameters related to the new web application component of Speckle
## @descriptionEnd
##
frontend_2:
  ## @param frontend_2.image The Docker image to be used for the Speckle Frontend 2 component. If blank, defaults to speckle/speckle-frontend-2:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''
  ## @param frontend_2.ghostApiKey API Key for Ghost, which provides the blog content for the new web application frontend.
  ##
  ghostApiKey: ''
  ## @param frontend_2.logClientApiToken SEQ API token
  ##
  logClientApiToken: ''
  ## @param frontend_2.logClientApiEndpoint SEQ endpoint URL
  ##
  logClientApiEndpoint: ''
  ## @param frontend_2.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'
  ## @param frontend_2.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false
  ## @param frontend_2.enabled Feature flag to enable running the new web application frontend.
  ##
  enabled: true
  ## @param frontend_2.replicas The number of instances of the Frontend 2 pod to be deployed within the cluster.
  ##
  replicas: 1
  requests:
    ## @param frontend_2.requests.cpu The CPU that should be available on a node when scheduling this pod.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 250m
    ## @param frontend_2.requests.memory The Memory that should be available on a node when scheduling this pod.
    ## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 256Mi
  limits:
    ## @param frontend_2.limits.cpu The maximum CPU that will be made available to the frontend Pod in a given period.
    ## If this limit is exceeded, execution of the Pod will be paused until the next period.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 1000m
    ## @param frontend_2.limits.memory The maximum Memory that will be made available to the frontend Pod.
    ## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 512Mi
  networkPolicy:
    ## @param frontend_2.networkPolicy.enabled If enabled, will provide additional security by limiting network traffic into and out of the pod to only the required endpoints and ports.
    ## If enabled, the `ingress` parameters need to be configured.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
    ##
    enabled: false
  ## @param frontend_2.affinity Affinity for Speckle frontend pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ##
  affinity: {}
  ## @param frontend_2.nodeSelector Node labels for Speckle frontend pods scheduling
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param frontend_2.tolerations Tolerations for Speckle frontend pods scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param frontend_2.topologySpreadConstraints Spread Constraints for Speckle frontend pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ##
  topologySpreadConstraints: []
  serviceAccount:
    ## @param frontend_2.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
    ## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
|
|
## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
|
|
## access to most secrets on the cluster and access to the Kubernetes API.
|
|
##
|
|
create: true
|
|
## @section Preview Service
## @descriptionStart
## Defines parameters related to the Preview Service component of Speckle.
## @descriptionEnd
##
preview_service:
  ## @param preview_service.replicas The number of instances of the Preview Service pod to be deployed within the cluster.
  ##
  replicas: 1
  ## @param preview_service.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'
  ## @param preview_service.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false
  ## @param preview_service.image The Docker image to be used for the Speckle Preview Service component. If blank, defaults to speckle/speckle-preview-service:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''
  ## @param preview_service.port The port on which the Preview Service will run. This is not exposed, but used within its own local network within the pod.
  port: '3001'
  monitoring:
    ## @param preview_service.monitoring.metricsPort The port on which the metrics server will be exposed.
    metricsPort: '9094'
  puppeteer:
    ## @param preview_service.puppeteer.userDataDirectory The path to the user data directory. If not set, defaults to '/tmp/puppeteer'. This is mounted in the deployment as a volume with read-write access.
    userDataDirectory: ''
    ## @param preview_service.puppeteer.timeoutMilliseconds The timeout in milliseconds for the Puppeteer service.
    timeoutMilliseconds: '3600000'
  requests:
    ## @param preview_service.requests.cpu The CPU that should be available on a node when scheduling this pod.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 500m
    ## @param preview_service.requests.memory The Memory that should be available on a node when scheduling this pod.
    ## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 2Gi
  limits:
    ## @param preview_service.limits.cpu The maximum CPU that will be made available to the Preview Service Pod in a given period.
    ## If this limit is exceeded, execution of the Pod will be paused until the next period.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 1000m
    ## @param preview_service.limits.memory The maximum Memory that will be made available to the Preview Service Pod.
    ## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 4Gi
  networkPolicy:
    ## @param preview_service.networkPolicy.enabled If enabled, will provide additional security by limiting network traffic into and out of the pod to only the required endpoints and ports.
    ## If enabled, the `db.networkPolicy` parameters need to be configured.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
    ##
    enabled: false
  ## @param preview_service.affinity Affinity for Speckle Preview Service pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ##
  affinity: {}
  ## @param preview_service.nodeSelector Node labels for Speckle Preview Service pods scheduling
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param preview_service.tolerations Tolerations for Speckle Preview Service pods scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param preview_service.topologySpreadConstraints Spread Constraints for Speckle Preview Service pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ##
  topologySpreadConstraints: []
  serviceAccount:
    ## @param preview_service.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
    ## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
    ## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
    ## access to most secrets on the cluster and access to the Kubernetes API.
    ##
    create: true
## @section Webhook Service
## @descriptionStart
## Defines parameters related to the Webhook Service component of Speckle.
## @descriptionEnd
##
webhook_service:
  ## @param webhook_service.replicas The number of instances of the Webhook Service pod to be deployed within the cluster.
  ##
  replicas: 1
  ## @param webhook_service.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'
  ## @param webhook_service.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false
  ## @param webhook_service.image The Docker image to be used for the Speckle Webhook Service component. If blank, defaults to speckle/speckle-webhook-service:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''
  requests:
    ## @param webhook_service.requests.cpu The CPU that should be available on a node when scheduling this pod.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 500m
    ## @param webhook_service.requests.memory The Memory that should be available on a node when scheduling this pod.
    ## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 2Gi
  limits:
    ## @param webhook_service.limits.cpu The maximum CPU that will be made available to the Webhook Service Pod in a given period.
    ## If this limit is exceeded, execution of the Pod will be paused until the next period.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 1000m
    ## @param webhook_service.limits.memory The maximum Memory that will be made available to the Webhook Service Pod.
    ## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 4Gi
  networkPolicy:
    ## @param webhook_service.networkPolicy.enabled If enabled, will provide additional security by limiting network traffic into and out of the pod to only the required endpoints and ports.
    ## If enabled, the `db.networkPolicy` parameters need to be configured.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
    ##
    enabled: false
  ## @param webhook_service.affinity Affinity for Speckle Webhook Service pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ##
  affinity: {}
  ## @param webhook_service.nodeSelector Node labels for Speckle Webhook Service pods scheduling
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param webhook_service.tolerations Tolerations for Speckle Webhook Service pods scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param webhook_service.topologySpreadConstraints Spread Constraints for Speckle Webhook Service pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ##
  topologySpreadConstraints: []
  serviceAccount:
    ## @param webhook_service.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
    ## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
    ## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
    ## access to most secrets on the cluster and access to the Kubernetes API.
    ##
    create: true
## @section File Import Service
## @descriptionStart
## Defines parameters related to the File Import Service component of Speckle.
## @descriptionEnd
##
fileimport_service:
  ## @param fileimport_service.replicas The number of instances of the FileImport Service pod to be deployed within the cluster.
  ##
  replicas: 1
  ## @param fileimport_service.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'
  ## @param fileimport_service.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false
  ## @param fileimport_service.image The Docker image to be used for the Speckle FileImport Service component. If blank, defaults to speckle/speckle-fileimport-service:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''
  requests:
    ## @param fileimport_service.requests.cpu The CPU that should be available on a node when scheduling this pod.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 100m
    ## @param fileimport_service.requests.memory The Memory that should be available on a node when scheduling this pod.
    ## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 512Mi
  limits:
    ## @param fileimport_service.limits.cpu The maximum CPU that will be made available to the FileImport Service Pod in a given period.
    ## If this limit is exceeded, execution of the Pod will be paused until the next period.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 1000m
    ## @param fileimport_service.limits.memory The maximum Memory that will be made available to the FileImport Service Pod.
    ## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 2Gi
  networkPolicy:
    ## @param fileimport_service.networkPolicy.enabled If enabled, will provide additional security by limiting network traffic into and out of the pod to only the required endpoints and ports.
    ## If enabled, the `db.networkPolicy` parameters need to be configured.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
    ##
    enabled: false
  ## @param fileimport_service.affinity Affinity for Speckle FileImport Service pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ##
  affinity: {}
  ## @param fileimport_service.nodeSelector Node labels for Speckle FileImport Service pods scheduling
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param fileimport_service.tolerations Tolerations for Speckle FileImport Service pods scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param fileimport_service.topologySpreadConstraints Spread Constraints for Speckle FileImport Service pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ##
  topologySpreadConstraints: []
  serviceAccount:
    ## @param fileimport_service.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
    ## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
    ## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
    ## access to most secrets on the cluster and access to the Kubernetes API.
    ##
    create: true
  ## @param fileimport_service.time_limit_min The maximum time that a file can take to be processed by the FileImport Service.
  ## Files which take longer than this value to process will be cancelled.
  ## If you experience repeated issues with small files taking a long time, and increasing CPU and/or memory requests & limits does not help,
  ## please reach out to Speckle at https://speckle.community/
  ##
  time_limit_min: 10
## @section Monitoring
## @descriptionStart
## Provides Speckle with metrics related to the Postgres database.
## @descriptionEnd
##
monitoring:
  ## @param monitoring.replicas The number of instances of the Monitoring pod to be deployed within the cluster.
  ##
  replicas: 1
  ## @param monitoring.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'
  ## @param monitoring.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false
  ## @param monitoring.image The Docker image to be used for the Speckle Monitoring component. If blank, defaults to speckle/speckle-monitoring-deployment:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''
  requests:
    ## @param monitoring.requests.cpu The CPU that should be available on a node when scheduling this pod.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 100m
    ## @param monitoring.requests.memory The Memory that should be available on a node when scheduling this pod.
    ## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 64Mi
  limits:
    ## @param monitoring.limits.cpu The maximum CPU that will be made available to the Monitoring Pod in a given period.
    ## If this limit is exceeded, execution of the Pod will be paused until the next period.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 200m
    ## @param monitoring.limits.memory The maximum Memory that will be made available to the Monitoring Pod.
    ## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 512Mi
  networkPolicy:
    ## @param monitoring.networkPolicy.enabled If enabled, will provide additional security by limiting network traffic into and out of the pod to only the required endpoints and ports.
    ## If enabled, the `db.networkPolicy` parameters need to be configured.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
    ##
    enabled: false
  ## @param monitoring.affinity Affinity for Speckle Monitoring pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ##
  affinity: {}
  ## @param monitoring.nodeSelector Node labels for Speckle Monitoring pods scheduling
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param monitoring.tolerations Tolerations for Speckle Monitoring pods scheduling
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param monitoring.topologySpreadConstraints Spread Constraints for Speckle Monitoring pod scheduling
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ##
  topologySpreadConstraints: []
  serviceAccount:
    ## @param monitoring.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
    ## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
    ## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
    ## access to most secrets on the cluster and access to the Kubernetes API.
    ##
    create: true
## @section Testing
## @descriptionStart
## Defines parameters related to testing that the deployment of Speckle has been successful.
## @descriptionEnd
##

## @param helm_test_enabled If enabled, an additional pod is deployed which verifies some functionality of Speckle to determine if it is deployed correctly
##
helm_test_enabled: true
test:
  ## @param test.logLevel The minimum level of logs which will be output. Suitable values are trace, debug, info, warn, error, fatal, or silent
  ##
  logLevel: 'info'
  ## @param test.logPretty If enabled, will output logs in a human-readable format. Otherwise, logs will be output in JSON format.
  ##
  logPretty: false
  ## @param test.image The Docker image to be used for the Speckle Test component. If blank, defaults to speckle/speckle-test-deployment:{{ .Values.docker_image_tag }}. If provided, this value should be the full path including tag. The docker_image_tag value will be ignored.
  ##
  image: ''
  requests:
    ## @param test.requests.cpu The CPU that should be available on a node when scheduling this pod.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 100m
    ## @param test.requests.memory The Memory that should be available on a node when scheduling this pod.
    ## Depending on the Kubernetes cluster's configuration, exceeding this value may result in pod eviction from a node.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 64Mi
  limits:
    ## @param test.limits.cpu The maximum CPU that will be made available to the Test Pod in a given period.
    ## If this limit is exceeded, execution of the Pod will be paused until the next period.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    cpu: 200m
    ## @param test.limits.memory The maximum Memory that will be made available to the Test Pod.
    ## If this limit is exceeded, processes within the pod that request additional memory may be stopped.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    memory: 512Mi
  networkPolicy:
    ## @param test.networkPolicy.enabled If enabled, will provide additional security by limiting network traffic into and out of the pod to only the required endpoints and ports.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
    ##
    enabled: false
  serviceAccount:
    ## @param test.serviceAccount.create If enabled, a Kubernetes Service Account will be created for this pod.
    ## This provides additional security by limiting this pod's access to the Kubernetes API and to Secrets on the Kubernetes cluster.
    ## If disabled, the default Service Account will be used which in most Kubernetes configurations will grant this pod
    ## access to most secrets on the cluster and access to the Kubernetes API.
    ##
    create: true