From 86f6caddddc2e8fc46542d08e2700cc04185a3ed Mon Sep 17 00:00:00 2001
From: Kevin Harris
Date: Wed, 30 Oct 2019 08:19:18 -0400
Subject: [PATCH] Added all kinds of stuff.

---
 cluster-config/app.yaml                       |  120 +
 .../container-azm-ms-agentconfig.yaml         |   82 +
 cluster-config/falco.yaml                     | 3484 +++++++++++++++++
 cluster-config/gatekeeper.yaml                |  457 +++
 cluster-config/limitranges.yaml               |   38 +-
 cluster-config/linkerd.yaml                   | 3248 +++++++++++++++
 cluster-config/namespaces.yaml                |   50 +
 cluster-config/nginx-ingress.yaml             |  548 +++
 cluster-config/np-allow-consolidated.yaml     |  134 +
 cluster-config/np-deny-all.yaml               |   32 +
 cluster-config/quotas.yaml                    |   16 +-
 cluster-config/rbac-namespaces.yaml           |    2 +-
 12 files changed, 8183 insertions(+), 28 deletions(-)
 create mode 100644 cluster-config/app.yaml
 create mode 100644 cluster-config/container-azm-ms-agentconfig.yaml
 create mode 100644 cluster-config/falco.yaml
 create mode 100644 cluster-config/gatekeeper.yaml
 create mode 100644 cluster-config/linkerd.yaml
 create mode 100644 cluster-config/nginx-ingress.yaml
 create mode 100644 cluster-config/np-allow-consolidated.yaml
 create mode 100644 cluster-config/np-deny-all.yaml

diff --git a/cluster-config/app.yaml b/cluster-config/app.yaml
new file mode 100644
index 0000000..240a74b
--- /dev/null
+++ b/cluster-config/app.yaml
@@ -0,0 +1,120 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: imageclassifierweb
+  namespace: dev
+  labels:
+    app: imageclassifierweb
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: imageclassifierweb
+  template:
+    metadata:
+      labels:
+        app: imageclassifierweb
+    spec:
+      containers:
+      - name: imageclassifierweb
+        image: kevingbb/imageclassifierweb:v1
+        imagePullPolicy: Always
+        ports:
+        - name: http
+          containerPort: 80
+          protocol: TCP
+        resources:
+          limits:
+            memory: 250Mi
+            cpu: 250m
+          requests:
+            memory: 100Mi
+            cpu: 100m
+      dnsPolicy: ClusterFirst
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: imageclassifierweb
+  namespace: dev
+  labels:
+    app: imageclassifierweb
+  annotations:
+    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+    service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "contosofinsvcsubnet"
+spec:
+  type: LoadBalancer
+  loadBalancerIP: 100.64.2.232
+  ports:
+  - port: 80
+    targetPort: http
+    protocol: TCP
+    name: http
+  selector:
+    app: imageclassifierweb
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: imageclassifierweb
+  namespace: dev
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/ssl-redirect: "false"
+    nginx.ingress.kubernetes.io/upstream-vhost: imageclassifierweb.dev.svc.cluster.local:80
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      proxy_hide_header l5d-remote-ip;
+      proxy_hide_header l5d-server-id;
+spec:
+  rules:
+  - http:
+      paths:
+      - backend:
+          serviceName: imageclassifierweb
+          servicePort: 80
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: imageclassifierworker
+  namespace: dev
+  labels:
+    app: imageclassifierworker
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: imageclassifierworker
+  template:
+    metadata:
+      labels:
+        app: imageclassifierworker
+    spec:
+      securityContext:
+        runAsGroup: 1000
+        runAsNonRoot: true
+        runAsUser: 1000
+      containers:
+      - name: imageclassifierworker
+        image: kevingbb/imageclassifierworker:v1
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: API_BASE_URL
+          value: http://imageclassifierweb
+        volumeMounts:
+        - mountPath: /app/assets/images
+          name: fruitshare
+        resources:
+          limits:
+            memory: 1G
+            cpu: 1000m
+          requests:
+ memory: 500Mi + cpu: 500m + volumes: + - name: fruitshare + azureFile: + secretName: fruit-secret + shareName: fruit + readOnly: false + dnsPolicy: ClusterFirst diff --git a/cluster-config/container-azm-ms-agentconfig.yaml b/cluster-config/container-azm-ms-agentconfig.yaml new file mode 100644 index 0000000..4af5d97 --- /dev/null +++ b/cluster-config/container-azm-ms-agentconfig.yaml @@ -0,0 +1,82 @@ +kind: ConfigMap +apiVersion: v1 +data: + schema-version: + #string.used by agent to parse config. supported versions are {v1}. Configs with other schema versions will be rejected by the agent. + v1 + config-version: + #string.used by customer to keep track of this config file's version in their source control/repository (max allowed 10 chars, other chars will be truncated) + ver1 + log-data-collection-settings: |- + # Log data collection settings + [log_collection_settings] + [log_collection_settings.stdout] + # In the absense of this configmap, default value for enabled is true + enabled = true + # exclude_namespaces setting holds good only if enabled is set to true + # kube-system log collection is disabled by default in the absence of 'log_collection_settings.stdout' setting. If you want to enable kube-system, remove it from the following setting. + # If you want to continue to disable kube-system log collection keep this namespace in the following setting and add any other namespace you want to disable log collection to the array. + # In the absense of this configmap, default value for exclude_namespaces = ["kube-system"] + exclude_namespaces = ["kube-system"] + + [log_collection_settings.stderr] + # Default value for enabled is true + enabled = true + # exclude_namespaces setting holds good only if enabled is set to true + # kube-system log collection is disabled by default in the absence of 'log_collection_settings.stderr' setting. If you want to enable kube-system, remove it from the following setting. + # If you want to continue to disable kube-system log collection keep this namespace in the following setting and add any other namespace you want to disable log collection to the array. + # In the absense of this cofigmap, default value for exclude_namespaces = ["kube-system"] + exclude_namespaces = ["kube-system"] + + [log_collection_settings.env_var] + # In the absense of this configmap, default value for enabled is true + enabled = true + prometheus-data-collection-settings: |- + # Custom Prometheus metrics data collection settings + [prometheus_data_collection_settings.cluster] + # Cluster level scrape endpoint(s). These metrics will be scraped from agent's Replicaset (singleton) + + #Interval specifying how often to scrape for metrics. This is duration of time and can be specified for supporting settings by combining an integer value and time unit as a string value. Valid time units are ns, us (or µs), ms, s, m, h. + interval = "1m" + + ## Uncomment the following settings with valid string arrays for prometheus scraping + #fieldpass = ["metric_to_pass1", "metric_to_pass12"] + + #fielddrop = ["metric_to_drop"] + + # An array of urls to scrape metrics from. + urls = ["https://linkerd-tap.linkerd.svc.cluster.local/metrics"] + + # An array of Kubernetes services to scrape metrics from. 
+ kubernetes_services = ["https://metrics-server.kube-system.svc.cluster.local/metrics"] + + # When monitor_kubernetes_pods = true, replicaset will scrape Kubernetes pods for the following prometheus annotations: + # - prometheus.io/scrape: Enable scraping for this pod + # - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + # set this to `https` & most likely set the tls config. + # - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + # - prometheus.io/port: If port is not 9102 use this annotation + monitor_kubernetes_pods = false + + [prometheus_data_collection_settings.node] + # Node level scrape endpoint(s). These metrics will be scraped from agent's DaemonSet running in every node in the cluster + + #Interval specifying how often to scrape for metrics. This is duration of time and can be specified for supporting settings by combining an integer value and time unit as a string value. Valid time units are ns, us (or µs), ms, s, m, h. + interval = "1m" + + ## Uncomment the following settings with valid string arrays for prometheus scraping + + # An array of urls to scrape metrics from. $NODE_IP (all upper case) will substitute of running Node's IP address + # urls = ["http://$NODE_IP:9103/metrics"] + + #fieldpass = ["metric_to_pass1", "metric_to_pass12"] + + #fielddrop = ["metric_to_drop"] + agent-settings: |- + # agent health model feature settings + [agent_settings.health_model] + # In the absence of this configmap, default value for enabled is false + enabled = true +metadata: + name: container-azm-ms-agentconfig + namespace: kube-system \ No newline at end of file diff --git a/cluster-config/falco.yaml b/cluster-config/falco.yaml new file mode 100644 index 0000000..ea682ec --- /dev/null +++ b/cluster-config/falco.yaml @@ -0,0 +1,3484 @@ +--- +# Source: falco/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sysdig-falco + namespace: falco + labels: + app: sysdig-falco + chart: "falco-1.0.9" + release: "sysdig-falco" + heritage: "Tiller" +data: + falco.yaml: |- + # File(s) or Directories containing Falco rules, loaded at startup. + # The name "rules_file" is only for backwards compatibility. + # If the entry is a file, it will be read directly. If the entry is a directory, + # every file in that directory will be read, in alphabetical order. + # + # falco_rules.yaml ships with the falco package and is overridden with + # every new software version. falco_rules.local.yaml is only created + # if it doesn't exist. If you want to customize the set of rules, add + # your customizations to falco_rules.local.yaml. + # + # The files will be read in the order presented here, so make sure if + # you have overrides they appear in later files. + rules_file: + - /etc/falco/falco_rules.yaml + - /etc/falco/falco_rules.local.yaml + - /etc/falco/k8s_audit_rules.yaml + - /etc/falco/rules.d + + # If true, the times displayed in log messages and output messages + # will be in ISO 8601. By default, times are displayed in the local + # time zone, as governed by /etc/localtime. + time_format_iso_8601: false + + # Whether to output events in json or text + json_output: false + + # When using json output, whether or not to include the "output" property + # itself (e.g. "File below a known binary directory opened for writing + # (user=root ....") in the json output. + json_include_output_property: true + + # Send information logs to stderr and/or syslog Note these are *not* security + # notification logs! 
These are just Falco lifecycle (and possibly error) logs. + log_stderr: true + log_syslog: true + + # Minimum log level to include in logs. Note: these levels are + # separate from the priority field of rules. This refers only to the + # log level of falco's internal logging. Can be one of "emergency", + # "alert", "critical", "error", "warning", "notice", "info", "debug". + log_level: info + + # Minimum rule priority level to load and run. All rules having a + # priority more severe than this level will be loaded/run. Can be one + # of "emergency", "alert", "critical", "error", "warning", "notice", + # "info", "debug". + priority: debug + + # Whether or not output to any of the output channels below is + # buffered. Defaults to false + buffered_outputs: false + + # Falco uses a shared buffer between the kernel and userspace to pass + # system call information. When falco detects that this buffer is + # full and system calls have been dropped, it can take one or more of + # the following actions: + # - "ignore": do nothing. If an empty list is provided, ignore is assumed. + # - "log": log a CRITICAL message noting that the buffer was full. + # - "alert": emit a falco alert noting that the buffer was full. + # - "exit": exit falco with a non-zero rc. + # + # The rate at which log/alert messages are emitted is governed by a + # token bucket. The rate corresponds to one message every 30 seconds + # with a burst of 10 messages. + syscall_event_drops: + actions: + - log + - alert + rate: 0.03333 + max_burst: 10 + + # A throttling mechanism implemented as a token bucket limits the + # rate of falco notifications. This throttling is controlled by the following configuration + # options: + # - rate: the number of tokens (i.e. right to send a notification) + # gained per second. Defaults to 1. + # - max_burst: the maximum number of tokens outstanding. Defaults to 1000. + # + # With these defaults, falco could send up to 1000 notifications after + # an initial quiet period, and then up to 1 notification per second + # afterward. It would gain the full burst back after 1000 seconds of + # no activity. + outputs: + rate: 1 + max_burst: 1000 + + # Where security notifications should go. + # Multiple outputs can be enabled. + + syslog_output: + enabled: true + + # If keep_alive is set to true, the file will be opened once and + # continuously written to, with each output message on its own + # line. If keep_alive is set to false, the file will be re-opened + # for each output message. + # + # Also, the file will be closed and reopened if falco is signaled with + # SIGUSR1. + file_output: + enabled: false + keep_alive: false + filename: ./events.txt + + stdout_output: + enabled: true + + # Falco contains an embedded webserver that can be used to accept K8s + # Audit Events. These config options control the behavior of that + # webserver. (By default, the webserver is disabled). + # + # The ssl_certificate is a combination SSL Certificate and corresponding + # key contained in a single file. 
You can generate a key/cert as follows: + # + # $ openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem + # $ cat certificate.pem key.pem > falco.pem + # $ sudo cp falco.pem /etc/falco/falco.pem + + webserver: + enabled: false + listen_port: 8765 + k8s_audit_endpoint: /k8s-audit + ssl_enabled: false + ssl_certificate: /etc/falco/falco.pem + + # Possible additional things you might want to do with program output: + # - send to a slack webhook: + # program: "\"jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX\"" + # - logging (alternate method than syslog): + # program: logger -t falco-test + # - send over a network connection: + # program: nc host.example.com 80 + + # If keep_alive is set to true, the program will be started once and + # continuously written to, with each output message on its own + # line. If keep_alive is set to false, the program will be re-spawned + # for each output message. + # + # Also, the program will be closed and reopened if falco is signaled with + # SIGUSR1. + program_output: + enabled: false + keep_alive: false + program: mail -s "Falco Notification" someone@example.com + + http_output: + enabled: false + url: http://some.url + + application_rules.yaml: | + # + # Copyright (C) 2016-2018 Draios Inc dba Sysdig. + # + # This file is part of falco. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + + - required_engine_version: 2 + + ################################################################ + # By default all application-related rules are disabled for + # performance reasons. Depending on the application(s) you use, + # uncomment the corresponding rule definitions for + # application-specific activity monitoring. 
+ ################################################################ + + # Elasticsearch ports + - macro: elasticsearch_cluster_port + condition: fd.sport=9300 + - macro: elasticsearch_api_port + condition: fd.sport=9200 + - macro: elasticsearch_port + condition: elasticsearch_cluster_port or elasticsearch_api_port + + # - rule: Elasticsearch unexpected network inbound traffic + # desc: inbound network traffic to elasticsearch on a port other than the standard ports + # condition: user.name = elasticsearch and inbound and not elasticsearch_port + # output: "Inbound network traffic to Elasticsearch on unexpected port (connection=%fd.name)" + # priority: WARNING + + # - rule: Elasticsearch unexpected network outbound traffic + # desc: outbound network traffic from elasticsearch on a port other than the standard ports + # condition: user.name = elasticsearch and outbound and not elasticsearch_cluster_port + # output: "Outbound network traffic from Elasticsearch on unexpected port (connection=%fd.name)" + # priority: WARNING + + + # ActiveMQ ports + - macro: activemq_cluster_port + condition: fd.sport=61616 + - macro: activemq_web_port + condition: fd.sport=8161 + - macro: activemq_port + condition: activemq_web_port or activemq_cluster_port + + # - rule: Activemq unexpected network inbound traffic + # desc: inbound network traffic to activemq on a port other than the standard ports + # condition: user.name = activemq and inbound and not activemq_port + # output: "Inbound network traffic to ActiveMQ on unexpected port (connection=%fd.name)" + # priority: WARNING + + # - rule: Activemq unexpected network outbound traffic + # desc: outbound network traffic from activemq on a port other than the standard ports + # condition: user.name = activemq and outbound and not activemq_cluster_port + # output: "Outbound network traffic from ActiveMQ on unexpected port (connection=%fd.name)" + # priority: WARNING + + + # Cassandra ports + # https://docs.datastax.com/en/cassandra/2.0/cassandra/security/secureFireWall_r.html + - macro: cassandra_thrift_client_port + condition: fd.sport=9160 + - macro: cassandra_cql_port + condition: fd.sport=9042 + - macro: cassandra_cluster_port + condition: fd.sport=7000 + - macro: cassandra_ssl_cluster_port + condition: fd.sport=7001 + - macro: cassandra_jmx_port + condition: fd.sport=7199 + - macro: cassandra_port + condition: > + cassandra_thrift_client_port or + cassandra_cql_port or cassandra_cluster_port or + cassandra_ssl_cluster_port or cassandra_jmx_port + + # - rule: Cassandra unexpected network inbound traffic + # desc: inbound network traffic to cassandra on a port other than the standard ports + # condition: user.name = cassandra and inbound and not cassandra_port + # output: "Inbound network traffic to Cassandra on unexpected port (connection=%fd.name)" + # priority: WARNING + + # - rule: Cassandra unexpected network outbound traffic + # desc: outbound network traffic from cassandra on a port other than the standard ports + # condition: user.name = cassandra and outbound and not (cassandra_ssl_cluster_port or cassandra_cluster_port) + # output: "Outbound network traffic from Cassandra on unexpected port (connection=%fd.name)" + # priority: WARNING + + # Couchdb ports + # https://github.com/davisp/couchdb/blob/master/etc/couchdb/local.ini + - macro: couchdb_httpd_port + condition: fd.sport=5984 + - macro: couchdb_httpd_ssl_port + condition: fd.sport=6984 + # xxx can't tell what clustering ports are used. not writing rules for this + # yet. 
+ + # Fluentd ports + - macro: fluentd_http_port + condition: fd.sport=9880 + - macro: fluentd_forward_port + condition: fd.sport=24224 + + # - rule: Fluentd unexpected network inbound traffic + # desc: inbound network traffic to fluentd on a port other than the standard ports + # condition: user.name = td-agent and inbound and not (fluentd_forward_port or fluentd_http_port) + # output: "Inbound network traffic to Fluentd on unexpected port (connection=%fd.name)" + # priority: WARNING + + # - rule: Tdagent unexpected network outbound traffic + # desc: outbound network traffic from fluentd on a port other than the standard ports + # condition: user.name = td-agent and outbound and not fluentd_forward_port + # output: "Outbound network traffic from Fluentd on unexpected port (connection=%fd.name)" + # priority: WARNING + + # Gearman ports + # http://gearman.org/protocol/ + # - rule: Gearman unexpected network outbound traffic + # desc: outbound network traffic from gearman on a port other than the standard ports + # condition: user.name = gearman and outbound and outbound and not fd.sport = 4730 + # output: "Outbound network traffic from Gearman on unexpected port (connection=%fd.name)" + # priority: WARNING + + # Zookeeper + - macro: zookeeper_port + condition: fd.sport = 2181 + + # Kafka ports + # - rule: Kafka unexpected network inbound traffic + # desc: inbound network traffic to kafka on a port other than the standard ports + # condition: user.name = kafka and inbound and fd.sport != 9092 + # output: "Inbound network traffic to Kafka on unexpected port (connection=%fd.name)" + # priority: WARNING + + # Memcached ports + # - rule: Memcached unexpected network inbound traffic + # desc: inbound network traffic to memcached on a port other than the standard ports + # condition: user.name = memcached and inbound and fd.sport != 11211 + # output: "Inbound network traffic to Memcached on unexpected port (connection=%fd.name)" + # priority: WARNING + + # - rule: Memcached unexpected network outbound traffic + # desc: any outbound network traffic from memcached. memcached never initiates outbound connections. 
+ # condition: user.name = memcached and outbound + # output: "Unexpected Memcached outbound connection (connection=%fd.name)" + # priority: WARNING + + + # MongoDB ports + - macro: mongodb_server_port + condition: fd.sport = 27017 + - macro: mongodb_shardserver_port + condition: fd.sport = 27018 + - macro: mongodb_configserver_port + condition: fd.sport = 27019 + - macro: mongodb_webserver_port + condition: fd.sport = 28017 + + # - rule: Mongodb unexpected network inbound traffic + # desc: inbound network traffic to mongodb on a port other than the standard ports + # condition: > + # user.name = mongodb and inbound and not (mongodb_server_port or + # mongodb_shardserver_port or mongodb_configserver_port or mongodb_webserver_port) + # output: "Inbound network traffic to MongoDB on unexpected port (connection=%fd.name)" + # priority: WARNING + + # MySQL ports + # - rule: Mysql unexpected network inbound traffic + # desc: inbound network traffic to mysql on a port other than the standard ports + # condition: user.name = mysql and inbound and fd.sport != 3306 + # output: "Inbound network traffic to MySQL on unexpected port (connection=%fd.name)" + # priority: WARNING + + # - rule: HTTP server unexpected network inbound traffic + # desc: inbound network traffic to a http server program on a port other than the standard ports + # condition: proc.name in (http_server_binaries) and inbound and fd.sport != 80 and fd.sport != 443 + # output: "Inbound network traffic to HTTP Server on unexpected port (connection=%fd.name)" + # priority: WARNING + falco_rules.local.yaml: | + # + # Copyright (C) 2016-2018 Draios Inc dba Sysdig. + # + # This file is part of falco. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + + #################### + # Your custom rules! + #################### + + # Add new rules, like this one + # - rule: The program "sudo" is run in a container + # desc: An event will trigger every time you run sudo in a container + # condition: evt.type = execve and evt.dir=< and container.id != host and proc.name = sudo + # output: "Sudo run in container (user=%user.name %container.info parent=%proc.pname cmdline=%proc.cmdline)" + # priority: ERROR + # tags: [users, container] + + # Or override/append to any rule, macro, or list from the Default Rules + falco_rules.yaml: |+ + # + # Copyright (C) 2016-2018 Draios Inc dba Sysdig. + # + # This file is part of falco. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ # + + # See xxx for details on falco engine and rules versioning. Currently, + # this specific rules file is compatible with engine version 0 + # (e.g. falco releases <= 0.13.1), so we'll keep the + # required_engine_version lines commented out, so maintain + # compatibility with older falco releases. With the first incompatible + # change to this rules file, we'll uncomment this line and set it to + # the falco engine version in use at the time. + # + #- required_engine_version: 2 + + # Currently disabled as read/write are ignored syscalls. The nearly + # similar open_write/open_read check for files being opened for + # reading/writing. + # - macro: write + # condition: (syscall.type=write and fd.type in (file, directory)) + # - macro: read + # condition: (syscall.type=read and evt.dir=> and fd.type in (file, directory)) + + - macro: open_write + condition: (evt.type=open or evt.type=openat) and evt.is_open_write=true and fd.typechar='f' and fd.num>=0 + + - macro: open_read + condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='f' and fd.num>=0 + + - macro: open_directory + condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='d' and fd.num>=0 + + - macro: never_true + condition: (evt.num=0) + + - macro: always_true + condition: (evt.num>=0) + + # In some cases, such as dropped system call events, information about + # the process name may be missing. For some rules that really depend + # on the identity of the process performing an action such as opening + # a file, etc., we require that the process name be known. + - macro: proc_name_exists + condition: (proc.name!="") + + - macro: rename + condition: evt.type in (rename, renameat) + - macro: mkdir + condition: evt.type in (mkdir, mkdirat) + - macro: remove + condition: evt.type in (rmdir, unlink, unlinkat) + + - macro: modify + condition: rename or remove + + - macro: spawned_process + condition: evt.type = execve and evt.dir=< + + - macro: create_symlink + condition: evt.type in (symlink, symlinkat) and evt.dir=< + + # File categories + - macro: bin_dir + condition: fd.directory in (/bin, /sbin, /usr/bin, /usr/sbin) + + - macro: bin_dir_mkdir + condition: > + (evt.arg[1] startswith /bin/ or + evt.arg[1] startswith /sbin/ or + evt.arg[1] startswith /usr/bin/ or + evt.arg[1] startswith /usr/sbin/) + + - macro: bin_dir_rename + condition: > + evt.arg[1] startswith /bin/ or + evt.arg[1] startswith /sbin/ or + evt.arg[1] startswith /usr/bin/ or + evt.arg[1] startswith /usr/sbin/ + + - macro: etc_dir + condition: fd.name startswith /etc/ + + # This detects writes immediately below / or any write anywhere below /root + - macro: root_dir + condition: ((fd.directory=/ or fd.name startswith /root) and fd.name contains "/") + + - list: shell_binaries + items: [ash, bash, csh, ksh, sh, tcsh, zsh, dash] + + - list: ssh_binaries + items: [ + sshd, sftp-server, ssh-agent, + ssh, scp, sftp, + ssh-keygen, ssh-keysign, ssh-keyscan, ssh-add + ] + + - list: shell_mgmt_binaries + items: [add-shell, remove-shell] + + - macro: shell_procs + condition: proc.name in (shell_binaries) + + - list: coreutils_binaries + items: [ + truncate, sha1sum, numfmt, fmt, fold, uniq, cut, who, + groups, csplit, sort, expand, printf, printenv, unlink, tee, chcon, stat, + basename, split, nice, "yes", whoami, sha224sum, hostid, users, stdbuf, + base64, unexpand, cksum, od, paste, nproc, pathchk, sha256sum, wc, test, + comm, arch, du, factor, sha512sum, md5sum, tr, runcon, env, dirname, + tsort, join, 
shuf, install, logname, pinky, nohup, expr, pr, tty, timeout, + tail, "[", seq, sha384sum, nl, head, id, mkfifo, sum, dircolors, ptx, shred, + tac, link, chroot, vdir, chown, touch, ls, dd, uname, "true", pwd, date, + chgrp, chmod, mktemp, cat, mknod, sync, ln, "false", rm, mv, cp, echo, + readlink, sleep, stty, mkdir, df, dir, rmdir, touch + ] + + # dpkg -L login | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," + - list: login_binaries + items: [ + login, systemd, '"(systemd)"', systemd-logind, su, + nologin, faillog, lastlog, newgrp, sg + ] + + # dpkg -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," + - list: passwd_binaries + items: [ + shadowconfig, grpck, pwunconv, grpconv, pwck, + groupmod, vipw, pwconv, useradd, newusers, cppw, chpasswd, usermod, + groupadd, groupdel, grpunconv, chgpasswd, userdel, chage, chsh, + gpasswd, chfn, expiry, passwd, vigr, cpgr, adduser, addgroup, deluser, delgroup + ] + + # repoquery -l shadow-utils | grep bin | xargs ls -ld | grep -v '^d' | + # awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," + - list: shadowutils_binaries + items: [ + chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd, + groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv, + newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd + ] + + - list: sysdigcloud_binaries + items: [setup-backend, dragent, sdchecks] + + - list: docker_binaries + items: [docker, dockerd, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current, dockerd-current] + + - list: k8s_binaries + items: [hyperkube, skydns, kube2sky, exechealthz, weave-net, loopback, bridge, openshift-sdn, openshift] + + - list: lxd_binaries + items: [lxd, lxcfs] + + - list: http_server_binaries + items: [nginx, httpd, httpd-foregroun, lighttpd, apache, apache2] + + - list: db_server_binaries + items: [mysqld, postgres, sqlplus] + + - list: mysql_mgmt_binaries + items: [mysql_install_d, mysql_ssl_rsa_s] + + - list: postgres_mgmt_binaries + items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster] + + - list: db_mgmt_binaries + items: [mysql_mgmt_binaries, postgres_mgmt_binaries] + + - list: nosql_server_binaries + items: [couchdb, memcached, redis-server, rabbitmq-server, mongod] + + - list: gitlab_binaries + items: [gitlab-shell, gitlab-mon, gitlab-runner-b, git] + + - list: interpreted_binaries + items: [lua, node, perl, perl5, perl6, php, python, python2, python3, ruby, tcl] + + - macro: interpreted_procs + condition: > + (proc.name in (interpreted_binaries)) + + - macro: server_procs + condition: proc.name in (http_server_binaries, db_server_binaries, docker_binaries, sshd) + + # The explicit quotes are needed to avoid the - characters being + # interpreted by the filter expression. 
+ - list: rpm_binaries + items: [dnf, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, subscription-ma, + repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, + abrt-action-sav, rpmdb_stat, microdnf, rhn_check, yumdb] + + - list: openscap_rpm_binaries + items: [probe_rpminfo, probe_rpmverify, probe_rpmverifyfile, probe_rpmverifypackage] + + - macro: rpm_procs + condition: (proc.name in (rpm_binaries, openscap_rpm_binaries) or proc.name in (salt-minion)) + + - list: deb_binaries + items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, apt, apt-get, aptitude, + frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, + apt-listchanges, unattended-upgr, apt-add-reposit, apt-config, apt-cache + ] + + # The truncated dpkg-preconfigu is intentional, process names are + # truncated at the sysdig level. + - list: package_mgmt_binaries + items: [rpm_binaries, deb_binaries, update-alternat, gem, pip, pip3, sane-utils.post, alternatives, chef-client, apk] + + - macro: package_mgmt_procs + condition: proc.name in (package_mgmt_binaries) + + - macro: package_mgmt_ancestor_procs + condition: proc.pname in (package_mgmt_binaries) or + proc.aname[2] in (package_mgmt_binaries) or + proc.aname[3] in (package_mgmt_binaries) or + proc.aname[4] in (package_mgmt_binaries) + + - macro: coreos_write_ssh_dir + condition: (proc.name=update-ssh-keys and fd.name startswith /home/core/.ssh) + + - macro: run_by_package_mgmt_binaries + condition: proc.aname in (package_mgmt_binaries, needrestart) + + - list: ssl_mgmt_binaries + items: [ca-certificates] + + - list: dhcp_binaries + items: [dhclient, dhclient-script, 11-dhclient] + + # A canonical set of processes that run other programs with different + # privileges or as a different user. + - list: userexec_binaries + items: [sudo, su, suexec, critical-stack, dzdo] + + - list: known_setuid_binaries + items: [ + sshd, dbus-daemon-lau, ping, ping6, critical-stack-, pmmcli, + filemng, PassengerAgent, bwrap, osdetect, nginxmng, sw-engine-fpm, + start-stop-daem + ] + + - list: user_mgmt_binaries + items: [login_binaries, passwd_binaries, shadowutils_binaries] + + - list: dev_creation_binaries + items: [blkid, rename_device, update_engine, sgdisk] + + - list: hids_binaries + items: [aide, aide.wrapper, update-aide.con, logcheck, syslog-summary, osqueryd, ossec-syscheckd] + + - list: vpn_binaries + items: [openvpn] + + - list: nomachine_binaries + items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin] + + - macro: system_procs + condition: proc.name in (coreutils_binaries, user_mgmt_binaries) + + - list: mail_binaries + items: [ + sendmail, sendmail-msp, postfix, procmail, exim4, + pickup, showq, mailq, dovecot, imap-login, imap, + mailmng-core, pop3-login, dovecot-lda, pop3 + ] + + - list: mail_config_binaries + items: [ + update_conf, parse_mc, makemap_hash, newaliases, update_mk, update_tlsm4, + update_db, update_mc, ssmtp.postinst, mailq, postalias, postfix.config., + postfix.config, postfix-script, postconf + ] + + - list: sensitive_file_names + items: [/etc/shadow, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf] + + - list: sensitive_directory_names + items: [/, /etc, /etc/, /root, /root/] + + - macro: sensitive_files + condition: > + fd.name startswith /etc and + (fd.name in (sensitive_file_names) + or fd.directory in (/etc/sudoers.d, /etc/pam.d)) + + # Indicates that the process is new. Currently detected using time + # since process was started, using a threshold of 5 seconds. 
+ - macro: proc_is_new + condition: proc.duration <= 5000000000 + + # Network + - macro: inbound + condition: > + (((evt.type in (accept,listen) and evt.dir=<) or + (evt.type in (recvfrom,recvmsg) and evt.dir=< and + fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + + # RFC1918 addresses were assigned for private network usage + - list: rfc_1918_addresses + items: ['"10.0.0.0/8"', '"172.16.0.0/12"', '"192.168.0.0/16"'] + + - macro: outbound + condition: > + (((evt.type = connect and evt.dir=<) or + (evt.type in (sendto,sendmsg) and evt.dir=< and + fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8" and not fd.snet in (rfc_1918_addresses)) and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + + # Very similar to inbound/outbound, but combines the tests together + # for efficiency. + - macro: inbound_outbound + condition: > + (((evt.type in (accept,listen,connect) and evt.dir=<)) or + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + + - macro: ssh_port + condition: fd.sport=22 + + # In a local/user rules file, you could override this macro to + # enumerate the servers for which ssh connections are allowed. For + # example, you might have a ssh gateway host for which ssh connections + # are allowed. + # + # In the main falco rules file, there isn't any way to know the + # specific hosts for which ssh access is allowed, so this macro just + # repeats ssh_port, which effectively allows ssh from all hosts. In + # the overridden macro, the condition would look something like + # "fd.sip="a.b.c.d" or fd.sip="e.f.g.h" or ..." + - macro: allowed_ssh_hosts + condition: ssh_port + + - rule: Disallowed SSH Connection + desc: Detect any new ssh connection to a host other than those in an allowed group of hosts + condition: (inbound_outbound) and ssh_port and not allowed_ssh_hosts + output: Disallowed SSH Connection (command=%proc.cmdline connection=%fd.name user=%user.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_remote_service] + + # These rules and supporting macros are more of an example for how to + # use the fd.*ip and fd.*ip.name fields to match connection + # information against ips, netmasks, and complete domain names. + # + # To use this rule, you should modify consider_all_outbound_conns and + # populate allowed_{source,destination}_{ipaddrs,networks,domains} with the + # values that make sense for your environment. 
+ - macro: consider_all_outbound_conns + condition: (never_true) + + # Note that this can be either individual IPs or netmasks + - list: allowed_outbound_destination_ipaddrs + items: ['"127.0.0.1"', '"8.8.8.8"'] + + - list: allowed_outbound_destination_networks + items: ['"127.0.0.1/8"'] + + - list: allowed_outbound_destination_domains + items: [google.com, www.yahoo.com] + + - rule: Unexpected outbound connection destination + desc: Detect any outbound connection to a destination outside of an allowed set of ips, networks, or domain names + condition: > + consider_all_outbound_conns and outbound and not + ((fd.sip in (allowed_outbound_destination_ipaddrs)) or + (fd.snet in (allowed_outbound_destination_networks)) or + (fd.sip.name in (allowed_outbound_destination_domains))) + output: Disallowed outbound connection destination (command=%proc.cmdline connection=%fd.name user=%user.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network] + + - macro: consider_all_inbound_conns + condition: (never_true) + + - list: allowed_inbound_source_ipaddrs + items: ['"127.0.0.1"'] + + - list: allowed_inbound_source_networks + items: ['"127.0.0.1/8"', '"10.0.0.0/8"'] + + - list: allowed_inbound_source_domains + items: [google.com] + + - rule: Unexpected inbound connection source + desc: Detect any inbound connection from a source outside of an allowed set of ips, networks, or domain names + condition: > + consider_all_inbound_conns and inbound and not + ((fd.cip in (allowed_inbound_source_ipaddrs)) or + (fd.cnet in (allowed_inbound_source_networks)) or + (fd.cip.name in (allowed_inbound_source_domains))) + output: Disallowed inbound connection source (command=%proc.cmdline connection=%fd.name user=%user.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network] + + - list: bash_config_filenames + items: [.bashrc, .bash_profile, .bash_history, .bash_login, .bash_logout, .inputrc, .profile] + + - list: bash_config_files + items: [/etc/profile, /etc/bashrc] + + # Covers both csh and tcsh + - list: csh_config_filenames + items: [.cshrc, .login, .logout, .history, .tcshrc, .cshdirs] + + - list: csh_config_files + items: [/etc/csh.cshrc, /etc/csh.login] + + - list: zsh_config_filenames + items: [.zshenv, .zprofile, .zshrc, .zlogin, .zlogout] + + - list: shell_config_filenames + items: [bash_config_filenames, csh_config_filenames, zsh_config_filenames] + + - list: shell_config_files + items: [bash_config_files, csh_config_files] + + - list: shell_config_directories + items: [/etc/zsh] + + - rule: Modify Shell Configuration File + desc: Detect attempt to modify shell configuration files + condition: > + open_write and + (fd.filename in (shell_config_filenames) or + fd.name in (shell_config_files) or + fd.directory in (shell_config_directories)) and + not proc.name in (shell_binaries) + output: > + a shell configuration file has been modified (user=%user.name command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tag: [file, mitre_persistence] + + # This rule is not enabled by default, as there are many legitimate + # readers of shell config files. If you want to enable it, modify the + # following macro. 
+ + - macro: consider_shell_config_reads + condition: (never_true) + + - rule: Read Shell Configuration File + desc: Detect attempts to read shell configuration files by non-shell programs + condition: > + open_read and + consider_shell_config_reads and + (fd.filename in (shell_config_filenames) or + fd.name in (shell_config_files) or + fd.directory in (shell_config_directories)) and + (not proc.name in (shell_binaries)) + output: > + a shell configuration file was read by a non-shell program (user=%user.name command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tag: [file, mitre_discovery] + + - macro: consider_all_cron_jobs + condition: (never_true) + + - rule: Schedule Cron Jobs + desc: Detect cron jobs scheduled + condition: > + consider_all_cron_jobs and + ((open_write and fd.name startswith /etc/cron) or + (spawned_process and proc.name = "crontab")) + output: > + Cron jobs were scheduled to run (user=%user.name command=%proc.cmdline + file=%fd.name container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: + NOTICE + tag: [file, mitre_persistence] + + # Use this to test whether the event occurred within a container. + + # When displaying container information in the output field, use + # %container.info, without any leading term (file=%fd.name + # %container.info user=%user.name, and not file=%fd.name + # container=%container.info user=%user.name). The output will change + # based on the context and whether or not -pk/-pm/-pc was specified on + # the command line. + - macro: container + condition: (container.id != host) + + - macro: container_started + condition: > + ((evt.type = container or + (evt.type=execve and evt.dir=< and proc.vpid=1)) and + container.image.repository != incomplete) + + - macro: interactive + condition: > + ((proc.aname=sshd and proc.name != sshd) or + proc.name=systemd-logind or proc.name=login) + + - list: cron_binaries + items: [anacron, cron, crond, crontab] + + # https://github.com/liske/needrestart + - list: needrestart_binaries + items: [needrestart, 10-dpkg, 20-rpm, 30-pacman] + + # Possible scripts run by sshkit + - list: sshkit_script_binaries + items: [10_etc_sudoers., 10_passwd_group] + + - list: plesk_binaries + items: [sw-engine, sw-engine-fpm, sw-engine-kv, filemng, f2bmng] + + # System users that should never log into a system. Consider adding your own + # service users (e.g. 'apache' or 'mysqld') here. + - macro: system_users + condition: user.name in (bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data) + + # These macros will be removed soon. Only keeping them to maintain + # compatiblity with some widely used rules files. 
+ # Begin Deprecated + - macro: parent_ansible_running_python + condition: (proc.pname in (python, pypy, python3) and proc.pcmdline contains ansible) + + - macro: parent_bro_running_python + condition: (proc.pname=python and proc.cmdline contains /usr/share/broctl) + + - macro: parent_python_running_denyhosts + condition: > + (proc.cmdline startswith "denyhosts.py /usr/bin/denyhosts.py" or + (proc.pname=python and + (proc.pcmdline contains /usr/sbin/denyhosts or + proc.pcmdline contains /usr/local/bin/denyhosts.py))) + + - macro: parent_python_running_sdchecks + condition: > + (proc.pname in (python, python2.7) and + (proc.pcmdline contains /opt/draios/bin/sdchecks)) + + - macro: python_running_sdchecks + condition: > + (proc.name in (python, python2.7) and + (proc.cmdline contains /opt/draios/bin/sdchecks)) + + - macro: parent_linux_image_upgrade_script + condition: proc.pname startswith linux-image- + + - macro: parent_java_running_echo + condition: (proc.pname=java and proc.cmdline startswith "sh -c echo") + + - macro: parent_scripting_running_builds + condition: > + (proc.pname in (php,php5-fpm,php-fpm7.1,python,ruby,ruby2.3,ruby2.1,node,conda) and ( + proc.cmdline startswith "sh -c git" or + proc.cmdline startswith "sh -c date" or + proc.cmdline startswith "sh -c /usr/bin/g++" or + proc.cmdline startswith "sh -c /usr/bin/gcc" or + proc.cmdline startswith "sh -c gcc" or + proc.cmdline startswith "sh -c if type gcc" or + proc.cmdline startswith "sh -c cd '/var/www/edi/';LC_ALL=en_US.UTF-8 git" or + proc.cmdline startswith "sh -c /var/www/edi/bin/sftp.sh" or + proc.cmdline startswith "sh -c /usr/src/app/crxlsx/bin/linux/crxlsx" or + proc.cmdline startswith "sh -c make parent" or + proc.cmdline startswith "node /jenkins/tools" or + proc.cmdline startswith "sh -c '/usr/bin/node'" or + proc.cmdline startswith "sh -c stty -a |" or + proc.pcmdline startswith "node /opt/nodejs/bin/yarn" or + proc.pcmdline startswith "node /usr/local/bin/yarn" or + proc.pcmdline startswith "node /root/.config/yarn" or + proc.pcmdline startswith "node /opt/yarn/bin/yarn.js")) + + + - macro: httpd_writing_ssl_conf + condition: > + (proc.pname=run-httpd and + (proc.cmdline startswith "sed -ri" or proc.cmdline startswith "sed -i") and + (fd.name startswith /etc/httpd/conf.d/ or fd.name startswith /etc/httpd/conf)) + + - macro: userhelper_writing_etc_security + condition: (proc.name=userhelper and fd.name startswith /etc/security) + + - macro: parent_Xvfb_running_xkbcomp + condition: (proc.pname=Xvfb and proc.cmdline startswith 'sh -c "/usr/bin/xkbcomp"') + + - macro: parent_nginx_running_serf + condition: (proc.pname=nginx and proc.cmdline startswith "sh -c serf") + + - macro: parent_node_running_npm + condition: (proc.pcmdline startswith "node /usr/local/bin/npm" or + proc.pcmdline startswith "node /usr/local/nodejs/bin/npm" or + proc.pcmdline startswith "node /opt/rh/rh-nodejs6/root/usr/bin/npm") + + - macro: parent_java_running_sbt + condition: (proc.pname=java and proc.pcmdline contains sbt-launch.jar) + + - list: known_container_shell_spawn_cmdlines + items: [] + + - list: known_shell_spawn_binaries + items: [] + + ## End Deprecated + + - macro: ansible_running_python + condition: (proc.name in (python, pypy, python3) and proc.cmdline contains ansible) + + - macro: python_running_chef + condition: (proc.name=python and (proc.cmdline contains yum-dump.py or proc.cmdline="python /usr/bin/chef-monitor.py")) + + - macro: python_running_denyhosts + condition: > + (proc.name=python and + (proc.cmdline contains 
/usr/sbin/denyhosts or + proc.cmdline contains /usr/local/bin/denyhosts.py)) + + # Qualys seems to run a variety of shell subprocesses, at various + # levels. This checks at a few levels without the cost of a full + # proc.aname, which traverses the full parent heirarchy. + - macro: run_by_qualys + condition: > + (proc.pname=qualys-cloud-ag or + proc.aname[2]=qualys-cloud-ag or + proc.aname[3]=qualys-cloud-ag or + proc.aname[4]=qualys-cloud-ag) + + - macro: run_by_sumologic_securefiles + condition: > + ((proc.cmdline="usermod -a -G sumologic_collector" or + proc.cmdline="groupadd sumologic_collector") and + (proc.pname=secureFiles.sh and proc.aname[2]=java)) + + - macro: run_by_yum + condition: ((proc.pname=sh and proc.aname[2]=yum) or + (proc.aname[2]=sh and proc.aname[3]=yum)) + + - macro: run_by_ms_oms + condition: > + (proc.aname[3] startswith omsagent- or + proc.aname[3] startswith scx-) + + - macro: run_by_google_accounts_daemon + condition: > + (proc.aname[1] startswith google_accounts or + proc.aname[2] startswith google_accounts or + proc.aname[3] startswith google_accounts) + + # Chef is similar. + - macro: run_by_chef + condition: (proc.aname[2]=chef_command_wr or proc.aname[3]=chef_command_wr or + proc.aname[2]=chef-client or proc.aname[3]=chef-client or + proc.name=chef-client) + + - macro: run_by_adclient + condition: (proc.aname[2]=adclient or proc.aname[3]=adclient or proc.aname[4]=adclient) + + - macro: run_by_centrify + condition: (proc.aname[2]=centrify or proc.aname[3]=centrify or proc.aname[4]=centrify) + + - macro: run_by_puppet + condition: (proc.aname[2]=puppet or proc.aname[3]=puppet) + + # Also handles running semi-indirectly via scl + - macro: run_by_foreman + condition: > + (user.name=foreman and + (proc.pname in (rake, ruby, scl) and proc.aname[5] in (tfm-rake,tfm-ruby)) or + (proc.pname=scl and proc.aname[2] in (tfm-rake,tfm-ruby))) + + - macro: java_running_sdjagent + condition: proc.name=java and proc.cmdline contains sdjagent.jar + + - macro: kubelet_running_loopback + condition: (proc.pname=kubelet and proc.name=loopback) + + - macro: python_mesos_marathon_scripting + condition: (proc.pcmdline startswith "python3 /marathon-lb/marathon_lb.py") + + - macro: splunk_running_forwarder + condition: (proc.pname=splunkd and proc.cmdline startswith "sh -c /opt/splunkforwarder") + + - macro: parent_supervise_running_multilog + condition: (proc.name=multilog and proc.pname=supervise) + + - macro: supervise_writing_status + condition: (proc.name in (supervise,svc) and fd.name startswith "/etc/sb/") + + - macro: pki_realm_writing_realms + condition: (proc.cmdline startswith "bash /usr/local/lib/pki/pki-realm" and fd.name startswith /etc/pki/realms) + + - macro: htpasswd_writing_passwd + condition: (proc.name=htpasswd and fd.name=/etc/nginx/.htpasswd) + + - macro: lvprogs_writing_conf + condition: > + (proc.name in (dmeventd,lvcreate,pvscan) and + (fd.name startswith /etc/lvm/archive or + fd.name startswith /etc/lvm/backup or + fd.name startswith /etc/lvm/cache)) + + - macro: ovsdb_writing_openvswitch + condition: (proc.name=ovsdb-server and fd.directory=/etc/openvswitch) + + - macro: perl_running_plesk + condition: (proc.cmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager" or + proc.pcmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager") + + - macro: perl_running_updmap + condition: (proc.cmdline startswith "perl /usr/bin/updmap") + + - macro: perl_running_centrifydc + condition: (proc.cmdline startswith "perl /usr/share/centrifydc") + + - macro: 
runuser_reading_pam + condition: (proc.name=runuser and fd.directory=/etc/pam.d) + + - macro: parent_ucf_writing_conf + condition: (proc.pname=ucf and proc.aname[2]=frontend) + + - macro: consul_template_writing_conf + condition: > + ((proc.name=consul-template and fd.name startswith /etc/haproxy) or + (proc.name=reload.sh and proc.aname[2]=consul-template and fd.name startswith /etc/ssl)) + + - macro: countly_writing_nginx_conf + condition: (proc.cmdline startswith "nodejs /opt/countly/bin" and fd.name startswith /etc/nginx) + + - list: ms_oms_binaries + items: [omi.postinst, omsconfig.posti, scx.postinst, omsadmin.sh, omiagent] + + - macro: ms_oms_writing_conf + condition: > + ((proc.name in (omiagent,omsagent,in_heartbeat_r*,omsadmin.sh,PerformInventor) + or proc.pname in (ms_oms_binaries) + or proc.aname[2] in (ms_oms_binaries)) + and (fd.name startswith /etc/opt/omi or fd.name startswith /etc/opt/microsoft/omsagent)) + + - macro: ms_scx_writing_conf + condition: (proc.name in (GetLinuxOS.sh) and fd.name startswith /etc/opt/microsoft/scx) + + - macro: azure_scripts_writing_conf + condition: (proc.pname startswith "bash /var/lib/waagent/" and fd.name startswith /etc/azure) + + - macro: azure_networkwatcher_writing_conf + condition: (proc.name in (NetworkWatcherA) and fd.name=/etc/init.d/AzureNetworkWatcherAgent) + + - macro: couchdb_writing_conf + condition: (proc.name=beam.smp and proc.cmdline contains couchdb and fd.name startswith /etc/couchdb) + + - macro: update_texmf_writing_conf + condition: (proc.name=update-texmf and fd.name startswith /etc/texmf) + + - macro: slapadd_writing_conf + condition: (proc.name=slapadd and fd.name startswith /etc/ldap) + + - macro: openldap_writing_conf + condition: (proc.pname=run-openldap.sh and fd.name startswith /etc/openldap) + + - macro: ucpagent_writing_conf + condition: (proc.name=apiserver and container.image.repository=docker/ucp-agent and fd.name=/etc/authorization_config.cfg) + + - macro: iscsi_writing_conf + condition: (proc.name=iscsiadm and fd.name startswith /etc/iscsi) + + - macro: istio_writing_conf + condition: (proc.name=pilot-agent and fd.name startswith /etc/istio) + + - macro: symantec_writing_conf + condition: > + ((proc.name=symcfgd and fd.name startswith /etc/symantec) or + (proc.name=navdefutil and fd.name=/etc/symc-defutils.conf)) + + - macro: liveupdate_writing_conf + condition: (proc.cmdline startswith "java LiveUpdate" and fd.name in (/etc/liveupdate.conf, /etc/Product.Catalog.JavaLiveUpdate)) + + - macro: rancher_agent + condition: (proc.name=agent and container.image.repository contains "rancher/agent") + + - macro: rancher_network_manager + condition: (proc.name=rancher-bridge and container.image.repository contains "rancher/network-manager") + + - macro: sosreport_writing_files + condition: > + (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport and + (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) + + - macro: pkgmgmt_progs_writing_pki + condition: > + (proc.name=urlgrabber-ext- and proc.pname in (yum, yum-cron, repoquery) and + (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) + + - macro: update_ca_trust_writing_pki + condition: (proc.pname=update-ca-trust and proc.name=trust and fd.name startswith /etc/pki) + + - macro: brandbot_writing_os_release + condition: proc.name=brandbot and fd.name=/etc/os-release + + - macro: selinux_writing_conf + condition: (proc.name in (semodule,genhomedircon,sefcontext_comp) and fd.name startswith /etc/selinux) + + - list: 
veritas_binaries + items: [vxconfigd, sfcache, vxclustadm, vxdctl, vxprint, vxdmpadm, vxdisk, vxdg, vxassist, vxtune] + + - macro: veritas_driver_script + condition: (proc.cmdline startswith "perl /opt/VRTSsfmh/bin/mh_driver.pl") + + - macro: veritas_progs + condition: (proc.name in (veritas_binaries) or veritas_driver_script) + + - macro: veritas_writing_config + condition: (veritas_progs and (fd.name startswith /etc/vx or fd.name startswith /etc/opt/VRTS or fd.name startswith /etc/vom)) + + - macro: nginx_writing_conf + condition: (proc.name in (nginx,nginx-ingress-c,nginx-ingress) and (fd.name startswith /etc/nginx or fd.name startswith /etc/ingress-controller)) + + - macro: nginx_writing_certs + condition: > + (((proc.name=openssl and proc.pname=nginx-launch.sh) or proc.name=nginx-launch.sh) and fd.name startswith /etc/nginx/certs) + + - macro: chef_client_writing_conf + condition: (proc.pcmdline startswith "chef-client /opt/gitlab" and fd.name startswith /etc/gitlab) + + - macro: centrify_writing_krb + condition: (proc.name in (adjoin,addns) and fd.name startswith /etc/krb5) + + - macro: cockpit_writing_conf + condition: > + ((proc.pname=cockpit-kube-la or proc.aname[2]=cockpit-kube-la) + and fd.name startswith /etc/cockpit) + + - macro: ipsec_writing_conf + condition: (proc.name=start-ipsec.sh and fd.directory=/etc/ipsec) + + - macro: exe_running_docker_save + condition: (proc.cmdline startswith "exe /var/lib/docker" and proc.pname in (dockerd, docker)) + + # Ideally we'd have a length check here as well but sysdig + # filterchecks don't have operators like len() + - macro: sed_temporary_file + condition: (proc.name=sed and fd.name startswith "/etc/sed") + + - macro: python_running_get_pip + condition: (proc.cmdline startswith "python get-pip.py") + + - macro: python_running_ms_oms + condition: (proc.cmdline startswith "python /var/lib/waagent/") + + - macro: gugent_writing_guestagent_log + condition: (proc.name=gugent and fd.name=GuestAgent.log) + + - macro: dse_writing_tmp + condition: (proc.name=dse-entrypoint and fd.name=/root/tmp__) + + - macro: zap_writing_state + condition: (proc.name=java and proc.cmdline contains "jar /zap" and fd.name startswith /root/.ZAP) + + - macro: airflow_writing_state + condition: (proc.name=airflow and fd.name startswith /root/airflow) + + - macro: rpm_writing_root_rpmdb + condition: (proc.name=rpm and fd.directory=/root/.rpmdb) + + - macro: maven_writing_groovy + condition: (proc.name=java and proc.cmdline contains "classpath /usr/local/apache-maven" and fd.name startswith /root/.groovy) + + - macro: chef_writing_conf + condition: (proc.name=chef-client and fd.name startswith /root/.chef) + + - macro: kubectl_writing_state + condition: (proc.name in (kubectl,oc) and fd.name startswith /root/.kube) + + - macro: java_running_cassandra + condition: (proc.name=java and proc.cmdline contains "cassandra.jar") + + - macro: cassandra_writing_state + condition: (java_running_cassandra and fd.directory=/root/.cassandra) + + # Istio + - macro: galley_writing_state + condition: (proc.name=galley and fd.name in (known_istio_files)) + + - list: known_istio_files + items: [/healthready, /healthliveness] + + - macro: calico_writing_state + condition: (proc.name=kube-controller and fd.name startswith /status.json and k8s.pod.name startswith calico) + + - list: repository_files + items: [sources.list] + + - list: repository_directories + items: [/etc/apt/sources.list.d, /etc/yum.repos.d] + + - macro: access_repositories + condition: (fd.filename in 
(repository_files) or fd.directory in (repository_directories)) + + - rule: Update Package Repository + desc: Detect package repositories get updated + condition: > + open_write and access_repositories and not package_mgmt_procs + output: > + Repository files get updated (user=%user.name command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + NOTICE + tags: [filesystem, mitre_persistence] + + - rule: Write below binary dir + desc: an attempt to write to any file below a set of binary directories + condition: > + bin_dir and evt.dir = < and open_write + and not package_mgmt_procs + and not exe_running_docker_save + and not python_running_get_pip + and not python_running_ms_oms + output: > + File below a known binary directory opened for writing (user=%user.name + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + + # If you'd like to generally monitor a wider set of directories on top + # of the ones covered by the rule Write below binary dir, you can use + # the following rule and lists. + + - list: monitored_directories + items: [/boot, /lib, /lib64, /usr/lib, /usr/local/lib, /usr/local/sbin, /usr/local/bin, /root/.ssh, /etc/cardserver] + + # Until https://github.com/draios/sysdig/pull/1153, which fixes + # https://github.com/draios/sysdig/issues/1152, is widely available, + # we can't use glob operators to match pathnames. Until then, we do a + # looser check to match ssh directories. + # When fixed, we will use "fd.name glob '/home/*/.ssh/*'" + - macro: user_ssh_directory + condition: (fd.name startswith '/home' and fd.name contains '.ssh') + + # google_accounts_(daemon) + - macro: google_accounts_daemon_writing_ssh + condition: (proc.name=google_accounts and user_ssh_directory) + + - macro: cloud_init_writing_ssh + condition: (proc.name=cloud-init and user_ssh_directory) + + - macro: mkinitramfs_writing_boot + condition: (proc.pname in (mkinitramfs, update-initramf) and fd.directory=/boot) + + - macro: monitored_dir + condition: > + (fd.directory in (monitored_directories) + or user_ssh_directory) + and not mkinitramfs_writing_boot + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs writing below monitored directories. + # + # Its default value is an expression that always is false, which + # becomes true when the "not ..." in the rule is applied. 
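+    # For illustration only, a local/user rules file could redefine it along
+    # these lines (the program name below is a placeholder, not part of these
+    # rules):
+    #
+    # - macro: user_known_write_monitored_dir_conditions
+    #   condition: (proc.name=my-provisioner and fd.directory=/usr/local/bin)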
+ - macro: user_known_write_monitored_dir_conditions + condition: (never_true) + + - rule: Write below monitored dir + desc: an attempt to write to any file below a set of binary directories + condition: > + evt.dir = < and open_write and monitored_dir + and not package_mgmt_procs + and not coreos_write_ssh_dir + and not exe_running_docker_save + and not python_running_get_pip + and not python_running_ms_oms + and not google_accounts_daemon_writing_ssh + and not cloud_init_writing_ssh + and not user_known_write_monitored_dir_conditions + output: > + File below a monitored directory opened for writing (user=%user.name + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + + # This rule is disabled by default as many system management tools + # like ansible, etc can read these files/paths. Enable it using this macro. + + - macro: consider_ssh_reads + condition: (never_true) + + - rule: Read ssh information + desc: Any attempt to read files below ssh directories by non-ssh programs + condition: > + (consider_ssh_reads and + (open_read or open_directory) and + (user_ssh_directory or fd.name startswith /root/.ssh) and + (not proc.name in (ssh_binaries))) + output: > + ssh-related file/directory read by non-ssh program (user=%user.name + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_discovery] + + - list: safe_etc_dirs + items: [/etc/cassandra, /etc/ssl/certs/java, /etc/logstash, /etc/nginx/conf.d, /etc/container_environment, /etc/hrmconfig, /etc/fluent/configs.d] + + - macro: fluentd_writing_conf_files + condition: (proc.name=start-fluentd and fd.name in (/etc/fluent/fluent.conf, /etc/td-agent/td-agent.conf)) + + - macro: qualys_writing_conf_files + condition: (proc.name=qualys-cloud-ag and fd.name=/etc/qualys/cloud-agent/qagent-log.conf) + + - macro: git_writing_nssdb + condition: (proc.name=git-remote-http and fd.directory=/etc/pki/nssdb) + + - macro: plesk_writing_keys + condition: (proc.name in (plesk_binaries) and fd.name startswith /etc/sw/keys) + + - macro: plesk_install_writing_apache_conf + condition: (proc.cmdline startswith "bash -hB /usr/lib/plesk-9.0/services/webserver.apache configure" + and fd.name="/etc/apache2/apache2.conf.tmp") + + - macro: plesk_running_mktemp + condition: (proc.name=mktemp and proc.aname[3] in (plesk_binaries)) + + - macro: networkmanager_writing_resolv_conf + condition: proc.aname[2]=nm-dispatcher and fd.name=/etc/resolv.conf + + - macro: add_shell_writing_shells_tmp + condition: (proc.name=add-shell and fd.name=/etc/shells.tmp) + + - macro: duply_writing_exclude_files + condition: (proc.name=touch and proc.pcmdline startswith "bash /usr/bin/duply" and fd.name startswith "/etc/duply") + + - macro: xmlcatalog_writing_files + condition: (proc.name=update-xmlcatal and fd.directory=/etc/xml) + + - macro: datadog_writing_conf + condition: ((proc.cmdline startswith "python /opt/datadog-agent" or + proc.cmdline startswith "entrypoint.sh /entrypoint.sh datadog start" or + proc.cmdline startswith "agent.py /opt/datadog-agent") + and fd.name startswith "/etc/dd-agent") + + - macro: rancher_writing_conf + condition: ((proc.name in (healthcheck, lb-controller, rancher-dns)) and + (container.image.repository contains "rancher/healthcheck" or + 
container.image.repository contains "rancher/lb-service-haproxy" or + container.image.repository contains "rancher/dns") and + (fd.name startswith "/etc/haproxy" or fd.name startswith "/etc/rancher-dns")) + + - macro: rancher_writing_root + condition: (proc.name=rancher-metadat and + (container.image.repository contains "rancher/metadata" or container.image.repository contains "rancher/lb-service-haproxy") and + fd.name startswith "/answers.json") + + - macro: checkpoint_writing_state + condition: (proc.name=checkpoint and + container.image.repository contains "coreos/pod-checkpointer" and + fd.name startswith "/etc/kubernetes") + + - macro: jboss_in_container_writing_passwd + condition: > + ((proc.cmdline="run-java.sh /opt/jboss/container/java/run/run-java.sh" + or proc.cmdline="run-java.sh /opt/run-java/run-java.sh") + and container + and fd.name=/etc/passwd) + + - macro: curl_writing_pki_db + condition: (proc.name=curl and fd.directory=/etc/pki/nssdb) + + - macro: haproxy_writing_conf + condition: ((proc.name in (update-haproxy-,haproxy_reload.) or proc.pname in (update-haproxy-,haproxy_reload,haproxy_reload.)) + and (fd.name=/etc/openvpn/client.map or fd.name startswith /etc/haproxy)) + + - macro: java_writing_conf + condition: (proc.name=java and fd.name=/etc/.java/.systemPrefs/.system.lock) + + - macro: rabbitmq_writing_conf + condition: (proc.name=rabbitmq-server and fd.directory=/etc/rabbitmq) + + - macro: rook_writing_conf + condition: (proc.name=toolbox.sh and container.image.repository=rook/toolbox + and fd.directory=/etc/ceph) + + - macro: httpd_writing_conf_logs + condition: (proc.name=httpd and fd.name startswith /etc/httpd/) + + - macro: mysql_writing_conf + condition: > + ((proc.name in (start-mysql.sh, run-mysqld) or proc.pname=start-mysql.sh) and + (fd.name startswith /etc/mysql or fd.directory=/etc/my.cnf.d)) + + - macro: redis_writing_conf + condition: > + (proc.name in (run-redis, redis-launcher.) and fd.name=/etc/redis.conf or fd.name startswith /etc/redis) + + - macro: openvpn_writing_conf + condition: (proc.name in (openvpn,openvpn-entrypo) and fd.name startswith /etc/openvpn) + + - macro: php_handlers_writing_conf + condition: (proc.name=php_handlers_co and fd.name=/etc/psa/php_versions.json) + + - macro: sed_writing_temp_file + condition: > + ((proc.aname[3]=cron_start.sh and fd.name startswith /etc/security/sed) or + (proc.name=sed and (fd.name startswith /etc/apt/sources.list.d/sed or + fd.name startswith /etc/apt/sed or + fd.name startswith /etc/apt/apt.conf.d/sed))) + + - macro: cron_start_writing_pam_env + condition: (proc.cmdline="bash /usr/sbin/start-cron" and fd.name=/etc/security/pam_env.conf) + + # In some cases dpkg-reconfigur runs commands that modify /etc. Not + # putting the full set of package management programs yet. 
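+    # If other dpkg helpers turn out to modify /etc in a given environment, a
+    # local/user rules file could widen this macro, e.g. (the extra name is
+    # illustrative only):
+    #
+    # - macro: dpkg_scripting
+    #   condition: (proc.aname[2] in (dpkg-reconfigur, dpkg-preconfigu, dpkg-trigger))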
+ - macro: dpkg_scripting + condition: (proc.aname[2] in (dpkg-reconfigur, dpkg-preconfigu)) + + - macro: ufw_writing_conf + condition: (proc.name=ufw and fd.directory=/etc/ufw) + + - macro: calico_writing_conf + condition: > + (proc.name = calico-node and fd.name startswith /etc/calico) + + - macro: prometheus_conf_writing_conf + condition: (proc.name=prometheus-conf and fd.name startswith /etc/prometheus/config_out) + + - macro: openshift_writing_conf + condition: (proc.name=oc and fd.name startswith /etc/origin/node) + + - macro: keepalived_writing_conf + condition: (proc.name=keepalived and fd.name=/etc/keepalived/keepalived.conf) + + - macro: etcd_manager_updating_dns + condition: (container and proc.name=etcd-manager and fd.name=/etc/hosts) + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs writing below specific directories below + # /etc. fluentd_writing_conf_files is a good example to follow, as it + # specifies both the program doing the writing as well as the specific + # files it is allowed to modify. + # + # In this file, it just takes one of the programs in the base macro + # and repeats it. + + - macro: user_known_write_etc_conditions + condition: proc.name=confd + + # This is a placeholder for user to extend the whitelist for write below etc rule + - macro: user_known_write_below_etc_activities + condition: (never_true) + + - macro: write_etc_common + condition: > + etc_dir and evt.dir = < and open_write + and proc_name_exists + and not proc.name in (passwd_binaries, shadowutils_binaries, sysdigcloud_binaries, + package_mgmt_binaries, ssl_mgmt_binaries, dhcp_binaries, + dev_creation_binaries, shell_mgmt_binaries, + mail_config_binaries, + sshkit_script_binaries, + ldconfig.real, ldconfig, confd, gpg, insserv, + apparmor_parser, update-mime, tzdata.config, tzdata.postinst, + systemd, systemd-machine, systemd-sysuser, + debconf-show, rollerd, bind9.postinst, sv, + gen_resolvconf., update-ca-certi, certbot, runsv, + qualys-cloud-ag, locales.postins, nomachine_binaries, + adclient, certutil, crlutil, pam-auth-update, parallels_insta, + openshift-launc, update-rc.d, puppet) + and not proc.pname in (sysdigcloud_binaries, mail_config_binaries, hddtemp.postins, sshkit_script_binaries, locales.postins, deb_binaries, dhcp_binaries) + and not fd.name pmatch (safe_etc_dirs) + and not fd.name in (/etc/container_environment.sh, /etc/container_environment.json, /etc/motd, /etc/motd.svc) + and not sed_temporary_file + and not exe_running_docker_save + and not ansible_running_python + and not python_running_denyhosts + and not fluentd_writing_conf_files + and not user_known_write_etc_conditions + and not run_by_centrify + and not run_by_adclient + and not qualys_writing_conf_files + and not git_writing_nssdb + and not plesk_writing_keys + and not plesk_install_writing_apache_conf + and not plesk_running_mktemp + and not networkmanager_writing_resolv_conf + and not run_by_chef + and not add_shell_writing_shells_tmp + and not duply_writing_exclude_files + and not xmlcatalog_writing_files + and not parent_supervise_running_multilog + and not supervise_writing_status + and not pki_realm_writing_realms + and not htpasswd_writing_passwd + and not lvprogs_writing_conf + and not ovsdb_writing_openvswitch + and not datadog_writing_conf + and not curl_writing_pki_db + and not haproxy_writing_conf + and not java_writing_conf + and not dpkg_scripting + and not parent_ucf_writing_conf + and not 
rabbitmq_writing_conf + and not rook_writing_conf + and not php_handlers_writing_conf + and not sed_writing_temp_file + and not cron_start_writing_pam_env + and not httpd_writing_conf_logs + and not mysql_writing_conf + and not openvpn_writing_conf + and not consul_template_writing_conf + and not countly_writing_nginx_conf + and not ms_oms_writing_conf + and not ms_scx_writing_conf + and not azure_scripts_writing_conf + and not azure_networkwatcher_writing_conf + and not couchdb_writing_conf + and not update_texmf_writing_conf + and not slapadd_writing_conf + and not symantec_writing_conf + and not liveupdate_writing_conf + and not sosreport_writing_files + and not selinux_writing_conf + and not veritas_writing_config + and not nginx_writing_conf + and not nginx_writing_certs + and not chef_client_writing_conf + and not centrify_writing_krb + and not cockpit_writing_conf + and not ipsec_writing_conf + and not httpd_writing_ssl_conf + and not userhelper_writing_etc_security + and not pkgmgmt_progs_writing_pki + and not update_ca_trust_writing_pki + and not brandbot_writing_os_release + and not redis_writing_conf + and not openldap_writing_conf + and not ucpagent_writing_conf + and not iscsi_writing_conf + and not istio_writing_conf + and not ufw_writing_conf + and not calico_writing_conf + and not prometheus_conf_writing_conf + and not openshift_writing_conf + and not keepalived_writing_conf + and not rancher_writing_conf + and not checkpoint_writing_state + and not jboss_in_container_writing_passwd + and not etcd_manager_updating_dns + and not user_known_write_below_etc_activities + + - rule: Write below etc + desc: an attempt to write to any file below /etc + condition: write_etc_common + output: "File below /etc opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname pcmdline=%proc.pcmdline file=%fd.name program=%proc.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, mitre_persistence] + + - list: known_root_files + items: [/root/.monit.state, /root/.auth_tokens, /root/.bash_history, /root/.ash_history, /root/.aws/credentials, + /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, /root/.gitconfig.lock, /root/.babel.json, /root/.localstack, + /root/.node_repl_history, /root/.mongorc.js, /root/.dbshell, /root/.augeas/history, /root/.rnd, /root/.wget-hsts, /health, /exec.fifo] + + - list: known_root_directories + items: [/root/.oracle_jre_usage, /root/.ssh, /root/.subversion, /root/.nami] + + - macro: known_root_conditions + condition: (fd.name startswith /root/orcexec. 
+ or fd.name startswith /root/.m2 + or fd.name startswith /root/.npm + or fd.name startswith /root/.pki + or fd.name startswith /root/.ivy2 + or fd.name startswith /root/.config/Cypress + or fd.name startswith /root/.config/pulse + or fd.name startswith /root/.config/configstore + or fd.name startswith /root/jenkins/workspace + or fd.name startswith /root/.jenkins + or fd.name startswith /root/.cache + or fd.name startswith /root/.sbt + or fd.name startswith /root/.java + or fd.name startswith /root/.glide + or fd.name startswith /root/.sonar + or fd.name startswith /root/.v8flag + or fd.name startswith /root/infaagent + or fd.name startswith /root/.local/lib/python + or fd.name startswith /root/.pm2 + or fd.name startswith /root/.gnupg + or fd.name startswith /root/.pgpass + or fd.name startswith /root/.theano + or fd.name startswith /root/.gradle + or fd.name startswith /root/.android + or fd.name startswith /root/.ansible + or fd.name startswith /root/.crashlytics + or fd.name startswith /root/.dbus + or fd.name startswith /root/.composer + or fd.name startswith /root/.gconf + or fd.name startswith /root/.nv + or fd.name startswith /root/.local/share/jupyter + or fd.name startswith /root/oradiag_root + or fd.name startswith /root/workspace + or fd.name startswith /root/jvm + or fd.name startswith /root/.node-gyp) + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs writing below specific directories below + # / or /root. + # + # In this file, it just takes one of the condition in the base macro + # and repeats it. + - macro: user_known_write_root_conditions + condition: fd.name=/root/.bash_history + + # This is a placeholder for user to extend the whitelist for write below root rule + - macro: user_known_write_below_root_activities + condition: (never_true) + + - rule: Write below root + desc: an attempt to write to any file directly below / or /root + condition: > + root_dir and evt.dir = < and open_write + and not fd.name in (known_root_files) + and not fd.directory in (known_root_directories) + and not exe_running_docker_save + and not gugent_writing_guestagent_log + and not dse_writing_tmp + and not zap_writing_state + and not airflow_writing_state + and not rpm_writing_root_rpmdb + and not maven_writing_groovy + and not chef_writing_conf + and not kubectl_writing_state + and not cassandra_writing_state + and not galley_writing_state + and not calico_writing_state + and not rancher_writing_root + and not known_root_conditions + and not user_known_write_root_conditions + and not user_known_write_below_root_activities + output: "File below / or /root opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname file=%fd.name program=%proc.name container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, mitre_persistence] + + - macro: cmp_cp_by_passwd + condition: proc.name in (cmp, cp) and proc.pname in (passwd, run-parts) + + - rule: Read sensitive file trusted after startup + desc: > + an attempt to read any sensitive file (e.g. files containing user/password/authentication + information) by a trusted program after startup. Trusted programs might read these files + at startup to load initial state, but not afterwards. 
+ condition: sensitive_files and open_read and server_procs and not proc_is_new and proc.name!="sshd" + output: > + Sensitive file opened for reading by trusted program after startup (user=%user.name + command=%proc.cmdline parent=%proc.pname file=%fd.name parent=%proc.pname gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: WARNING + tags: [filesystem, mitre_credential_access] + + - list: read_sensitive_file_binaries + items: [ + iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd, + vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update, + pam-auth-update, pam-config, /usr/sbin/spamd, polkit-agent-he, lsattr, file, sosreport, + scxcimservera, adclient, rtvscand, cockpit-session, userhelper, ossec-syscheckd + ] + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs accessing sensitive files. + # fluentd_writing_conf_files is a good example to follow, as it + # specifies both the program doing the writing as well as the specific + # files it is allowed to modify. + # + # In this file, it just takes one of the macros in the base rule + # and repeats it. + + - macro: user_read_sensitive_file_conditions + condition: cmp_cp_by_passwd + + - rule: Read sensitive file untrusted + desc: > + an attempt to read any sensitive file (e.g. files containing user/password/authentication + information). Exceptions are made for known trusted programs. + condition: > + sensitive_files and open_read + and proc_name_exists + and not proc.name in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries, + cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries, + vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries, + in.proftpd, mandb, salt-minion, postgres_mgmt_binaries) + and not cmp_cp_by_passwd + and not ansible_running_python + and not proc.cmdline contains /usr/bin/mandb + and not run_by_qualys + and not run_by_chef + and not run_by_google_accounts_daemon + and not user_read_sensitive_file_conditions + and not perl_running_plesk + and not perl_running_updmap + and not veritas_driver_script + and not perl_running_centrifydc + and not runuser_reading_pam + output: > + Sensitive file opened for reading by non-trusted program (user=%user.name program=%proc.name + command=%proc.cmdline file=%fd.name parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository) + priority: WARNING + tags: [filesystem, mitre_credential_access, mitre_discovery] + + # Only let rpm-related programs write to the rpm database + - rule: Write below rpm database + desc: an attempt to write to the rpm database by any non-rpm related program + condition: > + fd.name startswith /var/lib/rpm and open_write + and not rpm_procs + and not ansible_running_python + and not python_running_chef + and not exe_running_docker_save + output: "Rpm database opened for writing by a non-rpm program (command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, software_mgmt, mitre_persistence] + + - macro: postgres_running_wal_e + condition: (proc.pname=postgres and proc.cmdline startswith "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e") + + - macro: redis_running_prepost_scripts + condition: (proc.aname[2]=redis-server 
and (proc.cmdline contains "redis-server.post-up.d" or proc.cmdline contains "redis-server.pre-up.d")) + + - macro: rabbitmq_running_scripts + condition: > + (proc.pname=beam.smp and + (proc.cmdline startswith "sh -c exec ps" or + proc.cmdline startswith "sh -c exec inet_gethost" or + proc.cmdline= "sh -s unix:cmd" or + proc.cmdline= "sh -c exec /bin/sh -s unix:cmd 2>&1")) + + - macro: rabbitmqctl_running_scripts + condition: (proc.aname[2]=rabbitmqctl and proc.cmdline startswith "sh -c ") + + - macro: run_by_appdynamics + condition: (proc.pname=java and proc.pcmdline startswith "java -jar -Dappdynamics") + + - rule: DB program spawned process + desc: > + a database-server related program spawned a new process other than itself. + This shouldn\'t occur and is a follow on from some SQL injection attacks. + condition: > + proc.pname in (db_server_binaries) + and spawned_process + and not proc.name in (db_server_binaries) + and not postgres_running_wal_e + output: > + Database-related program spawned process other than itself (user=%user.name + program=%proc.cmdline parent=%proc.pname container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [process, database, mitre_execution] + + - rule: Modify binary dirs + desc: an attempt to modify any file below a set of binary directories. + condition: (bin_dir_rename) and modify and not package_mgmt_procs and not exe_running_docker_save + output: > + File below known binary directory renamed/removed (user=%user.name command=%proc.cmdline + pcmdline=%proc.pcmdline operation=%evt.type file=%fd.name %evt.args container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + + - rule: Mkdir binary dirs + desc: an attempt to create a directory below a set of binary directories. + condition: mkdir and bin_dir_mkdir and not package_mgmt_procs + output: > + Directory below known binary directory created (user=%user.name + command=%proc.cmdline directory=%evt.arg.path container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + + # This list allows for easy additions to the set of commands allowed + # to change thread namespace without having to copy and override the + # entire change thread namespace rule. + - list: user_known_change_thread_namespace_binaries + items: [] + + - macro: user_known_change_thread_namespace_activities + condition: (never_true) + + - list: network_plugin_binaries + items: [aws-cni, azure-vnet] + + - macro: calico_node + condition: (container.image.repository endswith calico/node and proc.name=calico-node) + + - macro: weaveworks_scope + condition: (container.image.repository endswith weaveworks/scope and proc.name=scope) + + - rule: Change thread namespace + desc: > + an attempt to change a program/thread\'s namespace (commonly done + as a part of creating a container) by calling setns. 
+ condition: > + evt.type = setns + and not proc.name in (docker_binaries, k8s_binaries, lxd_binaries, sysdigcloud_binaries, + sysdig, nsenter, calico, oci-umount, network_plugin_binaries) + and not proc.name in (user_known_change_thread_namespace_binaries) + and not proc.name startswith "runc" + and not proc.cmdline startswith "containerd" + and not proc.pname in (sysdigcloud_binaries) + and not python_running_sdchecks + and not java_running_sdjagent + and not kubelet_running_loopback + and not rancher_agent + and not rancher_network_manager + and not calico_node + and not weaveworks_scope + and not user_known_change_thread_namespace_activities + output: > + Namespace change (setns) by unexpected program (user=%user.name command=%proc.cmdline + parent=%proc.pname %container.info container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [process] + + # The binaries in this list and their descendents are *not* allowed + # spawn shells. This includes the binaries spawning shells directly as + # well as indirectly. For example, apache -> php/perl for + # mod_{php,perl} -> some shell is also not allowed, because the shell + # has apache as an ancestor. + + - list: protected_shell_spawning_binaries + items: [ + http_server_binaries, db_server_binaries, nosql_server_binaries, mail_binaries, + fluentd, flanneld, splunkd, consul, smbd, runsv, PM2 + ] + + - macro: parent_java_running_zookeeper + condition: (proc.pname=java and proc.pcmdline contains org.apache.zookeeper.server) + + - macro: parent_java_running_kafka + condition: (proc.pname=java and proc.pcmdline contains kafka.Kafka) + + - macro: parent_java_running_elasticsearch + condition: (proc.pname=java and proc.pcmdline contains org.elasticsearch.bootstrap.Elasticsearch) + + - macro: parent_java_running_activemq + condition: (proc.pname=java and proc.pcmdline contains activemq.jar) + + - macro: parent_java_running_cassandra + condition: (proc.pname=java and (proc.pcmdline contains "-Dcassandra.config.loader" or proc.pcmdline contains org.apache.cassandra.service.CassandraDaemon)) + + - macro: parent_java_running_jboss_wildfly + condition: (proc.pname=java and proc.pcmdline contains org.jboss) + + - macro: parent_java_running_glassfish + condition: (proc.pname=java and proc.pcmdline contains com.sun.enterprise.glassfish) + + - macro: parent_java_running_hadoop + condition: (proc.pname=java and proc.pcmdline contains org.apache.hadoop) + + - macro: parent_java_running_datastax + condition: (proc.pname=java and proc.pcmdline contains com.datastax) + + - macro: nginx_starting_nginx + condition: (proc.pname=nginx and proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf") + + - macro: nginx_running_aws_s3_cp + condition: (proc.pname=nginx and proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp") + + - macro: consul_running_net_scripts + condition: (proc.pname=consul and (proc.cmdline startswith "sh -c curl" or proc.cmdline startswith "sh -c nc")) + + - macro: consul_running_alert_checks + condition: (proc.pname=consul and proc.cmdline startswith "sh -c /bin/consul-alerts") + + - macro: serf_script + condition: (proc.cmdline startswith "sh -c serf") + + - macro: check_process_status + condition: (proc.cmdline startswith "sh -c kill -0 ") + + # In some cases, you may want to consider node processes run directly + # in containers as protected shell spawners. 
Examples include using + # pm2-docker or pm2 start some-app.js --no-daemon-mode as the direct + # entrypoint of the container, and when the node app is a long-lived + # server using something like express. + # + # However, there are other uses of node related to build pipelines for + # which node is not really a server but instead a general scripting + # tool. In these cases, shells are very likely and in these cases you + # don't want to consider node processes protected shell spawners. + # + # We have to choose one of these cases, so we consider node processes + # as unprotected by default. If you want to consider any node process + # run in a container as a protected shell spawner, override the below + # macro to remove the "never_true" clause, which allows it to take effect. + - macro: possibly_node_in_container + condition: (never_true and (proc.pname=node and proc.aname[3]=docker-containe)) + + # Similarly, you may want to consider any shell spawned by apache + # tomcat as suspect. The famous apache struts attack (CVE-2017-5638) + # could be exploited to do things like spawn shells. + # + # However, many applications *do* use tomcat to run arbitrary shells, + # as a part of build pipelines, etc. + # + # Like for node, we make this case opt-in. + - macro: possibly_parent_java_running_tomcat + condition: (never_true and proc.pname=java and proc.pcmdline contains org.apache.catalina.startup.Bootstrap) + + - macro: protected_shell_spawner + condition: > + (proc.aname in (protected_shell_spawning_binaries) + or parent_java_running_zookeeper + or parent_java_running_kafka + or parent_java_running_elasticsearch + or parent_java_running_activemq + or parent_java_running_cassandra + or parent_java_running_jboss_wildfly + or parent_java_running_glassfish + or parent_java_running_hadoop + or parent_java_running_datastax + or possibly_parent_java_running_tomcat + or possibly_node_in_container) + + - list: mesos_shell_binaries + items: [mesos-docker-ex, mesos-slave, mesos-health-ch] + + # Note that runsv is both in protected_shell_spawner and the + # exclusions by pname. This means that runsv can itself spawn shells + # (the ./run and ./finish scripts), but the processes runsv can not + # spawn shells. + - rule: Run shell untrusted + desc: an attempt to spawn a shell below a non-shell application. Specific applications are monitored. 
+ condition: > + spawned_process + and shell_procs + and proc.pname exists + and protected_shell_spawner + and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, + needrestart_binaries, + mesos_shell_binaries, + erl_child_setup, exechealthz, + PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, + lb-controller, nvidia-installe, runsv, statsite, erlexec) + and not proc.cmdline in (known_shell_spawn_cmdlines) + and not proc.aname in (unicorn_launche) + and not consul_running_net_scripts + and not consul_running_alert_checks + and not nginx_starting_nginx + and not nginx_running_aws_s3_cp + and not run_by_package_mgmt_binaries + and not serf_script + and not check_process_status + and not run_by_foreman + and not python_mesos_marathon_scripting + and not splunk_running_forwarder + and not postgres_running_wal_e + and not redis_running_prepost_scripts + and not rabbitmq_running_scripts + and not rabbitmqctl_running_scripts + and not run_by_appdynamics + and not user_shell_container_exclusions + output: > + Shell spawned by untrusted binary (user=%user.name shell=%proc.name parent=%proc.pname + cmdline=%proc.cmdline pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] + aname[4]=%proc.aname[4] aname[5]=%proc.aname[5] aname[6]=%proc.aname[6] aname[7]=%proc.aname[7] container_id=%container.id image=%container.image.repository) + priority: DEBUG + tags: [shell, mitre_execution] + + - macro: allowed_openshift_registry_root + condition: > + (container.image.repository startswith openshift3/ or + container.image.repository startswith registry.redhat.io/openshift3/ or + container.image.repository startswith registry.access.redhat.com/openshift3/) + + # Source: https://docs.openshift.com/enterprise/3.2/install_config/install/disconnected_install.html + - macro: openshift_image + condition: > + (allowed_openshift_registry_root and + (container.image.repository endswith /logging-deployment or + container.image.repository endswith /logging-elasticsearch or + container.image.repository endswith /logging-kibana or + container.image.repository endswith /logging-fluentd or + container.image.repository endswith /logging-auth-proxy or + container.image.repository endswith /metrics-deployer or + container.image.repository endswith /metrics-hawkular-metrics or + container.image.repository endswith /metrics-cassandra or + container.image.repository endswith /metrics-heapster or + container.image.repository endswith /ose-haproxy-router or + container.image.repository endswith /ose-deployer or + container.image.repository endswith /ose-sti-builder or + container.image.repository endswith /ose-docker-builder or + container.image.repository endswith /ose-pod or + container.image.repository endswith /ose-node or + container.image.repository endswith /ose-docker-registry or + container.image.repository endswith /prometheus-node-exporter or + container.image.repository endswith /image-inspector)) + + # These images are allowed both to run with --privileged and to mount + # sensitive paths from the host filesystem. + # + # NOTE: This list is only provided for backwards compatibility with + # older local falco rules files that may have been appending to + # trusted_images. To make customizations, it's better to add images to + # either privileged_images or falco_sensitive_mount_images. 
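+    # For illustration, a local/user rules file would typically append to the
+    # preferred lists instead of trusted_images (the image name below is a
+    # placeholder):
+    #
+    # - list: falco_privileged_images
+    #   append: true
+    #   items: [myregistry.example.com/trusted-agent]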
+ - list: trusted_images + items: [] + + # NOTE: This macro is only provided for backwards compatibility with + # older local falco rules files that may have been appending to + # trusted_images. To make customizations, it's better to add containers to + # user_trusted_containers, user_privileged_containers or user_sensitive_mount_containers. + - macro: trusted_containers + condition: (container.image.repository in (trusted_images)) + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to specify additional containers that are + # trusted and therefore allowed to run privileged *and* with sensitive + # mounts. + # + # Like trusted_images, this is deprecated in favor of + # user_privileged_containers and user_sensitive_mount_containers and + # is only provided for backwards compatibility. + # + # In this file, it just takes one of the images in trusted_containers + # and repeats it. + - macro: user_trusted_containers + condition: (container.image.repository endswith sysdig/agent) + + - list: sematext_images + items: [docker.io/sematext/sematext-agent-docker, docker.io/sematext/agent, docker.io/sematext/logagent, + registry.access.redhat.com/sematext/sematext-agent-docker, + registry.access.redhat.com/sematext/agent, + registry.access.redhat.com/sematext/logagent] + + # These container images are allowed to run with --privileged + - list: falco_privileged_images + items: [ + docker.io/sysdig/agent, docker.io/sysdig/falco, docker.io/sysdig/sysdig, + gcr.io/google_containers/kube-proxy, docker.io/calico/node, + docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, docker.io/mesosphere/mesos-slave, + docker.io/docker/ucp-agent, sematext_images, k8s.gcr.io/kube-proxy + ] + + - macro: falco_privileged_containers + condition: (openshift_image or + user_trusted_containers or + container.image.repository in (trusted_images) or + container.image.repository in (falco_privileged_images) or + container.image.repository startswith istio/proxy_ or + container.image.repository startswith quay.io/sysdig) + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to specify additional containers that are + # allowed to run privileged + # + # In this file, it just takes one of the images in falco_privileged_images + # and repeats it. + - macro: user_privileged_containers + condition: (container.image.repository endswith sysdig/agent) + + - list: rancher_images + items: [ + rancher/network-manager, rancher/dns, rancher/agent, + rancher/lb-service-haproxy, rancher/metadata, rancher/healthcheck + ] + + # These container images are allowed to mount sensitive paths from the + # host filesystem. 
+ - list: falco_sensitive_mount_images + items: [ + docker.io/sysdig/agent, docker.io/sysdig/falco, docker.io/sysdig/sysdig, + gcr.io/google_containers/hyperkube, + gcr.io/google_containers/kube-proxy, docker.io/calico/node, + docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, docker.io/consul, + docker.io/datadog/docker-dd-agent, docker.io/datadog/agent, docker.io/docker/ucp-agent, docker.io/gliderlabs/logspout, + docker.io/netdata/netdata, docker.io/google/cadvisor, docker.io/prom/node-exporter + ] + + - macro: falco_sensitive_mount_containers + condition: (user_trusted_containers or + container.image.repository in (trusted_images) or + container.image.repository in (falco_sensitive_mount_images) or + container.image.repository startswith quay.io/sysdig) + + # These container images are allowed to run with hostnetwork=true + - list: falco_hostnetwork_images + items: [] + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to specify additional containers that are + # allowed to perform sensitive mounts. + # + # In this file, it just takes one of the images in falco_sensitive_mount_images + # and repeats it. + - macro: user_sensitive_mount_containers + condition: (container.image.repository = docker.io/sysdig/agent) + + - rule: Launch Privileged Container + desc: Detect the initial process started in a privileged container. Exceptions are made for known trusted images. + condition: > + container_started and container + and container.privileged=true + and not falco_privileged_containers + and not user_privileged_containers + output: Privileged container started (user=%user.name command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag) + priority: INFO + tags: [container, cis, mitre_privilege_escalation, mitre_lateral_movement] + + # For now, only considering a full mount of /etc as + # sensitive. Ideally, this would also consider all subdirectories + # below /etc as well, but the globbing mechanism used by sysdig + # doesn't allow exclusions of a full pattern, only single characters. + - macro: sensitive_mount + condition: (container.mount.dest[/proc*] != "N/A" or + container.mount.dest[/var/run/docker.sock] != "N/A" or + container.mount.dest[/var/lib/kubelet] != "N/A" or + container.mount.dest[/var/lib/kubelet/pki] != "N/A" or + container.mount.dest[/] != "N/A" or + container.mount.dest[/etc] != "N/A" or + container.mount.dest[/root*] != "N/A") + + # The steps libcontainer performs to set up the root program for a container are: + # - clone + exec self to a program runc:[0:PARENT] + # - clone a program runc:[1:CHILD] which sets up all the namespaces + # - clone a second program runc:[2:INIT] + exec to the root program. + # The parent of runc:[2:INIT] is runc:0:PARENT] + # As soon as 1:CHILD is created, 0:PARENT exits, so there's a race + # where at the time 2:INIT execs the root program, 0:PARENT might have + # already exited, or might still be around. So we handle both. + # We also let runc:[1:CHILD] count as the parent process, which can occur + # when we lose events and lose track of state. + + - macro: container_entrypoint + condition: (not proc.pname exists or proc.pname in (runc:[0:PARENT], runc:[1:CHILD], runc, docker-runc, exe)) + + - rule: Launch Sensitive Mount Container + desc: > + Detect the initial process started by a container that has a mount from a sensitive host directory + (i.e. /proc). Exceptions are made for known trusted images. 
+ condition: > + container_started and container + and sensitive_mount + and not falco_sensitive_mount_containers + and not user_sensitive_mount_containers + output: Container with sensitive mount started (user=%user.name command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag mounts=%container.mounts) + priority: INFO + tags: [container, cis, mitre_lateral_movement] + + # In a local/user rules file, you could override this macro to + # explicitly enumerate the container images that you want to run in + # your environment. In this main falco rules file, there isn't any way + # to know all the containers that can run, so any container is + # allowed, by using a filter that is guaranteed to evaluate to true. + # In the overridden macro, the condition would look something like + # (container.image.repository = vendor/container-1 or + # container.image.repository = vendor/container-2 or ...) + + - macro: allowed_containers + condition: (container.id exists) + + - rule: Launch Disallowed Container + desc: > + Detect the initial process started by a container that is not in a list of allowed containers. + condition: container_started and container and not allowed_containers + output: Container started and not in allowed list (user=%user.name command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag) + priority: WARNING + tags: [container, mitre_lateral_movement] + + # Anything run interactively by root + # - condition: evt.type != switch and user.name = root and proc.name != sshd and interactive + # output: "Interactive root (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)" + # priority: WARNING + + - rule: System user interactive + desc: an attempt to run interactive commands by a system (i.e. non-login) user + condition: spawned_process and system_users and interactive + output: "System user ran an interactive command (user=%user.name command=%proc.cmdline container_id=%container.id image=%container.image.repository)" + priority: INFO + tags: [users, mitre_remote_access_tools] + + - rule: Terminal shell in container + desc: A shell was used as the entrypoint/exec point into a container with an attached terminal. + condition: > + spawned_process and container + and shell_procs and proc.tty != 0 + and container_entrypoint + output: > + A shell was spawned in a container with an attached terminal (user=%user.name %container.info + shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [container, shell, mitre_execution] + + # For some container types (mesos), there isn't a container image to + # work with, and the container name is autogenerated, so there isn't + # any stable aspect of the software to work with. In this case, we + # fall back to allowing certain command lines. 
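+    # A local/user rules file could extend this set of allowed command lines,
+    # e.g. (the entry below is purely illustrative):
+    #
+    # - list: known_shell_spawn_cmdlines
+    #   append: true
+    #   items: ['"sh -c /opt/my-app/healthcheck.sh"']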
+ + - list: known_shell_spawn_cmdlines + items: [ + '"sh -c uname -p 2> /dev/null"', + '"sh -c uname -s 2>&1"', + '"sh -c uname -r 2>&1"', + '"sh -c uname -v 2>&1"', + '"sh -c uname -a 2>&1"', + '"sh -c ruby -v 2>&1"', + '"sh -c getconf CLK_TCK"', + '"sh -c getconf PAGESIZE"', + '"sh -c LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null"', + '"sh -c LANG=C /sbin/ldconfig -p 2>/dev/null"', + '"sh -c /sbin/ldconfig -p 2>/dev/null"', + '"sh -c stty -a 2>/dev/null"', + '"sh -c stty -a < /dev/tty"', + '"sh -c stty -g < /dev/tty"', + '"sh -c node index.js"', + '"sh -c node index"', + '"sh -c node ./src/start.js"', + '"sh -c node app.js"', + '"sh -c node -e \"require(''nan'')\""', + '"sh -c node -e \"require(''nan'')\")"', + '"sh -c node $NODE_DEBUG_OPTION index.js "', + '"sh -c crontab -l 2"', + '"sh -c lsb_release -a"', + '"sh -c lsb_release -is 2>/dev/null"', + '"sh -c whoami"', + '"sh -c node_modules/.bin/bower-installer"', + '"sh -c /bin/hostname -f 2> /dev/null"', + '"sh -c locale -a"', + '"sh -c -t -i"', + '"sh -c openssl version"', + '"bash -c id -Gn kafadmin"', + '"sh -c /bin/sh -c ''date +%%s''"' + ] + + # This list allows for easy additions to the set of commands allowed + # to run shells in containers without having to without having to copy + # and override the entire run shell in container macro. Once + # https://github.com/draios/falco/issues/255 is fixed this will be a + # bit easier, as someone could append of any of the existing lists. + - list: user_known_shell_spawn_binaries + items: [] + + # This macro allows for easy additions to the set of commands allowed + # to run shells in containers without having to override the entire + # rule. Its default value is an expression that always is false, which + # becomes true when the "not ..." in the rule is applied. + - macro: user_shell_container_exclusions + condition: (never_true) + + - macro: login_doing_dns_lookup + condition: (proc.name=login and fd.l4proto=udp and fd.sport=53) + + # sockfamily ip is to exclude certain processes (like 'groups') that communicate on unix-domain sockets + # systemd can listen on ports to launch things like sshd on demand + - rule: System procs network activity + desc: any network activity performed by system binaries that are not expected to send or receive any network traffic + condition: > + (fd.sockfamily = ip and (system_procs or proc.name in (shell_binaries))) + and (inbound_outbound) + and not proc.name in (systemd, hostid, id) + and not login_doing_dns_lookup + output: > + Known system binary sent/received network traffic + (user=%user.name command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + + # When filled in, this should look something like: + # (proc.env contains "HTTP_PROXY=http://my.http.proxy.com ") + # The trailing space is intentional so avoid matching on prefixes of + # the actual proxy. 
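+    # In full macro form, such an override in a local/user rules file might
+    # read (the proxy URL is the placeholder from the comment above):
+    #
+    # - macro: allowed_ssh_proxy_env
+    #   condition: (proc.env contains "HTTP_PROXY=http://my.http.proxy.com ")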
+ - macro: allowed_ssh_proxy_env + condition: (always_true) + + - list: http_proxy_binaries + items: [curl, wget] + + - macro: http_proxy_procs + condition: (proc.name in (http_proxy_binaries)) + + - rule: Program run with disallowed http proxy env + desc: An attempt to run a program with a disallowed HTTP_PROXY environment variable + condition: > + spawned_process and + http_proxy_procs and + not allowed_ssh_proxy_env and + proc.env icontains HTTP_PROXY + output: > + Program run with disallowed HTTP_PROXY environment variable + (user=%user.name command=%proc.cmdline env=%proc.env parent=%proc.pname container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [host, users] + + # In some environments, any attempt by a interpreted program (perl, + # python, ruby, etc) to listen for incoming connections or perform + # outgoing connections might be suspicious. These rules are not + # enabled by default, but you can modify the following macros to + # enable them. + + - macro: consider_interpreted_inbound + condition: (never_true) + + - macro: consider_interpreted_outbound + condition: (never_true) + + - rule: Interpreted procs inbound network activity + desc: Any inbound network activity performed by any interpreted program (perl, python, ruby, etc.) + condition: > + (inbound and consider_interpreted_inbound + and interpreted_procs) + output: > + Interpreted program received/listened for network traffic + (user=%user.name command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + + - rule: Interpreted procs outbound network activity + desc: Any outbound network activity performed by any interpreted program (perl, python, ruby, etc.) + condition: > + (outbound and consider_interpreted_outbound + and interpreted_procs) + output: > + Interpreted program performed outgoing network connection + (user=%user.name command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + + - list: openvpn_udp_ports + items: [1194, 1197, 1198, 8080, 9201] + + - list: l2tp_udp_ports + items: [500, 1701, 4500, 10000] + + - list: statsd_ports + items: [8125] + + - list: ntp_ports + items: [123] + + # Some applications will connect a udp socket to an address only to + # test connectivity. Assuming the udp connect works, they will follow + # up with a tcp connect that actually sends/receives data. + # + # With that in mind, we listed a few commonly seen ports here to avoid + # some false positives. In addition, we make the main rule opt-in, so + # it's disabled by default. 
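+    # To opt in, a local/user rules file could flip the gating macro defined
+    # below, e.g.:
+    #
+    # - macro: do_unexpected_udp_check
+    #   condition: (always_true)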
+ + - list: test_connect_ports + items: [0, 9, 80, 3306] + + - macro: do_unexpected_udp_check + condition: (never_true) + + - list: expected_udp_ports + items: [53, openvpn_udp_ports, l2tp_udp_ports, statsd_ports, ntp_ports, test_connect_ports] + + - macro: expected_udp_traffic + condition: fd.port in (expected_udp_ports) + + - rule: Unexpected UDP Traffic + desc: UDP traffic not on port 53 (DNS) or other commonly used ports + condition: (inbound_outbound) and do_unexpected_udp_check and fd.l4proto=udp and not expected_udp_traffic + output: > + Unexpected UDP Traffic Seen + (user=%user.name command=%proc.cmdline connection=%fd.name proto=%fd.l4proto evt=%evt.type %evt.args container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + + # With the current restriction on system calls handled by falco + # (e.g. excluding read/write/sendto/recvfrom/etc, this rule won't + # trigger). + # - rule: Ssh error in syslog + # desc: any ssh errors (failed logins, disconnects, ...) sent to syslog + # condition: syslog and ssh_error_message and evt.dir = < + # output: "sshd sent error message to syslog (error=%evt.buffer)" + # priority: WARNING + + - macro: somebody_becoming_themself + condition: ((user.name=nobody and evt.arg.uid=nobody) or + (user.name=www-data and evt.arg.uid=www-data) or + (user.name=_apt and evt.arg.uid=_apt) or + (user.name=postfix and evt.arg.uid=postfix) or + (user.name=pki-agent and evt.arg.uid=pki-agent) or + (user.name=pki-acme and evt.arg.uid=pki-acme) or + (user.name=nfsnobody and evt.arg.uid=nfsnobody) or + (user.name=postgres and evt.arg.uid=postgres)) + + - macro: nrpe_becoming_nagios + condition: (proc.name=nrpe and evt.arg.uid=nagios) + + # In containers, the user name might be for a uid that exists in the + # container but not on the host. (See + # https://github.com/draios/sysdig/issues/954). So in that case, allow + # a setuid. + - macro: known_user_in_container + condition: (container and user.name != "N/A") + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs changing users by calling setuid. + # + # In this file, it just takes one of the condition in the base macro + # and repeats it. + - macro: user_known_non_sudo_setuid_conditions + condition: user.name=root + + # sshd, mail programs attempt to setuid to root even when running as non-root. Excluding here to avoid meaningless FPs + - rule: Non sudo setuid + desc: > + an attempt to change users by calling setuid. sudo/su are excluded. users "root" and "nobody" + suing to itself are also excluded, as setuid calls typically involve dropping privileges. + condition: > + evt.type=setuid and evt.dir=> + and (known_user_in_container or not container) + and not user.name=root + and not somebody_becoming_themself + and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, + nomachine_binaries) + and not proc.name startswith "runc:" + and not java_running_sdjagent + and not nrpe_becoming_nagios + and not user_known_non_sudo_setuid_conditions + output: > + Unexpected setuid call by non-sudo, non-root program (user=%user.name cur_uid=%user.uid parent=%proc.pname + command=%proc.cmdline uid=%evt.arg.uid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [users, mitre_privilege_escalation] + + - rule: User mgmt binaries + desc: > + activity by any programs that can manage users, passwords, or permissions. 
sudo and su are excluded. + Activity in containers is also excluded--some containers create custom users on top + of a base linux distribution at startup. + Some innocuous commandlines that don't actually change anything are excluded. + condition: > + spawned_process and proc.name in (user_mgmt_binaries) and + not proc.name in (su, sudo, lastlog, nologin, unix_chkpwd) and not container and + not proc.pname in (cron_binaries, systemd, systemd.postins, udev.postinst, run-parts) and + not proc.cmdline startswith "passwd -S" and + not proc.cmdline startswith "useradd -D" and + not proc.cmdline startswith "systemd --version" and + not run_by_qualys and + not run_by_sumologic_securefiles and + not run_by_yum and + not run_by_ms_oms and + not run_by_google_accounts_daemon + output: > + User management binary command run outside of container + (user=%user.name command=%proc.cmdline parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) + priority: NOTICE + tags: [host, users, mitre_persistence] + + - list: allowed_dev_files + items: [ + /dev/null, /dev/stdin, /dev/stdout, /dev/stderr, + /dev/random, /dev/urandom, /dev/console, /dev/kmsg + ] + + # (we may need to add additional checks against false positives, see: + # https://bugs.launchpad.net/ubuntu/+source/rkhunter/+bug/86153) + - rule: Create files below dev + desc: creating any files below /dev other than known programs that manage devices. Some rootkits hide files in /dev. + condition: > + fd.directory = /dev and + (evt.type = creat or (evt.type = open and evt.arg.flags contains O_CREAT)) + and not proc.name in (dev_creation_binaries) + and not fd.name in (allowed_dev_files) + and not fd.name startswith /dev/tty + output: "File created below /dev by untrusted program (user=%user.name command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, mitre_persistence] + + + # In a local/user rules file, you could override this macro to + # explicitly enumerate the container images that you want to allow + # access to EC2 metadata. In this main falco rules file, there isn't + # any way to know all the containers that should have access, so any + # container is alllowed, by repeating the "container" macro. In the + # overridden macro, the condition would look something like + # (container.image.repository = vendor/container-1 or + # container.image.repository = vendor/container-2 or ...) + - macro: ec2_metadata_containers + condition: container + + # On EC2 instances, 169.254.169.254 is a special IP used to fetch + # metadata about the instance. It may be desirable to prevent access + # to this IP from containers. + - rule: Contact EC2 Instance Metadata Service From Container + desc: Detect attempts to contact the EC2 Instance Metadata Service from a container + condition: outbound and fd.sip="169.254.169.254" and container and not ec2_metadata_containers + output: Outbound connection to EC2 instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, aws, container, mitre_discovery] + + # In a local/user rules file, you should override this macro with the + # IP address of your k8s api server. The IP 1.2.3.4 is a placeholder + # IP that is not likely to be seen in practice. 
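+    # For example (the address and port below are placeholders for your own
+    # API server endpoint):
+    #
+    # - macro: k8s_api_server
+    #   condition: (fd.sip="10.0.0.1" and fd.sport=443)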
+ - macro: k8s_api_server + condition: (fd.sip="1.2.3.4" and fd.sport=8080) + + # In a local/user rules file, list the container images that are + # allowed to contact the K8s API Server from within a container. This + # might cover cases where the K8s infrastructure itself is running + # within a container. + - macro: k8s_containers + condition: > + (container.image.repository in (gcr.io/google_containers/hyperkube-amd64, + gcr.io/google_containers/kube2sky, sysdig/agent, sysdig/falco, + sysdig/sysdig)) + + - rule: Contact K8S API Server From Container + desc: Detect attempts to contact the K8S API Server from a container + condition: outbound and k8s_api_server and container and not k8s_containers + output: Unexpected connection to K8s API Server from container (command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag connection=%fd.name) + priority: NOTICE + tags: [network, k8s, container, mitre_discovery] + + # In a local/user rules file, list the container images that are + # allowed to contact NodePort services from within a container. This + # might cover cases where the K8s infrastructure itself is running + # within a container. + # + # By default, all containers are allowed to contact NodePort services. + - macro: nodeport_containers + condition: container + + - rule: Unexpected K8s NodePort Connection + desc: Detect attempts to use K8s NodePorts from a container + condition: (inbound_outbound) and fd.sport >= 30000 and fd.sport <= 32767 and container and not nodeport_containers + output: Unexpected K8s NodePort Connection (command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, k8s, container, mitre_port_knocking] + + - list: network_tool_binaries + items: [nc, ncat, nmap, dig, tcpdump, tshark, ngrep] + + - macro: network_tool_procs + condition: (proc.name in (network_tool_binaries)) + + # Container is supposed to be immutable. Package management should be done in building the image. 
+ - rule: Launch Package Management Process in Container + desc: Package management process ran inside container + condition: > + spawned_process and container and user.name != "_apt" and package_mgmt_procs and not package_mgmt_ancestor_procs + output: > + Package management process launched in container (user=%user.name + command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: ERROR + tags: [process, mitre_persistence] + + - rule: Netcat Remote Code Execution in Container + desc: Netcat Program runs inside container that allows remote code execution + condition: > + spawned_process and container and + ((proc.name = "nc" and (proc.args contains "-e" or proc.args contains "-c")) or + (proc.name = "ncat" and (proc.args contains "--sh-exec" or proc.args contains "--exec" or proc.args contains "-e " + or proc.args contains "-c " or proc.args contains "--lua-exec")) + ) + output: > + Netcat runs inside container that allows remote code execution (user=%user.name + command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: WARNING + tags: [network, process, mitre_execution] + + - rule: Launch Suspicious Network Tool in Container + desc: Detect network tools launched inside container + condition: > + spawned_process and container and network_tool_procs + output: > + Network tool launched in container (user=%user.name command=%proc.cmdline parent_process=%proc.pname + container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, process, mitre_discovery, mitre_exfiltration] + + # This rule is not enabled by default, as there are legitimate use + # cases for these tools on hosts. If you want to enable it, modify the + # following macro. + - macro: consider_network_tools_on_host + condition: (never_true) + + - rule: Launch Suspicious Network Tool on Host + desc: Detect network tools launched on the host + condition: > + spawned_process and + not container and + consider_network_tools_on_host and + network_tool_procs + output: > + Network tool launched on host (user=%user.name command=%proc.cmdline parent_process=%proc.pname) + priority: NOTICE + tags: [network, process, mitre_discovery, mitre_exfiltration] + + - list: grep_binaries + items: [grep, egrep, fgrep] + + - macro: grep_commands + condition: (proc.name in (grep_binaries)) + + # a less restrictive search for things that might be passwords/ssh/user etc. + - macro: grep_more + condition: (never_true) + + - macro: private_key_or_password + condition: > + (proc.args icontains "BEGIN PRIVATE" or + proc.args icontains "BEGIN RSA PRIVATE" or + proc.args icontains "BEGIN DSA PRIVATE" or + proc.args icontains "BEGIN EC PRIVATE" or + (grep_more and + (proc.args icontains " pass " or + proc.args icontains " ssh " or + proc.args icontains " user ")) + ) + + - rule: Search Private Keys or Passwords + desc: > + Detect grep private keys or passwords activity. 
+      condition: >
+        (spawned_process and
+        ((grep_commands and private_key_or_password) or
+        (proc.name = "find" and (proc.args contains "id_rsa" or proc.args contains "id_dsa")))
+        )
+      output: >
+        Grep private keys or passwords activities found
+        (user=%user.name command=%proc.cmdline container_id=%container.id container_name=%container.name
+        image=%container.image.repository:%container.image.tag)
+      priority:
+        WARNING
+      tags: [process, mitre_credential_access]
+
+    - list: log_directories
+      items: [/var/log, /dev/log]
+
+    - list: log_files
+      items: [syslog, auth.log, secure, kern.log, cron, user.log, dpkg.log, last.log, yum.log, access_log, mysql.log, mysqld.log]
+
+    - macro: access_log_files
+      condition: (fd.directory in (log_directories) or fd.filename in (log_files))
+
+    # a placeholder for whitelist log files that could be cleared. Recommend the macro as (fd.name startswith "/var/log/app1*")
+    - macro: allowed_clear_log_files
+      condition: (never_true)
+
+    - macro: trusted_logging_images
+      condition: (container.image.repository endswith "splunk/fluentd-hec")
+
+    - rule: Clear Log Activities
+      desc: Detect clearing of critical log files
+      condition: >
+        open_write and
+        access_log_files and
+        evt.arg.flags contains "O_TRUNC" and
+        not trusted_logging_images and
+        not allowed_clear_log_files
+      output: >
+        Log files were tampered (user=%user.name command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository)
+      priority:
+        WARNING
+      tags: [file, mitre_defense_evasion]
+
+    - list: data_remove_commands
+      items: [shred, mkfs, mke2fs]
+
+    - macro: clear_data_procs
+      condition: (proc.name in (data_remove_commands))
+
+    - rule: Remove Bulk Data from Disk
+      desc: Detect process running to clear bulk data from disk
+      condition: spawned_process and clear_data_procs
+      output: >
+        Bulk data has been removed from disk (user=%user.name command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository)
+      priority:
+        WARNING
+      tags: [process, mitre_persistence]
+
+    - rule: Delete Bash History
+      desc: Detect bash history deletion
+      condition: >
+        ((spawned_process and proc.name in (shred, rm, mv) and proc.args contains "bash_history") or
+        (open_write and fd.name contains "bash_history" and evt.arg.flags contains "O_TRUNC"))
+      output: >
+        Bash history has been deleted (user=%user.name command=%proc.cmdline file=%fd.name %container.info)
+      priority:
+        WARNING
+      tags: [process, mitre_defense_evasion]
+
+    - macro: consider_all_chmods
+      condition: (never_true)
+
+    - rule: Set Setuid or Setgid bit
+      desc: >
+        When the setuid or setgid bits are set for an application,
+        this means that the application will run with the privileges of the owning user or group respectively.
+        Detect setuid or setgid bits set via chmod
+      condition: consider_all_chmods and spawned_process and proc.name = "chmod" and (proc.args contains "+s" or proc.args contains "4777")
+      output: >
+        Setuid or setgid bit is set via chmod (user=%user.name command=%proc.cmdline
+        container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
+      priority:
+        NOTICE
+      tags: [process, mitre_persistence]
+
+    - list: exclude_hidden_directories
+      items: [/root/.cassandra]
+
+    # To use this rule, you should modify consider_hidden_file_creation.
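+    # A minimal sketch of a local override (falco_rules.local.yaml) that would
+    # enable the rule, assuming the stock always_true macro from the default
+    # ruleset is available:
+    #   - macro: consider_hidden_file_creation
+    #     condition: (always_true)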
+    - macro: consider_hidden_file_creation
+      condition: (never_true)
+
+    - rule: Create Hidden Files or Directories
+      desc: Detect hidden files or directories created
+      condition: >
+        ((mkdir and consider_hidden_file_creation and evt.arg.path contains "/.") or
+        (open_write and consider_hidden_file_creation and evt.arg.flags contains "O_CREAT" and
+        fd.name contains "/." and not fd.name pmatch (exclude_hidden_directories)))
+      output: >
+        Hidden file or directory created (user=%user.name command=%proc.cmdline
+        file=%fd.name container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
+      priority:
+        NOTICE
+      tags: [file, mitre_persistence]
+
+    - list: remote_file_copy_binaries
+      items: [rsync, scp, sftp, dcp]
+
+    - macro: remote_file_copy_procs
+      condition: (proc.name in (remote_file_copy_binaries))
+
+    - rule: Launch Remote File Copy Tools in Container
+      desc: Detect remote file copy tools launched in container
+      condition: >
+        spawned_process and container and remote_file_copy_procs
+      output: >
+        Remote file copy tool launched in container (user=%user.name command=%proc.cmdline parent_process=%proc.pname
+        container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
+      priority: NOTICE
+      tags: [network, process, mitre_lateral_movement, mitre_exfiltration]
+
+    - rule: Create Symlink Over Sensitive Files
+      desc: Detect symlink created over sensitive files
+      condition: >
+        create_symlink and
+        (evt.arg.target in (sensitive_file_names) or evt.arg.target in (sensitive_directory_names))
+      output: >
+        Symlinks created over sensitive files (user=%user.name command=%proc.cmdline target=%evt.arg.target linkpath=%evt.arg.linkpath parent_process=%proc.pname)
+      priority: NOTICE
+      tags: [file, mitre_exfiltration]
+    # Application rules have moved to application_rules.yaml. Please look
+    # there if you want to enable them by adding to
+    # falco_rules.local.yaml.
+
+  k8s_audit_rules.yaml: |
+    #
+    # Copyright (C) 2016-2018 Draios Inc dba Sysdig.
+    #
+    # This file is part of falco.
+    #
+    # Licensed under the Apache License, Version 2.0 (the "License");
+    # you may not use this file except in compliance with the License.
+    # You may obtain a copy of the License at
+    #
+    #     http://www.apache.org/licenses/LICENSE-2.0
+    #
+    # Unless required by applicable law or agreed to in writing, software
+    # distributed under the License is distributed on an "AS IS" BASIS,
+    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    # See the License for the specific language governing permissions and
+    # limitations under the License.
+    #
+    - required_engine_version: 2
+
+    # Like always_true/always_false, but works with k8s audit events
+    - macro: k8s_audit_always_true
+      condition: (jevt.rawtime exists)
+
+    - macro: k8s_audit_never_true
+      condition: (jevt.rawtime=0)
+
+    # Generally only consider audit events once the response has completed
+    - list: k8s_audit_stages
+      items: ["ResponseComplete"]
+
+    # Generally exclude users starting with "system:"
+    - macro: non_system_user
+      condition: (not ka.user.name startswith "system:")
+
+    # This macro selects the set of Audit Events used by the below rules.
+    - macro: kevt
+      condition: (jevt.value[/stage] in (k8s_audit_stages))
+
+    - macro: kevt_started
+      condition: (jevt.value[/stage]=ResponseStarted)
+
+    # If you wish to restrict activity to a specific set of users, override/append to this list.
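+    # For example, a local rules file could extend the list like this
+    # ("cluster-admin-bob" is purely a placeholder user name):
+    #   - list: allowed_k8s_users
+    #     append: true
+    #     items: ["cluster-admin-bob"]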
+ - list: allowed_k8s_users + items: ["minikube", "minikube-user", "kubelet", "kops"] + + - rule: Disallowed K8s User + desc: Detect any k8s operation by users outside of an allowed set of users. + condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) + output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # In a local/user rules file, you could override this macro to + # explicitly enumerate the container images that you want to run in + # your environment. In this main falco rules file, there isn't any way + # to know all the containers that can run, so any container is + # allowed, by using the always_true macro. In the overridden macro, the condition + # would look something like (ka.req.container.image.repository=my-repo/my-image) + - macro: allowed_k8s_containers + condition: (k8s_audit_always_true) + + - macro: response_successful + condition: (ka.response.code startswith 2) + + - macro: kcreate + condition: ka.verb=create + + - macro: kmodify + condition: (ka.verb in (create,update,patch)) + + - macro: kdelete + condition: ka.verb=delete + + - macro: pod + condition: ka.target.resource=pods and not ka.target.subresource exists + + - macro: pod_subresource + condition: ka.target.resource=pods and ka.target.subresource exists + + - macro: deployment + condition: ka.target.resource=deployments + + - macro: service + condition: ka.target.resource=services + + - macro: configmap + condition: ka.target.resource=configmaps + + - macro: namespace + condition: ka.target.resource=namespaces + + - macro: serviceaccount + condition: ka.target.resource=serviceaccounts + + - macro: clusterrole + condition: ka.target.resource=clusterroles + + - macro: clusterrolebinding + condition: ka.target.resource=clusterrolebindings + + - macro: role + condition: ka.target.resource=roles + + - macro: health_endpoint + condition: ka.uri=/healthz + + - rule: Create Disallowed Pod + desc: > + Detect an attempt to start a pod with a container image outside of a list of allowed images. + condition: kevt and pod and kcreate and not allowed_k8s_containers + output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace image=%ka.req.container.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - rule: Create Privileged Pod + desc: > + Detect an attempt to start a pod with a privileged container + condition: kevt and pod and kcreate and ka.req.container.privileged=true and not ka.req.container.image.repository in (falco_privileged_images) + output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace image=%ka.req.container.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - macro: sensitive_vol_mount + condition: > + (ka.req.volume.hostpath[/proc*]=true or + ka.req.volume.hostpath[/var/run/docker.sock]=true or + ka.req.volume.hostpath[/]=true or + ka.req.volume.hostpath[/etc]=true or + ka.req.volume.hostpath[/root*]=true) + + - rule: Create Sensitive Mount Pod + desc: > + Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc). + Exceptions are made for known trusted images. 
+ condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.container.image.repository in (falco_sensitive_mount_images) + output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace image=%ka.req.container.image mounts=%jevt.value[/requestObject/spec/volumes]) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Corresponds to K8s CIS Benchmark 1.7.4 + - rule: Create HostNetwork Pod + desc: Detect an attempt to start a pod using the host network. + condition: kevt and pod and kcreate and ka.req.container.host_network=true and not ka.req.container.image.repository in (falco_hostnetwork_images) + output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace image=%ka.req.container.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - rule: Create NodePort Service + desc: > + Detect an attempt to start a service with a NodePort service type + condition: kevt and service and kcreate and ka.req.service.type=NodePort + output: NodePort Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace ports=%ka.req.service.ports) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - macro: contains_private_credentials + condition: > + (ka.req.configmap.obj contains "aws_access_key_id" or + ka.req.configmap.obj contains "aws-access-key-id" or + ka.req.configmap.obj contains "aws_s3_access_key_id" or + ka.req.configmap.obj contains "aws-s3-access-key-id" or + ka.req.configmap.obj contains "password" or + ka.req.configmap.obj contains "passphrase") + + - rule: Create/Modify Configmap With Private Credentials + desc: > + Detect creating/modifying a configmap containing a private credential (aws key, password, etc.) + condition: kevt and configmap and kmodify and contains_private_credentials + output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb configmap=%ka.req.configmap.name config=%ka.req.configmap.obj) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Corresponds to K8s CIS Benchmark, 1.1.1. + - rule: Anonymous Request Allowed + desc: > + Detect any request made by the anonymous user that was allowed + condition: kevt and ka.user.name=system:anonymous and ka.auth.decision!=reject and not health_endpoint + output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason)) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Roughly corresponds to K8s CIS Benchmark, 1.1.12. In this case, + # notifies an attempt to exec/attach to a privileged container. + + # Ideally, we'd add a more stringent rule that detects attaches/execs + # to a privileged pod, but that requires the engine for k8s audit + # events to be stateful, so it could know if a container named in an + # attach request was created privileged or not. For now, we have a + # less severe rule that detects attaches/execs to any pod. 
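+    # For instance, a plain `kubectl exec -it <pod> -- /bin/sh` shows up in the
+    # audit log as a create on the pods/exec subresource, which is what the
+    # rule below keys on.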
+ + - rule: Attach/Exec Pod + desc: > + Detect any attempt to attach/exec to a pod + condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) + output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command]) + priority: NOTICE + source: k8s_audit + tags: [k8s] + + # In a local/user rules fie, you can append to this list to add additional allowed namespaces + - list: allowed_namespaces + items: [kube-system, kube-public, default] + + - rule: Create Disallowed Namespace + desc: Detect any attempt to create a namespace outside of a set of known namespaces + condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces) + output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Detect any new pod created in the kube-system namespace + - rule: Pod Created in Kube Namespace + desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces + condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) + output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace image=%ka.req.container.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Detect creating a service account in the kube-system/kube-public namespace + - rule: Service Account Created in Kube Namespace + desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces + condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) + output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Detect any modify/delete to any ClusterRole starting with + # "system:". "system:coredns" is excluded as changes are expected in + # normal operation. 
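+    # If additional system roles are expected to change during normal operation
+    # in your environment, the exclusion in the condition below could be widened
+    # in a local rules file, e.g. `and not ka.target.name in ("system:coredns", "system:kube-dns")`
+    # ("system:kube-dns" is only an illustrative role name).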
+ - rule: System ClusterRole Modified/Deleted + desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system + condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and ka.target.name!="system:coredns" + output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name ns=%ka.target.namespace action=%ka.verb) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Detect any attempt to create a ClusterRoleBinding to the cluster-admin user + # (exapand this to any built-in cluster role that does "sensitive" things) + - rule: Attach to cluster-admin Role + desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user + condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin + output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - rule: ClusterRole With Wildcard Created + desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs + condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources contains '"*"' or ka.req.role.rules.verbs contains '"*"') + output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - macro: writable_verbs + condition: > + (ka.req.role.rules.verbs contains create or + ka.req.role.rules.verbs contains update or + ka.req.role.rules.verbs contains patch or + ka.req.role.rules.verbs contains delete or + ka.req.role.rules.verbs contains deletecollection) + + - rule: ClusterRole With Write Privileges Created + desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions + condition: kevt and (role or clusterrole) and kcreate and writable_verbs + output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) + priority: NOTICE + source: k8s_audit + tags: [k8s] + + - rule: ClusterRole With Pod Exec Created + desc: Detect any attempt to create a Role/ClusterRole that can exec to pods + condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources contains "pods/exec" + output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # The rules below this point are less discriminatory and generally + # represent a stream of activity for a cluster. If you wish to disable + # these events, modify the following macro. 
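+    # e.g. a local override that turns all of them off at once could look like:
+    #   - macro: consider_activity_events
+    #     condition: (k8s_audit_never_true)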
+ - macro: consider_activity_events + condition: (k8s_audit_always_true) + + - macro: kactivity + condition: (kevt and consider_activity_events) + + - rule: K8s Deployment Created + desc: Detect any attempt to create a deployment + condition: (kactivity and kcreate and deployment and response_successful) + output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Deployment Deleted + desc: Detect any attempt to delete a deployment + condition: (kactivity and kdelete and deployment and response_successful) + output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Service Created + desc: Detect any attempt to create a service + condition: (kactivity and kcreate and service and response_successful) + output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Service Deleted + desc: Detect any attempt to delete a service + condition: (kactivity and kdelete and service and response_successful) + output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s ConfigMap Created + desc: Detect any attempt to create a configmap + condition: (kactivity and kcreate and configmap and response_successful) + output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s ConfigMap Deleted + desc: Detect any attempt to delete a configmap + condition: (kactivity and kdelete and configmap and response_successful) + output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Namespace Created + desc: Detect any attempt to create a namespace + condition: (kactivity and kcreate and namespace and response_successful) + output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Namespace Deleted + desc: Detect any attempt to delete a namespace + condition: (kactivity and non_system_user and kdelete and namespace and response_successful) + output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Serviceaccount Created + desc: Detect any attempt to create a service account + condition: (kactivity and kcreate and serviceaccount and response_successful) + output: K8s Serviceaccount Created (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO 
+ source: k8s_audit + tags: [k8s] + + - rule: K8s Serviceaccount Deleted + desc: Detect any attempt to delete a service account + condition: (kactivity and kdelete and serviceaccount and response_successful) + output: K8s Serviceaccount Deleted (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Role/Clusterrole Created + desc: Detect any attempt to create a cluster role/role + condition: (kactivity and kcreate and (clusterrole or role) and response_successful) + output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Role/Clusterrole Deleted + desc: Detect any attempt to delete a cluster role/role + condition: (kactivity and kdelete and (clusterrole or role) and response_successful) + output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Role/Clusterrolebinding Created + desc: Detect any attempt to create a clusterrolebinding + condition: (kactivity and kcreate and clusterrolebinding and response_successful) + output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason foo=%ka.req.binding.subject.has_name[cluster-admin]) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Role/Clusterrolebinding Deleted + desc: Detect any attempt to delete a clusterrolebinding + condition: (kactivity and kdelete and clusterrolebinding and response_successful) + output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + # This rule generally matches all events, and as a result is disabled + # by default. If you wish to enable these events, modify the + # following macro. 
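+    # (for example, by redefining consider_all_events in a local rules file with
+    # the commented-out condition shown on the next line)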
+ # condition: (jevt.rawtime exists) + - macro: consider_all_events + condition: (k8s_audit_never_true) + + - macro: kall + condition: (kevt and consider_all_events) + + - rule: All K8s Audit Events + desc: Match all K8s Audit Events + condition: kall + output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj) + priority: DEBUG + source: k8s_audit + tags: [k8s] + + +--- +# Source: falco/templates/serviceaccount.yaml + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sysdig-falco + namespace: falco + labels: + app: sysdig-falco + chart: "falco-1.0.9" + release: "sysdig-falco" + heritage: "Tiller" + +--- +# Source: falco/templates/clusterrole.yaml + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: sysdig-falco + labels: + app: sysdig-falco + chart: "falco-1.0.9" + release: "sysdig-falco" + heritage: "Tiller" +rules: + - apiGroups: + - extensions + - "" + resources: + - nodes + - namespaces + - pods + - replicationcontrollers + - services + - events + - configmaps + - daemonsets + - deployments + - replicasets + verbs: + - get + - list + - watch + - nonResourceURLs: + - /healthz + - /healthz/* + verbs: + - get + +--- +# Source: falco/templates/clusterrolebinding.yaml + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: sysdig-falco + labels: + app: sysdig-falco + chart: "falco-1.0.9" + release: "sysdig-falco" + heritage: "Tiller" +subjects: + - kind: ServiceAccount + name: sysdig-falco + namespace: falco +roleRef: + kind: ClusterRole + name: sysdig-falco + apiGroup: rbac.authorization.k8s.io + +--- +# Source: falco/templates/daemonset.yaml +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: sysdig-falco + namespace: falco + labels: + app: sysdig-falco + chart: "falco-1.0.9" + release: "sysdig-falco" + heritage: "Tiller" +spec: + template: + metadata: + name: sysdig-falco + labels: + app: sysdig-falco + role: security + annotations: + checksum/config: b7c848243b3e62bafcdbd7fd17dfec395665f62d43cbdcae87ba5a2087d13fb2 + checksum/rules: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + spec: + serviceAccountName: sysdig-falco + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + + containers: + - name: falco + image: docker.io/falcosecurity/falco:0.17.1 + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 1024Mi + requests: + cpu: 100m + memory: 512Mi + + securityContext: + privileged: true + args: + - /usr/bin/falco + - --cri + - /host/var/run/cri.sock + - -K + - /var/run/secrets/kubernetes.io/serviceaccount/token + - -k + - "https://$(KUBERNETES_SERVICE_HOST)" + - -pk + env: + volumeMounts: + - mountPath: /host/var/run/docker.sock + name: docker-socket + - mountPath: /host/var/run/cri.sock + name: cri-socket + - mountPath: /host/dev + name: dev-fs + readOnly: true + - mountPath: /host/proc + name: proc-fs + readOnly: true + - mountPath: /host/boot + name: boot-fs + readOnly: true + - mountPath: /host/lib/modules + name: lib-modules + readOnly: true + - mountPath: /host/usr + name: usr-fs + readOnly: true + - mountPath: /dev/shm + name: dshm + - mountPath: /etc/falco + name: config-volume + volumes: + - name: dshm + emptyDir: + medium: Memory + - name: docker-socket + hostPath: + path: /var/run/docker.sock + - name: cri-socket + hostPath: + path: /run/containerd/containerd.sock + - name: dev-fs + hostPath: + path: /dev + - name: proc-fs + hostPath: + path: /proc + - name: boot-fs + hostPath: + 
path: /boot + - name: lib-modules + hostPath: + path: /lib/modules + - name: usr-fs + hostPath: + path: /usr + - name: config-volume + configMap: + name: sysdig-falco + items: + - key: falco.yaml + path: falco.yaml + - key: falco_rules.yaml + path: falco_rules.yaml + - key: falco_rules.local.yaml + path: falco_rules.local.yaml + - key: application_rules.yaml + path: rules.available/application_rules.yaml + updateStrategy: + type: RollingUpdate + + +--- +# Source: falco/templates/auditservice.yaml + + +--- +# Source: falco/templates/auditsink.yaml + + +--- +# Source: falco/templates/configmap-rules.yaml + + +--- +# Source: falco/templates/deployment.yaml + + +--- +# Source: falco/templates/podsecuritypolicy.yaml + + +--- +# Source: falco/templates/secret.yaml + + + + diff --git a/cluster-config/gatekeeper.yaml b/cluster-config/gatekeeper.yaml new file mode 100644 index 0000000..858c399 --- /dev/null +++ b/cluster-config/gatekeeper.yaml @@ -0,0 +1,457 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + controller-tools.k8s.io: "1.0" + name: gatekeeper-system +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: configs.config.gatekeeper.sh +spec: + group: config.gatekeeper.sh + names: + kind: Config + plural: configs + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + sync: + description: Configuration for syncing k8s objects + properties: + syncOnly: + description: If non-empty, only entries on this list will be replicated + into OPA + items: + properties: + group: + type: string + kind: + type: string + version: + type: string + type: object + type: array + type: object + validation: + description: Configuration for validation + properties: + traces: + description: List of requests to trace. Both "user" and "kinds" + must be specified + items: + properties: + dump: + description: Also dump the state of OPA with the trace. Set + to `All` to dump everything. 
+ type: string + kind: + description: Only trace requests of the following GroupVersionKind + properties: + group: + type: string + kind: + type: string + version: + type: string + type: object + user: + description: Only trace requests from the specified user + type: string + type: object + type: array + type: object + type: object + status: + properties: + byPod: + description: List of statuses as seen by individual pods + items: + properties: + allFinalizers: + description: List of Group/Version/Kinds with finalizers + items: + properties: + group: + type: string + kind: + type: string + version: + type: string + type: object + type: array + id: + description: a unique identifier for the pod that wrote the status + type: string + type: object + type: array + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: gatekeeper-manager-role +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - config.gatekeeper.sh + resources: + - configs + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - config.gatekeeper.sh + resources: + - configs/status + verbs: + - get + - update + - patch +- apiGroups: + - constraints.gatekeeper.sh + resources: + - '*' + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - templates.gatekeeper.sh + resources: + - constrainttemplates + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - templates.gatekeeper.sh + resources: + - constrainttemplates/status + verbs: + - get + - update + - patch +- apiGroups: + - constraints.gatekeeper.sh + resources: + - '*' + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - '*' + resources: + - '*' + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + name: gatekeeper-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: gatekeeper-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: gatekeeper-system +--- +apiVersion: v1 +kind: Secret +metadata: + name: gatekeeper-webhook-server-secret + namespace: gatekeeper-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + controller-tools.k8s.io: "1.0" + name: gatekeeper-controller-manager-service + namespace: gatekeeper-system +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + control-plane: controller-manager + 
controller-tools.k8s.io: "1.0" +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + control-plane: controller-manager + controller-tools.k8s.io: "1.0" + name: gatekeeper-controller-manager + namespace: gatekeeper-system +spec: + selector: + matchLabels: + control-plane: controller-manager + controller-tools.k8s.io: "1.0" + serviceName: gatekeeper-controller-manager-service + template: + metadata: + labels: + control-plane: controller-manager + controller-tools.k8s.io: "1.0" + spec: + containers: + - args: + - --auditInterval=30 + - --port=8443 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SECRET_NAME + value: gatekeeper-webhook-server-secret + image: quay.io/open-policy-agent/gatekeeper:v3.0.4-beta.2 + imagePullPolicy: Always + name: manager + ports: + - containerPort: 8443 + name: webhook-server + protocol: TCP + resources: + limits: + cpu: 100m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /certs + name: cert + readOnly: true + terminationGracePeriodSeconds: 60 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: gatekeeper-webhook-server-secret +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: constrainttemplates.templates.gatekeeper.sh +spec: + group: templates.gatekeeper.sh + names: + kind: ConstraintTemplate + plural: constrainttemplates + scope: Cluster + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + crd: + properties: + spec: + properties: + names: + properties: + kind: + type: string + type: object + validation: + type: object + type: object + type: object + targets: + items: + properties: + rego: + type: string + target: + type: string + type: object + type: array + type: object + status: + properties: + byPod: + items: + properties: + errors: + items: + properties: + code: + type: string + location: + type: string + message: + type: string + required: + - code + - message + type: object + type: array + id: + description: a unique identifier for the pod that wrote the status + type: string + type: object + type: array + created: + type: boolean + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true + - name: v1alpha1 + served: true + storage: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/cluster-config/limitranges.yaml b/cluster-config/limitranges.yaml index c7d29c4..27be0b4 100644 --- a/cluster-config/limitranges.yaml +++ b/cluster-config/limitranges.yaml @@ -6,17 +6,17 @@ metadata: spec: limits: - default: - cpu: 0.5 + cpu: 500m memory: 512Mi defaultRequest: - cpu: 0.25 + cpu: 250m memory: 256Mi max: - cpu: 1 - memory: 1Gi + cpu: 2 + memory: 2Gi min: - cpu: 200m - memory: 256Mi + cpu: 10m + memory: 10Mi type: Container - max: storage: 2Gi @@ -32,17 +32,17 @@ metadata: spec: limits: - default: - cpu: 0.5 + cpu: 500m memory: 512Mi defaultRequest: - cpu: 0.25 + cpu: 250m memory: 256Mi max: - cpu: 1 - memory: 1Gi + cpu: 2 + memory: 2Gi min: - cpu: 200m - memory: 256Mi + cpu: 10m + memory: 10Mi type: Container - max: storage: 5Gi @@ -58,20 +58,20 @@ metadata: spec: limits: - default: - cpu: 0.5 + cpu: 500m memory: 512Mi defaultRequest: - cpu: 0.25 + cpu: 250m memory: 256Mi max: - cpu: 1 - memory: 1Gi + cpu: 2 + memory: 2Gi min: - cpu: 200m - memory: 256Mi + cpu: 10m + memory: 10Mi type: Container - max: storage: 10Gi min: storage: 1Gi - type: PersistentVolumeClaim \ No newline at end of file + type: PersistentVolumeClaim diff --git a/cluster-config/linkerd.yaml b/cluster-config/linkerd.yaml new file mode 100644 index 0000000..28127e4 --- /dev/null +++ b/cluster-config/linkerd.yaml @@ -0,0 +1,3248 @@ +--- +### +### Linkerd Namespace +### +--- +kind: Namespace +apiVersion: v1 +metadata: + name: linkerd + annotations: + linkerd.io/inject: disabled + labels: + linkerd.io/is-control-plane: "true" + config.linkerd.io/admission-webhooks: disabled +--- +### +### Identity Controller Service RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-identity + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-identity + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-identity +subjects: +- kind: ServiceAccount + name: linkerd-identity + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-identity + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + 
linkerd.io/control-plane-ns: linkerd +--- +### +### Controller RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-controller + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["extensions", "apps"] + resources: ["daemonsets", "deployments", "replicasets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["jobs"] + verbs: ["list" , "get", "watch"] +- apiGroups: [""] + resources: ["pods", "endpoints", "services", "replicationcontrollers", "namespaces"] + verbs: ["list", "get", "watch"] +- apiGroups: ["linkerd.io"] + resources: ["serviceprofiles"] + verbs: ["list", "get", "watch"] +- apiGroups: ["split.smi-spec.io"] + resources: ["trafficsplits"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-controller + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-controller +subjects: +- kind: ServiceAccount + name: linkerd-controller + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-controller + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd +--- +### +### Destination Controller Service +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-destination + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods", "endpoints", "services"] + verbs: ["list", "get", "watch"] +- apiGroups: ["linkerd.io"] + resources: ["serviceprofiles"] + verbs: ["list", "get", "watch"] +- apiGroups: ["split.smi-spec.io"] + resources: ["trafficsplits"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-destination + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-destination +subjects: +- kind: ServiceAccount + name: linkerd-destination + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-destination + namespace: linkerd + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +--- +### +### Heartbeat RBAC +### +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + resourceNames: ["linkerd-config"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: Role + name: linkerd-heartbeat + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-heartbeat + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-component: heartbeat + 
linkerd.io/control-plane-ns: linkerd +--- +### +### Web RBAC +### +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-web-admin + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-tap-admin +subjects: +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +--- +### +### Service Profile CRD +### +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: serviceprofiles.linkerd.io + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-ns: linkerd +spec: + group: linkerd.io + versions: + - name: v1alpha1 + served: true + storage: false + - name: v1alpha2 + served: true + storage: true + scope: Namespaced + names: + plural: serviceprofiles + singular: serviceprofile + kind: ServiceProfile + shortNames: + - sp +--- +### +### TrafficSplit CRD +### Copied from https://github.com/deislabs/smi-sdk-go/blob/cea7e1e9372304bbb6c74a3f6ca788d9eaa9cc58/crds/split.yaml +### +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: trafficsplits.split.smi-spec.io + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-ns: linkerd +spec: + group: split.smi-spec.io + version: v1alpha1 + scope: Namespaced + names: + kind: TrafficSplit + shortNames: + - ts + plural: trafficsplits + singular: trafficsplit + additionalPrinterColumns: + - name: Service + type: string + description: The apex service of this split. 
+ JSONPath: .spec.service +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Proxy Injector RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-proxy-injector + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +- apiGroups: [""] + resources: ["namespaces", "replicationcontrollers"] + verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["list", "watch"] +- apiGroups: ["extensions", "apps"] + resources: ["deployments", "replicasets", "daemonsets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["jobs"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-proxy-injector + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +subjects: +- kind: ServiceAccount + name: linkerd-proxy-injector + namespace: linkerd + apiGroup: "" +roleRef: + kind: ClusterRole + name: linkerd-linkerd-proxy-injector + apiGroup: rbac.authorization.k8s.io +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-proxy-injector-tls + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +type: Opaque +data: + crt.pem: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKekNDQWcrZ0F3SUJBZ0lSQUk2N0FTRkNQT3NaaXBNOGVYYzgwdG93RFFZSktvWklodmNOQVFFTEJRQXcKTFRFck1Da0dBMVVFQXhNaWJHbHVhMlZ5WkMxd2NtOTRlUzFwYm1wbFkzUnZjaTVzYVc1clpYSmtMbk4yWXpBZQpGdzB4T1RFd016QXhNRE01TkRWYUZ3MHlNREV3TWpreE1ETTVORFZhTUMweEt6QXBCZ05WQkFNVElteHBibXRsCmNtUXRjSEp2ZUhrdGFXNXFaV04wYjNJdWJHbHVhMlZ5WkM1emRtTXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUEKQTRJQkR3QXdnZ0VLQW9JQkFRRGVIOG1oOWExT2dOVUppMVRMRFc2SFZSRzkydjk1cm9Fd05QVVpHcjRuNWtvagpyZHF2WnFJT2ZMdlhycWVBUTJIWnAvTW5JWUFwQjZLSjRtNjFYZkV6QlRYMkRGS0hEZzhBMDNPNXJRWU5SL0FlCkZMMVFTVjNJQjZGVGl2SVUyM0dIZVVCZXlKcGsvRkRXU21KYmluN0hnV1NDVndEMHIrZEtwUUp5UjlaczQ3ZHoKZG9ISExyaFNlUk4zNHBQWm4ycDh0QkV6akJQTVJURDVlWThleHBlZVdScjF6bm5vckg4emR2aVlqaEZ1Ym9HLwo0cXFjWnArd1picGEwcmZnMjNNcXJrcnNVUER5ZDlEdlVIWklVQm03a0d6ZkFSNmJUc2ZxMlRNVHRSc25EOFhOCmpHM0dyYy9ad3hXOHZVYnhGRU52RnNEdDlmU0RnYVNCd2tWR0h3cHBBZ01CQUFHalFqQkFNQTRHQTFVZER3RUIKL3dRRUF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFILwpCQVV3QXdFQi96QU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMTXhLemQyTTBPd2lFMzI1STZIeGhZdmRjck1QCnRlU0ZIbjU2SjY2bGlxWC9MSFNQOWhJeklndm45NTEvU3NHYkRnZktnV0NiVWRydStOMDdobHlMQTEyZU9FQXAKeXloVVE1R0tFQWhURkZaUjlVNk5uVlcyMnAwSDhoQ3d5cWlrVVpBeFJQWU0xYk5reTNFMU9CT0wxMWxNbnRuQQo1NnF2SDdtYThuYkpJanVCblh3ZkM0RG9XZ1JDYTZEU2p6WFZVWHl3QTNQWUJtaE1QcVdYSGdiR1pLeVZnZzhhCmpHRWRuNFExSXR3WngyY3R4d3BjL0lLOStXRUlaNWgxc1NRVUcwTnhBQk1uczRnSTloM3Z5ZUZheWNOa09tSTIKSFNCN0E5Vks5OXJhYXRvMzFsQ2NydlBSV0VNcW83TDJwdURnM3M4UUd0ZTFMMThPanM4b0piZ0Judz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBM2gvSm9mV3RUb0RWQ1l0VXl3MXVoMVVSdmRyL2VhNkJNRFQxR1JxK0orWktJNjNhCnIyYWlEbnk3MTY2bmdFTmgyYWZ6SnlHQUtRZWlpZUp1dFYzeE13VTE5Z3hTaHc0UEFOTnp1YTBHRFVmd0hoUzkKVUVsZHlBZWhVNHJ5Rk50eGgzbEFYc2lhWlB4UTFrcGlXNHAreDRGa2dsY0E5Sy9uU3FVQ2NrZldiT08zYzNhQgp4eTY0VW5rVGQrS1QyWjlxZkxRUk00d1R6RVV3K1htUEhzYVhubGthOWM1NTZLeC9NM2I0bUk0UmJtNkJ2K0txCm5HYWZzR1c2V3RLMzROdHpLcTVLN0ZEdzhuZlE3MUIyU0ZBWnU1QnMzd0VlbTA3SDZ0a3pFN1ViSncvRnpZeHQKeHEzUDJjTVZ2TDFHOFJSRGJ4YkE3ZlgwZzRHa2djSkZSaDhLYVFJREFRQUJBb0lCQUdUQUNsYjU5bGZyTzdZTwppckZuTUVFV0p1djBqM2lEM0hrZkxDVHM2bDl5cHlJejAwTkh6QzNDQ2ZtMGNLbE81dTB3cnF1TEdIU0QwQWUyCnV2eHMxZUNNZG9OWjUzejJ0WHRzYmN0RVhjQmNadDllRE5pRHpwUFNVaXh3azRrK1YrcFk1d2VXK2VEZ1NWSkoKZnYrQ2ZDckYxKzcrdzZJNHZ6SXVVa08vcmRUSlJIcmZvVUh0ZU1nSjVaQ2xQSmVsbGFibUZKUkU0U3ZRMFVuNwpYTW9MdkVMTlJKODZjZURBS0JraHBVTmVrYzVYQlBPbGNFOUIxOVlqMG9VZXpId0VSUmJ2bEo1QVZsU1BlRHdWCkt4VlBPa3lxRWk1alU2YmlVMWdVZCtaVjlhbFdhTzhpNFIzck9sMjJjeVN3REJZUlpya1Z4ZjNEcC9SR1c0UlYKZUZPQ251RUNnWUVBK1lGeXIySEpoNzY1ZFZ5T0EzaVZlNngwUFpuby9qR2V5K09LendpN1JQRlBiQnpwdG9xTQprelRoZ09NTkpnVHhGUXEwMkZPejlMbDA0MEU2bmpJU1NCM1pYZEtkT1VRcXlRZElSWHYvOW9DNTV5dVY3RytTCjBlOWR3aExOaE1yM3lnYUErK2wwbVFjRzNyNnJoTVoyaXJGVmZ0Nkh2QVE3ZjAzelJFUzlUWTBDZ1lFQTQrZmkKNDNPZHNnM1RnOFJCbHZmZUh3Q0RZaTFmTE5EbjFpTVJvN1RDT01RMVNDZFNpZzJlbU04TVFnd3RGSkhOM2VGNQp4NUNPRjRjS0hvM2NvMUFEVXZLY2IzUHlpNnAyTUhrQzZQaFJUKzFQUmJXMnVRaGtERGNMV1JLUGVEQjdZWGpICjBqNGZaSml1OGVvZEhwK3hJRkp4KytrMnJRQXhXNWI1ek1HeVUwMENnWUVBOUlhbVJEWWRFYm1jL3VWZmxRMUgKeXpZL2tlUFdVZEdGcEtMemRtWkRTcFVJMXBjYmdqZ1UvMnhUR1VMc3J2Y200dzFrSWFNZStkZ01NM0RKSnlOQgpnWVluY3Vrb0IyakVXZEtXUk9yNjk3WSsrODFlbUtMY2pIMEFNV1RHSUwvZXl4Tkd0QnlmbGg0cDRJR1RqWVFICngzTjJxK2tocytXZC9lMENYa1RVRmowQ2dZRUFxK0VzZWtHdHdxVXV5Z2R2Ti81RDVMbXNYSlMvWEx0SXZYVEIKVHVkNk92bXUvWFc2aUNkeFh1bDM0eS92cDcyMnR3bmZvQ1JqSlZkUXlHNVE0bEhyejlqdEpoa1o2VDlyMTI3TQpSUXRSdDZmUHkxRWFOejRleGgrVlRWMzBHYjdtcmtJNjRWSzh0azk0VVRZeE0vRlgwbnFiUUZOSVN2MEVQSUVHCmIrZjlKMWtDZ1lCdUNISk5CYng4TVhXd2NWbkxCR1JWMUU4aWVFSGt
Id2Y4T2x3OHhhdzYzWmdSSU13RWdBd2gKMVljZ25GT1lTdXZZSzhwUXU5ek52bVZaSDROOE1yNHU1UmJXdjFrdndJVDdtNE51RTRUUGpBTmlzOHVnYmdPeApVNnBFenByaGVmem5MYk5lRUs3ZWdSOCtpd0FTM2JBbmZFcVFvT1NSZDcweUthdVJSdG1La1E9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: linkerd-proxy-injector-webhook-config + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +webhooks: +- name: linkerd-proxy-injector.linkerd.io + namespaceSelector: + matchExpressions: + - key: config.linkerd.io/admission-webhooks + operator: NotIn + values: + - disabled + clientConfig: + service: + name: linkerd-proxy-injector + namespace: linkerd + path: "/" + caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKekNDQWcrZ0F3SUJBZ0lSQUk2N0FTRkNQT3NaaXBNOGVYYzgwdG93RFFZSktvWklodmNOQVFFTEJRQXcKTFRFck1Da0dBMVVFQXhNaWJHbHVhMlZ5WkMxd2NtOTRlUzFwYm1wbFkzUnZjaTVzYVc1clpYSmtMbk4yWXpBZQpGdzB4T1RFd016QXhNRE01TkRWYUZ3MHlNREV3TWpreE1ETTVORFZhTUMweEt6QXBCZ05WQkFNVElteHBibXRsCmNtUXRjSEp2ZUhrdGFXNXFaV04wYjNJdWJHbHVhMlZ5WkM1emRtTXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUEKQTRJQkR3QXdnZ0VLQW9JQkFRRGVIOG1oOWExT2dOVUppMVRMRFc2SFZSRzkydjk1cm9Fd05QVVpHcjRuNWtvagpyZHF2WnFJT2ZMdlhycWVBUTJIWnAvTW5JWUFwQjZLSjRtNjFYZkV6QlRYMkRGS0hEZzhBMDNPNXJRWU5SL0FlCkZMMVFTVjNJQjZGVGl2SVUyM0dIZVVCZXlKcGsvRkRXU21KYmluN0hnV1NDVndEMHIrZEtwUUp5UjlaczQ3ZHoKZG9ISExyaFNlUk4zNHBQWm4ycDh0QkV6akJQTVJURDVlWThleHBlZVdScjF6bm5vckg4emR2aVlqaEZ1Ym9HLwo0cXFjWnArd1picGEwcmZnMjNNcXJrcnNVUER5ZDlEdlVIWklVQm03a0d6ZkFSNmJUc2ZxMlRNVHRSc25EOFhOCmpHM0dyYy9ad3hXOHZVYnhGRU52RnNEdDlmU0RnYVNCd2tWR0h3cHBBZ01CQUFHalFqQkFNQTRHQTFVZER3RUIKL3dRRUF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFILwpCQVV3QXdFQi96QU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMTXhLemQyTTBPd2lFMzI1STZIeGhZdmRjck1QCnRlU0ZIbjU2SjY2bGlxWC9MSFNQOWhJeklndm45NTEvU3NHYkRnZktnV0NiVWRydStOMDdobHlMQTEyZU9FQXAKeXloVVE1R0tFQWhURkZaUjlVNk5uVlcyMnAwSDhoQ3d5cWlrVVpBeFJQWU0xYk5reTNFMU9CT0wxMWxNbnRuQQo1NnF2SDdtYThuYkpJanVCblh3ZkM0RG9XZ1JDYTZEU2p6WFZVWHl3QTNQWUJtaE1QcVdYSGdiR1pLeVZnZzhhCmpHRWRuNFExSXR3WngyY3R4d3BjL0lLOStXRUlaNWgxc1NRVUcwTnhBQk1uczRnSTloM3Z5ZUZheWNOa09tSTIKSFNCN0E5Vks5OXJhYXRvMzFsQ2NydlBSV0VNcW83TDJwdURnM3M4UUd0ZTFMMThPanM4b0piZ0Judz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + failurePolicy: Fail + rules: + - operations: [ "CREATE" ] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + sideEffects: None +--- +### +### Service Profile Validator RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-sp-validator + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["list"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-sp-validator + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +subjects: +- kind: ServiceAccount + name: linkerd-sp-validator + namespace: linkerd + apiGroup: "" +roleRef: + kind: ClusterRole + name: linkerd-linkerd-sp-validator + apiGroup: rbac.authorization.k8s.io +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-sp-validator-tls + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +type: Opaque +data: + crt.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJekNDQWd1Z0F3SUJBZ0lSQU1JM0ZKVDVoaGlPdXArMDNZdkxuQmd3RFFZSktvWklodmNOQVFFTEJRQXcKS3pFcE1DY0dBMVVFQXhNZ2JHbHVhMlZ5WkMxemNDMTJZV3hwWkdGMGIzSXViR2x1YTJWeVpDNXpkbU13SGhjTgpNVGt4TURNd01UQXpPVFExV2hjTk1qQXhNREk1TVRBek9UUTFXakFyTVNrd0p3WURWUVFERXlCc2FXNXJaWEprCkxYTndMWFpoYkdsa1lYUnZjaTVzYVc1clpYSmtMbk4yWXpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVAKQURDQ0FRb0NnZ0VCQU5aUDVoYTcvbEVSZUpWRnAvNGExRjVGZEJabjB5Wk1VZTU1amM0MjhYZ0M1bFlxMG4xZwowRUd4R28rV214VWVranQ4SVNBUXpxSitWZHBlaTcwUmJWUkIxaTVhUmk1Qno0a0lpRlZlVnNCRjVjWDN1QTd4CngvZ0xVTVJYSmNNTjhMaUpWaGYwVTdHb1NkZVJjSHhodEE5aWtFai9GUWQyNUY5SFJRVUFHb3Q4bkxEZytBRkYKVjVBZ3VaT1A5WS9HN28zdmcvWldkdVczeVYzcDhDV3hzT0lqYllEejAraUlONnRqNENvTzUvbTVkUlhnVGtUcQphempDalhpWldUa2Y4enJ5WGJnYkJ1RzFHd0E2bHRhM3h5K3BVRG4vcVFoYTZ6bThUNGlhOTUwcUJHZzFNNzc5CnpaWXdtaFduRlVwWCs4QUtNdWFTaVZPREpDZFVJeHFxTjFrQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUUQKQWdLa01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFQQmdOVkhSTUJBZjhFQlRBRApBUUgvTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDSVFnR1Z0Z1dLenEwRGpsSVBQR1I1cGRRUVRyMys5VVVaCmc4ZG5IbFNxMkFlSko1L1c5bllxOXFpckRCRThjZjJxMTF2aitvTHNjcTJnU0g4NTZ0NGR2d3NMOFVUbXkrbVQKQjl5RllMekZia2tZbDZOUHB2M0l1TTBqWU5EeURCMmN0dlF2c0RjRndpU3E1bS9lN29ZQno0Y0tkb3NJdkllcQp5ZjhzM0NoVSt4ZzJxbnBTcElqVm10UnE2emwrMjRMMmtXMElHMDNENEtJazdzbjNtR2lYMXcrbjdjeWtDTzRhCmtLUDN1VURrQkE5OUlXaGFnWmNrOTViODB0SHZNNENzbWV2S29pblFrNGphZnhpc2E4Vlp3dlMzQ3NrOEJvNmcKZFg0SWZUaVRBc3ZUSzU0YzNFcHl0SmlsTDRuclBzWFk4YVIxQ0xaL3YybGFncmJSVk94YwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBMWsvbUZyditVUkY0bFVXbi9oclVYa1YwRm1mVEpreFI3bm1OempieGVBTG1WaXJTCmZXRFFRYkVhajVhYkZSNlNPM3doSUJET29uNVYybDZMdlJGdFZFSFdMbHBHTGtIUGlRaUlWVjVXd0VYbHhmZTQKRHZISCtBdFF4RmNsd3czd3VJbFdGL1JUc2FoSjE1RndmR0cwRDJLUVNQOFZCM2JrWDBkRkJRQWFpM3ljc09ENApBVVZYa0NDNWs0LzFqOGJ1amUrRDlsWjI1YmZKWGVud0piR3c0aU50Z1BQVDZJZzNxMlBnS2c3bitibDFGZUJPClJPcHJPTUtOZUpsWk9SL3pPdkpkdUJzRzRiVWJBRHFXMXJmSEw2bFFPZitwQ0Zyck9ieFBpSnIzblNvRWFEVXoKdnYzTmxqQ2FGYWNWU2xmN3dBb3k1cEtKVTRNa0oxUWpHcW8zV1FJREFRQUJBb0lCQVFEQXlDUzQ1S2t6c1p5Zgo0b1hvcmFmRm1ZS0F1U3FXYyticnQwcFphdEV4M0tIb2R5TGJoK0F1aDZyVjZXdGR4eDlTQlNvakZ4dFp6bjFhCkx0eFMyajlYb05XUzRyL28wbWhtNDFjSVBCR2JhZ1QwWFE2ajJ0akFCeVhEU0lWK2ZhYkJ5VUM1MFo0TDNNOTMKNjZ1ZEo2SjZpSFkwUThsZDZwUXFKUWt3STFTOWpWaEJIQTVrQ3FqN1Rkd3hBWXNWajAxcUwyTDhuTUdnajY5Mwo4YWl0MWtQTDAwcWl2Nmc1Z2U1MU0xR2RNS1k5UHEvNWZENlh1K1N2SitBSlRMWHlxaHNRL3hMSlRKUWloYjhPCjNQamd3ZEJ0NmdXRGZzSDBZcGNaUkJRdmhHMlRML0FjKzRvT2htazluMGNDUjhPeXBvZE5LU1dUbmliWWhVakUKTTQ4ZmRPblZBb0dCQU9VdjlEZEcrRmZMV1lxcXN1S0Nud3VpcU9vWWJUNzV6ZW1jOHN6NzNuK3Zud0Frc05XSgowdDdsQjg5WHZxcVpoRHdkc0dMd01LNGV4eEVMMEc1cEJUWGpQT3JnVVM2R053MFM4eGlOZ0w1bng1TUwzQ01LCndRRE5CU2V1L1RtUE9NYnBUNzFUc2FaSytNOTdGOHIrSzVnakhmb3dEem1oMVhWcUYwb2VJVFhYQW9HQkFPOWkKY0lTS2d4RVdJNFRPM0xmTFpZS1dxZ1pjM2lZRlJzTlorYUd0bzlvN0dQcnFSNjNpN0F0Wk9OVDU0Nm51MkN2MgpqczBoSGlzNjNwM2NhYW9wTnlsOW5pRnhOYTc0R1VmNmRFQ0IydE41WWFwY3VBMy82TGRHa2hkSkQvSy9VTndICnFLSXJRTXR3SUY2eER4Um44bGttSjYzczE2Z3AvNDdlTDgwdkF2WlBBb0dCQU9Gb01JRE5SQ0xVMU5RVTNxcjQKWFFSTWptVHVjVU9tM0dZTy9wMnNla0NLc1k2cWgyRDlsRyswUWVJaklsc1hjWDdPbEZRNmR5WkpEdkFzOW1vZwowQWNYM3lvQ1JOWEZHdys0dTI1Q2ZZVEcvUG9haC8vTzY5T2wwSlFlL3Q1RmhrbmIzWmtQWDRGMTZ3L2t1Sm5sCk1Xc0gzc3ZaeUorZUV4clBwT2c0aWVFeEFvR0JBT2xpR2J5K3BlWEVwY0JYUll6UytoYUUrU0xXNjZZNXdyRzIKU05BOXo5OTFlbmMyYmNlejB0bUZsUWplei96ZTVWYUhQTlgwa2
Nzbnk5NHdBN3hvRG1XZkdNb1Y2M2E0WHoxegppbDlSdXB3dVhPK1JFY2hrVk9oS0h5Z2dqVEhuSDloRWh6NEQyVm45NlRFb2drRXN6blBuNVF1R093S3lYM3RVClYxbzltUysxQW9HQU5GYUEvUzkxQmszdUpqeG9saGlFaTN2clUwVFI5TzlFb0xHemF6cm1SOEMralJ2WXFMdXMKSXJMRlNFWjJiM2Njc05Sc3MvVVFRSzZJSXRtL24xVmM1UUd3ekZtVHI4cjZINTlOLzlvTWJpYlVrSUpZMXlKTwpnUThsZkxOait6ZmJnTGM3MW9YYjRLSlhMaTd2UGJIQkRiS0pTdmRLR0lybTQvOWo3VEZndXhzPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: linkerd-sp-validator-webhook-config + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +webhooks: +- name: linkerd-sp-validator.linkerd.io + namespaceSelector: + matchExpressions: + - key: config.linkerd.io/admission-webhooks + operator: NotIn + values: + - disabled + clientConfig: + service: + name: linkerd-sp-validator + namespace: linkerd + path: "/" + caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJekNDQWd1Z0F3SUJBZ0lSQU1JM0ZKVDVoaGlPdXArMDNZdkxuQmd3RFFZSktvWklodmNOQVFFTEJRQXcKS3pFcE1DY0dBMVVFQXhNZ2JHbHVhMlZ5WkMxemNDMTJZV3hwWkdGMGIzSXViR2x1YTJWeVpDNXpkbU13SGhjTgpNVGt4TURNd01UQXpPVFExV2hjTk1qQXhNREk1TVRBek9UUTFXakFyTVNrd0p3WURWUVFERXlCc2FXNXJaWEprCkxYTndMWFpoYkdsa1lYUnZjaTVzYVc1clpYSmtMbk4yWXpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVAKQURDQ0FRb0NnZ0VCQU5aUDVoYTcvbEVSZUpWRnAvNGExRjVGZEJabjB5Wk1VZTU1amM0MjhYZ0M1bFlxMG4xZwowRUd4R28rV214VWVranQ4SVNBUXpxSitWZHBlaTcwUmJWUkIxaTVhUmk1Qno0a0lpRlZlVnNCRjVjWDN1QTd4CngvZ0xVTVJYSmNNTjhMaUpWaGYwVTdHb1NkZVJjSHhodEE5aWtFai9GUWQyNUY5SFJRVUFHb3Q4bkxEZytBRkYKVjVBZ3VaT1A5WS9HN28zdmcvWldkdVczeVYzcDhDV3hzT0lqYllEejAraUlONnRqNENvTzUvbTVkUlhnVGtUcQphempDalhpWldUa2Y4enJ5WGJnYkJ1RzFHd0E2bHRhM3h5K3BVRG4vcVFoYTZ6bThUNGlhOTUwcUJHZzFNNzc5CnpaWXdtaFduRlVwWCs4QUtNdWFTaVZPREpDZFVJeHFxTjFrQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUUQKQWdLa01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFQQmdOVkhSTUJBZjhFQlRBRApBUUgvTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDSVFnR1Z0Z1dLenEwRGpsSVBQR1I1cGRRUVRyMys5VVVaCmc4ZG5IbFNxMkFlSko1L1c5bllxOXFpckRCRThjZjJxMTF2aitvTHNjcTJnU0g4NTZ0NGR2d3NMOFVUbXkrbVQKQjl5RllMekZia2tZbDZOUHB2M0l1TTBqWU5EeURCMmN0dlF2c0RjRndpU3E1bS9lN29ZQno0Y0tkb3NJdkllcQp5ZjhzM0NoVSt4ZzJxbnBTcElqVm10UnE2emwrMjRMMmtXMElHMDNENEtJazdzbjNtR2lYMXcrbjdjeWtDTzRhCmtLUDN1VURrQkE5OUlXaGFnWmNrOTViODB0SHZNNENzbWV2S29pblFrNGphZnhpc2E4Vlp3dlMzQ3NrOEJvNmcKZFg0SWZUaVRBc3ZUSzU0YzNFcHl0SmlsTDRuclBzWFk4YVIxQ0xaL3YybGFncmJSVk94YwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + failurePolicy: Fail + rules: + - operations: [ "CREATE" , "UPDATE" ] + apiGroups: ["linkerd.io"] + apiVersions: ["v1alpha1", "v1alpha2"] + resources: ["serviceprofiles"] + sideEffects: None +--- +### +### Tap RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["pods", "services", "replicationcontrollers", "namespaces"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "apps"] + resources: ["daemonsets", "deployments", "replicasets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["jobs"] + verbs: ["list" , "get", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap-admin + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["tap.linkerd.io"] + resources: ["*"] + verbs: ["watch"] +--- 
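+# Note: the ClusterRoleBindings and RoleBinding below wire the linkerd-tap
+# ServiceAccount to the tap ClusterRoles, to system:auth-delegator, and to the
+# extension-apiserver-authentication-reader Role in kube-system; the Tap
+# APIService (v1alpha1.tap.linkerd.io) registered further down relies on them.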
+kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-tap +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: linkerd-linkerd-tap-auth-delegator + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-tap + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-linkerd-tap-auth-reader + namespace: kube-system + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-tap-tls + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +type: Opaque +data: + crt.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFVENDQWZtZ0F3SUJBZ0lSQUpVNXAybEE1NXkxSDVrNXRDU3psMWN3RFFZSktvWklodmNOQVFFTEJRQXcKSWpFZ01CNEdBMVVFQXhNWGJHbHVhMlZ5WkMxMFlYQXViR2x1YTJWeVpDNXpkbU13SGhjTk1Ua3hNRE13TVRBegpPVFExV2hjTk1qQXhNREk1TVRBek9UUTFXakFpTVNBd0hnWURWUVFERXhkc2FXNXJaWEprTFhSaGNDNXNhVzVyClpYSmtMbk4yWXpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBUFJUZTdSaVUxT3cKLzlTWFJLVmdlTk81SU50cUhISDFqVjkzNVpzQkRBaWVIdFgyckpQcEpvVHFWYW1pd3J0SThVY25xTERKZlJ3NAp2Zk1kMjB3NVl0Rldvckd0aDVOVWYrZkF5TTFIWWdwZ29ZbGRoTlJIZ2ptT1NXRTV5ZlFhTzdrbzlab1JLTTFqCnJnL2lRRVcrQ2V2Tk9JSWJMTlBDNmJaMlYyNVR4R2FWanNJS0JiR09YSFBBdk45TndTNFN4Y0FnZHRWOEZBY3kKRDAxUVFpZitFNXBTTklneUpPY0huUmpCcHBhdnFFbjVpTzlOWjNKWlRpaUcxL01zMEVTWkF6eFFnS1BPNUgzWApNWkNIV2krT2VJbHF1aGFYWTJtZEczYUV4L3ZFTnlaNlF2QjVOZUJsK3VjbmcrWUwyOVkzWVhEYjUxREs2M1RRCksxTERxUExJV3RrQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXTUJRR0NDc0cKQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQTBHQ1NxR1NJYjNEUUVCQ3dVQQpBNElCQVFBV25QcmZsbXRaSWhOdUJNMWtXdDM1K0wyeTdRWGJ1NmplcmltN0JIZFFTdGRJRlhTSE1wWGdQbnlKClVKdFJ0WGhHc1Z1ak5EZGtFdGlVL2d6Tm44N2tobzUvQkVOb1p5TUduR25kRHh2aXRrcVVnMERwWCttYzg3OFQKa3FEZFFwaytzOEJOWEVNTU9LbTM2aDBWdEkvYVRJUktJV2RvT2tPYzFuZWEyQ3laNlVNTWR5RTBBYnhqVlpsYwpWV1VwMkhvU3BwczNkdUV3c0NhNVFLa1F1c2FxY25HTGsvMHBuU1I1eEFvK0ZSV2R6NXpkZDV3Zjh6VnVXSDh1CmVLQkxGMUdqcmtSSnl3bjE3anY0TDUrU2RGaTVGSlNxNklNV1QwRjQ3eHVheHhwa2c1cEt2YVdHZndMKzRYRVIKT25QSFJZcEhCeGc2VHVha0RkUzljcmFDUmpoZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key.pem: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBOUZON3RHSlRVN0QvMUpkRXBXQjQwN2tnMjJvY2NmV05YM2ZsbXdFTUNKNGUxZmFzCmsra21oT3BWcWFMQ3UwanhSeWVvc01sOUhEaTk4eDNiVERsaTBWYWlzYTJIazFSLzU4REl6VWRpQ21DaGlWMkUKMUVlQ09ZNUpZVG5KOUJvN3VTajFtaEVveldPdUQrSkFSYjRKNjgwNGdoc3MwOExwdG5aWGJsUEVacFdPd2dvRgpzWTVjYzhDODMwM0JMaExGd0NCMjFYd1VCeklQVFZCQ0ovNFRtbEkwaURJazV3ZWRHTUdtbHErb1NmbUk3MDFuCmNsbE9LSWJYOHl6UVJKa0RQRkNBbzg3a2ZkY3hrSWRhTDQ1NGlXcTZGcGRqYVowYmRvVEgrOFEzSm5wQzhIazEKNEdYNjV5ZUQ1Z3ZiMWpkaGNOdm5VTXJyZE5BclVzT284c2hhMlFJREFRQUJBb0lCQUNFNWVRaitsZlk3aTQ2TgpRM2g1dGZTVXFETXVLdEFVWkRVblNNSGJVcGVaYVBnUG8wZzUzRk1XeXE5V3dzUlZGdnBtR2pwd0srTTZsQm9MClpWYm9OdGZnd0I4SlNmWnU5elc4c1pHNHMweFY5NkdHWmhtMzM2MlQrOGFNNVRtY2w2S0dQMS9FRlpoWWhjTUQKTVZwS2pXN1RhV3ZINWtDbHNNSnZOVlI3YjZuRFNLNVZveFpxTERSQjJqeG5DNzVSOWlSOHJaekRPSTVBRmdrdQo2RXA4aEpiM3U4VDc0dWJUbHpEMUY4QkFyOC9LdkljRWtwcVROS2k2eDNTYjFWK0JBM3V5eUh1RW5oVUpnVW8rCitUZ3k2QUdqdk9TQnNtVFFvZExiRW9iWkhWMC9wRXRsQWFreGd3Q3RQa1p3czF2SmZ5TW9Mb3JqMUlGMlRHdisKcnpBNkxVRUNnWUVBL3YrV1J1alNBM2Fkdzk0clh6OHFtckRhakU2MTZQVHZRcE4rK1phMlBIOFVsTSttZmZkbQpldml0RXdaemVreTVSWjNYRjg1MFA1SitIT2ZhaXBURE1tWE9zR2JQNndsZ0xkbE5KdVRxaHVpNEJoWVo4VTA2CnpKSWpiMjJWcUQ2SlNqQXZaT3pNUE5BRjF2ZkFlZWNtS1hNRXdIRkZPVEpLTUs5aDZZTlMzdnNDZ1lFQTlVa3EKS3VlYm8vTVV4S2phY3o1OENHZHZyZ3dKSGlqK01naEVtaHQ3elZyRmxwK0J4aHg3OWxVS3JMQXFRTkcrd3BRUwp3cERpVWtQSDY4ZXFPc3BQRU5GRmZDM3Bqb3liSkQxc3FFanB6RVRtT0pkSDd1KzNKUGZGMkVVNjJMZzZkdE5UCkY4aE9lZ0dGNWthUmE2KzdheHFWMXRCSmdXUERvSDFnRERoL05Uc0NnWUVBMzdvbmczZHFLNWU2dThzRmlyL0YKQll2OWMvVHJMY1VZMXZUS0J2bndVZ3U5WndMNU5MUUh1Z1RNOVZCSHFSbi93OURFa3lLVVkrS0h1NXpBNWEyMwp4VlFGa0Vma1RIY2haNWlzVkYrc1ZVbUJpL1dBTlhKcEVHdDNvNkFtR2tNaitXOWxaY3ROTmwzZGdjcEZtYTd3CmtKVlZxZkszcFlNMkRKa1Q4dWxCb0ZrQ2dZRUF4ZFQrOTUvWi9qKytUbWRKMlkxYUNJVTBwQkg5QUtCYmRRc0sKR2FsMURzblpkNFNxN0pob3V1TVlPdlBUQUhpcS9ORGsySngyalpjeGxjOXlVdng5RExsYzY2MDBKTTd2Q2ltbwppNHBUVjNkeW44NFVMSGlYSkcvYTJIWHQ0b0drVmRaWHZHVElOc3NucmZUWXNSd0pjM2I3eDBneklQTFFBNDliCmVqL1BDYVVDZ1lFQXVpYmp1dmtVVERjakhXQ0F3anNZbHkxTEZMNGJWZ3FLVnl2VG84Ty9aZWxLQXFIRkRGVEMKNXFUNTZycERhVnJhbnU5S3AxTlk5aWZ3Tkc1RkhaMVp0US9wbG9teDZIQjBOSVUwY3dKSE51VEdUMGRUOUo0WApmSVVuQTlYRy9GQjMzMm5xbzQzZlZwVk5MY2JPN3pRT09PaFMxeUVKRFVmSWxjZUlDNURidWFnPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1alpha1.tap.linkerd.io + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +spec: + group: tap.linkerd.io + version: v1alpha1 + groupPriorityMinimum: 1000 + versionPriority: 100 + service: + name: linkerd-tap + namespace: linkerd + caBundle: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFVENDQWZtZ0F3SUJBZ0lSQUpVNXAybEE1NXkxSDVrNXRDU3psMWN3RFFZSktvWklodmNOQVFFTEJRQXcKSWpFZ01CNEdBMVVFQXhNWGJHbHVhMlZ5WkMxMFlYQXViR2x1YTJWeVpDNXpkbU13SGhjTk1Ua3hNRE13TVRBegpPVFExV2hjTk1qQXhNREk1TVRBek9UUTFXakFpTVNBd0hnWURWUVFERXhkc2FXNXJaWEprTFhSaGNDNXNhVzVyClpYSmtMbk4yWXpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBUFJUZTdSaVUxT3cKLzlTWFJLVmdlTk81SU50cUhISDFqVjkzNVpzQkRBaWVIdFgyckpQcEpvVHFWYW1pd3J0SThVY25xTERKZlJ3NAp2Zk1kMjB3NVl0Rldvckd0aDVOVWYrZkF5TTFIWWdwZ29ZbGRoTlJIZ2ptT1NXRTV5ZlFhTzdrbzlab1JLTTFqCnJnL2lRRVcrQ2V2Tk9JSWJMTlBDNmJaMlYyNVR4R2FWanNJS0JiR09YSFBBdk45TndTNFN4Y0FnZHRWOEZBY3kKRDAxUVFpZitFNXBTTklneUpPY0huUmpCcHBhdnFFbjVpTzlOWjNKWlRpaUcxL01zMEVTWkF6eFFnS1BPNUgzWApNWkNIV2krT2VJbHF1aGFYWTJtZEczYUV4L3ZFTnlaNlF2QjVOZUJsK3VjbmcrWUwyOVkzWVhEYjUxREs2M1RRCksxTERxUExJV3RrQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXTUJRR0NDc0cKQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQTBHQ1NxR1NJYjNEUUVCQ3dVQQpBNElCQVFBV25QcmZsbXRaSWhOdUJNMWtXdDM1K0wyeTdRWGJ1NmplcmltN0JIZFFTdGRJRlhTSE1wWGdQbnlKClVKdFJ0WGhHc1Z1ak5EZGtFdGlVL2d6Tm44N2tobzUvQkVOb1p5TUduR25kRHh2aXRrcVVnMERwWCttYzg3OFQKa3FEZFFwaytzOEJOWEVNTU9LbTM2aDBWdEkvYVRJUktJV2RvT2tPYzFuZWEyQ3laNlVNTWR5RTBBYnhqVlpsYwpWV1VwMkhvU3BwczNkdUV3c0NhNVFLa1F1c2FxY25HTGsvMHBuU1I1eEFvK0ZSV2R6NXpkZDV3Zjh6VnVXSDh1CmVLQkxGMUdqcmtSSnl3bjE3anY0TDUrU2RGaTVGSlNxNklNV1QwRjQ3eHVheHhwa2c1cEt2YVdHZndMKzRYRVIKT25QSFJZcEhCeGc2VHVha0RkUzljcmFDUmpoZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +--- +### +### Control Plane PSP +### +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: linkerd-linkerd-control-plane + labels: + linkerd.io/control-plane-ns: linkerd +spec: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + allowedCapabilities: + - NET_ADMIN + - NET_RAW + requiredDropCapabilities: + - ALL + hostNetwork: false + hostIPC: false + hostPID: false + seLinux: + rule: RunAsAny + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + volumes: + - configMap + - emptyDir + - secret + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: linkerd-psp + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ['policy', 'extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - linkerd-linkerd-control-plane +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-psp + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: Role + name: linkerd-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-controller + namespace: linkerd +- kind: ServiceAccount + name: linkerd-destination + namespace: linkerd +- kind: ServiceAccount + name: linkerd-grafana + namespace: linkerd +- kind: ServiceAccount + name: linkerd-heartbeat + namespace: linkerd +- kind: ServiceAccount + name: linkerd-identity + namespace: linkerd +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +- kind: ServiceAccount + name: linkerd-proxy-injector + namespace: linkerd +- kind: ServiceAccount + name: linkerd-sp-validator + namespace: linkerd +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 
linkerd-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +data: + global: | + {"linkerdNamespace":"linkerd","cniEnabled":false,"version":"stable-2.6.0","identityContext":{"trustDomain":"cluster.local","trustAnchorsPem":"-----BEGIN CERTIFICATE-----\nMIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0\neS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5\nMTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j\nYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF\ntW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw\nQDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC\nMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh\niLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM\ngtg+AACcDQ==\n-----END CERTIFICATE-----\n","issuanceLifetime":"86400s","clockSkewAllowance":"20s"},"autoInjectContext":null,"omitWebhookSideEffects":false,"clusterDomain":"cluster.local"} + proxy: | + {"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"100m","requestMemory":"20Mi","limitCpu":"1","limitMemory":"250Mi"},"proxyUid":"2102","logLevel":{"level":"warn,linkerd2_proxy=info"},"disableExternalProfiles":true,"proxyVersion":"stable-2.6.0","proxyInitImageVersion":"v1.2.0"} + install: | + {"uuid":"bd840de5-0554-4c39-8b94-5f574d83e32c","cliVersion":"stable-2.6.0","flags":[{"name":"ha","value":"true"}]} +--- +### +### Identity Controller Service +### +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-identity-issuer + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-issuer-expiry: 2020-10-29T10:39:55Z +data: + crt.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnekNDQVNtZ0F3SUJBZ0lCQVRBS0JnZ3Foa2pPUFFRREFqQXBNU2N3SlFZRFZRUURFeDVwWkdWdWRHbDAKZVM1c2FXNXJaWEprTG1Oc2RYTjBaWEl1Ykc5allXd3dIaGNOTVRreE1ETXdNVEF6T1RNMVdoY05NakF4TURJNQpNVEF6T1RVMVdqQXBNU2N3SlFZRFZRUURFeDVwWkdWdWRHbDBlUzVzYVc1clpYSmtMbU5zZFhOMFpYSXViRzlqCllXd3dXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CQndOQ0FBVDFvZGI0U2dpR2tTOHZoSTJ5VURzOWpvWUYKdFc5N3NCZzNHOVQ1RnJlU1N6b1dMQ2FmbXRPY0lmZzBybVNjR3MxRWg2R2dDUERkRWRpcFZNWWRXWm5WbzBJdwpRREFPQmdOVkhROEJBZjhFQkFNQ0FRWXdIUVlEVlIwbEJCWXdGQVlJS3dZQkJRVUhBd0VHQ0NzR0FRVUZCd01DCk1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0NnWUlLb1pJemowRUF3SURTQUF3UlFJaEFQcnE4cHV4WUQvSkxudmgKaUxMc2o2MWg1NzZGWXUzYWM1YXhUSmxVWmhOOEFpQUcya1VTRngwTTdaU3NmWjNmLzMxSGJzNWU5ZTNYbndSTQpndGcrQUFDY0RRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + key.pem: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUNNZmlkcGlDWnRDSFo0UFgyTG9Lc1ZVajdYM1Z3MkdlWjZ6TzNyQkFEdVRvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFOWFIVytFb0locEV2TDRTTnNsQTdQWTZHQmJWdmU3QVlOeHZVK1JhM2trczZGaXdtbjVyVApuQ0g0Tks1a25Cck5SSWVob0FqdzNSSFlxVlRHSFZtWjFRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQ== +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-identity + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: 
linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: identity + ports: + - name: grpc + port: 8080 + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + name: linkerd-identity + namespace: linkerd +spec: + replicas: 3 + selector: + matchLabels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-identity + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-mode: default + linkerd.io/proxy-version: stable-2.6.0 + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-identity + spec: + nodeSelector: + beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - identity + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - identity + topologyKey: kubernetes.io/hostname + containers: + - args: + - identity + - -log-level=info + image: gcr.io/linkerd-io/controller:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9990 + initialDelaySeconds: 10 + name: identity + ports: + - containerPort: 8080 + name: grpc + - containerPort: 9990 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9990 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "10Mi" + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/identity/issuer + name: identity-issuer + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd2_proxy=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: localhost.:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-identity + volumes: + - configMap: + name: linkerd-config + name: config + - name: identity-issuer + secret: + secretName: linkerd-identity-issuer + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Controller +### +--- +kind: 
Service +apiVersion: v1 +metadata: + name: linkerd-controller-api + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: controller + ports: + - name: http + port: 8085 + targetPort: 8085 +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-destination + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: controller + ports: + - name: grpc + port: 8086 + targetPort: 8086 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + name: linkerd-controller + namespace: linkerd +spec: + replicas: 3 + selector: + matchLabels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-controller + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-mode: default + linkerd.io/proxy-version: stable-2.6.0 + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-controller + spec: + nodeSelector: + beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - controller + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - controller + topologyKey: kubernetes.io/hostname + containers: + - args: + - public-api + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 + - -controller-namespace=linkerd + - -log-level=info + image: gcr.io/linkerd-io/controller:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: public-api + ports: + - containerPort: 8085 + name: http + - containerPort: 9995 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9995 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - args: + - destination + - -addr=:8086 + - -controller-namespace=linkerd + - -enable-h2-upgrade=true + - -log-level=info + image: gcr.io/linkerd-io/controller:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9996 + initialDelaySeconds: 10 + name: destination + ports: + - containerPort: 8086 + name: grpc + - containerPort: 9996 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9996 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: 
/var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd2_proxy=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + 
imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-controller + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Destination Controller Service +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-dst + namespace: linkerd + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: destination + ports: + - name: grpc + port: 8086 + targetPort: 8086 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + name: linkerd-destination + namespace: linkerd +spec: + replicas: 3 + selector: + matchLabels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-destination + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-mode: default + linkerd.io/proxy-version: stable-2.6.0 + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-destination + spec: + nodeSelector: + beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - destination + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - destination + topologyKey: kubernetes.io/hostname + containers: + - args: + - destination + - -addr=:8086 + - -controller-namespace=linkerd + - -enable-h2-upgrade=true + - -log-level=info + image: gcr.io/linkerd-io/controller:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9996 + initialDelaySeconds: 10 + name: destination + ports: + - containerPort: 8086 + name: grpc + - containerPort: 9996 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9996 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd2_proxy=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: localhost.:8086 + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-destination + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity 
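+# Note: this manifest was rendered with the HA flag (see the "install" entry in
+# the linkerd-config ConfigMap above), which is why the identity, controller,
+# and destination Deployments run with replicas: 3 plus zone/hostname anti-affinity.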
+---
+###
+### Heartbeat
+###
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+  name: linkerd-heartbeat
+  namespace: linkerd
+  labels:
+    linkerd.io/control-plane-component: heartbeat
+    linkerd.io/control-plane-ns: linkerd
+  annotations:
+    linkerd.io/created-by: linkerd/cli stable-2.6.0
+spec:
+  schedule: "49 10 * * * "
+  successfulJobsHistoryLimit: 0
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            linkerd.io/control-plane-component: heartbeat
+          annotations:
+            linkerd.io/created-by: linkerd/cli stable-2.6.0
+        spec:
+          nodeSelector:
+            beta.kubernetes.io/os: linux
+          serviceAccountName: linkerd-heartbeat
+          restartPolicy: Never
+          containers:
+          - name: heartbeat
+            image: gcr.io/linkerd-io/controller:stable-2.6.0
+            imagePullPolicy: IfNotPresent
+            args:
+            - "heartbeat"
+            - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090"
+            - "-controller-namespace=linkerd"
+            - "-log-level=info"
+            resources:
+              limits:
+                cpu: "1"
+                memory: "250Mi"
+              requests:
+                cpu: "100m"
+                memory: "50Mi"
+            securityContext:
+              runAsUser: 2103
+---
+###
+### Web
+###
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: linkerd-web
+  namespace: linkerd
+  labels:
+    linkerd.io/control-plane-component: web
+    linkerd.io/control-plane-ns: linkerd
+  annotations:
+    linkerd.io/created-by: linkerd/cli stable-2.6.0
+spec:
+  type: ClusterIP
+  selector:
+    linkerd.io/control-plane-component: web
+  ports:
+  - name: http
+    port: 8084
+    targetPort: 8084
+  - name: admin-http
+    port: 9994
+    targetPort: 9994
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    linkerd.io/created-by: linkerd/cli stable-2.6.0
+  labels:
+    linkerd.io/control-plane-component: web
+    linkerd.io/control-plane-ns: linkerd
+  name: linkerd-web
+  namespace: linkerd
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      linkerd.io/control-plane-component: web
+      linkerd.io/control-plane-ns: linkerd
+      linkerd.io/proxy-deployment: linkerd-web
+  template:
+    metadata:
+      annotations:
+        linkerd.io/created-by: linkerd/cli stable-2.6.0
+        linkerd.io/identity-mode: default
+        linkerd.io/proxy-version: stable-2.6.0
+      labels:
+        linkerd.io/control-plane-component: web
+        linkerd.io/control-plane-ns: linkerd
+        linkerd.io/proxy-deployment: linkerd-web
+    spec:
+      nodeSelector:
+        beta.kubernetes.io/os: linux
+      containers:
+      - args:
+        - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085
+        - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000
+        - -controller-namespace=linkerd
+        - -log-level=info
+        image: gcr.io/linkerd-io/web:stable-2.6.0
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          httpGet:
+            path: /ping
+            port: 9994
+          initialDelaySeconds: 10
+        name: web
+        ports:
+        - containerPort: 8084
+          name: http
+        - containerPort: 9994
+          name: admin-http
+        readinessProbe:
+          failureThreshold: 7
+          httpGet:
+            path: /ready
+            port: 9994
+        resources:
+          limits:
+            cpu: "1"
+            memory: "250Mi"
+          requests:
+            cpu: "100m"
+            memory: "50Mi"
+        securityContext:
+          runAsUser: 2103
+        volumeMounts:
+        - mountPath: /var/run/linkerd/config
+          name: config
+      - env:
+        - name: LINKERD2_PROXY_LOG
+          value: warn,linkerd2_proxy=info
+        - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR
+          value: linkerd-dst.linkerd.svc.cluster.local:8086
+        - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR
+          value: 0.0.0.0:4190
+        - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR
+          value: 0.0.0.0:4191
+        - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR
+          value: 127.0.0.1:4140
+        - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR
+          value: 0.0.0.0:4143
+        - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES
+          value:
svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-web + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: + medium: Memory + name: 
linkerd-identity-end-entity
+---
+###
+### Prometheus
+###
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: linkerd-prometheus-config
+  namespace: linkerd
+  labels:
+    linkerd.io/control-plane-component: prometheus
+    linkerd.io/control-plane-ns: linkerd
+  annotations:
+    linkerd.io/created-by: linkerd/cli stable-2.6.0
+data:
+  prometheus.yml: |-
+    global:
+      scrape_interval: 10s
+      scrape_timeout: 10s
+      evaluation_interval: 10s
+
+    rule_files:
+    - /etc/prometheus/*_rules.yml
+
+    scrape_configs:
+    - job_name: 'prometheus'
+      static_configs:
+      - targets: ['localhost:9090']
+
+    - job_name: 'grafana'
+      kubernetes_sd_configs:
+      - role: pod
+        namespaces:
+          names: ['linkerd']
+      relabel_configs:
+      - source_labels:
+        - __meta_kubernetes_pod_container_name
+        action: keep
+        regex: ^grafana$
+
+    # Required for: https://grafana.com/grafana/dashboards/315
+    - job_name: 'kubernetes-nodes-cadvisor'
+      scheme: https
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        insecure_skip_verify: true
+      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+      kubernetes_sd_configs:
+      - role: node
+      relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+      - target_label: __address__
+        replacement: kubernetes.default.svc:443
+      - source_labels: [__meta_kubernetes_node_name]
+        regex: (.+)
+        target_label: __metrics_path__
+        replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
+      metric_relabel_configs:
+      - source_labels: [__name__]
+        regex: '(container|machine)_(cpu|memory|network|fs)_(.+)'
+        action: keep
+      - source_labels: [__name__]
+        regex: 'container_memory_failures_total' # unneeded large metric
+        action: drop
+
+    - job_name: 'linkerd-controller'
+      kubernetes_sd_configs:
+      - role: pod
+        namespaces:
+          names: ['linkerd']
+      relabel_configs:
+      - source_labels:
+        - __meta_kubernetes_pod_label_linkerd_io_control_plane_component
+        - __meta_kubernetes_pod_container_port_name
+        action: keep
+        regex: (.*);admin-http$
+      - source_labels: [__meta_kubernetes_pod_container_name]
+        action: replace
+        target_label: component
+
+    - job_name: 'linkerd-proxy'
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - source_labels:
+        - __meta_kubernetes_pod_container_name
+        - __meta_kubernetes_pod_container_port_name
+        - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns
+        action: keep
+        regex: ^linkerd-proxy;linkerd-admin;linkerd$
+      - source_labels: [__meta_kubernetes_namespace]
+        action: replace
+        target_label: namespace
+      - source_labels: [__meta_kubernetes_pod_name]
+        action: replace
+        target_label: pod
+      # special case k8s' "job" label, to not interfere with prometheus' "job"
+      # label
+      # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo =>
+      # k8s_job=foo
+      - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job]
+        action: replace
+        target_label: k8s_job
+      # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job
+      - action: labeldrop
+        regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job
+      # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo =>
+      # deployment=foo
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
+      # drop all labels that we just made copies of in the previous labelmap
+      - action: labeldrop
+        regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
+      # __meta_kubernetes_pod_label_linkerd_io_foo=bar =>
+      # foo=bar
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_linkerd_io_(.+)
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: linkerd-prometheus
+  namespace: linkerd
+ labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-mode: default + linkerd.io/proxy-version: stable-2.6.0 + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.11.1 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: + limits: + cpu: "4" + memory: "8192Mi" + requests: + cpu: "300m" + memory: "300Mi" + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus + name: prometheus-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd2_proxy=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory 
+ name: linkerd-identity-end-entity +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-mode: default + linkerd.io/proxy-version: stable-2.6.0 + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + resources: + limits: + cpu: "1" + memory: "1024Mi" + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd2_proxy=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + 
path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Proxy Injector +### +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + name: linkerd-proxy-injector + namespace: linkerd +spec: + replicas: 3 + selector: + matchLabels: + linkerd.io/control-plane-component: proxy-injector + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-mode: default + linkerd.io/proxy-version: stable-2.6.0 + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-proxy-injector + spec: + nodeSelector: + beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname + containers: + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector + ports: + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9995 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd2_proxy=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-proxy-injector + volumes: + - configMap: + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +kind: 
Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +### +### Service Profile Validator +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + name: linkerd-sp-validator + namespace: linkerd +spec: + replicas: 3 + selector: + matchLabels: + linkerd.io/control-plane-component: sp-validator + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-mode: default + linkerd.io/proxy-version: stable-2.6.0 + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-sp-validator + spec: + nodeSelector: + beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - sp-validator + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - sp-validator + topologyKey: kubernetes.io/hostname + containers: + - args: + - sp-validator + - -log-level=info + image: gcr.io/linkerd-io/controller:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9997 + initialDelaySeconds: 10 + name: sp-validator + ports: + - containerPort: 8443 + name: sp-validator + - containerPort: 9997 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9997 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd2_proxy=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-sp-validator + volumes: + - name: tls + secret: + secretName: linkerd-sp-validator-tls + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Tap +### +--- +kind: Service +apiVersion: v1 +metadata: 
+ name: linkerd-tap + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: tap + ports: + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver + port: 443 + targetPort: apiserver +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + name: linkerd-tap + namespace: linkerd +spec: + replicas: 3 + selector: + matchLabels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli stable-2.6.0 + linkerd.io/identity-mode: default + linkerd.io/proxy-version: stable-2.6.0 + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap + spec: + nodeSelector: + beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - tap + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - tap + topologyKey: kubernetes.io/hostname + containers: + - args: + - tap + - -controller-namespace=linkerd + - -log-level=info + image: gcr.io/linkerd-io/controller:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9998 + initialDelaySeconds: 10 + name: tap + ports: + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9998 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd2_proxy=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0 + eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDMwMTAzOTM1WhcNMjAxMDI5 + MTAzOTU1WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j + YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT1odb4SgiGkS8vhI2yUDs9joYF + tW97sBg3G9T5FreSSzoWLCafmtOcIfg0rmScGs1Eh6GgCPDdEdipVMYdWZnVo0Iw + QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIhAPrq8puxYD/JLnvh + iLLsj61h576FYu3ac5axTJlUZhN8AiAG2kUSFx0M7ZSsfZ3f/31Hbs5e9e3XnwRM + gtg+AACcDQ== + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:stable-2.6.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /metrics + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.2.0 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-tap + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls +--- diff --git 
a/cluster-config/namespaces.yaml b/cluster-config/namespaces.yaml index ed672b5..7c9d3da 100644 --- a/cluster-config/namespaces.yaml +++ b/cluster-config/namespaces.yaml @@ -1,14 +1,64 @@ apiVersion: v1 kind: Namespace +metadata: + name: kube-system + labels: + app: kube-system + control-plane: controller-manager +--- +apiVersion: v1 +kind: Namespace metadata: name: dev + labels: + app: dev + annotations: + linkerd.io/inject: enabled --- apiVersion: v1 kind: Namespace metadata: name: staging + labels: + app: staging + annotations: + linkerd.io/inject: enabled --- apiVersion: v1 kind: Namespace metadata: name: production + labels: + app: production + annotations: + linkerd.io/inject: enabled +--- +apiVersion: v1 +kind: Namespace +metadata: + name: falco + labels: + app: falco +--- +apiVersion: v1 +kind: Namespace +metadata: + name: linkerd + labels: + app: linkerd +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ingress + labels: + app: ingress + annotations: + linkerd.io/inject: enabled +--- +apiVersion: v1 +kind: Namespace +metadata: + name: gatekeeper-system + labels: + app: gatekeeper-system diff --git a/cluster-config/nginx-ingress.yaml b/cluster-config/nginx-ingress.yaml new file mode 100644 index 0000000..a7d11ba --- /dev/null +++ b/cluster-config/nginx-ingress.yaml @@ -0,0 +1,548 @@ +--- +# Source: nginx-ingress/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + heritage: Tiller + release: nginx-ingress + name: nginx-ingress + namespace: ingress +--- +# Source: nginx-ingress/templates/default-backend-serviceaccount.yaml + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + heritage: Tiller + release: nginx-ingress + name: nginx-ingress-backend + namespace: ingress + +--- +# Source: nginx-ingress/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + heritage: Tiller + release: nginx-ingress + name: nginx-ingress +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update +--- +# Source: nginx-ingress/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + heritage: Tiller + release: nginx-ingress + name: nginx-ingress +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-ingress +subjects: + - kind: ServiceAccount + name: nginx-ingress + namespace: ingress +--- +# Source: nginx-ingress/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + heritage: Tiller + release: nginx-ingress + name: nginx-ingress + namespace: ingress +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - 
apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader-nginx + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: nginx-ingress/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + heritage: Tiller + release: nginx-ingress + name: nginx-ingress + namespace: ingress +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-ingress +subjects: + - kind: ServiceAccount + name: nginx-ingress + namespace: ingress +--- +# Source: nginx-ingress/templates/controller-service.yaml + +apiVersion: v1 +kind: Service +metadata: + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + component: "controller" + heritage: Tiller + release: nginx-ingress + name: nginx-ingress-controller + namespace: ingress +spec: + clusterIP: "" + externalTrafficPolicy: "Cluster" + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + app: nginx-ingress + component: "controller" + release: nginx-ingress + type: "LoadBalancer" + +--- +# Source: nginx-ingress/templates/default-backend-service.yaml + +apiVersion: v1 +kind: Service +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + component: "default-backend" + heritage: Tiller + release: nginx-ingress + name: nginx-ingress-default-backend + namespace: ingress +spec: + clusterIP: "" + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + selector: + app: nginx-ingress + component: "default-backend" + release: nginx-ingress + type: "ClusterIP" + +--- +# Source: nginx-ingress/templates/controller-deployment.yaml + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + component: "controller" + heritage: Tiller + release: nginx-ingress + name: nginx-ingress-controller + namespace: ingress +spec: + selector: + matchLabels: + app: nginx-ingress + release: nginx-ingress + replicas: 1 + revisionHistoryLimit: 10 + strategy: + {} + + minReadySeconds: 0 + template: + metadata: + labels: + app: nginx-ingress + component: "controller" + release: nginx-ingress + spec: + dnsPolicy: ClusterFirst + containers: + - name: nginx-ingress-controller + image: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1" + imagePullPolicy: "IfNotPresent" + args: + - /nginx-ingress-controller + - --default-backend-service=ingress/nginx-ingress-default-backend + - --election-id=ingress-controller-leader + - --ingress-class=nginx + - --configmap=ingress/nginx-ingress-controller + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 33 + 
allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 300m + memory: 300Mi + requests: + cpu: 300m + memory: 300Mi + + hostNetwork: false + nodeSelector: + beta.kubernetes.io/os: linux + + serviceAccountName: nginx-ingress + terminationGracePeriodSeconds: 60 + +--- +# Source: nginx-ingress/templates/default-backend-deployment.yaml + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx-ingress + chart: nginx-ingress-1.24.4 + component: "default-backend" + heritage: Tiller + release: nginx-ingress + name: nginx-ingress-default-backend + namespace: ingress +spec: + selector: + matchLabels: + app: nginx-ingress + release: nginx-ingress + replicas: 1 + revisionHistoryLimit: 10 + template: + metadata: + labels: + app: nginx-ingress + component: "default-backend" + release: nginx-ingress + spec: + containers: + - name: nginx-ingress-default-backend + image: "k8s.gcr.io/defaultbackend-amd64:1.5" + imagePullPolicy: "IfNotPresent" + args: + securityContext: + runAsUser: 65534 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + ports: + - name: http + containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + memory: 100Mi + + nodeSelector: + beta.kubernetes.io/os: linux + + serviceAccountName: nginx-ingress-backend + terminationGracePeriodSeconds: 60 + +--- +# Source: nginx-ingress/templates/addheaders-configmap.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/job-patch/role.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml + + +--- +# Source: nginx-ingress/templates/admission-webhooks/validating-webhook.yaml + + +--- +# Source: nginx-ingress/templates/controller-configmap.yaml + + +--- +# Source: nginx-ingress/templates/controller-daemonset.yaml + + +--- +# Source: nginx-ingress/templates/controller-hpa.yaml + + +--- +# Source: nginx-ingress/templates/controller-metrics-service.yaml + + +--- +# Source: nginx-ingress/templates/controller-poddisruptionbudget.yaml + + +--- +# 
Source: nginx-ingress/templates/controller-prometheusrules.yaml + + +--- +# Source: nginx-ingress/templates/controller-psp.yaml + + +--- +# Source: nginx-ingress/templates/controller-servicemonitor.yaml + + +--- +# Source: nginx-ingress/templates/controller-webhook-service.yaml + + +--- +# Source: nginx-ingress/templates/default-backend-poddisruptionbudget.yaml + + +--- +# Source: nginx-ingress/templates/default-backend-psp.yaml + +--- +# Source: nginx-ingress/templates/default-backend-role.yaml + +--- +# Source: nginx-ingress/templates/default-backend-rolebinding.yaml + +--- +# Source: nginx-ingress/templates/proxyheaders-configmap.yaml + + +--- +# Source: nginx-ingress/templates/tcp-configmap.yaml + + +--- +# Source: nginx-ingress/templates/udp-configmap.yaml + + diff --git a/cluster-config/np-allow-consolidated.yaml b/cluster-config/np-allow-consolidated.yaml new file mode 100644 index 0000000..512ea32 --- /dev/null +++ b/cluster-config/np-allow-consolidated.yaml @@ -0,0 +1,134 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-dev + namespace: dev +spec: + podSelector: {} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: {} + - namespaceSelector: + matchLabels: + linkerd.io/is-control-plane: "true" + - namespaceSelector: + matchLabels: + app: ingress + egress: + - to: + - podSelector: {} + - namespaceSelector: + matchLabels: + linkerd.io/is-control-plane: "true" + - to: + - namespaceSelector: + matchLabels: + app: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + ports: + - port: 445 + protocol: TCP +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-ingress + namespace: ingress +spec: + podSelector: {} + policyTypes: + - Ingress + - Egress + ingress: + - from: [] + ports: + - port: 80 + protocol: TCP + - from: + - namespaceSelector: + matchLabels: + linkerd.io/is-control-plane: "true" + - podSelector: {} + egress: + - to: + - podSelector: {} + - namespaceSelector: + matchLabels: + linkerd.io/is-control-plane: "true" + - namespaceSelector: + matchLabels: + app: dev + - ports: + - port: 443 + to: + - ipBlock: + cidr: 40.85.169.219/32 + - to: + - namespaceSelector: + matchLabels: + app: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-linkerd + namespace: linkerd +spec: + podSelector: {} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - namespaceSelector: {} + podSelector: + matchExpressions: + - key: linkerd.io/proxy-deployment + operator: Exists + - podSelector: {} + - namespaceSelector: + matchLabels: + app: kube-system + egress: + - to: + - podSelector: {} + - namespaceSelector: + matchExpressions: + - {key: app, operator: In, values: [dev,ingress]} + - to: + - namespaceSelector: + matchLabels: + app: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - ports: + - port: 443 + to: + - ipBlock: + cidr: 40.85.169.219/32 + diff --git a/cluster-config/np-deny-all.yaml b/cluster-config/np-deny-all.yaml new file mode 100644 index 0000000..16daf23 --- /dev/null +++ b/cluster-config/np-deny-all.yaml @@ -0,0 +1,32 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all-dev + namespace: dev +spec: + podSelector: {} + policyTypes: + - 
Ingress + - Egress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all-ingress + namespace: ingress +spec: + podSelector: {} + policyTypes: + - Ingress + - Egress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all-linkerd + namespace: linkerd +spec: + podSelector: {} + policyTypes: + - Ingress + - Egress diff --git a/cluster-config/quotas.yaml b/cluster-config/quotas.yaml index c188e2f..d401b32 100644 --- a/cluster-config/quotas.yaml +++ b/cluster-config/quotas.yaml @@ -5,10 +5,10 @@ metadata: namespace: dev spec: hard: - requests.cpu: "1" - requests.memory: 1Gi - limits.cpu: "2" - limits.memory: 2Gi + requests.cpu: "4" + requests.memory: 4Gi + limits.cpu: "4" + limits.memory: 4Gi persistentvolumeclaims: "5" requests.storage: "10Gi" --- @@ -19,8 +19,8 @@ metadata: namespace: staging spec: hard: - requests.cpu: "2" - requests.memory: 2Gi + requests.cpu: "4" + requests.memory: 4Gi limits.cpu: "4" limits.memory: 4Gi persistentvolumeclaims: "5" @@ -33,8 +33,8 @@ metadata: namespace: production spec: hard: - requests.cpu: "4" - requests.memory: 4Gi + requests.cpu: "8" + requests.memory: 8Gi limits.cpu: "8" limits.memory: 8Gi persistentvolumeclaims: "10" diff --git a/cluster-config/rbac-namespaces.yaml b/cluster-config/rbac-namespaces.yaml index 983a43a..c1e03c5 100644 --- a/cluster-config/rbac-namespaces.yaml +++ b/cluster-config/rbac-namespaces.yaml @@ -43,7 +43,7 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: aksrbac-production-reader + name: aksrbac-staging-reader namespace: production roleRef: apiGroup: rbac.authorization.k8s.io