This commit is contained in:
Kevin Harris 2019-10-30 08:19:18 -04:00
Родитель 111ed29438
Коммит 86f6cadddd
12 изменённых файлов: 8183 добавлений и 28 удалений

120
cluster-config/app.yaml Normal file
Просмотреть файл

@ -0,0 +1,120 @@
# Web frontend: single-replica Deployment, exposed inside the VNet via an
# internal Azure load balancer Service and an nginx Ingress that strips
# Linkerd proxy headers from responses.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: imageclassifierweb
  namespace: dev
  labels:
    app: imageclassifierweb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: imageclassifierweb
  template:
    metadata:
      labels:
        app: imageclassifierweb
    spec:
      containers:
        - name: imageclassifierweb
          image: kevingbb/imageclassifierweb:v1
          imagePullPolicy: Always
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          resources:
            limits:
              memory: 250Mi
              cpu: 250m
            requests:
              memory: 100Mi
              cpu: 100m
      dnsPolicy: ClusterFirst
---
apiVersion: v1
kind: Service
metadata:
  name: imageclassifierweb
  namespace: dev
  labels:
    app: imageclassifierweb
  annotations:
    # Internal (private) Azure LB placed in the given subnet.
    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
    service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "contosofinsvcsubnet"
spec:
  type: LoadBalancer
  loadBalancerIP: 100.64.2.232
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: imageclassifierweb
---
# NOTE(review): extensions/v1beta1 Ingress is deprecated and removed in
# Kubernetes 1.22 — migrate to networking.k8s.io/v1 once the cluster allows.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: imageclassifierweb
  namespace: dev
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/upstream-vhost: imageclassifierweb.dev.svc.cluster.local:80
    # Hide Linkerd-injected headers from external clients.
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_hide_header l5d-remote-ip;
      proxy_hide_header l5d-server-id;
spec:
  rules:
    - http:
        paths:
          - backend:
              serviceName: imageclassifierweb
              servicePort: 80
---
# Background worker: runs as non-root and writes classified images to an
# Azure Files share mounted at /app/assets/images.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: imageclassifierworker
  namespace: dev
  labels:
    app: imageclassifierworker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: imageclassifierworker
  template:
    metadata:
      labels:
        app: imageclassifierworker
    spec:
      securityContext:
        runAsGroup: 1000
        runAsNonRoot: true
        runAsUser: 1000
      containers:
        - name: imageclassifierworker
          image: kevingbb/imageclassifierworker:v1
          imagePullPolicy: IfNotPresent
          env:
            - name: API_BASE_URL
              value: http://imageclassifierweb
          volumeMounts:
            - mountPath: /app/assets/images
              name: fruitshare
          resources:
            limits:
              memory: 1G
              cpu: 1000m
            requests:
              memory: 500Mi
              cpu: 500m
      volumes:
        - name: fruitshare
          azureFile:
            secretName: fruit-secret
            shareName: fruit
            readOnly: false
      dnsPolicy: ClusterFirst

Просмотреть файл

@ -0,0 +1,82 @@
# Azure Monitor for containers agent configuration (read by the omsagent).
# The TOML payloads below are data consumed by the agent — text preserved
# verbatim from the source, only YAML structure restored.
kind: ConfigMap
apiVersion: v1
data:
  schema-version:
    #string.used by agent to parse config. supported versions are {v1}. Configs with other schema versions will be rejected by the agent.
    v1
  config-version:
    #string.used by customer to keep track of this config file's version in their source control/repository (max allowed 10 chars, other chars will be truncated)
    ver1
  log-data-collection-settings: |-
    # Log data collection settings
    [log_collection_settings]
      [log_collection_settings.stdout]
        # In the absense of this configmap, default value for enabled is true
        enabled = true
        # exclude_namespaces setting holds good only if enabled is set to true
        # kube-system log collection is disabled by default in the absence of 'log_collection_settings.stdout' setting. If you want to enable kube-system, remove it from the following setting. 
        # If you want to continue to disable kube-system log collection keep this namespace in the following setting and add any other namespace you want to disable log collection to the array.
        # In the absense of this configmap, default value for exclude_namespaces = ["kube-system"]
        exclude_namespaces = ["kube-system"]
      [log_collection_settings.stderr]
        # Default value for enabled is true
        enabled = true
        # exclude_namespaces setting holds good only if enabled is set to true
        # kube-system log collection is disabled by default in the absence of 'log_collection_settings.stderr' setting. If you want to enable kube-system, remove it from the following setting. 
        # If you want to continue to disable kube-system log collection keep this namespace in the following setting and add any other namespace you want to disable log collection to the array.
        # In the absense of this cofigmap, default value for exclude_namespaces = ["kube-system"]
        exclude_namespaces = ["kube-system"]
      [log_collection_settings.env_var]
        # In the absense of this configmap, default value for enabled is true
        enabled = true
  prometheus-data-collection-settings: |-
    # Custom Prometheus metrics data collection settings
    [prometheus_data_collection_settings.cluster]
      # Cluster level scrape endpoint(s). These metrics will be scraped from agent's Replicaset (singleton)
      #Interval specifying how often to scrape for metrics. This is duration of time and can be specified for supporting settings by combining an integer value and time unit as a string value. Valid time units are ns, us (or µs), ms, s, m, h.
      interval = "1m"
      ## Uncomment the following settings with valid string arrays for prometheus scraping
      #fieldpass = ["metric_to_pass1", "metric_to_pass12"]
      #fielddrop = ["metric_to_drop"]
      # An array of urls to scrape metrics from.
      urls = ["https://linkerd-tap.linkerd.svc.cluster.local/metrics"]
      # An array of Kubernetes services to scrape metrics from.
      kubernetes_services = ["https://metrics-server.kube-system.svc.cluster.local/metrics"]
      # When monitor_kubernetes_pods = true, replicaset will scrape Kubernetes pods for the following prometheus annotations:
      # - prometheus.io/scrape: Enable scraping for this pod
      # - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
      #     set this to `https` & most likely set the tls config.
      # - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
      # - prometheus.io/port: If port is not 9102 use this annotation
      monitor_kubernetes_pods = false
    [prometheus_data_collection_settings.node]
      # Node level scrape endpoint(s). These metrics will be scraped from agent's DaemonSet running in every node in the cluster
      #Interval specifying how often to scrape for metrics. This is duration of time and can be specified for supporting settings by combining an integer value and time unit as a string value. Valid time units are ns, us (or µs), ms, s, m, h.
      interval = "1m"
      ## Uncomment the following settings with valid string arrays for prometheus scraping
      # An array of urls to scrape metrics from. $NODE_IP (all upper case) will substitute of running Node's IP address
      # urls = ["http://$NODE_IP:9103/metrics"]
      #fieldpass = ["metric_to_pass1", "metric_to_pass12"]
      #fielddrop = ["metric_to_drop"]
  agent-settings: |-
    # agent health model feature settings
    [agent_settings.health_model]
      # In the absence of this configmap, default value for enabled is false
      enabled = true
metadata:
  name: container-azm-ms-agentconfig
  namespace: kube-system

3484
cluster-config/falco.yaml Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -0,0 +1,457 @@
# Namespace for the OPA Gatekeeper controller manager.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    control-plane: controller-manager
    controller-tools.k8s.io: "1.0"
  name: gatekeeper-system
---
# CRD for Gatekeeper's Config resource (sync/validation settings).
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  creationTimestamp: null
  labels:
    controller-tools.k8s.io: "1.0"
  name: configs.config.gatekeeper.sh
spec:
  group: config.gatekeeper.sh
  names:
    kind: Config
    plural: configs
  scope: Namespaced
  validation:
    openAPIV3Schema:
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation
            of an object. Servers should convert recognized schemas to the latest
            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this
            object represents. Servers may infer this from the endpoint the client
            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
          type: string
        metadata:
          type: object
        spec:
          properties:
            sync:
              description: Configuration for syncing k8s objects
              properties:
                syncOnly:
                  description: If non-empty, only entries on this list will be replicated
                    into OPA
                  items:
                    properties:
                      group:
                        type: string
                      kind:
                        type: string
                      version:
                        type: string
                    type: object
                  type: array
              type: object
            validation:
              description: Configuration for validation
              properties:
                traces:
                  description: List of requests to trace. Both "user" and "kinds"
                    must be specified
                  items:
                    properties:
                      dump:
                        description: Also dump the state of OPA with the trace. Set
                          to `All` to dump everything.
                        type: string
                      kind:
                        description: Only trace requests of the following GroupVersionKind
                        properties:
                          group:
                            type: string
                          kind:
                            type: string
                          version:
                            type: string
                        type: object
                      user:
                        description: Only trace requests from the specified user
                        type: string
                    type: object
                  type: array
              type: object
          type: object
        status:
          properties:
            byPod:
              description: List of statuses as seen by individual pods
              items:
                properties:
                  allFinalizers:
                    description: List of Group/Version/Kinds with finalizers
                    items:
                      properties:
                        group:
                          type: string
                        kind:
                          type: string
                        version:
                          type: string
                      type: object
                    type: array
                  id:
                    description: a unique identifier for the pod that wrote the status
                    type: string
                type: object
              type: array
          type: object
  version: v1alpha1
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
---
# ClusterRole for the Gatekeeper controller manager.
# NOTE(review): the source listed the constraints.gatekeeper.sh rule twice
# (byte-identical) and a wildcard get/list/watch rule already subsumed by the
# first rule; both redundant entries are removed here — RBAC rules are
# additive, so the granted permissions are unchanged.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  name: gatekeeper-manager-role
rules:
  - apiGroups:
      - '*'
    resources:
      - '*'
    verbs:
      - get
      - list
      - watch
      - update
      - patch
  - apiGroups:
      - config.gatekeeper.sh
    resources:
      - configs
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - config.gatekeeper.sh
    resources:
      - configs/status
    verbs:
      - get
      - update
      - patch
  - apiGroups:
      - constraints.gatekeeper.sh
    resources:
      - '*'
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - templates.gatekeeper.sh
    resources:
      - constrainttemplates
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - templates.gatekeeper.sh
    resources:
      - constrainttemplates/status
    verbs:
      - get
      - update
      - patch
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - admissionregistration.k8s.io
    resources:
      - mutatingwebhookconfigurations
      - validatingwebhookconfigurations
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
---
# Binds the manager role to the default ServiceAccount in gatekeeper-system.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  name: gatekeeper-manager-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gatekeeper-manager-role
subjects:
  - kind: ServiceAccount
    name: default
    namespace: gatekeeper-system
---
# Placeholder Secret populated by Gatekeeper with its webhook TLS material.
apiVersion: v1
kind: Secret
metadata:
  name: gatekeeper-webhook-server-secret
  namespace: gatekeeper-system
---
# Service fronting the Gatekeeper webhook server (443 -> container 8443).
apiVersion: v1
kind: Service
metadata:
  labels:
    control-plane: controller-manager
    controller-tools.k8s.io: "1.0"
  name: gatekeeper-controller-manager-service
  namespace: gatekeeper-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    control-plane: controller-manager
    controller-tools.k8s.io: "1.0"
---
# Gatekeeper controller manager (singleton StatefulSet serving the webhook).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    control-plane: controller-manager
    controller-tools.k8s.io: "1.0"
  name: gatekeeper-controller-manager
  namespace: gatekeeper-system
spec:
  selector:
    matchLabels:
      control-plane: controller-manager
      controller-tools.k8s.io: "1.0"
  serviceName: gatekeeper-controller-manager-service
  template:
    metadata:
      labels:
        control-plane: controller-manager
        controller-tools.k8s.io: "1.0"
    spec:
      containers:
        - args:
            - --auditInterval=30
            - --port=8443
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: SECRET_NAME
              value: gatekeeper-webhook-server-secret
          image: quay.io/open-policy-agent/gatekeeper:v3.0.4-beta.2
          imagePullPolicy: Always
          name: manager
          ports:
            - containerPort: 8443
              name: webhook-server
              protocol: TCP
          resources:
            limits:
              cpu: 100m
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 256Mi
          volumeMounts:
            # Webhook serving certs, mounted read-only from the Secret above.
            - mountPath: /certs
              name: cert
              readOnly: true
      terminationGracePeriodSeconds: 60
      volumes:
        - name: cert
          secret:
            defaultMode: 420
            secretName: gatekeeper-webhook-server-secret
---
# CRD for Gatekeeper ConstraintTemplates (Rego policy templates).
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  creationTimestamp: null
  labels:
    controller-tools.k8s.io: "1.0"
  name: constrainttemplates.templates.gatekeeper.sh
spec:
  group: templates.gatekeeper.sh
  names:
    kind: ConstraintTemplate
    plural: constrainttemplates
  scope: Cluster
  validation:
    openAPIV3Schema:
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation
            of an object. Servers should convert recognized schemas to the latest
            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this
            object represents. Servers may infer this from the endpoint the client
            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
          type: string
        metadata:
          type: object
        spec:
          properties:
            crd:
              properties:
                spec:
                  properties:
                    names:
                      properties:
                        kind:
                          type: string
                      type: object
                    validation:
                      type: object
                  type: object
              type: object
            targets:
              items:
                properties:
                  rego:
                    type: string
                  target:
                    type: string
                type: object
              type: array
          type: object
        status:
          properties:
            byPod:
              items:
                properties:
                  errors:
                    items:
                      properties:
                        code:
                          type: string
                        location:
                          type: string
                        message:
                          type: string
                      required:
                        - code
                        - message
                      type: object
                    type: array
                  id:
                    description: a unique identifier for the pod that wrote the status
                    type: string
                type: object
              type: array
            created:
              type: boolean
          type: object
  version: v1beta1
  versions:
    - name: v1beta1
      served: true
      storage: true
    - name: v1alpha1
      served: true
      storage: false
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []

Просмотреть файл

@ -6,17 +6,17 @@ metadata:
spec:
limits:
- default:
cpu: 0.5
cpu: 500m
memory: 512Mi
defaultRequest:
cpu: 0.25
cpu: 250m
memory: 256Mi
max:
cpu: 1
memory: 1Gi
cpu: 2
memory: 2Gi
min:
cpu: 200m
memory: 256Mi
cpu: 10m
memory: 10Mi
type: Container
- max:
storage: 2Gi
@ -32,17 +32,17 @@ metadata:
spec:
limits:
- default:
cpu: 0.5
cpu: 500m
memory: 512Mi
defaultRequest:
cpu: 0.25
cpu: 250m
memory: 256Mi
max:
cpu: 1
memory: 1Gi
cpu: 2
memory: 2Gi
min:
cpu: 200m
memory: 256Mi
cpu: 10m
memory: 10Mi
type: Container
- max:
storage: 5Gi
@ -58,20 +58,20 @@ metadata:
spec:
limits:
- default:
cpu: 0.5
cpu: 500m
memory: 512Mi
defaultRequest:
cpu: 0.25
cpu: 250m
memory: 256Mi
max:
cpu: 1
memory: 1Gi
cpu: 2
memory: 2Gi
min:
cpu: 200m
memory: 256Mi
cpu: 10m
memory: 10Mi
type: Container
- max:
storage: 10Gi
min:
storage: 1Gi
type: PersistentVolumeClaim
type: PersistentVolumeClaim

3248
cluster-config/linkerd.yaml Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,14 +1,64 @@
# Cluster namespaces. The linkerd.io/inject annotation opts a namespace's
# pods into automatic Linkerd proxy injection.
# NOTE(review): applying a Namespace manifest for the pre-existing
# kube-system only adds/updates its labels.
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
  labels:
    app: kube-system
    control-plane: controller-manager
---
apiVersion: v1
kind: Namespace
metadata:
  name: dev
  labels:
    app: dev
  annotations:
    linkerd.io/inject: enabled
---
apiVersion: v1
kind: Namespace
metadata:
  name: staging
  labels:
    app: staging
  annotations:
    linkerd.io/inject: enabled
---
apiVersion: v1
kind: Namespace
metadata:
  name: production
  labels:
    app: production
  annotations:
    linkerd.io/inject: enabled
---
apiVersion: v1
kind: Namespace
metadata:
  name: falco
  labels:
    app: falco
---
apiVersion: v1
kind: Namespace
metadata:
  name: linkerd
  labels:
    app: linkerd
---
apiVersion: v1
kind: Namespace
metadata:
  name: ingress
  labels:
    app: ingress
  annotations:
    linkerd.io/inject: enabled
---
apiVersion: v1
kind: Namespace
metadata:
  name: gatekeeper-system
  labels:
    app: gatekeeper-system

Просмотреть файл

@ -0,0 +1,548 @@
---
# Source: nginx-ingress/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
heritage: Tiller
release: nginx-ingress
name: nginx-ingress
namespace: ingress
---
# Source: nginx-ingress/templates/default-backend-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
heritage: Tiller
release: nginx-ingress
name: nginx-ingress-backend
namespace: ingress
---
# Source: nginx-ingress/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
heritage: Tiller
release: nginx-ingress
name: nginx-ingress
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- update
- watch
- apiGroups:
- extensions
- "networking.k8s.io" # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- extensions
- "networking.k8s.io" # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
---
# Source: nginx-ingress/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
heritage: Tiller
release: nginx-ingress
name: nginx-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress
subjects:
- kind: ServiceAccount
name: nginx-ingress
namespace: ingress
---
# Source: nginx-ingress/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
heritage: Tiller
release: nginx-ingress
name: nginx-ingress
namespace: ingress
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- update
- watch
- apiGroups:
- extensions
- "networking.k8s.io" # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- "networking.k8s.io" # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader-nginx
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- get
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: nginx-ingress/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
heritage: Tiller
release: nginx-ingress
name: nginx-ingress
namespace: ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress
subjects:
- kind: ServiceAccount
name: nginx-ingress
namespace: ingress
---
# Source: nginx-ingress/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
nginx.ingress.kubernetes.io/ssl-redirect: "false"
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
component: "controller"
heritage: Tiller
release: nginx-ingress
name: nginx-ingress-controller
namespace: ingress
spec:
clusterIP: ""
externalTrafficPolicy: "Cluster"
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app: nginx-ingress
component: "controller"
release: nginx-ingress
type: "LoadBalancer"
---
# Source: nginx-ingress/templates/default-backend-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
component: "default-backend"
heritage: Tiller
release: nginx-ingress
name: nginx-ingress-default-backend
namespace: ingress
spec:
clusterIP: ""
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
selector:
app: nginx-ingress
component: "default-backend"
release: nginx-ingress
type: "ClusterIP"
---
# Source: nginx-ingress/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
component: "controller"
heritage: Tiller
release: nginx-ingress
name: nginx-ingress-controller
namespace: ingress
spec:
selector:
matchLabels:
app: nginx-ingress
release: nginx-ingress
replicas: 1
revisionHistoryLimit: 10
strategy:
{}
minReadySeconds: 0
template:
metadata:
labels:
app: nginx-ingress
component: "controller"
release: nginx-ingress
spec:
dnsPolicy: ClusterFirst
containers:
- name: nginx-ingress-controller
image: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1"
imagePullPolicy: "IfNotPresent"
args:
- /nginx-ingress-controller
- --default-backend-service=ingress/nginx-ingress-default-backend
- --election-id=ingress-controller-leader
- --ingress-class=nginx
- --configmap=ingress/nginx-ingress-controller
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 33
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
resources:
limits:
cpu: 300m
memory: 300Mi
requests:
cpu: 300m
memory: 300Mi
hostNetwork: false
nodeSelector:
beta.kubernetes.io/os: linux
serviceAccountName: nginx-ingress
terminationGracePeriodSeconds: 60
---
# Source: nginx-ingress/templates/default-backend-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nginx-ingress
chart: nginx-ingress-1.24.4
component: "default-backend"
heritage: Tiller
release: nginx-ingress
name: nginx-ingress-default-backend
namespace: ingress
spec:
selector:
matchLabels:
app: nginx-ingress
release: nginx-ingress
replicas: 1
revisionHistoryLimit: 10
template:
metadata:
labels:
app: nginx-ingress
component: "default-backend"
release: nginx-ingress
spec:
containers:
- name: nginx-ingress-default-backend
image: "k8s.gcr.io/defaultbackend-amd64:1.5"
imagePullPolicy: "IfNotPresent"
args:
securityContext:
runAsUser: 65534
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
ports:
- name: http
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
nodeSelector:
beta.kubernetes.io/os: linux
serviceAccountName: nginx-ingress-backend
terminationGracePeriodSeconds: 60
---
# Source: nginx-ingress/templates/addheaders-configmap.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/job-patch/role.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml
---
# Source: nginx-ingress/templates/admission-webhooks/validating-webhook.yaml
---
# Source: nginx-ingress/templates/controller-configmap.yaml
---
# Source: nginx-ingress/templates/controller-daemonset.yaml
---
# Source: nginx-ingress/templates/controller-hpa.yaml
---
# Source: nginx-ingress/templates/controller-metrics-service.yaml
---
# Source: nginx-ingress/templates/controller-poddisruptionbudget.yaml
---
# Source: nginx-ingress/templates/controller-prometheusrules.yaml
---
# Source: nginx-ingress/templates/controller-psp.yaml
---
# Source: nginx-ingress/templates/controller-servicemonitor.yaml
---
# Source: nginx-ingress/templates/controller-webhook-service.yaml
---
# Source: nginx-ingress/templates/default-backend-poddisruptionbudget.yaml
---
# Source: nginx-ingress/templates/default-backend-psp.yaml
---
# Source: nginx-ingress/templates/default-backend-role.yaml
---
# Source: nginx-ingress/templates/default-backend-rolebinding.yaml
---
# Source: nginx-ingress/templates/proxyheaders-configmap.yaml
---
# Source: nginx-ingress/templates/tcp-configmap.yaml
---
# Source: nginx-ingress/templates/udp-configmap.yaml

Просмотреть файл

@ -0,0 +1,134 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-dev
namespace: dev
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
ingress:
- from:
- podSelector: {}
- namespaceSelector:
matchLabels:
linkerd.io/is-control-plane: "true"
- namespaceSelector:
matchLabels:
app: ingress
egress:
- to:
- podSelector: {}
- namespaceSelector:
matchLabels:
linkerd.io/is-control-plane: "true"
- to:
- namespaceSelector:
matchLabels:
app: kube-system
podSelector:
matchLabels:
k8s-app: kube-dns
ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
- to:
ports:
- port: 445
protocol: TCP
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-ingress
namespace: ingress
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
ingress:
- from: []
ports:
- port: 80
protocol: TCP
- from:
- namespaceSelector:
matchLabels:
linkerd.io/is-control-plane: "true"
- podSelector: {}
egress:
- to:
- podSelector: {}
- namespaceSelector:
matchLabels:
linkerd.io/is-control-plane: "true"
- namespaceSelector:
matchLabels:
app: dev
- ports:
- port: 443
to:
- ipBlock:
cidr: 40.85.169.219/32
- to:
- namespaceSelector:
matchLabels:
app: kube-system
podSelector:
matchLabels:
k8s-app: kube-dns
ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-linkerd
namespace: linkerd
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector: {}
podSelector:
matchExpressions:
- key: linkerd.io/proxy-deployment
operator: Exists
- podSelector: {}
- namespaceSelector:
matchLabels:
app: kube-system
egress:
- to:
- podSelector: {}
- namespaceSelector:
matchExpressions:
- {key: app, operator: In, values: [dev,ingress]}
- to:
- namespaceSelector:
matchLabels:
app: kube-system
podSelector:
matchLabels:
k8s-app: kube-dns
ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
- ports:
- port: 443
to:
- ipBlock:
cidr: 40.85.169.219/32

Просмотреть файл

@ -0,0 +1,32 @@
# Default-deny policies: selecting all pods ({}) with both policyTypes and
# no ingress/egress rules blocks all traffic in each namespace; the
# corresponding allow-* policies then whitelist required flows.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-dev
  namespace: dev
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-ingress
  namespace: ingress
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-linkerd
  namespace: linkerd
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress

Просмотреть файл

@ -5,10 +5,10 @@ metadata:
namespace: dev
spec:
hard:
requests.cpu: "1"
requests.memory: 1Gi
limits.cpu: "2"
limits.memory: 2Gi
requests.cpu: "4"
requests.memory: 4Gi
limits.cpu: "4"
limits.memory: 4Gi
persistentvolumeclaims: "5"
requests.storage: "10Gi"
---
@ -19,8 +19,8 @@ metadata:
namespace: staging
spec:
hard:
requests.cpu: "2"
requests.memory: 2Gi
requests.cpu: "4"
requests.memory: 4Gi
limits.cpu: "4"
limits.memory: 4Gi
persistentvolumeclaims: "5"
@ -33,8 +33,8 @@ metadata:
namespace: production
spec:
hard:
requests.cpu: "4"
requests.memory: 4Gi
requests.cpu: "8"
requests.memory: 8Gi
limits.cpu: "8"
limits.memory: 8Gi
persistentvolumeclaims: "10"

Просмотреть файл

@ -43,7 +43,7 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: aksrbac-production-reader
name: aksrbac-staging-reader
namespace: production
roleRef:
apiGroup: rbac.authorization.k8s.io