Matt Boersma 2020-03-30 11:30:39 -06:00 коммит произвёл GitHub
Родитель 959247d3aa
Коммит 6381d647eb
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 4AEE18F83AFDEB23
30 изменённых файлов: 9244 добавления и 1 удаление

2
Jenkinsfile поставляемый
Просмотреть файл

@ -5,7 +5,7 @@ defaultEnv = [
CREATE_VNET: false,
] + params
def k8sVersions = ["1.14", "1.15", "1.16", "1.17", "1.18"]
def k8sVersions = ["1.14", "1.15", "1.16", "1.17", "1.18", "1.19"]
def latestReleasedVersion = "1.17"
def tasks = [:]
def testConfigs = []

Просмотреть файл

@ -570,6 +570,11 @@ func TestExampleAPIModels(t *testing.T) {
apiModelPath: "../examples/kubernetes-releases/kubernetes1.18.json",
setArgs: defaultSet,
},
{
name: "1.19 example",
apiModelPath: "../examples/kubernetes-releases/kubernetes1.19.json",
setArgs: defaultSet,
},
{
name: "vmss",
apiModelPath: "../examples/kubernetes-vmss/kubernetes.json",

Просмотреть файл

@ -0,0 +1,35 @@
{
  "apiVersion": "vlabs",
  "properties": {
    "orchestratorProfile": {
      "orchestratorType": "Kubernetes",
      "orchestratorRelease": "1.19"
    },
    "masterProfile": {
      "count": 1,
      "dnsPrefix": "",
      "vmSize": "Standard_D2_v3"
    },
    "agentPoolProfiles": [
      {
        "name": "agentpool1",
        "count": 3,
        "vmSize": "Standard_D2_v3"
      }
    ],
    "linuxProfile": {
      "adminUsername": "azureuser",
      "ssh": {
        "publicKeys": [
          {
            "keyData": ""
          }
        ]
      }
    },
    "servicePrincipalProfile": {
      "clientId": "",
      "secret": ""
    }
  }
}

Просмотреть файл

@ -0,0 +1,66 @@
# azure-cni-networkmonitor: DaemonSet running the Azure CNI network monitor on
# every Linux node. Mounts /var/run (named "ebtables-rule-repo" — presumably
# where CNI rule state lives; confirm against the container image), /var/log,
# and the CNI bin directory for telemetry.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: azure-cni-networkmonitor
  namespace: kube-system
  labels:
    app: azure-cnms
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: azure-cnms
  template:
    metadata:
      labels:
        k8s-app: azure-cnms
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      priorityClassName: system-node-critical
      # Tolerate everything (including master NoSchedule and any NoExecute
      # taint) so the monitor is scheduled on every node.
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Equal
        value: "true"
        effect: NoSchedule
      - operator: "Exists"
        effect: NoExecute
      - operator: "Exists"
        effect: NoSchedule
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: azure-cnms
        image: {{ContainerImage "azure-cni-networkmonitor"}}
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: true
        env:
        # Expose the node name to the container as HOSTNAME.
        - name: HOSTNAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        volumeMounts:
        - name: ebtables-rule-repo
          mountPath: /var/run
        - name: log
          mountPath: /var/log
        - name: telemetry
          mountPath: /opt/cni/bin
      hostNetwork: true
      volumes:
      - name: log
        hostPath:
          path: /var/log
          type: Directory
      - name: ebtables-rule-repo
        hostPath:
          path: /var/run/
          type: Directory
      - name: telemetry
        hostPath:
          path: /opt/cni/bin
          type: Directory

Просмотреть файл

@ -0,0 +1,84 @@
# azure-ip-masq-agent: per-node agent that programs iptables MASQUERADE rules,
# configured via the azure-ip-masq-agent-config ConfigMap below.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: azure-ip-masq-agent
  namespace: kube-system
  labels:
    component: azure-ip-masq-agent
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    tier: node
spec:
  selector:
    matchLabels:
      k8s-app: azure-ip-masq-agent
      tier: node
  template:
    metadata:
      labels:
        k8s-app: azure-ip-masq-agent
        tier: node
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      priorityClassName: system-node-critical
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/os: linux
      # Tolerate all taints so masquerade rules exist on every node.
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Equal
        value: "true"
        effect: NoSchedule
      - operator: "Exists"
        effect: NoExecute
      - operator: "Exists"
        effect: NoSchedule
      containers:
      - name: azure-ip-masq-agent
        image: {{ContainerImage "ip-masq-agent"}}
        imagePullPolicy: IfNotPresent
        args:
        - --enable-ipv6={{ContainerConfig "enable-ipv6"}}
        securityContext:
          privileged: true
        volumeMounts:
        - name: azure-ip-masq-agent-config-volume
          mountPath: /etc/config
        resources:
          requests:
            cpu: {{ContainerCPUReqs "ip-masq-agent"}}
            memory: {{ContainerMemReqs "ip-masq-agent"}}
          limits:
            cpu: {{ContainerCPULimits "ip-masq-agent"}}
            memory: {{ContainerMemLimits "ip-masq-agent"}}
      volumes:
      - name: azure-ip-masq-agent-config-volume
        configMap:
          name: azure-ip-masq-agent-config
---
# Agent configuration: CIDRs that must NOT be masqueraded. masqLinkLocal is
# only enabled when a CNI-specific non-masquerade CIDR is configured.
apiVersion: v1
kind: ConfigMap
metadata:
  name: azure-ip-masq-agent-config
  namespace: kube-system
  labels:
    component: azure-ip-masq-agent
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  ip-masq-agent: |-
    nonMasqueradeCIDRs:
    - {{ContainerConfig "non-masquerade-cidr"}}
    {{- if ContainerConfig "secondary-non-masquerade-cidr"}}
    - {{ContainerConfig "secondary-non-masquerade-cidr"}}
    {{end -}}
    {{- if ContainerConfig "non-masq-cni-cidr"}}
    - {{ContainerConfig "non-masq-cni-cidr"}}
    masqLinkLocal: true
    {{else}}
    masqLinkLocal: false
    {{end -}}
    resyncInterval: 60s

Просмотреть файл

@ -0,0 +1,40 @@
# Kubernetes apiserver audit policy. Rules are evaluated in order; the first
# matching rule decides the audit level for a request.
apiVersion: audit.k8s.io/v1
kind: Policy
# Skip the RequestReceived stage globally to halve event volume.
omitStages:
- RequestReceived
rules:
# Log pod changes with full request and response bodies.
- level: RequestResponse
  resources:
  - group: ""
    resources: ["pods"]
# Log "pods/log" and "pods/status" at Metadata level only.
- level: Metadata
  resources:
  - group: ""
    resources: ["pods/log", "pods/status"]
# Don't log kube-proxy watches on endpoints/services.
- level: None
  users: ["system:kube-proxy"]
  verbs: ["watch"]
  resources:
  - group: ""
    resources: ["endpoints", "services"]
# Don't log authenticated users hitting discovery/version URLs.
- level: None
  userGroups: ["system:authenticated"]
  nonResourceURLs:
  - /api*
  - /version
# Log kube-system ConfigMap requests at Request level (no response body).
- level: Request
  resources:
  - group: ""
    resources: ["configmaps"]
  namespaces: ["kube-system"]
# Log secret access at Request level (never log secret response bodies).
- level: Request
  resources:
  - group: ""
    resources: ["secrets"]
# Log all other core and extensions resources at Request level.
- level: Request
  resources:
  - group: ""
  - group: extensions
# Catch-all: everything else at Metadata level.
- level: Metadata
  omitStages:
  - RequestReceived

Просмотреть файл

@ -0,0 +1,234 @@
# aad-pod-identity addon: NMI (node-managed-identity) DaemonSet intercepts IMDS
# token requests on each node; MIC (managed-identity-controller) Deployment
# reconciles the aadpodidentity.k8s.io CRDs against Azure.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: aad-pod-id-nmi-service-account
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
# NOTE(review): apiextensions.k8s.io/v1beta1 CRDs are removed in k8s 1.22;
# migrate to v1 (requires a versions list and schema) when dropping old k8s.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: azureassignedidentities.aadpodidentity.k8s.io
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  group: aadpodidentity.k8s.io
  version: v1
  names:
    kind: AzureAssignedIdentity
    plural: azureassignedidentities
  scope: Namespaced
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: azureidentitybindings.aadpodidentity.k8s.io
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  group: aadpodidentity.k8s.io
  version: v1
  names:
    kind: AzureIdentityBinding
    plural: azureidentitybindings
  scope: Namespaced
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: azureidentities.aadpodidentity.k8s.io
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  group: aadpodidentity.k8s.io
  version: v1
  names:
    kind: AzureIdentity
    singular: azureidentity
    plural: azureidentities
  scope: Namespaced
---
# NMI needs read access across the cluster to match pods to identities.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: aad-pod-id-nmi-role
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: aad-pod-id-nmi-binding
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: aad-pod-id-nmi-binding
subjects:
- kind: ServiceAccount
  name: aad-pod-id-nmi-service-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: aad-pod-id-nmi-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    component: nmi
    tier: node
    k8s-app: aad-pod-id
  name: nmi
  namespace: kube-system
spec:
  selector:
    matchLabels:
      component: nmi
      tier: node
  template:
    metadata:
      labels:
        component: nmi
        tier: node
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: aad-pod-id-nmi-service-account
      # hostNetwork + NET_ADMIN: NMI redirects node-local IMDS traffic.
      hostNetwork: true
      containers:
      - name: nmi
        image: {{ContainerImage "nmi"}}
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: {{ContainerCPUReqs "nmi"}}
            memory: {{ContainerMemReqs "nmi"}}
          limits:
            cpu: {{ContainerCPULimits "nmi"}}
            memory: {{ContainerMemLimits "nmi"}}
        args:
        - "--host-ip=$(HOST_IP)"
        - "--node=$(NODE_NAME)"
        env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          privileged: true
          capabilities:
            add:
            - NET_ADMIN
      nodeSelector:
        beta.kubernetes.io/os: linux
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: aad-pod-id-mic-service-account
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: aad-pod-id-mic-role
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["*"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: [ "list", "watch" ]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
- apiGroups: ["aadpodidentity.k8s.io"]
  resources: ["azureidentitybindings", "azureidentities"]
  verbs: ["get", "list", "watch", "post"]
- apiGroups: ["aadpodidentity.k8s.io"]
  resources: ["azureassignedidentities"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: aad-pod-id-mic-binding
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: aad-pod-id-mic-binding
subjects:
- kind: ServiceAccount
  name: aad-pod-id-mic-service-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: aad-pod-id-mic-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    component: mic
    k8s-app: aad-pod-id
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: mic
  namespace: kube-system
spec:
  selector:
    matchLabels:
      component: mic
  template:
    metadata:
      labels:
        component: mic
    spec:
      serviceAccountName: aad-pod-id-mic-service-account
      containers:
      - name: mic
        image: {{ContainerImage "mic"}}
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: {{ContainerCPUReqs "mic"}}
            memory: {{ContainerMemReqs "mic"}}
          limits:
            cpu: {{ContainerCPULimits "mic"}}
            memory: {{ContainerMemLimits "mic"}}
        args:
        - --cloudconfig=/etc/kubernetes/azure.json
        - --logtostderr
        volumeMounts:
        # Cloud-provider credentials from the host, mounted read-only.
        - name: k8s-azure-file
          mountPath: /etc/kubernetes/azure.json
          readOnly: true
      volumes:
      - name: k8s-azure-file
        hostPath:
          path: /etc/kubernetes/azure.json

Просмотреть файл

@ -0,0 +1,125 @@
# aci-connector addon (virtual-kubelet with the Azure Container Instances
# provider). <creds>/<cert>/<key>/<rgName> are placeholders substituted by the
# addon generator before apply.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: aci-connector
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: aci-connector
  labels:
    app: aci-connector
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - services
  - endpoints
  - events
  - secrets
  - nodes
  - nodes/status
  - pods/status
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: aci-connector
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: aci-connector
subjects:
- kind: ServiceAccount
  name: aci-connector
  namespace: kube-system
---
apiVersion: v1
kind: Secret
metadata:
  name: aci-connector-secret
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
type: Opaque
data:
  credentials.json: <creds>
  cert.pem: <cert>
  key.pem: <key>
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: aci-connector
  namespace: kube-system
  labels:
    app: aci-connector
    name: aci-connector
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      app: aci-connector
  template:
    metadata:
      labels:
        app: aci-connector
    spec:
      serviceAccountName: aci-connector
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: aci-connector
        image: {{ContainerImage "aci-connector"}}
        imagePullPolicy: Always
        env:
        - name: KUBELET_PORT
          value: "10250"
        - name: AZURE_AUTH_LOCATION
          value: /etc/virtual-kubelet/credentials.json
        - name: ACI_RESOURCE_GROUP
          value: <rgName>
        - name: ACI_REGION
          value: {{ContainerConfig "region"}}
        - name: APISERVER_CERT_LOCATION
          value: /etc/virtual-kubelet/cert.pem
        - name: APISERVER_KEY_LOCATION
          value: /etc/virtual-kubelet/key.pem
        - name: VKUBELET_POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        resources:
          requests:
            cpu: {{ContainerCPUReqs "aci-connector"}}
            memory: {{ContainerMemReqs "aci-connector"}}
          limits:
            cpu: {{ContainerCPULimits "aci-connector"}}
            memory: {{ContainerMemLimits "aci-connector"}}
        volumeMounts:
        - name: credentials
          mountPath: "/etc/virtual-kubelet"
          readOnly: true
        command: ["virtual-kubelet"]
        args: ["--provider", "azure", "--nodename", "{{ContainerConfig "nodeName"}}" , "--os", "{{ContainerConfig "os"}}", "--taint", "{{ContainerConfig "taint"}}"]
      volumes:
      - name: credentials
        secret:
          secretName: aci-connector-secret
#EOF

Просмотреть файл

@ -0,0 +1,276 @@
---
# Azure cloud-provider RBAC plus StorageClasses. The StorageClass set is
# selected by template flags: CSI classes when the cloud-controller-manager is
# used, otherwise legacy in-tree unmanaged/managed-disk classes.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
  name: system:azure-cloud-provider
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs:
  - create
  - patch
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
  name: system:azure-cloud-provider
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:azure-cloud-provider
subjects:
- kind: ServiceAccount
  name: azure-cloud-provider
  namespace: kube-system
---
# Lets the volume binder read/create secrets (azure-file credentials).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:azure-persistent-volume-binder
  labels:
    kubernetes.io/cluster-service: "true"
rules:
- apiGroups: ['']
  resources: ['secrets']
  verbs: ['get','create']
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:azure-persistent-volume-binder
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
  name: system:azure-persistent-volume-binder
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
  name: system:azure-cloud-provider-secret-getter
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
  name: system:azure-cloud-provider-secret-getter
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:azure-cloud-provider-secret-getter
subjects:
- kind: ServiceAccount
  name: azure-cloud-provider
  namespace: kube-system
{{- if UsesCloudControllerManager}}
---
# CSI StorageClasses (disk.csi.azure.com / file.csi.azure.com).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: default
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
provisioner: disk.csi.azure.com
parameters:
  skuName: Standard_LRS
  kind: managed
  cachingMode: ReadOnly
reclaimPolicy: Delete
allowVolumeExpansion: true
{{- if HasAvailabilityZones}}
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: topology.disk.csi.azure.com/zone
    values: {{GetZones}}
{{else}}
volumeBindingMode: Immediate
{{end}}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-premium
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
provisioner: disk.csi.azure.com
parameters:
  skuName: Premium_LRS
  kind: managed
  cachingMode: ReadOnly
reclaimPolicy: Delete
allowVolumeExpansion: true
{{- if HasAvailabilityZones}}
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: topology.disk.csi.azure.com/zone
    values: {{GetZones}}
{{else}}
volumeBindingMode: Immediate
{{end}}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-standard
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
provisioner: disk.csi.azure.com
parameters:
  skuName: Standard_LRS
  kind: managed
  cachingMode: ReadOnly
reclaimPolicy: Delete
allowVolumeExpansion: true
{{- if HasAvailabilityZones}}
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: topology.disk.csi.azure.com/zone
    values: {{GetZones}}
{{else}}
volumeBindingMode: Immediate
{{end}}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: azurefile
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
provisioner: file.csi.azure.com
parameters:
  skuName: Standard_LRS
reclaimPolicy: Delete
volumeBindingMode: Immediate
{{else}}
{{- if NeedsStorageAccountStorageClasses}}
---
# Legacy in-tree classes backed by storage accounts (unmanaged disks).
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: default
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/azure-disk
parameters:
  cachingmode: ReadOnly
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: unmanaged-premium
  annotations:
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/azure-disk
parameters:
  kind: shared
  storageaccounttype: Premium_LRS
  cachingmode: ReadOnly
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: unmanaged-standard
  annotations:
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/azure-disk
parameters:
  kind: shared
  storageaccounttype: Standard_LRS
  cachingmode: ReadOnly
{{- if not IsAzureStackCloud}}
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: azurefile
  annotations:
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/azure-file
parameters:
  skuName: Standard_LRS
{{end}}
{{end}}
{{- if NeedsManagedDiskStorageClasses}}
---
# Legacy in-tree classes backed by managed disks.
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: default
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/azure-disk
parameters:
  kind: Managed
  storageaccounttype: Standard_LRS
  cachingmode: ReadOnly
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: managed-premium
  annotations:
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/azure-disk
parameters:
  kind: Managed
  storageaccounttype: Premium_LRS
  cachingmode: ReadOnly
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: managed-standard
  annotations:
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/azure-disk
parameters:
  kind: Managed
  storageaccounttype: Standard_LRS
  cachingmode: ReadOnly
{{- if not IsAzureStackCloud}}
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: azurefile
  annotations:
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: kubernetes.io/azure-file
parameters:
  skuName: Standard_LRS
{{end -}}
{{end -}}
{{end -}}

Просмотреть файл

@ -0,0 +1,110 @@
# azure-npm: Azure network policy manager DaemonSet plus RBAC.
# Fix: removed `namespace: kube-system` from the ClusterRole and
# ClusterRoleBinding — those kinds are cluster-scoped and the field is ignored.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: azure-npm
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: azure-npm-binding
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: azure-npm
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: azure-npm
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    app: azure-npm
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: azure-npm
  template:
    metadata:
      labels:
        k8s-app: azure-npm
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      priorityClassName: system-node-critical
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: azure-npm
        image: {{ContainerImage "azure-npm-daemonset"}}
        securityContext:
          privileged: true
        resources:
          requests:
            cpu: {{ContainerCPUReqs "azure-npm-daemonset"}}
            memory: {{ContainerMemReqs "azure-npm-daemonset"}}
          limits:
            cpu: {{ContainerCPULimits "azure-npm-daemonset"}}
            memory: {{ContainerMemLimits "azure-npm-daemonset"}}
        env:
        - name: HOSTNAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        volumeMounts:
        # Shared iptables lock so npm and kube-proxy don't race.
        - name: xtables-lock
          mountPath: /run/xtables.lock
        - name: log
          mountPath: /var/log
      hostNetwork: true
      volumes:
      - name: log
        hostPath:
          path: /var/log
          type: Directory
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: File
      serviceAccountName: azure-npm

Просмотреть файл

@ -0,0 +1,46 @@
# blobfuse-flexvol-installer: drops the blobfuse FlexVolume driver into the
# kubelet volume-plugin directory on every Linux node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: blobfuse-flexvol-installer
  namespace: kube-system
  labels:
    k8s-app: blobfuse
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      name: blobfuse
  template:
    metadata:
      labels:
        name: blobfuse
        kubernetes.io/cluster-service: "true"
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: blobfuse-flexvol-installer
        image: {{ContainerImage "blobfuse-flexvolume"}}
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: {{ContainerCPUReqs "blobfuse-flexvolume"}}
            memory: {{ContainerMemReqs "blobfuse-flexvolume"}}
          limits:
            cpu: {{ContainerCPULimits "blobfuse-flexvolume"}}
            memory: {{ContainerMemLimits "blobfuse-flexvolume"}}
        volumeMounts:
        # Host volume-plugin dir where the flexvolume binary is installed.
        - name: volplugins
          mountPath: /etc/kubernetes/volumeplugins/
        - name: varlog
          mountPath: /var/log/
      volumes:
      - name: varlog
        hostPath:
          path: /var/log/
      - name: volplugins
        hostPath:
          path: /etc/kubernetes/volumeplugins/
      nodeSelector:
        beta.kubernetes.io/os: linux

Просмотреть файл

@ -0,0 +1,751 @@
{{- /* Source: calico/templates/calico-config.yaml
This ConfigMap is used to configure a self-hosted Calico installation. */}}
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: "EnsureExists"
data:
{{- /* You must set a non-zero value for Typha replicas below. */}}
typha_service_name: "calico-typha"
{{- /* The CNI network configuration to install on each node. The special
values in this config will be automatically populated. */}}
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": 1500,
"ipam": <calicoIPAMConfig>,
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
{{- /* Source: calico/templates/kdd-crds.yaml */}}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
{{- /* Source: calico/templates/rbac.yaml
Include a clusterrole for the calico-node DaemonSet,
and bind it to the calico-node serviceaccount. */}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
rules:
{{- /* The CNI plugin needs to get pods, nodes, and namespaces. */}}
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
{{- /* Used to discover service IPs for advertisement. */}}
- watch
- list
{{- /* Used to discover Typhas. */}}
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
{{- /* Needed for clearing NodeNetworkUnavailable flag. */}}
- patch
{{- /* Calico stores some configuration information in node annotations. */}}
- update
{{- /* Watch for changes to Kubernetes NetworkPolicies. */}}
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
{{- /* Used by Calico for policy information. */}}
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
{{- /* The CNI plugin patches pods/status. */}}
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
{{- /* Calico monitors various CRDs for config. */}}
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
{{- /* Calico must create and update some CRDs on startup. */}}
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
{{- /* Calico stores some configuration information on the node. */}}
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
{{- /* These permissions are only requried for upgrade from v2.6, and can
be removed after upgrade or on fresh installations. */}}
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-node
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: "Reconcile"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
{{- /* Source: calico/templates/calico-typha.yaml
This manifest creates a Service, which will be backed by Calico's Typha daemon.
Typha sits in between Felix and the API server, reducing Calico's load on the API server. */}}
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
selector:
k8s-app: calico-typha
---
{{- /* This manifest creates a Deployment of Typha to back the above service. */}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
{{- /* Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
typha_service_name variable in the calico-config ConfigMap above.
We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
(when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. */}}
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
k8s-app: calico-typha
template:
metadata:
labels:
k8s-app: calico-typha
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
priorityClassName: system-cluster-critical
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
{{- /* Mark the pod as a critical add-on for rescheduling. */}}
- key: CriticalAddonsOnly
operator: Exists
{{- /* Since Calico can't network a pod until Typha is up, we need to run Typha itself
as a host-networked pod. */}}
serviceAccountName: calico-node
containers:
- image: {{ContainerImage "calico-typha"}}
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
env:
{{- /* Enable "info" logging by default. Can be set to "debug" to increase verbosity. */}}
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
{{- /* Disable logging to file and syslog since those don't make sense in Kubernetes. */}}
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
{{- /* Monitor the Kubernetes API to find the number of running instances and rebalance
connections. */}}
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
{{- /* Configure route aggregation based on pod CIDR. */}}
- name: USE_POD_CIDR
value: "true"
- name: FELIX_INTERFACEPREFIX
value: "azv"
# Uncomment these lines to enable prometheus metrics. Since Typha is host-networked,
# this opens a port on the host, which may need to be secured.
#- name: TYPHA_PROMETHEUSMETRICSENABLED
# value: "true"
#- name: TYPHA_PROMETHEUSMETRICSPORT
# value: "9093"
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
---
{{- /* Source: calico/templates/calico-node.yaml
This manifest installs the calico-node container, as well
as the CNI plugins and network config on
each master and worker node in a Kubernetes cluster. */}}
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
annotations:
cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
spec:
priorityClassName: system-cluster-critical
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
{{- /* Make sure calico-node gets scheduled on all nodes. */}}
- effect: NoSchedule
operator: Exists
{{- /* Mark the pod as a critical add-on for rescheduling. */}}
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
{{- /* Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force deletion":
https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. */}}
terminationGracePeriodSeconds: 0
initContainers:
{{- /* This container installs the CNI binaries
and CNI network config file on each node. */}}
- name: install-cni
image: {{ContainerImage "calico-cni"}}
command: ["/install-cni.sh"]
env:
{{- /* Name of the CNI config file to create. */}}
- name: CNI_CONF_NAME
value: "10-calico.conflist"
{{- /* The CNI network config to install on each node. */}}
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
{{- /* Set the hostname based on the k8s node name. */}}
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- /* Prevents the container from sleeping forever. */}}
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
containers:
{{- /* Runs calico-node container on each Kubernetes node. This
container programs network policy and routes on each
host. */}}
- name: calico-node
image: {{ContainerImage "calico-node"}}
env:
{{- /* Use Kubernetes API as the backing datastore. */}}
- name: DATASTORE_TYPE
value: "kubernetes"
{{- /* Configure route aggregation based on pod CIDR. */}}
- name: USE_POD_CIDR
value: "true"
{{- /* Typha support: controlled by the ConfigMap. */}}
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: calico-config
key: typha_service_name
{{- /* Wait for the datastore. */}}
- name: WAIT_FOR_DATASTORE
value: "true"
{{- /* Set based on the k8s node name. */}}
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- /* Don't enable BGP. */}}
- name: CALICO_NETWORKING_BACKEND
value: "none"
{{- /* Cluster type to identify the deployment type */}}
- name: CLUSTER_TYPE
value: "k8s"
{{- /* The default IPv4 pool to create on startup if none exists. Pod IPs will be
chosen from this range. Changing this value after installation will have
no effect. This should fall within `--cluster-cidr`. */}}
- name: CALICO_IPV4POOL_CIDR
value: "<kubeClusterCidr>"
{{- /* Disable file logging so `kubectl logs` works. */}}
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
{{- /* Set Felix endpoint to host default action to ACCEPT. */}}
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
{{- /* Disable IPv6 on Kubernetes. */}}
- name: FELIX_IPV6SUPPORT
value: "false"
{{- /* Set Felix logging to "info" */}}
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
- name: CALICO_IPV4POOL_IPIP
value: "off"
- name: FELIX_INTERFACEPREFIX
value: "azv"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
volumes:
{{- /* Used by calico-node. */}}
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
{{- /* Used to install CNI. */}}
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
---
{{- /* Typha Horizontal Autoscaler ConfigMap */}}
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-typha-horizontal-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: "EnsureExists"
data:
ladder: |-
{
"coresToReplicas": [],
"nodesToReplicas":
[
[1, 1],
[10, 2],
[100, 3],
[250, 4],
[500, 5],
[1000, 6],
[1500, 7],
[2000, 8]
]
}
---
{{- /* Typha Horizontal Autoscaler Deployment */}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-typha-horizontal-autoscaler
namespace: kube-system
labels:
k8s-app: calico-typha-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: "Reconcile"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: calico-typha-autoscaler
template:
metadata:
labels:
k8s-app: calico-typha-autoscaler
spec:
priorityClassName: system-cluster-critical
securityContext:
supplementalGroups: [65534]
fsGroup: 65534
containers:
- image: {{ContainerImage "calico-cluster-proportional-autoscaler"}}
name: autoscaler
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=calico-typha-horizontal-autoscaler
- --target=deployment/calico-typha
- --logtostderr=true
- --v=2
resources:
requests:
cpu: 10m
limits:
cpu: 10m
serviceAccountName: typha-cpha
---
{{- /* Typha Horizontal Autoscaler Cluster Role */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: typha-cpha
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: "Reconcile"
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
---
{{- /* Typha Horizontal Autoscaler Cluster Role Binding */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: typha-cpha
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: "Reconcile"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: typha-cpha
subjects:
- kind: ServiceAccount
name: typha-cpha
namespace: kube-system
---
{{- /* Typha Horizontal Autoscaler Role.
       Grants the cluster-proportional-autoscaler read access to its ladder
       ConfigMap and get/update on the calico-typha Deployment scale
       subresource. */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: "Reconcile"
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get"]
{{- /* The deployments/scale subresource moved from the "extensions" API group
       to "apps", and "extensions" was removed in Kubernetes 1.16. Grant both
       groups so scaling works on every Kubernetes version this template
       supports (1.14 through 1.19). */}}
- apiGroups: ["extensions", "apps"]
  resources: ["deployments/scale"]
  verbs: ["get", "update"]
---
{{- /* Typha Horizontal Autoscaler Role Binding */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: typha-cpha
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: "Reconcile"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: typha-cpha
subjects:
- kind: ServiceAccount
name: typha-cpha
namespace: kube-system
---
{{- /* Typha Horizontal Autoscaler Service Account */}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: typha-cpha
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"

Просмотреть файл

@ -0,0 +1,856 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: "EnsureExists"
data:
etcd-config: |-
---
endpoints:
- https://cilium-etcd-client.kube-system.svc:2379
ca-file: '/var/lib/etcd-secrets/etcd-client-ca.crt'
key-file: '/var/lib/etcd-secrets/etcd-client.key'
cert-file: '/var/lib/etcd-secrets/etcd-client.crt'
debug: "false"
enable-ipv4: "true"
enable-ipv6: "false"
clean-cilium-state: "false"
clean-cilium-bpf-state: "false"
monitor-aggregation-level: "none"
ct-global-max-entries-tcp: "524288"
ct-global-max-entries-other: "262144"
preallocate-bpf-maps: "false"
sidecar-istio-proxy-image: "cilium/istio_proxy"
tunnel: "vxlan"
cluster-name: default
flannel-master-device: ""
flannel-uninstall-on-exit: "false"
flannel-manage-existing-containers: "false"
tofqdns-enable-poller: "false"
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: "Reconcile"
name: cilium
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
template:
metadata:
annotations:
cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
prometheus.io/port: "9090"
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
labels:
k8s-app: cilium
kubernetes.io/cluster-service: "true"
spec:
containers:
- args:
- --debug=$(CILIUM_DEBUG)
- --kvstore=etcd
- --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
- name: CILIUM_ENABLE_IPV4
valueFrom:
configMapKeyRef:
key: enable-ipv4
name: cilium-config
optional: true
- name: CILIUM_ENABLE_IPV6
valueFrom:
configMapKeyRef:
key: enable-ipv6
name: cilium-config
optional: true
- name: CILIUM_PROMETHEUS_SERVE_ADDR
valueFrom:
configMapKeyRef:
key: prometheus-serve-addr
name: cilium-metrics-config
optional: true
- name: CILIUM_LEGACY_HOST_ALLOWS_WORLD
valueFrom:
configMapKeyRef:
key: legacy-host-allows-world
name: cilium-config
optional: true
- name: CILIUM_SIDECAR_ISTIO_PROXY_IMAGE
valueFrom:
configMapKeyRef:
key: sidecar-istio-proxy-image
name: cilium-config
optional: true
- name: CILIUM_TUNNEL
valueFrom:
configMapKeyRef:
key: tunnel
name: cilium-config
optional: true
- name: CILIUM_MONITOR_AGGREGATION_LEVEL
valueFrom:
configMapKeyRef:
key: monitor-aggregation-level
name: cilium-config
optional: true
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: CILIUM_CLUSTER_NAME
valueFrom:
configMapKeyRef:
key: cluster-name
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_ID
valueFrom:
configMapKeyRef:
key: cluster-id
name: cilium-config
optional: true
- name: CILIUM_GLOBAL_CT_MAX_TCP
valueFrom:
configMapKeyRef:
key: ct-global-max-entries-tcp
name: cilium-config
optional: true
- name: CILIUM_GLOBAL_CT_MAX_ANY
valueFrom:
configMapKeyRef:
key: ct-global-max-entries-other
name: cilium-config
optional: true
- name: CILIUM_PREALLOCATE_BPF_MAPS
valueFrom:
configMapKeyRef:
key: preallocate-bpf-maps
name: cilium-config
optional: true
- name: CILIUM_FLANNEL_MASTER_DEVICE
valueFrom:
configMapKeyRef:
key: flannel-master-device
name: cilium-config
optional: true
- name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
valueFrom:
configMapKeyRef:
key: flannel-uninstall-on-exit
name: cilium-config
optional: true
- name: CILIUM_FLANNEL_MANAGE_EXISTING_CONTAINERS
valueFrom:
configMapKeyRef:
key: flannel-manage-existing-containers
name: cilium-config
optional: true
- name: CILIUM_DATAPATH_MODE
valueFrom:
configMapKeyRef:
key: datapath-mode
name: cilium-config
optional: true
- name: CILIUM_IPVLAN_MASTER_DEVICE
valueFrom:
configMapKeyRef:
key: ipvlan-master-device
name: cilium-config
optional: true
- name: CILIUM_INSTALL_IPTABLES_RULES
valueFrom:
configMapKeyRef:
key: install-iptables-rules
name: cilium-config
optional: true
- name: CILIUM_MASQUERADE
valueFrom:
configMapKeyRef:
key: masquerade
name: cilium-config
optional: true
- name: CILIUM_AUTO_DIRECT_NODE_ROUTES
valueFrom:
configMapKeyRef:
key: auto-direct-node-routes
name: cilium-config
optional: true
- name: CILIUM_TOFQDNS_ENABLE_POLLER
valueFrom:
configMapKeyRef:
key: tofqdns-enable-poller
name: cilium-config
optional: true
image: {{ContainerImage "cilium-agent"}}
imagePullPolicy: Always
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
exec:
command:
- cilium
- status
failureThreshold: 10
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: cilium-agent
ports:
- containerPort: 9090
hostPort: 9090
name: prometheus
protocol: TCP
readinessProbe:
exec:
command:
- cilium
- status
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/run/docker.sock
name: docker-socket
readOnly: true
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: /var/lib/etcd-secrets
name: etcd-secrets
readOnly: true
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
hostPID: false
initContainers:
- command:
- /init-container.sh
env:
- name: CLEAN_CILIUM_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CLEAN_CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
image: {{ContainerImage "clean-cilium-state"}}
imagePullPolicy: IfNotPresent
name: clean-cilium-state
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/not-ready
operator: Exists
volumes:
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /var/run/docker.sock
type: Socket
name: docker-socket
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
- name: etcd-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-etcd-secrets
- name: clustermesh-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-clustermesh
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cilium-operator
  namespace: kube-system
  {{- /* A YAML mapping must not repeat a key: the original manifest declared
         "labels" twice under metadata, so most parsers silently dropped the
         first set (io.cilium/app, name). All labels are merged into a single
         block here. */}}
  labels:
    io.cilium/app: operator
    name: cilium-operator
    addonmanager.kubernetes.io/mode: "Reconcile"
spec:
replicas: 1
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- args:
- --debug=$(CILIUM_DEBUG)
- --kvstore=etcd
- --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
command:
- cilium-operator
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_NAME
valueFrom:
configMapKeyRef:
key: cluster-name
name: cilium-config
optional: true
- name: CILIUM_CLUSTER_ID
valueFrom:
configMapKeyRef:
key: cluster-id
name: cilium-config
optional: true
- name: CILIUM_DISABLE_ENDPOINT_CRD
valueFrom:
configMapKeyRef:
key: disable-endpoint-crd
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: cilium-aws
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: cilium-aws
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
key: AWS_DEFAULT_REGION
name: cilium-aws
optional: true
image: {{ContainerImage "cilium-operator"}}
imagePullPolicy: Always
name: cilium-operator
volumeMounts:
- mountPath: /var/lib/etcd-config
name: etcd-config-path
readOnly: true
- mountPath: /var/lib/etcd-secrets
name: etcd-secrets
readOnly: true
dnsPolicy: ClusterFirst
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
volumes:
- configMap:
defaultMode: 420
items:
- key: etcd-config
path: etcd.config
name: cilium-config
name: etcd-config-path
- name: etcd-secrets
secret:
defaultMode: 420
optional: true
secretName: cilium-etcd-secrets
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
rules:
- apiGroups:
- ""
resources:
- pods
- deployments
- componentstatuses
verbs:
- '*'
- apiGroups:
- ""
resources:
- services
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: cilium-operator
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-etcd-operator
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
rules:
- apiGroups:
- etcd.database.coreos.com
resources:
- etcdclusters
verbs:
- get
- delete
- create
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- delete
- get
- create
- apiGroups:
- ""
resources:
- deployments
verbs:
- delete
- create
- get
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- delete
- get
- apiGroups:
- apps
resources:
- deployments
verbs:
- delete
- create
- get
- update
- apiGroups:
- ""
resources:
- componentstatuses
verbs:
- get
- apiGroups:
- extensions
resources:
- deployments
verbs:
- delete
- create
- get
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
name: cilium-etcd-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-etcd-operator
subjects:
- kind: ServiceAccount
name: cilium-etcd-operator
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: etcd-operator
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
rules:
- apiGroups:
- etcd.database.coreos.com
resources:
- etcdclusters
- etcdbackups
- etcdrestores
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- events
- deployments
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
- apiGroups:
- extensions
resources:
- deployments
verbs:
- create
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: etcd-operator
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: etcd-operator
subjects:
- kind: ServiceAccount
name: cilium-etcd-sa
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-etcd-operator
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-etcd-sa
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.cilium/app: etcd-operator
name: cilium-etcd-operator
addonmanager.kubernetes.io/mode: "Reconcile"
name: cilium-etcd-operator
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
io.cilium/app: etcd-operator
name: cilium-etcd-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
io.cilium/app: etcd-operator
name: cilium-etcd-operator
spec:
containers:
- command:
- /usr/bin/cilium-etcd-operator
env:
- name: CILIUM_ETCD_OPERATOR_CLUSTER_DOMAIN
value: cluster.local
- name: CILIUM_ETCD_OPERATOR_ETCD_CLUSTER_SIZE
value: "3"
- name: CILIUM_ETCD_OPERATOR_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_ETCD_OPERATOR_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: CILIUM_ETCD_OPERATOR_POD_UID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.uid
image: {{ContainerImage "cilium-etcd-operator"}}
imagePullPolicy: IfNotPresent
name: cilium-etcd-operator
dnsPolicy: ClusterFirst
hostNetwork: true
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium-etcd-operator
serviceAccountName: cilium-etcd-operator
tolerations:
- operator: Exists
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
labels:
addonmanager.kubernetes.io/mode: "Reconcile"
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- nodes
- endpoints
- componentstatuses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- create
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumendpoints
- ciliumendpoints/status
verbs:
- '*'
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: "Reconcile"

Просмотреть файл

@ -0,0 +1,88 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: cloud-node-manager
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-node-manager
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cloud-node-manager
labels:
k8s-app: cloud-node-manager
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch","list","get","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cloud-node-manager
labels:
k8s-app: cloud-node-manager
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-node-manager
subjects:
- kind: ServiceAccount
name: cloud-node-manager
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cloud-node-manager
  namespace: kube-system
  labels:
    component: cloud-node-manager
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: cloud-node-manager
  template:
    metadata:
      labels:
        k8s-app: cloud-node-manager
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      priorityClassName: system-node-critical
      serviceAccountName: cloud-node-manager
      hostNetwork: true {{- /* required to fetch correct hostname; the "-" trims the trailing space from rendered output */}}
      nodeSelector:
        beta.kubernetes.io/os: linux
      tolerations:
      {{- /* Run on every node, including masters and nodes that are
             not-ready or tainted for eviction. */}}
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Equal
        value: "true"
        effect: NoSchedule
      - operator: "Exists"
        effect: NoExecute
      - operator: "Exists"
        effect: NoSchedule
      containers:
      - name: cloud-node-manager
        image: {{ContainerImage "cloud-node-manager"}}
        imagePullPolicy: IfNotPresent
        command: ["cloud-node-manager"]
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
          limits:
            cpu: 2000m
            memory: 512Mi

Просмотреть файл

@ -0,0 +1,271 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: {{GetMode}}
name: cluster-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cluster-autoscaler
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: {{GetMode}}
rules:
- apiGroups: [""]
resources: ["events", "endpoints"]
verbs: ["create", "patch"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["endpoints"]
resourceNames: ["cluster-autoscaler"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch", "list", "get", "update"]
- apiGroups: [""]
resources:
- "pods"
- "services"
- "replicationcontrollers"
- "persistentvolumeclaims"
- "persistentvolumes"
verbs: ["watch", "list", "get"]
- apiGroups: ["extensions"]
resources: ["replicasets", "daemonsets"]
verbs: ["watch", "list", "get"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["watch", "list"]
- apiGroups: ["apps"]
resources: ["statefulsets","replicasets","daemonsets"]
verbs: ["watch","list","get"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses", "csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["watch", "list", "get"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
resourceNames: ["cluster-autoscaler"]
resources: ["leases"]
verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cluster-autoscaler
namespace: kube-system
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: {{GetMode}}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "list", "watch"]
- apiGroups: [""]
resources: ["configmaps"]
resourceNames:
- "cluster-autoscaler-status"
- "cluster-autoscaler-priority-expander"
verbs: ["delete", "get", "update", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-autoscaler
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: {{GetMode}}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-autoscaler
subjects:
- kind: ServiceAccount
name: cluster-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cluster-autoscaler
namespace: kube-system
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: {{GetMode}}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cluster-autoscaler
subjects:
- kind: ServiceAccount
name: cluster-autoscaler
namespace: kube-system
---
apiVersion: v1
data:
ClientID: <clientID>
ClientSecret: <clientSec>
ResourceGroup: <rg>
SubscriptionID: <subID>
TenantID: <tenantID>
VMType: {{GetBase64EncodedVMType}}
kind: Secret
metadata:
name: cluster-autoscaler-azure
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: {{GetMode}}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: cluster-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: {{GetMode}}
name: cluster-autoscaler
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: cluster-autoscaler
template:
metadata:
labels:
app: cluster-autoscaler
spec:
priorityClassName: system-node-critical{{GetHostNetwork}}
serviceAccountName: cluster-autoscaler
tolerations:
- effect: NoSchedule
operator: "Equal"
value: "true"
key: node-role.kubernetes.io/master
nodeSelector:
kubernetes.io/role: master
beta.kubernetes.io/os: linux
containers:
- image: {{ContainerImage "cluster-autoscaler"}}
imagePullPolicy: IfNotPresent
name: cluster-autoscaler
resources:
limits:
cpu: {{ContainerCPULimits "cluster-autoscaler"}}
memory: {{ContainerMemLimits "cluster-autoscaler"}}
requests:
cpu: {{ContainerCPUReqs "cluster-autoscaler"}}
memory: {{ContainerMemReqs "cluster-autoscaler"}}
command:
- ./cluster-autoscaler
- --logtostderr=true
- --cloud-provider=azure
- --skip-nodes-with-local-storage=false
- --scan-interval={{ContainerConfig "scan-interval"}}
- --expendable-pods-priority-cutoff={{ContainerConfig "expendable-pods-priority-cutoff"}}
- --ignore-daemonsets-utilization={{ContainerConfig "ignore-daemonsets-utilization"}}
- --ignore-mirror-pods-utilization={{ContainerConfig "ignore-mirror-pods-utilization"}}
- --max-autoprovisioned-node-group-count={{ContainerConfig "max-autoprovisioned-node-group-count"}}
- --max-empty-bulk-delete={{ContainerConfig "max-empty-bulk-delete"}}
- --max-failing-time={{ContainerConfig "max-failing-time"}}
- --max-graceful-termination-sec={{ContainerConfig "max-graceful-termination-sec"}}
- --max-inactivity={{ContainerConfig "max-inactivity"}}
- --max-node-provision-time={{ContainerConfig "max-node-provision-time"}}
- --max-nodes-total={{ContainerConfig "max-nodes-total"}}
- --max-total-unready-percentage={{ContainerConfig "max-total-unready-percentage"}}
- --memory-total={{ContainerConfig "memory-total"}}
- --min-replica-count={{ContainerConfig "min-replica-count"}}
- --namespace=kube-system
- --new-pod-scale-up-delay={{ContainerConfig "new-pod-scale-up-delay"}}
- --node-autoprovisioning-enabled={{ContainerConfig "node-autoprovisioning-enabled"}}
- --ok-total-unready-count={{ContainerConfig "ok-total-unready-count"}}
- --scale-down-candidates-pool-min-count={{ContainerConfig "scale-down-candidates-pool-min-count"}}
- --scale-down-candidates-pool-ratio={{ContainerConfig "scale-down-candidates-pool-ratio"}}
- --scale-down-delay-after-add={{ContainerConfig "scale-down-delay-after-add"}}
- --scale-down-delay-after-delete={{ContainerConfig "scale-down-delay-after-delete"}}
- --scale-down-delay-after-failure={{ContainerConfig "scale-down-delay-after-failure"}}
- --scale-down-enabled={{ContainerConfig "scale-down-enabled"}}
- --scale-down-non-empty-candidates-count={{ContainerConfig "scale-down-non-empty-candidates-count"}}
- --scale-down-unneeded-time={{ContainerConfig "scale-down-unneeded-time"}}
- --scale-down-unready-time={{ContainerConfig "scale-down-unready-time"}}
- --scale-down-utilization-threshold={{ContainerConfig "scale-down-utilization-threshold"}}
- --skip-nodes-with-local-storage={{ContainerConfig "skip-nodes-with-local-storage"}}
- --skip-nodes-with-system-pods={{ContainerConfig "skip-nodes-with-system-pods"}}
- --stderrthreshold={{ContainerConfig "stderrthreshold"}}
- --unremovable-node-recheck-timeout={{ContainerConfig "unremovable-node-recheck-timeout"}}
- --v={{ContainerConfig "v"}}
- --write-status-configmap={{ContainerConfig "write-status-configmap"}}
- --balance-similar-node-groups={{ContainerConfig "balance-similar-node-groups"}}
{{GetClusterAutoscalerNodesConfig}}
env:
- name: ARM_CLOUD
value: "{{GetCloud}}"
- name: ARM_SUBSCRIPTION_ID
valueFrom:
secretKeyRef:
key: SubscriptionID
name: cluster-autoscaler-azure
- name: ARM_RESOURCE_GROUP
valueFrom:
secretKeyRef:
key: ResourceGroup
name: cluster-autoscaler-azure
- name: ARM_TENANT_ID
valueFrom:
secretKeyRef:
key: TenantID
name: cluster-autoscaler-azure
- name: ARM_CLIENT_ID
valueFrom:
secretKeyRef:
key: ClientID
name: cluster-autoscaler-azure
- name: ARM_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: ClientSecret
name: cluster-autoscaler-azure
- name: ARM_VM_TYPE
valueFrom:
secretKeyRef:
key: VMType
name: cluster-autoscaler-azure
- name: ARM_USE_MANAGED_IDENTITY_EXTENSION
value: "{{UseManagedIdentity}}"
volumeMounts:
- mountPath: /etc/ssl/certs/ca-certificates.crt
name: ssl-certs
readOnly: true{{GetVolumeMounts}}
dnsPolicy: ClusterFirst
restartPolicy: Always
volumes:
- hostPath:
path: /etc/ssl/certs/ca-certificates.crt
type: ""
name: ssl-certs{{GetVolumes}}
#EOF

Просмотреть файл

@ -0,0 +1,156 @@
{{- /* This file was pulled from:
https://github.com/coreos/flannel (HEAD at time of pull was 4973e02e539378) */}}
# ServiceAccount used by the flannel DaemonSet pods.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
# CNI config plus flannel net-conf; <kubeClusterCidr> is substituted with the
# cluster pod CIDR before apply.
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "<kubeClusterCidr>",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
# flannel daemon on every amd64 Linux node; a second container installs the
# CNI conf file onto the host and then idles.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      tier: node
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
        beta.kubernetes.io/os: linux
      priorityClassName: system-node-critical
      tolerations:
      - key: node.kubernetes.io/not-ready
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        operator: Equal
        value: "true"
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      serviceAccountName: flannel
      containers:
      - name: kube-flannel
        image: {{ContainerImage "kube-flannel"}}
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      - name: install-cni
        image: {{ContainerImage "install-cni"}}
        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
---
{{- /* This file was pulled from:
https://github.com/coreos/flannel (HEAD at time of pull was 4973e02e539378) */}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system

Просмотреть файл

@ -0,0 +1,170 @@
# Heapster monitoring aggregator plus an addon-resizer "nanny" sidecar that
# re-sizes heapster's resource requests as the cluster grows.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
# NOTE(review): the original carried two consecutive "---" separators here,
# which produced an empty YAML document; the duplicate has been dropped.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: system:heapster-with-nanny
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
rules:
- apiGroups:
  - extensions
  - apps
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
  - update
  - patch
- apiGroups:
  - ""
  resources:
  - events
  - namespaces
  - nodes
  - pods
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: system:heapster-with-nanny
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster-with-nanny
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
---
# Empty NannyConfiguration; the pod_nanny reads overrides from this ConfigMap.
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
  template:
    metadata:
      labels:
        k8s-app: heapster
    spec:
      priorityClassName: system-node-critical
      containers:
      - image: {{ContainerImage "heapster"}}
        imagePullPolicy: IfNotPresent
        name: heapster
        resources:
          requests:
            cpu: {{ContainerCPUReqs "heapster"}}
            memory: {{ContainerMemReqs "heapster"}}
          limits:
            cpu: {{ContainerCPULimits "heapster"}}
            memory: {{ContainerMemLimits "heapster"}}
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8082
            scheme: HTTP
          initialDelaySeconds: 180
          timeoutSeconds: 5
        command:
        - /heapster
        - --source=kubernetes.summary_api:''
      - image: {{ContainerImage "heapster-nanny"}}
        imagePullPolicy: IfNotPresent
        name: heapster-nanny
        resources:
          requests:
            cpu: {{ContainerCPUReqs "heapster-nanny"}}
            memory: {{ContainerMemReqs "heapster-nanny"}}
          limits:
            cpu: {{ContainerCPULimits "heapster-nanny"}}
            memory: {{ContainerMemLimits "heapster-nanny"}}
        env:
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: heapster-config-volume
          mountPath: /etc/config
        command:
        - /pod_nanny
        - --config-dir=/etc/config
        - --cpu=80m
        - --extra-cpu=0.5m
        - --memory=140Mi
        - --extra-memory=4Mi
        - --threshold=5
        - --deployment=heapster
        - --container=heapster
        - --poll-period=300000
        - --estimator=exponential
      volumes:
      - name: heapster-config-volume
        configMap:
          name: heapster-config
      serviceAccountName: heapster
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      nodeSelector:
        beta.kubernetes.io/os: linux

Просмотреть файл

@ -0,0 +1,49 @@
# DaemonSet that installs the Azure Key Vault FlexVolume driver into the
# kubelet volume-plugin directory on every Linux node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: keyvault-flexvolume
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: keyvault-flexvolume
  namespace: kube-system
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app: keyvault-flexvolume
  template:
    metadata:
      labels:
        app: keyvault-flexvolume
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      priorityClassName: system-cluster-critical
      # NOTE(review): the original manifest had a dangling empty "tolerations:"
      # key (null value, equivalent to no tolerations); it has been removed.
      # Add toleration entries here if scheduling onto tainted nodes is desired.
      containers:
      - name: keyvault-flexvolume
        image: {{ContainerImage "keyvault-flexvolume"}}
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: {{ContainerCPUReqs "keyvault-flexvolume"}}
            memory: {{ContainerMemReqs "keyvault-flexvolume"}}
          limits:
            cpu: {{ContainerCPULimits "keyvault-flexvolume"}}
            memory: {{ContainerMemLimits "keyvault-flexvolume"}}
        env:
        - name: TARGET_DIR
          value: /etc/kubernetes/volumeplugins
        volumeMounts:
        - mountPath: /etc/kubernetes/volumeplugins
          name: volplugins
      volumes:
      - hostPath:
          path: /etc/kubernetes/volumeplugins
        name: volplugins
      nodeSelector:
        beta.kubernetes.io/os: linux

Просмотреть файл

@ -0,0 +1,209 @@
{{- /* Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
in sync with this file. */}}
# kube-dns addon: Service, ServiceAccount, config ConfigMap and the three-container
# Deployment (kubedns, dnsmasq, sidecar).
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ContainerConfig "clusterIP"}}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
# Optional overrides ConfigMap mounted into the kubedns container.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  {{- /* replicas: not specified here:
  1. In order to make Addon Manager do not reconcile this replicas parameter.
  2. Default is 1.
  3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. */}}
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-node-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: {{ContainerImage "kubedns"}}
        imagePullPolicy: IfNotPresent
        resources:
          {{- /* TODO: Set memory limits when we've profiled the container for large
          clusters, then set request = limit to keep this container in
          guaranteed class. Currently, this container falls into the
          "burstable" category so the kubelet doesn't backoff from restarting it. */}}
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain={{ContainerConfig "domain"}}.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: {{ContainerImage "dnsmasq"}}
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: {{ContainerImage "sidecar"}}
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ContainerConfig "domain"}},5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ContainerConfig "domain"}},5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default
      serviceAccountName: kube-dns
      nodeSelector:
        beta.kubernetes.io/os: linux

Просмотреть файл

@ -0,0 +1,128 @@
---
# kube-proxy addon: KubeProxyConfiguration rendered from the API model
# (ContainerConfig lookups) plus the DaemonSet that runs it on every node.
# NOTE(review): the "{{ContainerConfig "featureGates"}}" line is emitted at
# column 0 and is assumed to carry its own indentation in the rendered value —
# TODO confirm against the template-rendering code.
apiVersion: v1
kind: ConfigMap
data:
  config.yaml: |
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    clientConnection:
      kubeconfig: /var/lib/kubelet/kubeconfig
    clusterCIDR: "{{ContainerConfig "cluster-cidr"}}"
    mode: "{{ContainerConfig "proxy-mode"}}"
    {{- if ContainerConfig "bind-address"}}
    bindAddress: "{{ContainerConfig "bind-address"}}"
    {{end}}
    {{- if ContainerConfig "healthz-bind-address"}}
    healthzBindAddress: "{{ContainerConfig "healthz-bind-address"}}"
    {{end}}
    {{- if ContainerConfig "metrics-bind-address"}}
    metricsBindAddress: "{{ContainerConfig "metrics-bind-address"}}"
    {{end}}
    featureGates:
{{ContainerConfig "featureGates"}}
metadata:
  name: kube-proxy-config
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
    component: kube-proxy
    tier: node
    k8s-app: kube-proxy
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
    component: kube-proxy
    tier: node
    k8s-app: kube-proxy
  name: kube-proxy
  namespace: kube-system
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 50%
  selector:
    matchLabels:
      component: kube-proxy
      tier: node
      k8s-app: kube-proxy
  template:
    metadata:
      labels:
        component: kube-proxy
        tier: node
        k8s-app: kube-proxy
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-node-critical
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Equal
        value: "true"
        effect: NoSchedule
      - operator: "Exists"
        effect: NoExecute
      - operator: "Exists"
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      containers:
      - command:
        - kube-proxy
        - --config=/var/lib/kube-proxy/config.yaml
        image: {{ContainerImage "kube-proxy"}}
        imagePullPolicy: IfNotPresent
        name: kube-proxy
        resources:
          requests:
            cpu: 100m
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ssl-certs-host
          readOnly: true
        - mountPath: /etc/kubernetes
          name: etc-kubernetes
          readOnly: true
        - mountPath: /var/lib/kubelet/kubeconfig
          name: kubeconfig
          readOnly: true
        - mountPath: /run/xtables.lock
          name: iptableslock
        - mountPath: /lib/modules/
          name: kernelmodules
          readOnly: true
        - mountPath: /var/lib/kube-proxy/config.yaml
          subPath: config.yaml
          name: kube-proxy-config-volume
          readOnly: true
      hostNetwork: true
      volumes:
      - hostPath:
          path: /usr/share/ca-certificates
        name: ssl-certs-host
      - hostPath:
          path: /var/lib/kubelet/kubeconfig
        name: kubeconfig
      - hostPath:
          path: /etc/kubernetes
        name: etc-kubernetes
      - hostPath:
          path: /run/xtables.lock
        name: iptableslock
      - hostPath:
          path: /lib/modules/
        name: kernelmodules
      - configMap:
          name: kube-proxy-config
        name: kube-proxy-config-volume
      nodeSelector:
        beta.kubernetes.io/os: linux

Просмотреть файл

@ -0,0 +1,37 @@
# Single-replica rescheduler Deployment running the /rescheduler binary.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rescheduler
  namespace: kube-system
  labels:
    k8s-app: rescheduler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: rescheduler
  template:
    metadata:
      labels:
        k8s-app: rescheduler
    spec:
      priorityClassName: system-node-critical
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - image: {{ContainerImage "rescheduler"}}
        imagePullPolicy: IfNotPresent
        name: rescheduler
        resources:
          requests:
            cpu: {{ContainerCPUReqs "rescheduler"}}
            memory: {{ContainerMemReqs "rescheduler"}}
          limits:
            cpu: {{ContainerCPULimits "rescheduler"}}
            memory: {{ContainerMemLimits "rescheduler"}}
        command:
        - sh
        - -c
        - '/rescheduler'

Просмотреть файл

@ -0,0 +1,129 @@
# Kubernetes dashboard addon: ServiceAccount, minimal Role/RoleBinding,
# NodePort Service and the dashboard Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder"]
  verbs: ["get", "update", "delete"]
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - args:
        - --auto-generate-certificates
        - --heapster-host=http://heapster.kube-system:80
        image: {{ContainerImage "kubernetes-dashboard"}}
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: "/"
            port: 8443
            scheme: HTTPS
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: kubernetes-dashboard
        ports:
        - containerPort: 8443
          protocol: TCP
        resources:
          requests:
            cpu: {{ContainerCPUReqs "kubernetes-dashboard"}}
            memory: {{ContainerMemReqs "kubernetes-dashboard"}}
          limits:
            cpu: {{ContainerCPULimits "kubernetes-dashboard"}}
            memory: {{ContainerMemLimits "kubernetes-dashboard"}}
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
      volumes:
      - name: kubernetes-dashboard-certs
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        beta.kubernetes.io/os: linux

Просмотреть файл

@ -0,0 +1,149 @@
# metrics-server addon: RBAC, Service, Deployment and the APIService that
# registers v1beta1.metrics.k8s.io with the aggregation layer.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "extensions"
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      containers:
      - name: metrics-server
        image: {{ContainerImage "metrics-server"}}
        imagePullPolicy: IfNotPresent
        command:
        - /metrics-server
        - --kubelet-insecure-tls
      nodeSelector:
        beta.kubernetes.io/os: linux
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100

Просмотреть файл

@ -0,0 +1,63 @@
# NVIDIA device-plugin DaemonSet; scheduled only onto nodes labeled
# accelerator=nvidia (node affinity + nodeSelector).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: nvidia-device-plugin
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: nvidia-device-plugin
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: nvidia-device-plugin
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: nvidia-device-plugin
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      priorityClassName: system-node-critical
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: accelerator
                operator: In
                values:
                - nvidia
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: nvidia.com/gpu
        effect: NoSchedule
        operator: Equal
        value: "true"
      containers:
      - image: {{ContainerImage "nvidia-device-plugin"}}
        name: nvidia-device-plugin-ctr
        resources:
          requests:
            cpu: {{ContainerCPUReqs "nvidia-device-plugin"}}
            memory: {{ContainerMemReqs "nvidia-device-plugin"}}
          limits:
            cpu: {{ContainerCPULimits "nvidia-device-plugin"}}
            memory: {{ContainerMemLimits "nvidia-device-plugin"}}
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop: ["ALL"]
        volumeMounts:
        - name: device-plugin
          mountPath: /var/lib/kubelet/device-plugins
      volumes:
      - name: device-plugin
        hostPath:
          path: /var/lib/kubelet/device-plugins
      nodeSelector:
        beta.kubernetes.io/os: linux
        accelerator: nvidia

Просмотреть файл

@ -0,0 +1,132 @@
# PodSecurityPolicy addon: a "privileged" policy granted to all authenticated
# users and nodes, and a "restricted" policy granted to authenticated users.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: "*"
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - "*"
  volumes:
  - "*"
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
  - ALL
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: MustRunAs
    ranges:
    {{- /* Forbid adding the root group. */}}
    - min: 1
      max: 65535
  fsGroup:
    rule: MustRunAs
    ranges:
    {{- /* Forbid adding the root group. */}}
    - min: 1
      max: 65535
  readOnlyRootFilesystem: false
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - privileged
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - restricted
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: default:restricted
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:restricted
subjects:
- kind: Group
  name: system:authenticated
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: default:privileged
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:privileged
subjects:
- kind: Group
  name: system:authenticated
  apiGroup: rbac.authorization.k8s.io
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io

Просмотреть файл

@ -0,0 +1,46 @@
# DaemonSet that installs the SMB FlexVolume driver into the kubelet
# volume-plugin directory on every Linux node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: smb-flexvol-installer
  namespace: kube-system
  labels:
    k8s-app: smb
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      name: smb
  template:
    metadata:
      labels:
        name: smb
        kubernetes.io/cluster-service: "true"
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
    spec:
      containers:
      - name: smb-flexvol-installer
        image: {{ContainerImage "smb-flexvolume"}}
        imagePullPolicy: Always
        resources:
          requests:
            cpu: {{ContainerCPUReqs "smb-flexvolume"}}
            memory: {{ContainerMemReqs "smb-flexvolume"}}
          limits:
            cpu: {{ContainerCPULimits "smb-flexvolume"}}
            memory: {{ContainerMemLimits "smb-flexvolume"}}
        volumeMounts:
        - name: volplugins
          mountPath: /etc/kubernetes/volumeplugins/
        - name: varlog
          mountPath: /var/log/
      volumes:
      - name: varlog
        hostPath:
          path: /var/log/
      - name: volplugins
        hostPath:
          path: /etc/kubernetes/volumeplugins/
          type: DirectoryOrCreate
      nodeSelector:
        beta.kubernetes.io/os: linux

Просмотреть файл

@ -0,0 +1,100 @@
# Helm v2 tiller addon: ServiceAccount bound to cluster-admin, ClusterIP
# Service on 44134, and the tiller Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: tiller
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: tiller
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: helm
    name: tiller
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: tiller-deploy
  namespace: kube-system
spec:
  ports:
  - name: tiller
    port: 44134
    targetPort: tiller
  selector:
    app: helm
    name: tiller
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: helm
    name: tiller
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: tiller-deploy
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: helm
      name: tiller
  template:
    metadata:
      labels:
        app: helm
        name: tiller
    spec:
      serviceAccountName: tiller
      containers:
      - env:
        - name: TILLER_NAMESPACE
          value: kube-system
        - name: TILLER_HISTORY_MAX
          value: "{{ContainerConfig "max-history"}}"
        image: {{ContainerImage "tiller"}}
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /liveness
            port: 44135
          initialDelaySeconds: 1
          timeoutSeconds: 1
        name: tiller
        ports:
        - containerPort: 44134
          name: tiller
        readinessProbe:
          httpGet:
            path: /readiness
            port: 44135
          initialDelaySeconds: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: {{ContainerCPUReqs "tiller"}}
            memory: {{ContainerMemReqs "tiller"}}
          limits:
            cpu: {{ContainerCPULimits "tiller"}}
            memory: {{ContainerMemLimits "tiller"}}
      nodeSelector:
        beta.kubernetes.io/os: linux

Просмотреть файл

@ -184,6 +184,7 @@ var AllKubernetesSupportedVersions = map[string]bool{
"1.18.0-alpha.5": false,
"1.18.0-beta.1": false,
"1.18.0": true,
"1.19.0-alpha.1": true,
}
// GetDefaultKubernetesVersion returns the default Kubernetes version, that is the latest patch of the default release

Просмотреть файл

@ -101,6 +101,12 @@ func getDefaultImage(image, kubernetesImageBaseType string) string {
// The map supports GCR or MCR image string flavors
var kubernetesImageBaseVersionedImages = map[string]map[string]map[string]string{
common.KubernetesImageBaseTypeGCR: {
"1.19": {
common.AddonResizerComponentName: "addon-resizer:1.8.7",
common.MetricsServerAddonName: "metrics-server-amd64:v0.3.5",
common.AddonManagerComponentName: "kube-addon-manager-amd64:v9.0.2",
common.ClusterAutoscalerAddonName: "cluster-autoscaler:v1.18.0",
},
"1.18": {
common.AddonResizerComponentName: "addon-resizer:1.8.7",
common.MetricsServerAddonName: "metrics-server-amd64:v0.3.5",
@ -204,6 +210,12 @@ var kubernetesImageBaseVersionedImages = map[string]map[string]map[string]string
},
},
common.KubernetesImageBaseTypeMCR: {
"1.19": {
common.AddonResizerComponentName: "oss/kubernetes/autoscaler/addon-resizer:1.8.7",
common.MetricsServerAddonName: "oss/kubernetes/metrics-server:v0.3.5",
common.AddonManagerComponentName: "oss/kubernetes/kube-addon-manager:v9.0.2",
common.ClusterAutoscalerAddonName: "oss/kubernetes/autoscaler/cluster-autoscaler:v1.18.0",
},
"1.18": {
common.AddonResizerComponentName: "oss/kubernetes/autoscaler/addon-resizer:1.8.7",
common.MetricsServerAddonName: "oss/kubernetes/metrics-server:v0.3.5",
@ -380,6 +392,84 @@ func getK8sVersionComponents(version, kubernetesImageBaseType string, overrides
var ret map[string]string
k8sComponent := kubernetesImageBaseVersionedImages[kubernetesImageBaseType][majorMinor]
switch majorMinor {
case "1.19":
ret = map[string]string{
common.APIServerComponentName: getDefaultImage(common.APIServerComponentName, kubernetesImageBaseType) + ":v" + version,
common.ControllerManagerComponentName: getDefaultImage(common.ControllerManagerComponentName, kubernetesImageBaseType) + ":v" + version,
common.KubeProxyAddonName: getDefaultImage(common.KubeProxyAddonName, kubernetesImageBaseType) + ":v" + version,
common.SchedulerComponentName: getDefaultImage(common.SchedulerComponentName, kubernetesImageBaseType) + ":v" + version,
common.CloudControllerManagerComponentName: azureCloudControllerManagerImageReference,
common.CloudNodeManagerAddonName: azureCloudNodeManagerImageReference,
common.WindowsArtifactComponentName: "v" + version + "/windowszip/v" + version + "-1int.zip",
common.DashboardAddonName: getDefaultImage(common.DashboardAddonName, kubernetesImageBaseType),
common.ExecHealthZComponentName: getDefaultImage(common.ExecHealthZComponentName, kubernetesImageBaseType),
common.AddonResizerComponentName: k8sComponent[common.AddonResizerComponentName],
common.HeapsterAddonName: getDefaultImage(common.HeapsterAddonName, kubernetesImageBaseType),
common.MetricsServerAddonName: k8sComponent[common.MetricsServerAddonName],
common.CoreDNSAddonName: getDefaultImage(common.CoreDNSAddonName, kubernetesImageBaseType),
common.KubeDNSAddonName: getDefaultImage(common.KubeDNSAddonName, kubernetesImageBaseType),
common.AddonManagerComponentName: k8sComponent[common.AddonManagerComponentName],
common.DNSMasqComponentName: getDefaultImage(common.DNSMasqComponentName, kubernetesImageBaseType),
common.PauseComponentName: pauseImageReference,
common.TillerAddonName: tillerImageReference,
common.ReschedulerAddonName: getDefaultImage(common.ReschedulerAddonName, kubernetesImageBaseType),
common.ACIConnectorAddonName: virtualKubeletImageReference,
common.AzureCNINetworkMonitorAddonName: azureCNINetworkMonitorImageReference,
common.ClusterAutoscalerAddonName: k8sComponent[common.ClusterAutoscalerAddonName],
common.DNSSidecarComponentName: getDefaultImage(common.DNSSidecarComponentName, kubernetesImageBaseType),
common.BlobfuseFlexVolumeAddonName: blobfuseFlexVolumeImageReference,
common.SMBFlexVolumeAddonName: smbFlexVolumeImageReference,
common.KeyVaultFlexVolumeAddonName: keyvaultFlexVolumeImageReference,
common.IPMASQAgentAddonName: getDefaultImage(common.IPMASQAgentAddonName, kubernetesImageBaseType),
common.DNSAutoscalerAddonName: getDefaultImage(common.DNSAutoscalerAddonName, kubernetesImageBaseType),
common.AzureNetworkPolicyAddonName: azureNPMContainerImageReference,
common.CalicoTyphaComponentName: calicoTyphaImageReference,
common.CalicoCNIComponentName: calicoCNIImageReference,
common.CalicoNodeComponentName: calicoNodeImageReference,
common.CalicoPod2DaemonComponentName: calicoPod2DaemonImageReference,
common.CalicoClusterAutoscalerComponentName: calicoClusterProportionalAutoscalerImageReference,
common.CiliumAgentContainerName: ciliumAgentImageReference,
common.CiliumCleanStateContainerName: ciliumCleanStateImageReference,
common.CiliumOperatorContainerName: ciliumOperatorImageReference,
common.CiliumEtcdOperatorContainerName: ciliumEtcdOperatorImageReference,
common.AntreaControllerContainerName: antreaControllerImageReference,
common.AntreaAgentContainerName: antreaAgentImageReference,
common.AntreaOVSContainerName: antreaOVSImageReference,
"antrea" + common.AntreaInstallCNIContainerName: antreaInstallCNIImageReference,
common.NMIContainerName: aadPodIdentityNMIImageReference,
common.MICContainerName: aadPodIdentityMICImageReference,
common.AzurePolicyAddonName: azurePolicyImageReference,
common.GatekeeperContainerName: gatekeeperImageReference,
common.NodeProblemDetectorAddonName: nodeProblemDetectorImageReference,
common.CSIProvisionerContainerName: csiProvisionerImageReference,
common.CSIAttacherContainerName: csiAttacherImageReference,
common.CSIClusterDriverRegistrarContainerName: csiClusterDriverRegistrarImageReference,
common.CSILivenessProbeContainerName: csiLivenessProbeImageReference,
common.CSINodeDriverRegistrarContainerName: csiNodeDriverRegistrarImageReference,
common.CSISnapshotterContainerName: csiSnapshotterImageReference,
common.CSIResizerContainerName: csiResizerImageReference,
common.CSIAzureDiskContainerName: csiAzureDiskImageReference,
common.CSIAzureFileContainerName: csiAzureFileImageReference,
common.KubeFlannelContainerName: kubeFlannelImageReference,
"flannel" + common.FlannelInstallCNIContainerName: flannelInstallCNIImageReference,
common.KubeRBACProxyContainerName: KubeRBACProxyImageReference,
common.ScheduledMaintenanceManagerContainerName: ScheduledMaintenanceManagerImageReference,
"nodestatusfreq": DefaultKubernetesNodeStatusUpdateFrequency,
"nodegraceperiod": DefaultKubernetesCtrlMgrNodeMonitorGracePeriod,
"podeviction": DefaultKubernetesCtrlMgrPodEvictionTimeout,
"routeperiod": DefaultKubernetesCtrlMgrRouteReconciliationPeriod,
"backoffretries": strconv.Itoa(DefaultKubernetesCloudProviderBackoffRetries),
"backoffjitter": strconv.FormatFloat(DefaultKubernetesCloudProviderBackoffJitter, 'f', -1, 64),
"backoffduration": strconv.Itoa(DefaultKubernetesCloudProviderBackoffDuration),
"backoffexponent": strconv.FormatFloat(DefaultKubernetesCloudProviderBackoffExponent, 'f', -1, 64),
"ratelimitqps": strconv.FormatFloat(DefaultKubernetesCloudProviderRateLimitQPS, 'f', -1, 64),
"ratelimitqpswrite": strconv.FormatFloat(DefaultKubernetesCloudProviderRateLimitQPSWrite, 'f', -1, 64),
"ratelimitbucket": strconv.Itoa(DefaultKubernetesCloudProviderRateLimitBucket),
"ratelimitbucketwrite": strconv.Itoa(DefaultKubernetesCloudProviderRateLimitBucketWrite),
"gchighthreshold": strconv.Itoa(DefaultKubernetesGCHighThreshold),
"gclowthreshold": strconv.Itoa(DefaultKubernetesGCLowThreshold),
common.NVIDIADevicePluginAddonName: nvidiaDevicePluginImageReference,
}
case "1.18":
ret = map[string]string{
common.APIServerComponentName: getDefaultImage(common.APIServerComponentName, kubernetesImageBaseType) + ":v" + version,

Разница между файлами не показана из-за своего большого размера Загрузить разницу