Mirror of https://github.com/Azure/aks-engine.git
Add first cut of the OpenShift orchestrator (#2611)
* Add first cut of the OpenShift orchestrator
This commit is contained in:
Parent
7cbcbb45e3
Commit
6a8f72385a
@ -19,6 +19,7 @@ pkg/operations/junit.xml
pkg/operations/kubernetesupgrade/junit.xml
pkg/acsengine/templates.go
pkg/i18n/translations.go
pkg/openshift/certgen/templates/bindata.go

_logs/
test/acs-engine-test/report/TestReport.json
@ -13,7 +13,7 @@ import (
const (
	rootName             = "acs-engine"
	rootShortDescription = "ACS-Engine deploys and manages container orchestrators in Azure"
	rootLongDescription  = "ACS-Engine deploys and manages Kubernetes, Swarm Mode, and DC/OS clusters in Azure"
	rootLongDescription  = "ACS-Engine deploys and manages Kubernetes, OpenShift, Swarm Mode, and DC/OS clusters in Azure"
)

var (
@ -0,0 +1,67 @@
{
  "apiVersion": "vlabs",
  "properties": {
    "orchestratorProfile": {
      "orchestratorType": "OpenShift",
      "openShiftConfig": {
        "clusterUsername": "demo",
        "clusterPassword": "demo"
      }
    },
    "azProfile": {
      "tenantId": "YOUR TENANT ID",
      "subscriptionId": "YOUR SUBSCRIPTION ID",
      "resourceGroup": "YOUR RESOURCE GROUP",
      "location": "YOUR LOCATION"
    },
    "masterProfile": {
      "count": 1,
      "dnsPrefix": "YOUR DNS PREFIX",
      "imageReference": {
        "name": "YOUR IMAGE",
        "resourceGroup": "YOUR IMAGE RESOURCE GROUP"
      },
      "storageProfile": "ManagedDisks",
      "vmSize": "Standard_D4s_v3"
    },
    "agentPoolProfiles": [
      {
        "availabilityProfile": "AvailabilitySet",
        "count": 1,
        "imageReference": {
          "name": "YOUR IMAGE",
          "resourceGroup": "YOUR IMAGE RESOURCE GROUP"
        },
        "name": "compute",
        "storageProfile": "ManagedDisks",
        "vmSize": "Standard_D4s_v3"
      },
      {
        "availabilityProfile": "AvailabilitySet",
        "count": 1,
        "imageReference": {
          "name": "YOUR IMAGE",
          "resourceGroup": "YOUR IMAGE RESOURCE GROUP"
        },
        "role": "infra",
        "name": "infra",
        "storageProfile": "ManagedDisks",
        "vmSize": "Standard_D4s_v3"
      }
    ],
    "linuxProfile": {
      "adminUsername": "cloud-user",
      "ssh": {
        "publicKeys": [
          {
            "keyData": "YOUR KEY"
          }
        ]
      }
    },
    "servicePrincipalProfile": {
      "clientId": "YOUR CLIENTID",
      "secret": "YOUR SECRET"
    }
  }
}
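Editor's note: the apimodel above is the user-facing entry point for the new orchestrator. As a rough illustration of its shape only (not the actual acs-engine apiloader), the following minimal Go sketch unmarshals just the orchestratorProfile/openShiftConfig portion; the trimmed struct names and the local file name "openshift.vlabs.json" are assumptions made for this sketch.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// openShiftConfig and orchestratorProfile are hypothetical, trimmed stand-ins
// for the real vlabs types; only the fields shown in the example are modelled.
type openShiftConfig struct {
	ClusterUsername string `json:"clusterUsername"`
	ClusterPassword string `json:"clusterPassword"`
}

type orchestratorProfile struct {
	OrchestratorType string           `json:"orchestratorType"`
	OpenShiftConfig  *openShiftConfig `json:"openShiftConfig"`
}

type apiModel struct {
	APIVersion string `json:"apiVersion"`
	Properties struct {
		OrchestratorProfile orchestratorProfile `json:"orchestratorProfile"`
	} `json:"properties"`
}

func main() {
	// Assumed local copy of the example apimodel shown above.
	b, err := os.ReadFile("openshift.vlabs.json")
	if err != nil {
		log.Fatal(err)
	}
	var m apiModel
	if err := json.Unmarshal(b, &m); err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.Properties.OrchestratorProfile.OrchestratorType)                // OpenShift
	fmt.Println(m.Properties.OrchestratorProfile.OpenShiftConfig.ClusterUsername) // demo
}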
@ -1,5 +1,5 @@
|
|||
{{if IsPublic .Ports}}
|
||||
{{ if not IsKubernetes}}
|
||||
{{ if and (not IsKubernetes) (not IsOpenShift)}}
|
||||
"{{.Name}}FQDN": {
|
||||
"type": "string",
|
||||
"value": "[reference(concat('Microsoft.Network/publicIPAddresses/', variables('{{.Name}}IPAddressName'))).dnsSettings.fqdn]"
|
||||
|
|
|
@ -31,6 +31,14 @@
|
|||
"subnet": {
|
||||
"id": "[variables('{{$.Name}}VnetSubnetID')]"
|
||||
}
|
||||
{{if eq $.Role "infra"}}
|
||||
,
|
||||
"loadBalancerBackendAddressPools": [
|
||||
{
|
||||
"id": "[concat(resourceId('Microsoft.Network/loadBalancers', 'router-lb'), '/backendAddressPools/backend')]"
|
||||
}
|
||||
]
|
||||
{{end}}
|
||||
}
|
||||
}
|
||||
{{if lt $seq $.IPAddressCount}},{{end}}
|
||||
|
@ -160,7 +168,9 @@
|
|||
"osProfile": {
|
||||
"adminUsername": "[variables('username')]",
|
||||
"computername": "[concat(variables('{{.Name}}VMNamePrefix'), copyIndex(variables('{{.Name}}Offset')))]",
|
||||
{{if not IsOpenShift}}
|
||||
{{GetKubernetesAgentCustomData .}}
|
||||
{{end}}
|
||||
"linuxConfiguration": {
|
||||
"disablePasswordAuthentication": "true",
|
||||
"ssh": {
|
||||
|
@ -270,7 +280,11 @@
|
|||
"autoUpgradeMinorVersion": true,
|
||||
"settings": {},
|
||||
"protectedSettings": {
|
||||
{{if IsOpenShift }}
|
||||
"script": "{{ Base64 (OpenShiftGetNodeSh .) }}"
|
||||
{{else}}
|
||||
"commandToExecute": "[concat(variables('provisionScriptParametersCommon'),' /usr/bin/nohup /bin/bash -c \"/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1\"')]"
|
||||
{{end}}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,3 +1,115 @@
|
|||
{{if IsOpenShift}}
|
||||
{
|
||||
"name": "router-ip",
|
||||
"type": "Microsoft.Network/publicIPAddresses",
|
||||
"apiVersion": "2017-08-01",
|
||||
"location": "[variables('location')]",
|
||||
"properties": {
|
||||
"publicIPAllocationMethod": "Static",
|
||||
"dnsSettings": {
|
||||
"domainNameLabel": "[concat(variables('masterFqdnPrefix'), '-router')]"
|
||||
}
|
||||
},
|
||||
"sku": {
|
||||
"name": "Basic"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "router-lb",
|
||||
"type": "Microsoft.Network/loadBalancers",
|
||||
"apiVersion": "2017-10-01",
|
||||
"location": "[variables('location')]",
|
||||
"dependsOn": [
|
||||
"['Microsoft.Network/publicIPAddresses/router-ip']"
|
||||
],
|
||||
"properties": {
|
||||
"frontendIPConfigurations": [
|
||||
{
|
||||
"name": "frontend",
|
||||
"properties": {
|
||||
"privateIPAllocationMethod": "Dynamic",
|
||||
"publicIPAddress": {
|
||||
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'router-ip')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"backendAddressPools": [
|
||||
{
|
||||
"name": "backend"
|
||||
}
|
||||
],
|
||||
"loadBalancingRules": [
|
||||
{
|
||||
"name": "port-80",
|
||||
"properties": {
|
||||
"frontendIPConfiguration": {
|
||||
"id": "[concat(resourceId('Microsoft.Network/loadBalancers', 'router-lb'), '/frontendIPConfigurations/frontend')]"
|
||||
},
|
||||
"frontendPort": 80,
|
||||
"backendPort": 80,
|
||||
"enableFloatingIP": false,
|
||||
"idleTimeoutInMinutes": 4,
|
||||
"protocol": "Tcp",
|
||||
"loadDistribution": "Default",
|
||||
"backendAddressPool": {
|
||||
"id": "[concat(resourceId('Microsoft.Network/loadBalancers', 'router-lb'), '/backendAddressPools/backend')]"
|
||||
},
|
||||
"probe": {
|
||||
"id": "[concat(resourceId('Microsoft.Network/loadBalancers', 'router-lb'), '/probes/port-80')]"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "port-443",
|
||||
"properties": {
|
||||
"frontendIPConfiguration": {
|
||||
"id": "[concat(resourceId('Microsoft.Network/loadBalancers', 'router-lb'), '/frontendIPConfigurations/frontend')]"
|
||||
},
|
||||
"frontendPort": 443,
|
||||
"backendPort": 443,
|
||||
"enableFloatingIP": false,
|
||||
"idleTimeoutInMinutes": 4,
|
||||
"protocol": "Tcp",
|
||||
"loadDistribution": "Default",
|
||||
"backendAddressPool": {
|
||||
"id": "[concat(resourceId('Microsoft.Network/loadBalancers', 'router-lb'), '/backendAddressPools/backend')]"
|
||||
},
|
||||
"probe": {
|
||||
"id": "[concat(resourceId('Microsoft.Network/loadBalancers', 'router-lb'), '/probes/port-443')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"probes": [
|
||||
{
|
||||
"name": "port-80",
|
||||
"properties": {
|
||||
"protocol": "Tcp",
|
||||
"port": 80,
|
||||
"intervalInSeconds": 5,
|
||||
"numberOfProbes": 2
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "port-443",
|
||||
"properties": {
|
||||
"protocol": "Tcp",
|
||||
"port": 443,
|
||||
"intervalInSeconds": 5,
|
||||
"numberOfProbes": 2
|
||||
}
|
||||
}
|
||||
],
|
||||
"inboundNatRules": [],
|
||||
"outboundNatRules": [],
|
||||
"inboundNatPools": []
|
||||
},
|
||||
"sku": {
|
||||
"name": "Basic"
|
||||
}
|
||||
},
|
||||
{{end}}
|
||||
{{if .MasterProfile.IsManagedDisks}}
|
||||
{
|
||||
"apiVersion": "[variables('apiVersionStorageManagedDisks')]",
|
||||
|
@ -94,6 +206,36 @@
|
|||
"sourcePortRange": "*"
|
||||
}
|
||||
},
|
||||
{{end}}
|
||||
{{if IsOpenShift}}
|
||||
{
|
||||
"name": "allow_http",
|
||||
"properties": {
|
||||
"access": "Allow",
|
||||
"description": "Allow http traffic to infra nodes",
|
||||
"destinationAddressPrefix": "*",
|
||||
"destinationPortRange": "80",
|
||||
"direction": "Inbound",
|
||||
"priority": 110,
|
||||
"protocol": "Tcp",
|
||||
"sourceAddressPrefix": "*",
|
||||
"sourcePortRange": "*"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "allow_https",
|
||||
"properties": {
|
||||
"access": "Allow",
|
||||
"description": "Allow https traffic to infra nodes",
|
||||
"destinationAddressPrefix": "*",
|
||||
"destinationPortRange": "443",
|
||||
"direction": "Inbound",
|
||||
"priority": 111,
|
||||
"protocol": "Tcp",
|
||||
"sourceAddressPrefix": "*",
|
||||
"sourcePortRange": "*"
|
||||
}
|
||||
},
|
||||
{{end}}
|
||||
{
|
||||
"name": "allow_ssh",
|
||||
|
@ -115,7 +257,7 @@
|
|||
"access": "Allow",
|
||||
"description": "Allow kube-apiserver (tls) traffic to master",
|
||||
"destinationAddressPrefix": "*",
|
||||
"destinationPortRange": "443-443",
|
||||
"destinationPortRange": {{if IsOpenShift}}"8443-8443"{{else}}"443-443"{{end}},
|
||||
"direction": "Inbound",
|
||||
"priority": 100,
|
||||
"protocol": "Tcp",
|
||||
|
@ -182,8 +324,8 @@
|
|||
"id": "[concat(variables('masterLbID'), '/backendAddressPools/', variables('masterLbBackendPoolName'))]"
|
||||
},
|
||||
"protocol": "tcp",
|
||||
"frontendPort": 443,
|
||||
"backendPort": 443,
|
||||
"frontendPort": {{if IsOpenShift}}8443{{else}}443{{end}},
|
||||
"backendPort": {{if IsOpenShift}}8443{{else}}443{{end}},
|
||||
"enableFloatingIP": false,
|
||||
"idleTimeoutInMinutes": 5,
|
||||
"loadDistribution": "Default",
|
||||
|
@ -198,7 +340,7 @@
|
|||
"name": "tcpHTTPSProbe",
|
||||
"properties": {
|
||||
"protocol": "tcp",
|
||||
"port": 443,
|
||||
"port": {{if IsOpenShift}}8443{{else}}443{{end}},
|
||||
"intervalInSeconds": "5",
|
||||
"numberOfProbes": "2"
|
||||
}
|
||||
|
@ -563,12 +705,12 @@
|
|||
"backendAddressPool": {
|
||||
"id": "[concat(variables('masterInternalLbID'), '/backendAddressPools/', variables('masterLbBackendPoolName'))]"
|
||||
},
|
||||
"backendPort": 4443,
|
||||
"backendPort": {{if IsOpenShift}}8443{{else}}4443{{end}},
|
||||
"enableFloatingIP": false,
|
||||
"frontendIPConfiguration": {
|
||||
"id": "[variables('masterInternalLbIPConfigID')]"
|
||||
},
|
||||
"frontendPort": 443,
|
||||
"frontendPort": {{if IsOpenShift}}8443{{else}}443{{end}},
|
||||
"idleTimeoutInMinutes": 5,
|
||||
"protocol": "tcp"
|
||||
}
|
||||
|
@ -580,7 +722,7 @@
|
|||
"properties": {
|
||||
"intervalInSeconds": "5",
|
||||
"numberOfProbes": "2",
|
||||
"port": 4443,
|
||||
"port": {{if IsOpenShift}}8443{{else}}4443{{end}},
|
||||
"protocol": "tcp"
|
||||
}
|
||||
}
|
||||
|
@ -637,7 +779,9 @@
|
|||
"osProfile": {
|
||||
"adminUsername": "[variables('username')]",
|
||||
"computername": "[concat(variables('masterVMNamePrefix'), copyIndex(variables('masterOffset')))]",
|
||||
{{if IsKubernetes}}
|
||||
{{GetKubernetesMasterCustomData .}}
|
||||
{{end}}
|
||||
"linuxConfiguration": {
|
||||
"disablePasswordAuthentication": true,
|
||||
"ssh": {
|
||||
|
@ -760,7 +904,11 @@
|
|||
"autoUpgradeMinorVersion": true,
|
||||
"settings": {},
|
||||
"protectedSettings": {
|
||||
{{if IsOpenShift}}
|
||||
"script": "{{ Base64 OpenShiftGetMasterSh }}"
|
||||
{{else}}
|
||||
"commandToExecute": "[concat(variables('provisionScriptParametersCommon'),' ',variables('provisionScriptParametersMaster'), ' MASTER_INDEX=',copyIndex(variables('masterOffset')),' /usr/bin/nohup /bin/bash -c \"stat /opt/azure/containers/provision.complete > /dev/null 2>&1 || /bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1\"')]"
|
||||
{{end}}
|
||||
}
|
||||
}
|
||||
}{{WriteLinkedTemplatesForExtensions}}
|
||||
|
|
|
@ -2,8 +2,11 @@
|
|||
"etcdDownloadURLBase": "[parameters('etcdDownloadURLBase')]",
|
||||
"etcdVersion": "[parameters('etcdVersion')]",
|
||||
"maxVMsPerPool": 100,
|
||||
{{ if not IsOpenShift }}
|
||||
"apiServerCertificate": "[parameters('apiServerCertificate')]",
|
||||
{{ end }}
|
||||
{{ if not IsHostedMaster }}
|
||||
{{ if not IsOpenShift }}
|
||||
"apiServerPrivateKey": "[parameters('apiServerPrivateKey')]",
|
||||
"etcdServerCertificate": "[parameters('etcdServerCertificate')]",
|
||||
"etcdServerPrivateKey": "[parameters('etcdServerPrivateKey')]",
|
||||
|
@ -65,12 +68,15 @@
|
|||
"etcdServerCertFilepath": "/etc/kubernetes/certs/etcdserver.crt",
|
||||
"etcdServerKeyFilepath": "/etc/kubernetes/certs/etcdserver.key",
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{ if not IsOpenShift }}
|
||||
"caCertificate": "[parameters('caCertificate')]",
|
||||
"caPrivateKey": "[parameters('caPrivateKey')]",
|
||||
"clientCertificate": "[parameters('clientCertificate')]",
|
||||
"clientPrivateKey": "[parameters('clientPrivateKey')]",
|
||||
"kubeConfigCertificate": "[parameters('kubeConfigCertificate')]",
|
||||
"kubeConfigPrivateKey": "[parameters('kubeConfigPrivateKey')]",
|
||||
{{end}}
|
||||
"kubernetesHyperkubeSpec": "[parameters('kubernetesHyperkubeSpec')]",
|
||||
"kubernetesCcmImageSpec": "[parameters('kubernetesCcmImageSpec')]",
|
||||
"kubernetesAddonManagerSpec": "[parameters('kubernetesAddonManagerSpec')]",
|
||||
|
@ -213,11 +219,13 @@
|
|||
"provisionScript": "{{GetKubernetesB64Provision}}",
|
||||
"provisionSource": "{{GetKubernetesB64ProvisionSource}}",
|
||||
"mountetcdScript": "{{GetKubernetesB64Mountetcd}}",
|
||||
{{if not IsOpenShift}}
|
||||
{{if not IsHostedMaster}}
|
||||
"provisionScriptParametersMaster": "[concat('MASTER_NODE=true APISERVER_PRIVATE_KEY=',variables('apiServerPrivateKey'),' CA_CERTIFICATE=',variables('caCertificate'),' CA_PRIVATE_KEY=',variables('caPrivateKey'),' MASTER_FQDN=',variables('masterFqdnPrefix'),' KUBECONFIG_CERTIFICATE=',variables('kubeConfigCertificate'),' KUBECONFIG_KEY=',variables('kubeConfigPrivateKey'),' ETCD_SERVER_CERTIFICATE=',variables('etcdServerCertificate'),' ETCD_CLIENT_CERTIFICATE=',variables('etcdClientCertificate'),' ETCD_SERVER_PRIVATE_KEY=',variables('etcdServerPrivateKey'),' ETCD_CLIENT_PRIVATE_KEY=',variables('etcdClientPrivateKey'),' ETCD_PEER_CERTIFICATES=',string(variables('etcdPeerCertificates')),' ETCD_PEER_PRIVATE_KEYS=',string(variables('etcdPeerPrivateKeys')),' ADMINUSER=',variables('username'))]",
|
||||
"provisionScriptParametersCommon": "[concat('TENANT_ID=',variables('tenantID'),' HYPERKUBE_URL=',variables('kubernetesHyperkubeSpec'),' APISERVER_PUBLIC_KEY=',variables('apiserverCertificate'),' SUBSCRIPTION_ID=',variables('subscriptionId'),' RESOURCE_GROUP=',variables('resourceGroup'),' LOCATION=',variables('location'),' SUBNET=',variables('subnetName'),' NETWORK_SECURITY_GROUP=',variables('nsgName'),' VIRTUAL_NETWORK=',variables('virtualNetworkName'),' VIRTUAL_NETWORK_RESOURCE_GROUP=',variables('virtualNetworkResourceGroupName'),' ROUTE_TABLE=',variables('routeTableName'),' PRIMARY_AVAILABILITY_SET=',variables('primaryAvailabilitySetName'),' SERVICE_PRINCIPAL_CLIENT_ID=',variables('servicePrincipalClientId'),' SERVICE_PRINCIPAL_CLIENT_SECRET=',variables('singleQuote'),variables('servicePrincipalClientSecret'),variables('singleQuote'),' KUBELET_PRIVATE_KEY=',variables('clientPrivateKey'),' TARGET_ENVIRONMENT=',variables('targetEnvironment'),' NETWORK_POLICY=',variables('networkPolicy'),' FQDNSuffix=',variables('fqdnEndpointSuffix'),' VNET_CNI_PLUGINS_URL=',variables('vnetCniLinuxPluginsURL'),' CNI_PLUGINS_URL=',variables('cniPluginsURL'),' CLOUDPROVIDER_BACKOFF=',variables('cloudProviderBackoff'),' CLOUDPROVIDER_BACKOFF_RETRIES=',variables('cloudProviderBackoffRetries'),' CLOUDPROVIDER_BACKOFF_EXPONENT=',variables('cloudProviderBackoffExponent'),' CLOUDPROVIDER_BACKOFF_DURATION=',variables('cloudProviderBackoffDuration'),' CLOUDPROVIDER_BACKOFF_JITTER=',variables('cloudProviderBackoffJitter'),' CLOUDPROVIDER_RATELIMIT=',variables('cloudProviderRatelimit'),' CLOUDPROVIDER_RATELIMIT_QPS=',variables('cloudProviderRatelimitQPS'),' CLOUDPROVIDER_RATELIMIT_BUCKET=',variables('cloudProviderRatelimitBucket'),' USE_MANAGED_IDENTITY_EXTENSION=',variables('useManagedIdentityExtension'),' USE_INSTANCE_METADATA=',variables('useInstanceMetadata'),' CONTAINER_RUNTIME=',variables('containerRuntime'),' KUBECONFIG_SERVER=',variables('kubeconfigServer'))]",
|
||||
{{else}}
|
||||
"provisionScriptParametersCommon": "[concat('TENANT_ID=',variables('tenantID'),' HYPERKUBE_URL=',variables('kubernetesHyperkubeSpec'),' APISERVER_PUBLIC_KEY=',variables('apiserverCertificate'),' SUBSCRIPTION_ID=',variables('subscriptionId'),' RESOURCE_GROUP=',variables('resourceGroup'),' LOCATION=',variables('location'),' SUBNET=',variables('subnetName'),' NETWORK_SECURITY_GROUP=',variables('nsgName'),' VIRTUAL_NETWORK=',variables('virtualNetworkName'),' VIRTUAL_NETWORK_RESOURCE_GROUP=',variables('virtualNetworkResourceGroupName'),' ROUTE_TABLE=',variables('routeTableName'),' PRIMARY_AVAILABILITY_SET=',variables('primaryAvailabilitySetName'),' SERVICE_PRINCIPAL_CLIENT_ID=',variables('servicePrincipalClientId'),' SERVICE_PRINCIPAL_CLIENT_SECRET=',variables('singleQuote'),variables('servicePrincipalClientSecret'),variables('singleQuote'),' KUBELET_PRIVATE_KEY=',variables('clientPrivateKey'),' TARGET_ENVIRONMENT=',variables('targetEnvironment'),' NETWORK_POLICY=',variables('networkPolicy'),' FQDNSuffix=',variables('fqdnEndpointSuffix'),' VNET_CNI_PLUGINS_URL=',variables('vnetCniLinuxPluginsURL'),' CNI_PLUGINS_URL=',variables('cniPluginsURL'),' CLOUDPROVIDER_BACKOFF=',variables('cloudProviderBackoff'),' CLOUDPROVIDER_BACKOFF_RETRIES=',variables('cloudProviderBackoffRetries'),' CLOUDPROVIDER_BACKOFF_EXPONENT=',variables('cloudProviderBackoffExponent'),' CLOUDPROVIDER_BACKOFF_DURATION=',variables('cloudProviderBackoffDuration'),' CLOUDPROVIDER_BACKOFF_JITTER=',variables('cloudProviderBackoffJitter'),' CLOUDPROVIDER_RATELIMIT=',variables('cloudProviderRatelimit'),' CLOUDPROVIDER_RATELIMIT_QPS=',variables('cloudProviderRatelimitQPS'),' CLOUDPROVIDER_RATELIMIT_BUCKET=',variables('cloudProviderRatelimitBucket'),' USE_MANAGED_IDENTITY_EXTENSION=',variables('useManagedIdentityExtension'),' USE_INSTANCE_METADATA=',variables('useInstanceMetadata'),' CONTAINER_RUNTIME=',variables('containerRuntime'))]",
|
||||
{{end}}
|
||||
{{end}}
|
||||
"generateProxyCertsScript": "{{GetKubernetesB64GenerateProxyCerts}}",
|
||||
"orchestratorNameVersionTag": "{{.OrchestratorProfile.OrchestratorType}}:{{.OrchestratorProfile.OrchestratorVersion}}",
|
||||
|
@ -364,9 +372,11 @@
|
|||
"scope": "[resourceGroup().id]",
|
||||
"tenantId": "[subscription().tenantId]",
|
||||
"singleQuote": "'",
|
||||
"targetEnvironment": "[parameters('targetEnvironment')]",
|
||||
"dockerEngineDownloadRepo": "[parameters('dockerEngineDownloadRepo')]",
|
||||
"dockerEngineVersion": "[parameters('dockerEngineVersion')]"
|
||||
"targetEnvironment": "[parameters('targetEnvironment')]"
|
||||
{{if not IsOpenShift}}
|
||||
, "dockerEngineDownloadRepo": "[parameters('dockerEngineDownloadRepo')]"
|
||||
, "dockerEngineVersion": "[parameters('dockerEngineVersion')]"
|
||||
{{end}}
|
||||
{{if .LinuxProfile.HasSecrets}}
|
||||
, "linuxProfileSecrets" :
|
||||
[
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
"type": "string"
|
||||
},
|
||||
{{else}}
|
||||
{{if not IsOpenShift}}
|
||||
"etcdServerCertificate": {
|
||||
"metadata": {
|
||||
"description": "The base 64 server certificate used on the master"
|
||||
|
@ -111,6 +112,8 @@
|
|||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{if not IsOpenShift}}
|
||||
"apiServerCertificate": {
|
||||
"metadata": {
|
||||
"description": "The base 64 server certificate used on the master"
|
||||
|
@ -160,6 +163,7 @@
|
|||
},
|
||||
"type": "securestring"
|
||||
},
|
||||
{{end}}
|
||||
"generatorCode": {
|
||||
{{PopulateClassicModeDefaultValue "generatorCode"}}
|
||||
"metadata": {
|
||||
|
@ -552,6 +556,7 @@
|
|||
},
|
||||
"type": "string"
|
||||
},
|
||||
{{if not IsOpenShift}}
|
||||
"dockerEngineDownloadRepo": {
|
||||
"defaultValue": "https://aptdocker.azureedge.net/repo",
|
||||
"metadata": {
|
||||
|
@ -574,6 +579,7 @@
|
|||
],
|
||||
"type": "string"
|
||||
},
|
||||
{{end}}
|
||||
"networkPolicy": {
|
||||
"defaultValue": "{{.OrchestratorProfile.KubernetesConfig.NetworkPolicy}}",
|
||||
"metadata": {
|
||||
|
|
|
@ -0,0 +1,142 @@
|
|||
#!/bin/bash -x
|
||||
|
||||
# TODO: /etc/dnsmasq.d/origin-upstream-dns.conf is currently hardcoded; it
|
||||
# probably shouldn't be
|
||||
|
||||
SERVICE_TYPE=origin
|
||||
IMAGE_BASE=openshift/origin
|
||||
if [ -f "/etc/sysconfig/atomic-openshift-node" ]; then
|
||||
SERVICE_TYPE=atomic-openshift
|
||||
IMAGE_BASE=registry.reg-aws.openshift.com:443/openshift3/ose
|
||||
fi
|
||||
VERSION="$(rpm -q $SERVICE_TYPE --queryformat %{VERSION})"
|
||||
|
||||
# TODO: remove this once we generate the registry certificate
|
||||
cat >>/etc/sysconfig/docker <<'EOF'
|
||||
INSECURE_REGISTRY='--insecure-registry 172.30.0.0/16'
|
||||
EOF
|
||||
|
||||
systemctl restart docker.service
|
||||
|
||||
echo "BOOTSTRAP_CONFIG_NAME=node-config-master" >>/etc/sysconfig/${SERVICE_TYPE}-node
|
||||
|
||||
for dst in tcp,2379 tcp,2380 tcp,8443 tcp,8444 tcp,8053 udp,8053 tcp,9090; do
|
||||
proto=${dst%%,*}
|
||||
port=${dst##*,}
|
||||
iptables -A OS_FIREWALL_ALLOW -p $proto -m state --state NEW -m $proto --dport $port -j ACCEPT
|
||||
done
|
||||
|
||||
iptables-save >/etc/sysconfig/iptables
|
||||
|
||||
sed -i -e "s#--master=.*#--master=https://$(hostname --fqdn):8443#" /etc/sysconfig/${SERVICE_TYPE}-master-api
|
||||
|
||||
rm -rf /etc/etcd/* /etc/origin/master/* /etc/origin/node/*
|
||||
|
||||
oc adm create-bootstrap-policy-file --filename=/etc/origin/master/policy.json
|
||||
|
||||
( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz)
|
||||
|
||||
chown -R etcd:etcd /etc/etcd
|
||||
chmod 0600 /etc/origin/master/htpasswd
|
||||
chmod 1777 /tmp
|
||||
|
||||
cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt
|
||||
update-ca-trust
|
||||
|
||||
###
|
||||
# retrieve the public ip via dns for the router public ip and sub it in for the routingConfig.subdomain
|
||||
###
|
||||
routerLBHost="{{.RouterLBHostname}}"
|
||||
routerLBIP=$(dig +short $routerLBHost)
|
||||
|
||||
for i in /etc/origin/master/master-config.yaml /tmp/bootstrapconfigs/* /tmp/ansible/azure-local-master-inventory.yml; do
|
||||
sed -i "s/TEMPROUTERIP/${routerLBIP}/; s|TEMPIMAGEBASE|$IMAGE_BASE|" $i
|
||||
done
|
||||
|
||||
# TODO: when enabling secure registry, may need:
|
||||
# ln -s /etc/origin/node/node-client-ca.crt /etc/docker/certs.d/docker-registry.default.svc:5000
|
||||
|
||||
# note: ${SERVICE_TYPE}-node crash loops until master is up
|
||||
for unit in etcd.service ${SERVICE_TYPE}-master-api.service ${SERVICE_TYPE}-master-controllers.service; do
|
||||
systemctl enable $unit
|
||||
systemctl start $unit
|
||||
done
|
||||
|
||||
mkdir -p /root/.kube
|
||||
cp /etc/origin/master/admin.kubeconfig /root/.kube/config
|
||||
|
||||
export KUBECONFIG=/etc/origin/master/admin.kubeconfig
|
||||
|
||||
while ! curl -o /dev/null -m 2 -kfs https://localhost:8443/healthz; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
while ! oc get svc kubernetes &>/dev/null; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
oc create -f - <<'EOF'
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: azure
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: kubernetes.io/azure-disk
|
||||
parameters:
|
||||
skuName: Premium_LRS
|
||||
location: {{ .Location }}
|
||||
EOF
|
||||
|
||||
oc create configmap node-config-master --namespace openshift-node --from-file=node-config.yaml=/tmp/bootstrapconfigs/master-config.yaml
|
||||
oc create configmap node-config-compute --namespace openshift-node --from-file=node-config.yaml=/tmp/bootstrapconfigs/compute-config.yaml
|
||||
oc create configmap node-config-infra --namespace openshift-node --from-file=node-config.yaml=/tmp/bootstrapconfigs/infra-config.yaml
|
||||
|
||||
# must start ${SERVICE_TYPE}-node after master is fully up and running
|
||||
# otherwise the implicit dns change may cause master startup to fail
|
||||
systemctl enable ${SERVICE_TYPE}-node.service
|
||||
systemctl start ${SERVICE_TYPE}-node.service &
|
||||
|
||||
# TODO: run a CSR auto-approver
|
||||
# https://github.com/kargakis/acs-engine/issues/46
|
||||
csrs=($(oc get csr -o name))
|
||||
while [[ ${#csrs[@]} != "3" ]]; do
|
||||
sleep 2
|
||||
csrs=($(oc get csr -o name))
|
||||
if [[ ${#csrs[@]} == "3" ]]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
for csr in ${csrs[@]}; do
|
||||
oc adm certificate approve $csr
|
||||
done
|
||||
|
||||
csrs=($(oc get csr -o name))
|
||||
while [[ ${#csrs[@]} != "6" ]]; do
|
||||
sleep 2
|
||||
csrs=($(oc get csr -o name))
|
||||
if [[ ${#csrs[@]} == "6" ]]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
for csr in ${csrs[@]}; do
|
||||
oc adm certificate approve $csr
|
||||
done
|
||||
|
||||
chmod +x /tmp/ansible/ansible.sh
|
||||
docker run \
|
||||
--rm \
|
||||
-u "$(id -u)" \
|
||||
-v /etc/origin:/etc/origin:z \
|
||||
-v /tmp/ansible:/opt/app-root/src:z \
|
||||
-v /root/.kube:/opt/app-root/src/.kube:z \
|
||||
-w /opt/app-root/src \
|
||||
-e IMAGE_BASE="$IMAGE_BASE" \
|
||||
-e VERSION="$VERSION" \
|
||||
-e HOSTNAME="$(hostname)" \
|
||||
"$IMAGE_BASE-ansible:v$VERSION" \
|
||||
/opt/app-root/src/ansible.sh
|
||||
|
||||
exit 0
|
|
@ -0,0 +1,37 @@
#!/bin/bash -x

# TODO: /etc/dnsmasq.d/origin-upstream-dns.conf is currently hardcoded; it
# probably shouldn't be
SERVICE_TYPE=origin
if [ -f "/etc/sysconfig/atomic-openshift-node" ]; then
	SERVICE_TYPE=atomic-openshift
fi

# TODO: remove this once we generate the registry certificate
cat >>/etc/sysconfig/docker <<'EOF'
INSECURE_REGISTRY='--insecure-registry 172.30.0.0/16'
EOF

systemctl restart docker.service

{{if eq .Role "infra"}}
echo "BOOTSTRAP_CONFIG_NAME=node-config-infra" >>/etc/sysconfig/${SERVICE_TYPE}-node
{{else}}
echo "BOOTSTRAP_CONFIG_NAME=node-config-compute" >>/etc/sysconfig/${SERVICE_TYPE}-node
{{end}}

rm -rf /etc/etcd/* /etc/origin/master/* /etc/origin/node/*

( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz)

cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt
update-ca-trust

# TODO: when enabling secure registry, may need:
# ln -s /etc/origin/node/node-client-ca.crt /etc/docker/certs.d/docker-registry.default.svc:5000

# note: ${SERVICE_TYPE}-node crash loops until master is up
systemctl enable ${SERVICE_TYPE}-node.service
systemctl start ${SERVICE_TYPE}-node.service

exit 0
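Editor's note: to make the {{...}} placeholders in the node script above concrete, the script is a Go text/template that is rendered by the OpenShiftGetNodeSh helper added later in this diff. The sketch below mirrors that rendering without reproducing the real code; the cut-down script body, the string Role type, and the placeholder bundle value are illustrative assumptions only.

package main

import (
	"os"
	"text/template"
)

// A cut-down stand-in for the node bootstrap script above.
const nodeScript = `#!/bin/bash -x
{{if eq .Role "infra"}}
echo "BOOTSTRAP_CONFIG_NAME=node-config-infra" >>/etc/sysconfig/origin-node
{{else}}
echo "BOOTSTRAP_CONFIG_NAME=node-config-compute" >>/etc/sysconfig/origin-node
{{end}}
( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz)
`

func main() {
	t := template.Must(template.New("node").Parse(nodeScript))
	// Role and ConfigBundle are the two values the template consumes; the
	// bundle here is a placeholder rather than a real base64 tarball.
	err := t.Execute(os.Stdout, struct {
		ConfigBundle string
		Role         string
	}{ConfigBundle: "H4sIAAAAAAAA...", Role: "infra"})
	if err != nil {
		panic(err)
	}
}

Rendering with Role "infra" selects the node-config-infra bootstrap config, which is what the infra agent pool in the example apimodel ends up using.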
@ -1,6 +1,10 @@
package acsengine

const (
	// DefaultOpenShiftMasterSubnet is the default value for master subnet for Openshift.
	DefaultOpenShiftMasterSubnet = "10.0.0.0/24"
	// DefaultOpenShiftFirstConsecutiveStaticIP is the default static ip address for master 0 for Openshift.
	DefaultOpenShiftFirstConsecutiveStaticIP = "10.0.0.11"
	// DefaultMasterSubnet specifies the default master subnet for DCOS or Swarm
	DefaultMasterSubnet = "172.16.0.0/24"
	// DefaultFirstConsecutiveStaticIP specifies the static IP address on master 0 for DCOS or Swarm
@ -100,6 +104,8 @@ const (
	DefaultGeneratorCode = "acsengine"
	// DefaultOrchestratorName specifies the 3 character orchestrator code of the cluster template and affects resource naming.
	DefaultOrchestratorName = "k8s"
	// DefaultOpenshiftOrchestratorName specifies the 3 character orchestrator code of the cluster template and affects resource naming.
	DefaultOpenshiftOrchestratorName = "ocp"
	// DefaultEtcdVersion specifies the default etcd version to install
	DefaultEtcdVersion = "3.2.16"
	// DefaultEtcdDiskSize specifies the default size for Kubernetes master etcd disk volumes in GB
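Editor's note: the two OpenShift network defaults above are related: the first consecutive static IP must fall inside the default master subnet. The short sketch below verifies that relationship; the lower-cased constant copies and the check itself are not part of the PR.

package main

import (
	"fmt"
	"net"
)

// Local copies of the two OpenShift defaults declared above.
const (
	defaultOpenShiftMasterSubnet             = "10.0.0.0/24"
	defaultOpenShiftFirstConsecutiveStaticIP = "10.0.0.11"
)

func main() {
	_, subnet, err := net.ParseCIDR(defaultOpenShiftMasterSubnet)
	if err != nil {
		panic(err)
	}
	ip := net.ParseIP(defaultOpenShiftFirstConsecutiveStaticIP)
	fmt.Println(subnet.Contains(ip)) // true: 10.0.0.11 sits inside 10.0.0.0/24
}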
@ -11,6 +11,8 @@ import (
|
|||
"github.com/Azure/acs-engine/pkg/api"
|
||||
"github.com/Azure/acs-engine/pkg/api/common"
|
||||
"github.com/Azure/acs-engine/pkg/helpers"
|
||||
"github.com/Azure/acs-engine/pkg/openshift/certgen"
|
||||
"github.com/Azure/acs-engine/pkg/openshift/filesystem"
|
||||
"github.com/Masterminds/semver"
|
||||
)
|
||||
|
||||
|
@ -296,7 +298,9 @@ func setOrchestratorDefaults(cs *api.ContainerService) {
|
|||
o.OrchestratorVersion = common.GetValidPatchVersion(
|
||||
o.OrchestratorType,
|
||||
o.OrchestratorVersion)
|
||||
if o.OrchestratorType == api.Kubernetes {
|
||||
|
||||
switch o.OrchestratorType {
|
||||
case api.Kubernetes:
|
||||
k8sVersion := o.OrchestratorVersion
|
||||
|
||||
if o.KubernetesConfig == nil {
|
||||
|
@ -489,13 +493,25 @@ func setOrchestratorDefaults(cs *api.ContainerService) {
|
|||
// Configure scheduler
|
||||
setSchedulerConfig(cs)
|
||||
|
||||
} else if o.OrchestratorType == api.DCOS {
|
||||
case api.DCOS:
|
||||
if o.DcosConfig == nil {
|
||||
o.DcosConfig = &api.DcosConfig{}
|
||||
}
|
||||
if o.DcosConfig.DcosWindowsBootstrapURL == "" {
|
||||
o.DcosConfig.DcosWindowsBootstrapURL = DefaultDCOSSpecConfig.DCOSWindowsBootstrapDownloadURL
|
||||
}
|
||||
case api.OpenShift:
|
||||
a.MasterProfile.Distro = api.RHEL
|
||||
kc := a.OrchestratorProfile.OpenShiftConfig.KubernetesConfig
|
||||
if kc == nil {
|
||||
kc = &api.KubernetesConfig{}
|
||||
}
|
||||
if kc.ContainerRuntime == "" {
|
||||
kc.ContainerRuntime = DefaultContainerRuntime
|
||||
}
|
||||
if kc.NetworkPolicy == "" {
|
||||
kc.NetworkPolicy = DefaultNetworkPolicy
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -545,6 +561,11 @@ func setMasterNetworkDefaults(a *api.Properties, isUpgrade bool) {
|
|||
a.MasterProfile.FirstConsecutiveStaticIP = DefaultFirstConsecutiveKubernetesStaticIP
|
||||
}
|
||||
}
|
||||
} else if a.OrchestratorProfile.OrchestratorType == api.OpenShift {
|
||||
a.MasterProfile.Subnet = DefaultOpenShiftMasterSubnet
|
||||
if !isUpgrade || len(a.MasterProfile.FirstConsecutiveStaticIP) == 0 {
|
||||
a.MasterProfile.FirstConsecutiveStaticIP = DefaultOpenShiftFirstConsecutiveStaticIP
|
||||
}
|
||||
} else if a.HasWindows() {
|
||||
a.MasterProfile.Subnet = DefaultSwarmWindowsMasterSubnet
|
||||
// FirstConsecutiveStaticIP is not reset if it is upgrade and some value already exists
|
||||
|
@ -585,7 +606,8 @@ func setAgentNetworkDefaults(a *api.Properties) {
|
|||
if a.MasterProfile != nil && !a.MasterProfile.IsCustomVNET() {
|
||||
subnetCounter := 0
|
||||
for _, profile := range a.AgentPoolProfiles {
|
||||
if a.OrchestratorProfile.OrchestratorType == api.Kubernetes {
|
||||
if a.OrchestratorProfile.OrchestratorType == api.Kubernetes ||
|
||||
a.OrchestratorProfile.OrchestratorType == api.OpenShift {
|
||||
profile.Subnet = a.MasterProfile.Subnet
|
||||
} else {
|
||||
profile.Subnet = fmt.Sprintf(DefaultAgentSubnetTemplate, subnetCounter)
|
||||
|
@ -636,7 +658,96 @@ func setStorageDefaults(a *api.Properties) {
|
|||
}
|
||||
}
|
||||
|
||||
func openShiftSetDefaultCerts(a *api.Properties) (bool, error) {
|
||||
externalMasterHostname := fmt.Sprintf("%s.%s.cloudapp.azure.com", a.MasterProfile.DNSPrefix, a.AzProfile.Location)
|
||||
routerLBHostname := fmt.Sprintf("%s-router.%s.cloudapp.azure.com", a.MasterProfile.DNSPrefix, a.AzProfile.Location)
|
||||
c := certgen.Config{
|
||||
Master: &certgen.Master{
|
||||
Hostname: fmt.Sprintf("%s-master-%s-0", DefaultOpenshiftOrchestratorName, GenerateClusterID(a)),
|
||||
IPs: []net.IP{
|
||||
net.ParseIP(a.MasterProfile.FirstConsecutiveStaticIP),
|
||||
},
|
||||
Port: 8443,
|
||||
},
|
||||
ExternalMasterHostname: externalMasterHostname,
|
||||
ClusterUsername: a.OrchestratorProfile.OpenShiftConfig.ClusterUsername,
|
||||
ClusterPassword: a.OrchestratorProfile.OpenShiftConfig.ClusterPassword,
|
||||
AzureConfig: certgen.AzureConfig{
|
||||
TenantID: a.AzProfile.TenantID,
|
||||
SubscriptionID: a.AzProfile.SubscriptionID,
|
||||
AADClientID: a.ServicePrincipalProfile.ClientID,
|
||||
AADClientSecret: a.ServicePrincipalProfile.Secret,
|
||||
ResourceGroup: a.AzProfile.ResourceGroup,
|
||||
Location: a.AzProfile.Location,
|
||||
},
|
||||
}
|
||||
a.OrchestratorProfile.OpenShiftConfig.ExternalMasterHostname = externalMasterHostname
|
||||
a.OrchestratorProfile.OpenShiftConfig.RouterLBHostname = routerLBHostname
|
||||
|
||||
err := c.PrepareMasterCerts()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = c.PrepareMasterKubeConfigs()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = c.PrepareMasterFiles()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
err = c.PrepareBootstrapKubeConfig()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if a.OrchestratorProfile.OpenShiftConfig.ConfigBundles == nil {
|
||||
a.OrchestratorProfile.OpenShiftConfig.ConfigBundles = make(map[string][]byte)
|
||||
}
|
||||
|
||||
masterBundle, err := getConfigBundle(c.WriteMaster)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
a.OrchestratorProfile.OpenShiftConfig.ConfigBundles["master"] = masterBundle
|
||||
|
||||
nodeBundle, err := getConfigBundle(c.WriteNode)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
a.OrchestratorProfile.OpenShiftConfig.ConfigBundles["bootstrap"] = nodeBundle
|
||||
|
||||
return false, nil
|
||||
}
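Editor's note: for readers tracing where the certificate SANs and the router hostname come from, here is a minimal standalone sketch of the hostname convention openShiftSetDefaultCerts applies above; the dnsPrefix and location values are made-up placeholders. The "-router" label matches the domainNameLabel put on the router public IP earlier in this diff.

package main

import "fmt"

func main() {
	dnsPrefix := "mycluster" // masterProfile.dnsPrefix from the apimodel (placeholder)
	location := "eastus"     // azProfile.location from the apimodel (placeholder)

	externalMasterHostname := fmt.Sprintf("%s.%s.cloudapp.azure.com", dnsPrefix, location)
	routerLBHostname := fmt.Sprintf("%s-router.%s.cloudapp.azure.com", dnsPrefix, location)

	fmt.Println(externalMasterHostname) // mycluster.eastus.cloudapp.azure.com
	fmt.Println(routerLBHostname)       // mycluster-router.eastus.cloudapp.azure.com
}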
|
||||
|
||||
type writeFn func(filesystem.Filesystem) error
|
||||
|
||||
func getConfigBundle(write writeFn) ([]byte, error) {
|
||||
b := &bytes.Buffer{}
|
||||
|
||||
fs, err := filesystem.NewTGZFile(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = write(fs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = fs.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return b.Bytes(), nil
|
||||
}
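Editor's note: the "config bundle" round trip can be hard to see across files. getConfigBundle above writes certificates and configs into an in-memory .tar.gz (via the openshift/filesystem package), the OpenShiftGetMasterSh/OpenShiftGetNodeSh template functions later in this diff base64-encode that bundle into the bootstrap scripts, and the scripts run `base64 -d | tar -xz` on the node. The stdlib-only sketch below illustrates the producing side of that mechanism; the single file and its contents are placeholders, and this is not the acs-engine implementation.

package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
)

// makeBundle writes one placeholder file into an in-memory tar.gz and
// base64-encodes it, the same shape the bootstrap scripts expect.
func makeBundle() (string, error) {
	buf := &bytes.Buffer{}
	gw := gzip.NewWriter(buf)
	tw := tar.NewWriter(gw)

	content := []byte("fake-ca-certificate")
	hdr := &tar.Header{Name: "etc/origin/master/ca.crt", Mode: 0600, Size: int64(len(content))}
	if err := tw.WriteHeader(hdr); err != nil {
		return "", err
	}
	if _, err := tw.Write(content); err != nil {
		return "", err
	}
	if err := tw.Close(); err != nil {
		return "", err
	}
	if err := gw.Close(); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}

func main() {
	bundle, err := makeBundle()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(bundle), "base64 characters ready to splice into a bootstrap script")
}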
|
||||
|
||||
func setDefaultCerts(a *api.Properties) (bool, error) {
|
||||
if a.MasterProfile != nil && a.OrchestratorProfile.OrchestratorType == api.OpenShift {
|
||||
return openShiftSetDefaultCerts(a)
|
||||
}
|
||||
|
||||
if a.MasterProfile == nil || a.OrchestratorProfile.OrchestratorType != api.Kubernetes {
|
||||
return false, nil
|
||||
|
|
|
@ -39,6 +39,9 @@ const (
|
|||
kubernetesJumpboxCustomDataYaml = "k8s/kubernetesjumpboxcustomdata.yml"
|
||||
kubeConfigJSON = "k8s/kubeconfig.json"
|
||||
kubernetesWindowsAgentCustomDataPS1 = "k8s/kuberneteswindowssetup.ps1"
|
||||
// OpenShift custom scripts
|
||||
openshiftNodeScript = "k8s/openshiftnodescript.sh"
|
||||
openshiftMasterScript = "k8s/openshiftmasterscript.sh"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -106,6 +109,7 @@ var dcosTemplateFiles = []string{dcosBaseFile, dcosAgentResourcesVMAS, dcosAgent
|
|||
var kubernetesTemplateFiles = []string{kubernetesBaseFile, kubernetesAgentResourcesVMAS, kubernetesAgentVars, kubernetesMasterResources, kubernetesMasterVars, kubernetesParams, kubernetesWinAgentVars}
|
||||
var swarmTemplateFiles = []string{swarmBaseFile, swarmParams, swarmAgentResourcesVMAS, swarmAgentVars, swarmAgentResourcesVMSS, swarmAgentResourcesClassic, swarmBaseFile, swarmMasterResources, swarmMasterVars, swarmWinAgentResourcesVMAS, swarmWinAgentResourcesVMSS}
|
||||
var swarmModeTemplateFiles = []string{swarmBaseFile, swarmParams, swarmAgentResourcesVMAS, swarmAgentVars, swarmAgentResourcesVMSS, swarmAgentResourcesClassic, swarmBaseFile, swarmMasterResources, swarmMasterVars, swarmWinAgentResourcesVMAS, swarmWinAgentResourcesVMSS}
|
||||
var openshiftTemplateFiles = append(kubernetesTemplateFiles, openshiftNodeScript, openshiftMasterScript)
|
||||
|
||||
/**
|
||||
The following parameters could be either a plain text, or referenced to a secret in a keyvault:
|
||||
|
@ -169,6 +173,7 @@ func (t *TemplateGenerator) verifyFiles() error {
|
|||
allFiles := commonTemplateFiles
|
||||
allFiles = append(allFiles, dcosTemplateFiles...)
|
||||
allFiles = append(allFiles, kubernetesTemplateFiles...)
|
||||
allFiles = append(allFiles, openshiftTemplateFiles...)
|
||||
allFiles = append(allFiles, swarmTemplateFiles...)
|
||||
for _, file := range allFiles {
|
||||
if _, err := Asset(file); err != nil {
|
||||
|
@ -354,6 +359,9 @@ func (t *TemplateGenerator) prepareTemplateFiles(properties *api.Properties) ([]
|
|||
case api.SwarmMode:
|
||||
files = append(commonTemplateFiles, swarmModeTemplateFiles...)
|
||||
baseFile = swarmBaseFile
|
||||
case api.OpenShift:
|
||||
files = append(commonTemplateFiles, openshiftTemplateFiles...)
|
||||
baseFile = kubernetesBaseFile
|
||||
default:
|
||||
return nil, "", t.Translator.Errorf("orchestrator '%s' is unsupported", properties.OrchestratorProfile.OrchestratorType)
|
||||
}
|
||||
|
@ -405,13 +413,15 @@ func GetCloudSpecConfig(location string) AzureEnvironmentSpecConfig {
|
|||
// ValidateDistro checks if the requested orchestrator type is supported on the requested Linux distro.
|
||||
func ValidateDistro(cs *api.ContainerService) bool {
|
||||
// Check Master distro
|
||||
if cs.Properties.MasterProfile != nil && cs.Properties.MasterProfile.Distro == api.RHEL && cs.Properties.OrchestratorProfile.OrchestratorType != api.SwarmMode {
|
||||
if cs.Properties.MasterProfile != nil && cs.Properties.MasterProfile.Distro == api.RHEL &&
|
||||
(cs.Properties.OrchestratorProfile.OrchestratorType != api.SwarmMode && cs.Properties.OrchestratorProfile.OrchestratorType != api.OpenShift) {
|
||||
log.Fatalf("Orchestrator type %s not suported on RHEL Master", cs.Properties.OrchestratorProfile.OrchestratorType)
|
||||
return false
|
||||
}
|
||||
// Check Agent distros
|
||||
for _, agentProfile := range cs.Properties.AgentPoolProfiles {
|
||||
if agentProfile.Distro == api.RHEL && cs.Properties.OrchestratorProfile.OrchestratorType != api.SwarmMode {
|
||||
if agentProfile.Distro == api.RHEL &&
|
||||
(cs.Properties.OrchestratorProfile.OrchestratorType != api.SwarmMode && cs.Properties.OrchestratorProfile.OrchestratorType != api.OpenShift) {
|
||||
log.Fatalf("Orchestrator type %s not suported on RHEL Agent", cs.Properties.OrchestratorProfile.OrchestratorType)
|
||||
return false
|
||||
}
|
||||
|
@ -470,7 +480,7 @@ func getParameters(cs *api.ContainerService, isClassicMode bool, generatorCode s
|
|||
if properties.MasterProfile != nil {
|
||||
if properties.MasterProfile.IsCustomVNET() {
|
||||
addValue(parametersMap, "masterVnetSubnetID", properties.MasterProfile.VnetSubnetID)
|
||||
if properties.OrchestratorProfile.IsKubernetes() {
|
||||
if properties.OrchestratorProfile.IsKubernetes() || properties.OrchestratorProfile.IsOpenShift() {
|
||||
addValue(parametersMap, "vnetCidr", properties.MasterProfile.VnetCidr)
|
||||
}
|
||||
} else {
|
||||
|
@ -511,7 +521,8 @@ func getParameters(cs *api.ContainerService, isClassicMode bool, generatorCode s
|
|||
}
|
||||
|
||||
// Kubernetes Parameters
|
||||
if properties.OrchestratorProfile.OrchestratorType == api.Kubernetes {
|
||||
if properties.OrchestratorProfile.IsKubernetes() ||
|
||||
properties.OrchestratorProfile.IsOpenShift() {
|
||||
k8sVersion := properties.OrchestratorProfile.OrchestratorVersion
|
||||
|
||||
kubernetesHyperkubeSpec := properties.OrchestratorProfile.KubernetesConfig.KubernetesImageBase + KubeConfigs[k8sVersion]["hyperkube"]
|
||||
|
@ -560,11 +571,14 @@ func getParameters(cs *api.ContainerService, isClassicMode bool, generatorCode s
|
|||
addValue(parametersMap, "kubernetesCcmImageSpec", kubernetesCcmSpec)
|
||||
}
|
||||
|
||||
addValue(parametersMap, "dockerEngineDownloadRepo", cloudSpecConfig.DockerSpecConfig.DockerEngineRepo)
|
||||
if !properties.OrchestratorProfile.IsOpenShift() {
|
||||
addValue(parametersMap, "dockerEngineDownloadRepo", cloudSpecConfig.DockerSpecConfig.DockerEngineRepo)
|
||||
addValue(parametersMap, "dockerEngineVersion", dockerEngineVersion)
|
||||
}
|
||||
|
||||
addValue(parametersMap, "kubeDNSServiceIP", properties.OrchestratorProfile.KubernetesConfig.DNSServiceIP)
|
||||
addValue(parametersMap, "kubeServiceCidr", properties.OrchestratorProfile.KubernetesConfig.ServiceCIDR)
|
||||
addValue(parametersMap, "kubernetesHyperkubeSpec", kubernetesHyperkubeSpec)
|
||||
addValue(parametersMap, "dockerEngineVersion", dockerEngineVersion)
|
||||
addValue(parametersMap, "kubernetesAddonManagerSpec", cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase+KubeConfigs[k8sVersion]["addonmanager"])
|
||||
addValue(parametersMap, "kubernetesAddonResizerSpec", cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase+KubeConfigs[k8sVersion]["addonresizer"])
|
||||
addValue(parametersMap, "kubernetesDNSMasqSpec", cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase+KubeConfigs[k8sVersion]["dnsmasq"])
|
||||
|
@ -657,6 +671,8 @@ func getParameters(cs *api.ContainerService, isClassicMode bool, generatorCode s
|
|||
addValue(parametersMap, "generatorCode", generatorCode)
|
||||
if properties.HostedMasterProfile != nil {
|
||||
addValue(parametersMap, "orchestratorName", "aks")
|
||||
} else if properties.OrchestratorProfile.IsOpenShift() {
|
||||
addValue(parametersMap, "orchestratorName", DefaultOpenshiftOrchestratorName)
|
||||
} else {
|
||||
addValue(parametersMap, "orchestratorName", DefaultOrchestratorName)
|
||||
}
|
||||
|
@ -804,7 +820,7 @@ func getParameters(cs *api.ContainerService, isClassicMode bool, generatorCode s
|
|||
if properties.WindowsProfile.WindowsImageSourceURL != "" {
|
||||
addValue(parametersMap, "agentWindowsSourceUrl", properties.WindowsProfile.WindowsImageSourceURL)
|
||||
}
|
||||
if properties.OrchestratorProfile.OrchestratorType == api.Kubernetes {
|
||||
if properties.OrchestratorProfile.IsKubernetes() || properties.OrchestratorProfile.IsOpenShift() {
|
||||
k8sVersion := properties.OrchestratorProfile.OrchestratorVersion
|
||||
addValue(parametersMap, "kubeBinariesSASURL", cloudSpecConfig.KubernetesSpecConfig.KubeBinariesSASURLBase+KubeConfigs[k8sVersion]["windowszip"])
|
||||
addValue(parametersMap, "windowsPackageSASURLBase", cloudSpecConfig.KubernetesSpecConfig.WindowsPackageSASURLBase)
|
||||
|
@ -904,12 +920,12 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
|
|||
"IsKubernetesVersionGe": func(version string) bool {
|
||||
orchestratorVersion, _ := semver.NewVersion(cs.Properties.OrchestratorProfile.OrchestratorVersion)
|
||||
constraint, _ := semver.NewConstraint(">=" + version)
|
||||
return cs.Properties.OrchestratorProfile.OrchestratorType == api.Kubernetes && constraint.Check(orchestratorVersion)
|
||||
return cs.Properties.OrchestratorProfile.IsKubernetes() && constraint.Check(orchestratorVersion)
|
||||
},
|
||||
"IsKubernetesVersionLt": func(version string) bool {
|
||||
orchestratorVersion, _ := semver.NewVersion(cs.Properties.OrchestratorProfile.OrchestratorVersion)
|
||||
constraint, _ := semver.NewConstraint("<" + version)
|
||||
return cs.Properties.OrchestratorProfile.OrchestratorType == api.Kubernetes && constraint.Check(orchestratorVersion)
|
||||
return cs.Properties.OrchestratorProfile.IsKubernetes() && constraint.Check(orchestratorVersion)
|
||||
},
|
||||
"IsKubernetesVersionTilde": func(version string) bool {
|
||||
// examples include
|
||||
|
@ -917,7 +933,7 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
|
|||
// ~1.2.x is equivalent to >= 1.2.0, < 1.3.0
|
||||
orchestratorVersion, _ := semver.NewVersion(cs.Properties.OrchestratorProfile.OrchestratorVersion)
|
||||
constraint, _ := semver.NewConstraint("~" + version)
|
||||
return cs.Properties.OrchestratorProfile.OrchestratorType == api.Kubernetes && constraint.Check(orchestratorVersion)
|
||||
return cs.Properties.OrchestratorProfile.IsKubernetes() && constraint.Check(orchestratorVersion)
|
||||
},
|
||||
"GetMasterKubernetesLabels": func(rg string) string {
|
||||
var buf bytes.Buffer
|
||||
|
@ -939,6 +955,9 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
|
|||
return buf.String()
|
||||
},
|
||||
"GetKubeletConfigKeyVals": func(kc *api.KubernetesConfig) string {
|
||||
if kc == nil {
|
||||
return ""
|
||||
}
|
||||
kubeletConfig := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig
|
||||
if kc.KubeletConfig != nil {
|
||||
kubeletConfig = kc.KubeletConfig
|
||||
|
@ -975,7 +994,7 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
|
|||
return false
|
||||
},
|
||||
"RequiresFakeAgentOutput": func() bool {
|
||||
return cs.Properties.OrchestratorProfile.OrchestratorType == api.Kubernetes
|
||||
return cs.Properties.OrchestratorProfile.IsKubernetes() || cs.Properties.OrchestratorProfile.IsOpenShift()
|
||||
},
|
||||
"IsSwarmMode": func() bool {
|
||||
return cs.Properties.OrchestratorProfile.IsSwarmMode()
|
||||
|
@ -983,6 +1002,9 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
		"IsKubernetes": func() bool {
			return cs.Properties.OrchestratorProfile.IsKubernetes()
		},
		"IsOpenShift": func() bool {
			return cs.Properties.OrchestratorProfile.IsOpenShift()
		},
		"IsPublic": func(ports []int) bool {
			return len(ports) > 0
		},
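Editor's note: the new IsOpenShift entry above is what drives the {{if IsOpenShift}} branches in the ARM template snippets earlier in this diff. The self-contained sketch below shows the mechanism with text/template; the miniature template is lifted from the NSG port-range change in this PR, while the hard-coded return values stand in for the real generator state.

package main

import (
	"os"
	"text/template"
)

func main() {
	funcMap := template.FuncMap{
		"IsOpenShift":  func() bool { return true },
		"IsKubernetes": func() bool { return false },
	}
	const snippet = `"destinationPortRange": {{if IsOpenShift}}"8443-8443"{{else}}"443-443"{{end}},
`
	t := template.Must(template.New("nsg").Funcs(funcMap).Parse(snippet))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// prints: "destinationPortRange": "8443-8443",
}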
@ -1114,7 +1136,7 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
|
|||
"GetAgentAllowedSizes": func() string {
|
||||
if t.ClassicMode {
|
||||
return GetClassicAllowedSizes()
|
||||
} else if cs.Properties.OrchestratorProfile.OrchestratorType == api.Kubernetes {
|
||||
} else if cs.Properties.OrchestratorProfile.IsKubernetes() || cs.Properties.OrchestratorProfile.IsOpenShift() {
|
||||
return GetKubernetesAgentAllowedSizes()
|
||||
}
|
||||
return GetMasterAgentAllowedSizes()
|
||||
|
@ -1704,6 +1726,36 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
|
|||
"EnablePodSecurityPolicy": func() bool {
|
||||
return helpers.IsTrueBoolPointer(cs.Properties.OrchestratorProfile.KubernetesConfig.EnablePodSecurityPolicy)
|
||||
},
|
||||
"OpenShiftGetMasterSh": func() string {
|
||||
tb := MustAsset("k8s/openshiftmasterscript.sh")
|
||||
t := template.Must(template.New("master").Parse(string(tb)))
|
||||
b := &bytes.Buffer{}
|
||||
t.Execute(b, struct {
|
||||
ConfigBundle string
|
||||
ExternalMasterHostname string
|
||||
RouterLBHostname string
|
||||
Location string
|
||||
}{
|
||||
ConfigBundle: base64.StdEncoding.EncodeToString(cs.Properties.OrchestratorProfile.OpenShiftConfig.ConfigBundles["master"]),
|
||||
ExternalMasterHostname: cs.Properties.OrchestratorProfile.OpenShiftConfig.ExternalMasterHostname,
|
||||
RouterLBHostname: cs.Properties.OrchestratorProfile.OpenShiftConfig.RouterLBHostname,
|
||||
Location: cs.Properties.AzProfile.Location,
|
||||
})
|
||||
return b.String()
|
||||
},
|
||||
"OpenShiftGetNodeSh": func(profile *api.AgentPoolProfile) string {
|
||||
tb := MustAsset("k8s/openshiftnodescript.sh")
|
||||
t := template.Must(template.New("node").Parse(string(tb)))
|
||||
b := &bytes.Buffer{}
|
||||
t.Execute(b, struct {
|
||||
ConfigBundle string
|
||||
Role api.AgentPoolProfileRole
|
||||
}{
|
||||
ConfigBundle: base64.StdEncoding.EncodeToString(cs.Properties.OrchestratorProfile.OpenShiftConfig.ConfigBundles["bootstrap"]),
|
||||
Role: profile.Role,
|
||||
})
|
||||
return b.String()
|
||||
},
|
||||
// inspired by http://stackoverflow.com/questions/18276173/calling-a-template-with-several-pipeline-parameters/18276968#18276968
|
||||
"dict": func(values ...interface{}) (map[string]interface{}, error) {
|
||||
if len(values)%2 != 0 {
|
||||
|
@ -1731,7 +1783,7 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
|
|||
|
||||
func makeMasterExtensionScriptCommands(cs *api.ContainerService) string {
|
||||
copyIndex := "',copyIndex(),'"
|
||||
if cs.Properties.OrchestratorProfile.IsKubernetes() {
|
||||
if cs.Properties.OrchestratorProfile.IsKubernetes() || cs.Properties.OrchestratorProfile.IsOpenShift() {
|
||||
copyIndex = "',copyIndex(variables('masterOffset')),'"
|
||||
}
|
||||
return makeExtensionScriptCommands(cs.Properties.MasterProfile.PreprovisionExtension,
|
||||
|
@ -2520,7 +2572,7 @@ func getMasterLinkedTemplateText(masterProfile *api.MasterProfile, orchestratorT
|
|||
|
||||
loopCount := "[variables('masterCount')]"
|
||||
loopOffset := ""
|
||||
if orchestratorType == api.Kubernetes {
|
||||
if orchestratorType == api.Kubernetes || orchestratorType == api.OpenShift {
|
||||
// Due to upgrade k8s sometimes needs to install just some of the nodes.
|
||||
loopCount = "[sub(variables('masterCount'), variables('masterOffset'))]"
|
||||
loopOffset = "variables('masterOffset')"
|
||||
|
|
|
@ -12,6 +12,8 @@ const (
|
|||
Kubernetes string = "Kubernetes"
|
||||
// SwarmMode is the string constant for the Swarm Mode orchestrator type
|
||||
SwarmMode string = "SwarmMode"
|
||||
// OpenShift is the string constant for the OpenShift orchestrator type
|
||||
OpenShift string = "OpenShift"
|
||||
)
|
||||
|
||||
// validation values
|
||||
|
@ -77,3 +79,15 @@ var AllDCOSSupportedVersions = []string{
|
|||
DCOSVersion1Dot9Dot8,
|
||||
DCOSVersion1Dot8Dot8,
|
||||
}
|
||||
|
||||
const (
	// OpenShiftVersion3Dot9Dot0 is the major.minor.patch string for the 3.9.0 version of OpenShift
	OpenShiftVersion3Dot9Dot0 string = "3.9.0"
	// OpenShiftDefaultVersion is the default major.minor.patch version for OpenShift
	OpenShiftDefaultVersion string = OpenShiftVersion3Dot9Dot0
)

// GetAllSupportedOpenShiftVersions returns a slice of all supported OpenShift versions.
func GetAllSupportedOpenShiftVersions() []string {
	return []string{OpenShiftVersion3Dot9Dot0}
}
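Editor's note: a caller such as GetSupportedVersions in the next hunk uses the two declarations above to answer "is this OpenShift version supported, and what is the default?". The tiny sketch below shows that check with local stand-ins for the real constants and function; it is illustrative only.

package main

import "fmt"

// Stand-ins for OpenShiftDefaultVersion and GetAllSupportedOpenShiftVersions above.
const openShiftDefaultVersion = "3.9.0"

func getAllSupportedOpenShiftVersions() []string {
	return []string{"3.9.0"}
}

func main() {
	requested := "3.9.0"
	supported := false
	for _, v := range getAllSupportedOpenShiftVersions() {
		if v == requested {
			supported = true
			break
		}
	}
	fmt.Println(supported, openShiftDefaultVersion) // true 3.9.0
}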
@ -223,6 +223,9 @@ func GetSupportedVersions(orchType string, hasWindows bool) (versions []string,
|
|||
}
|
||||
return GetAllSupportedKubernetesVersions(), GetDefaultKubernetesVersion()
|
||||
|
||||
case OpenShift:
|
||||
return GetAllSupportedOpenShiftVersions(), string(OpenShiftDefaultVersion)
|
||||
|
||||
case DCOS:
|
||||
return AllDCOSSupportedVersions, DCOSDefaultVersion
|
||||
default:
|
||||
|
|
|
@ -12,6 +12,8 @@ const (
|
|||
Kubernetes string = "Kubernetes"
|
||||
// SwarmMode is the string constant for the Swarm Mode orchestrator type
|
||||
SwarmMode string = "SwarmMode"
|
||||
// OpenShift is the string constant for the OpenShift orchestrator type
|
||||
OpenShift string = "OpenShift"
|
||||
)
|
||||
|
||||
// the OSTypes supported by vlabs
|
||||
|
@ -114,3 +116,10 @@ const (
|
|||
// DefaultPrivateClusterEnabled determines the acs-engine provided default for enabling kubernetes Private Cluster
|
||||
DefaultPrivateClusterEnabled = false
|
||||
)
|
||||
|
||||
const (
|
||||
// AgentPoolProfileRoleEmpty is the empty role
|
||||
AgentPoolProfileRoleEmpty AgentPoolProfileRole = ""
|
||||
// AgentPoolProfileRoleInfra is the infra role
|
||||
AgentPoolProfileRoleInfra AgentPoolProfileRole = "infra"
|
||||
)
|
||||
|
|
|
@ -161,6 +161,8 @@ func ConvertOrchestratorVersionProfileToVLabs(api *OrchestratorVersionProfile) *
|
|||
vlabsProfile.OrchestratorType = vlabs.Swarm
|
||||
case SwarmMode:
|
||||
vlabsProfile.OrchestratorType = vlabs.SwarmMode
|
||||
case OpenShift:
|
||||
vlabsProfile.OrchestratorType = vlabs.OpenShift
|
||||
}
|
||||
vlabsProfile.OrchestratorVersion = api.OrchestratorVersion
|
||||
vlabsProfile.Default = api.Default
|
||||
|
@ -472,6 +474,10 @@ func convertPropertiesToVLabs(api *Properties, vlabsProps *vlabs.Properties) {
|
|||
vlabsProps.AADProfile = &vlabs.AADProfile{}
|
||||
convertAADProfileToVLabs(api.AADProfile, vlabsProps.AADProfile)
|
||||
}
|
||||
if api.AzProfile != nil {
|
||||
vlabsProps.AzProfile = &vlabs.AzProfile{}
|
||||
convertAzProfileToVLabs(api.AzProfile, vlabsProps.AzProfile)
|
||||
}
|
||||
}
|
||||
|
||||
func convertLinuxProfileToV20160930(api *LinuxProfile, obj *v20160930.LinuxProfile) {
|
||||
|
@ -633,12 +639,26 @@ func convertOrchestratorProfileToVLabs(api *OrchestratorProfile, o *vlabs.Orches
|
|||
convertKubernetesConfigToVLabs(api.KubernetesConfig, o.KubernetesConfig)
|
||||
}
|
||||
|
||||
if api.OpenShiftConfig != nil {
|
||||
o.OpenShiftConfig = &vlabs.OpenShiftConfig{}
|
||||
convertOpenShiftConfigToVLabs(api.OpenShiftConfig, o.OpenShiftConfig)
|
||||
}
|
||||
|
||||
if api.DcosConfig != nil {
|
||||
o.DcosConfig = &vlabs.DcosConfig{}
|
||||
convertDcosConfigToVLabs(api.DcosConfig, o.DcosConfig)
|
||||
}
|
||||
}
|
||||
|
||||
func convertOpenShiftConfigToVLabs(api *OpenShiftConfig, vl *vlabs.OpenShiftConfig) {
|
||||
vl.KubernetesConfig = &vlabs.KubernetesConfig{}
|
||||
if api.KubernetesConfig != nil {
|
||||
convertKubernetesConfigToVLabs(api.KubernetesConfig, vl.KubernetesConfig)
|
||||
}
|
||||
vl.ClusterUsername = api.ClusterUsername
|
||||
vl.ClusterPassword = api.ClusterPassword
|
||||
}
|
||||
|
||||
func convertDcosConfigToVLabs(api *DcosConfig, vlabs *vlabs.DcosConfig) {
|
||||
vlabs.DcosBootstrapURL = api.DcosBootstrapURL
|
||||
vlabs.DcosWindowsBootstrapURL = api.DcosWindowsBootstrapURL
|
||||
|
@ -1072,3 +1092,10 @@ func convertAADProfileToVLabs(api *AADProfile, vlabs *vlabs.AADProfile) {
|
|||
vlabs.TenantID = api.TenantID
|
||||
vlabs.AdminGroupID = api.AdminGroupID
|
||||
}
|
||||
|
||||
func convertAzProfileToVLabs(api *AzProfile, vlabs *vlabs.AzProfile) {
|
||||
vlabs.Location = api.Location
|
||||
vlabs.ResourceGroup = api.ResourceGroup
|
||||
vlabs.SubscriptionID = api.SubscriptionID
|
||||
vlabs.TenantID = api.TenantID
|
||||
}
|
||||
|
|
|
@ -366,8 +366,7 @@ func convertVLabsProperties(vlabs *vlabs.Properties, api *Properties) {
|
|||
apiProfile := &AgentPoolProfile{}
|
||||
convertVLabsAgentPoolProfile(p, apiProfile)
|
||||
// by default vlabs will use managed disks for all orchestrators but kubernetes as it has encryption at rest.
|
||||
if !api.OrchestratorProfile.IsKubernetes() {
|
||||
// by default vlabs will use managed disks for all orchestrators but kubernetes as it has encryption at rest.
|
||||
if !api.OrchestratorProfile.IsKubernetes() && !api.OrchestratorProfile.IsOpenShift() {
|
||||
if len(p.StorageProfile) == 0 {
|
||||
apiProfile.StorageProfile = ManagedDisks
|
||||
}
|
||||
|
@ -401,6 +400,17 @@ func convertVLabsProperties(vlabs *vlabs.Properties, api *Properties) {
|
|||
api.AADProfile = &AADProfile{}
|
||||
convertVLabsAADProfile(vlabs.AADProfile, api.AADProfile)
|
||||
}
|
||||
if vlabs.AzProfile != nil {
|
||||
api.AzProfile = &AzProfile{}
|
||||
convertVLabsAZProfile(vlabs.AzProfile, api.AzProfile)
|
||||
}
|
||||
}
|
||||
|
||||
func convertVLabsAZProfile(vlabs *vlabs.AzProfile, api *AzProfile) {
|
||||
api.Location = vlabs.Location
|
||||
api.ResourceGroup = vlabs.ResourceGroup
|
||||
api.SubscriptionID = vlabs.SubscriptionID
|
||||
api.TenantID = vlabs.TenantID
|
||||
}
|
||||
|
||||
func convertV20160930LinuxProfile(obj *v20160930.LinuxProfile, api *LinuxProfile) {
|
||||
|
@ -560,6 +570,22 @@ func convertVLabsOrchestratorProfile(vp *vlabs.Properties, api *OrchestratorProf
|
|||
vlabscs := vp.OrchestratorProfile
|
||||
api.OrchestratorType = vlabscs.OrchestratorType
|
||||
switch api.OrchestratorType {
|
||||
case OpenShift:
|
||||
if vlabscs.OpenShiftConfig != nil {
|
||||
api.OpenShiftConfig = &OpenShiftConfig{}
|
||||
convertVLabsOpenShiftConfig(vlabscs.OpenShiftConfig, api.OpenShiftConfig)
|
||||
}
|
||||
// Set api.KubernetesConfig to api.OpenShiftConfig.KubernetesConfig so
|
||||
// acs-engine can reuse the same code used for generating parameters from
|
||||
// KubernetesConfig for OpenShiftConfig.
|
||||
if api.OpenShiftConfig != nil && api.OpenShiftConfig.KubernetesConfig != nil {
|
||||
api.KubernetesConfig = api.OpenShiftConfig.KubernetesConfig
|
||||
}
|
||||
api.OrchestratorVersion = common.RationalizeReleaseAndVersion(
|
||||
vlabscs.OrchestratorType,
|
||||
vlabscs.OrchestratorRelease,
|
||||
vlabscs.OrchestratorVersion,
|
||||
false)
|
||||
case Kubernetes:
|
||||
if vlabscs.KubernetesConfig != nil {
|
||||
api.KubernetesConfig = &KubernetesConfig{}
|
||||
|
@ -604,6 +630,18 @@ func convertVLabsDcosConfig(vlabs *vlabs.DcosConfig, api *DcosConfig) {
|
|||
api.DcosProviderPackageID = vlabs.DcosProviderPackageID
|
||||
}
|
||||
|
||||
func convertVLabsOpenShiftConfig(vlabs *vlabs.OpenShiftConfig, api *OpenShiftConfig) {
|
||||
// NOTE: This is a hack to avoid breaking the rest of the acs-engine
|
||||
// code when KubernetesConfig is accessed for various things. We don't
|
||||
// use anything from it today. Maybe do something cleaner here.
|
||||
api.KubernetesConfig = &KubernetesConfig{}
|
||||
if vlabs.KubernetesConfig != nil {
|
||||
convertVLabsKubernetesConfig(vlabs.KubernetesConfig, api.KubernetesConfig)
|
||||
}
|
||||
api.ClusterUsername = vlabs.ClusterUsername
|
||||
api.ClusterPassword = vlabs.ClusterPassword
|
||||
}
|
||||
|
||||
func convertVLabsKubernetesConfig(vlabs *vlabs.KubernetesConfig, api *KubernetesConfig) {
|
||||
api.KubernetesImageBase = vlabs.KubernetesImageBase
|
||||
api.ClusterSubnet = vlabs.ClusterSubnet
|
||||
|
@ -933,6 +971,7 @@ func convertVLabsAgentPoolProfile(vlabs *vlabs.AgentPoolProfile, api *AgentPoolP
|
|||
api.ImageRef.Name = vlabs.ImageRef.Name
|
||||
api.ImageRef.ResourceGroup = vlabs.ImageRef.ResourceGroup
|
||||
}
|
||||
api.Role = AgentPoolProfileRole(vlabs.Role)
|
||||
}
|
||||
|
||||
func convertVLabsKeyVaultSecrets(vlabs *vlabs.KeyVaultSecrets, api *KeyVaultSecrets) {
|
||||
|
|
|
@ -3,6 +3,9 @@ package api
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
|
||||
"github.com/Azure/acs-engine/pkg/api/common"
|
||||
"github.com/Azure/acs-engine/pkg/api/v20170701"
|
||||
"github.com/Azure/acs-engine/pkg/api/vlabs"
|
||||
|
@ -153,6 +156,61 @@ func TestKubernetesVlabsDefaults(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConvertVLabsOrchestratorProfile(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
props *vlabs.Properties
|
||||
expect *OrchestratorProfile
|
||||
}{
|
||||
"nilOpenShiftConfig": {
|
||||
props: &vlabs.Properties{
|
||||
OrchestratorProfile: &vlabs.OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
},
|
||||
},
|
||||
expect: &OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
OrchestratorVersion: common.OpenShiftDefaultVersion,
|
||||
},
|
||||
},
|
||||
"setOpenShiftConfig": {
|
||||
props: &vlabs.Properties{
|
||||
OrchestratorProfile: &vlabs.OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
OpenShiftConfig: &vlabs.OpenShiftConfig{
|
||||
KubernetesConfig: &vlabs.KubernetesConfig{
|
||||
NetworkPolicy: "azure",
|
||||
ContainerRuntime: "docker",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expect: &OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
OrchestratorVersion: common.OpenShiftDefaultVersion,
|
||||
KubernetesConfig: &KubernetesConfig{
|
||||
NetworkPolicy: "azure",
|
||||
ContainerRuntime: "docker",
|
||||
},
|
||||
OpenShiftConfig: &OpenShiftConfig{
|
||||
KubernetesConfig: &KubernetesConfig{
|
||||
NetworkPolicy: "azure",
|
||||
ContainerRuntime: "docker",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
t.Logf("running scenario %q", name)
|
||||
actual := &OrchestratorProfile{}
|
||||
convertVLabsOrchestratorProfile(test.props, actual)
|
||||
if !equality.Semantic.DeepEqual(test.expect, actual) {
|
||||
t.Errorf(spew.Sprintf("Expected:\n%+v\nGot:\n%+v", test.expect, actual))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeKubernetesProperties() *Properties {
|
||||
ap := &Properties{}
|
||||
ap.OrchestratorProfile = &OrchestratorProfile{}
|
||||
|
|
|
@ -20,6 +20,7 @@ func init() {
|
|||
DCOS: dcosInfo,
|
||||
Swarm: swarmInfo,
|
||||
SwarmMode: dockerceInfo,
|
||||
OpenShift: openShiftInfo,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -33,6 +34,8 @@ func validate(orchestrator, version string) (string, error) {
|
|||
return Swarm, nil
|
||||
case strings.EqualFold(orchestrator, SwarmMode):
|
||||
return SwarmMode, nil
|
||||
case strings.EqualFold(orchestrator, OpenShift):
|
||||
return OpenShift, nil
|
||||
case orchestrator == "":
|
||||
if version != "" {
|
||||
return "", fmt.Errorf("Must specify orchestrator for version '%s'", version)
|
||||
|
@ -228,3 +231,44 @@ func dockerceInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, e
|
|||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func openShiftInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, error) {
|
||||
orchs := []*OrchestratorVersionProfile{}
|
||||
if csOrch.OrchestratorVersion == "" {
|
||||
// get info for all supported versions
|
||||
for _, ver := range common.GetAllSupportedOpenShiftVersions() {
|
||||
// TODO: populate OrchestratorVersionProfile.Upgrades
|
||||
orchs = append(orchs,
|
||||
&OrchestratorVersionProfile{
|
||||
OrchestratorProfile: OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
OrchestratorVersion: ver,
|
||||
},
|
||||
Default: ver == common.OpenShiftDefaultVersion,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
ver, err := semver.NewVersion(csOrch.OrchestratorVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cons, err := semver.NewConstraint("<3.9.0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cons.Check(ver) {
|
||||
return nil, fmt.Errorf("OpenShift version %s is not supported", csOrch.OrchestratorVersion)
|
||||
}
|
||||
|
||||
// TODO: populate OrchestratorVersionProfile.Upgrades
|
||||
orchs = append(orchs,
|
||||
&OrchestratorVersionProfile{
|
||||
OrchestratorProfile: OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
OrchestratorVersion: csOrch.OrchestratorVersion,
|
||||
},
|
||||
Default: csOrch.OrchestratorVersion == common.OpenShiftDefaultVersion,
|
||||
})
|
||||
}
|
||||
return orchs, nil
|
||||
}
|
||||
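As an aside, the version gate above relies on a semver constraint: below is a minimal, self-contained sketch of the same check, assuming the github.com/Masterminds/semver package that openShiftInfo already imports. The version strings used are examples only.

package main

import (
    "fmt"

    "github.com/Masterminds/semver"
)

// isSupportedOpenShiftVersion mirrors the gate in openShiftInfo:
// anything matching "<3.9.0" is rejected.
func isSupportedOpenShiftVersion(v string) (bool, error) {
    ver, err := semver.NewVersion(v)
    if err != nil {
        return false, err
    }
    cons, err := semver.NewConstraint("<3.9.0")
    if err != nil {
        return false, err
    }
    // cons.Check returns true when ver satisfies "<3.9.0",
    // i.e. when the version is too old to be supported.
    return !cons.Check(ver), nil
}

func main() {
    for _, v := range []string{"3.7.1", "3.9.0"} {
        ok, err := isSupportedOpenShiftVersion(v)
        fmt.Printf("%s supported=%v err=%v\n", v, ok, err)
    }
}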
|
|
|
@ -139,7 +139,13 @@ func TestGetOrchestratorVersionProfileListV20170930(t *testing.T) {
|
|||
Expect(e).To(BeNil())
|
||||
numSwarmVersions := 1
|
||||
numDockerCEVersions := 1
|
||||
totalNumVersions := numSwarmVersions + numDockerCEVersions + len(common.GetAllSupportedKubernetesVersions()) + len(common.AllDCOSSupportedVersions)
|
||||
|
||||
totalNumVersions := numSwarmVersions +
|
||||
numDockerCEVersions +
|
||||
len(common.GetAllSupportedKubernetesVersions()) +
|
||||
len(common.AllDCOSSupportedVersions) +
|
||||
len(common.GetAllSupportedOpenShiftVersions())
|
||||
|
||||
Expect(len(list.Properties.Orchestrators)).To(Equal(totalNumVersions))
|
||||
|
||||
// v20170930 - kubernetes only
|
||||
|
@ -171,3 +177,35 @@ func TestKubernetesInfo(t *testing.T) {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
func TestOpenshiftInfo(t *testing.T) {
|
||||
RegisterTestingT(t)
|
||||
|
||||
invalid := []string{
|
||||
"invalid number",
|
||||
"invalid.number",
|
||||
"a4.b7.c3",
|
||||
"31.29.",
|
||||
".17.02",
|
||||
"43.156.89.",
|
||||
"1.2.a"}
|
||||
|
||||
for _, v := range invalid {
|
||||
csOrch := &OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
OrchestratorVersion: v,
|
||||
}
|
||||
|
||||
_, e := openShiftInfo(csOrch)
|
||||
Expect(e).NotTo(BeNil())
|
||||
}
|
||||
|
||||
// test good value
|
||||
csOrch := &OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
OrchestratorVersion: common.OpenShiftDefaultVersion,
|
||||
}
|
||||
|
||||
_, e := openShiftInfo(csOrch)
|
||||
Expect(e).To(BeNil())
|
||||
}
|
||||
|
|
|
@ -59,6 +59,7 @@ type Properties struct {
|
|||
CustomProfile *CustomProfile `json:"customProfile,omitempty"`
|
||||
HostedMasterProfile *HostedMasterProfile `json:"hostedMasterProfile,omitempty"`
|
||||
AddonProfiles map[string]AddonProfile `json:"addonProfiles,omitempty"`
|
||||
AzProfile *AzProfile `json:"azProfile,omitempty"`
|
||||
}
|
||||
|
||||
// AddonProfile represents an addon for managed cluster
|
||||
|
@ -67,6 +68,14 @@ type AddonProfile struct {
|
|||
Config map[string]string `json:"config"`
|
||||
}
|
||||
|
||||
// AzProfile holds the azure context for where the cluster resides
|
||||
type AzProfile struct {
|
||||
TenantID string `json:"tenantId,omitempty"`
|
||||
SubscriptionID string `json:"subscriptionId,omitempty"`
|
||||
ResourceGroup string `json:"resourceGroup,omitempty"`
|
||||
Location string `json:"location,omitempty"`
|
||||
}
|
||||
|
||||
// ServicePrincipalProfile contains the client and secret used by the cluster for Azure Resource CRUD
|
||||
type ServicePrincipalProfile struct {
|
||||
ClientID string `json:"clientId"`
|
||||
|
@ -165,6 +174,7 @@ type OrchestratorProfile struct {
|
|||
OrchestratorType string `json:"orchestratorType"`
|
||||
OrchestratorVersion string `json:"orchestratorVersion"`
|
||||
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
|
||||
OpenShiftConfig *OpenShiftConfig `json:"openshiftConfig,omitempty"`
|
||||
DcosConfig *DcosConfig `json:"dcosConfig,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -305,6 +315,20 @@ type DcosConfig struct {
|
|||
DcosProviderPackageID string `json:"dcosProviderPackageID,omitempty"` // repo url is the location of the build,
|
||||
}
|
||||
|
||||
// OpenShiftConfig holds configuration for OpenShift
|
||||
type OpenShiftConfig struct {
|
||||
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
|
||||
|
||||
// ClusterUsername and ClusterPassword are temporary before AAD
|
||||
// authentication is enabled, and will be removed subsequently.
|
||||
ClusterUsername string `json:"clusterUsername,omitempty"`
|
||||
ClusterPassword string `json:"clusterPassword,omitempty"`
|
||||
|
||||
ConfigBundles map[string][]byte `json:"-"`
|
||||
ExternalMasterHostname string `json:"-"`
|
||||
RouterLBHostname string `json:"-"`
|
||||
}
|
||||
|
||||
// MasterProfile represents the definition of the master cluster
|
||||
type MasterProfile struct {
|
||||
Count int `json:"count"`
|
||||
|
@ -358,20 +382,21 @@ type Extension struct {
|
|||
|
||||
// AgentPoolProfile represents an agent pool definition
|
||||
type AgentPoolProfile struct {
|
||||
Name string `json:"name"`
|
||||
Count int `json:"count"`
|
||||
VMSize string `json:"vmSize"`
|
||||
OSDiskSizeGB int `json:"osDiskSizeGB,omitempty"`
|
||||
DNSPrefix string `json:"dnsPrefix,omitempty"`
|
||||
OSType OSType `json:"osType,omitempty"`
|
||||
Ports []int `json:"ports,omitempty"`
|
||||
AvailabilityProfile string `json:"availabilityProfile"`
|
||||
StorageProfile string `json:"storageProfile,omitempty"`
|
||||
DiskSizesGB []int `json:"diskSizesGB,omitempty"`
|
||||
VnetSubnetID string `json:"vnetSubnetID,omitempty"`
|
||||
Subnet string `json:"subnet"`
|
||||
IPAddressCount int `json:"ipAddressCount,omitempty"`
|
||||
Distro Distro `json:"distro,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Count int `json:"count"`
|
||||
VMSize string `json:"vmSize"`
|
||||
OSDiskSizeGB int `json:"osDiskSizeGB,omitempty"`
|
||||
DNSPrefix string `json:"dnsPrefix,omitempty"`
|
||||
OSType OSType `json:"osType,omitempty"`
|
||||
Ports []int `json:"ports,omitempty"`
|
||||
AvailabilityProfile string `json:"availabilityProfile"`
|
||||
StorageProfile string `json:"storageProfile,omitempty"`
|
||||
DiskSizesGB []int `json:"diskSizesGB,omitempty"`
|
||||
VnetSubnetID string `json:"vnetSubnetID,omitempty"`
|
||||
Subnet string `json:"subnet"`
|
||||
IPAddressCount int `json:"ipAddressCount,omitempty"`
|
||||
Distro Distro `json:"distro,omitempty"`
|
||||
Role AgentPoolProfileRole `json:"role,omitempty"`
|
||||
|
||||
FQDN string `json:"fqdn,omitempty"`
|
||||
CustomNodeLabels map[string]string `json:"customNodeLabels,omitempty"`
|
||||
|
@ -381,6 +406,9 @@ type AgentPoolProfile struct {
|
|||
ImageRef *ImageReference `json:"imageReference,omitempty"`
|
||||
}
|
||||
|
||||
// AgentPoolProfileRole represents an agent role
|
||||
type AgentPoolProfileRole string
|
||||
|
||||
// DiagnosticsProfile setting to enable/disable capturing
|
||||
// diagnostics for VMs hosting container cluster.
|
||||
type DiagnosticsProfile struct {
|
||||
|
@ -682,6 +710,11 @@ func (o *OrchestratorProfile) IsKubernetes() bool {
|
|||
return o.OrchestratorType == Kubernetes
|
||||
}
|
||||
|
||||
// IsOpenShift returns true if this template is for OpenShift orchestrator
|
||||
func (o *OrchestratorProfile) IsOpenShift() bool {
|
||||
return o.OrchestratorType == OpenShift
|
||||
}
|
||||
|
||||
// IsDCOS returns true if this template is for DCOS orchestrator
|
||||
func (o *OrchestratorProfile) IsDCOS() bool {
|
||||
return o.OrchestratorType == DCOS
|
||||
|
|
|
@ -15,6 +15,8 @@ const (
|
|||
Kubernetes string = "Kubernetes"
|
||||
// SwarmMode is the string constant for the Swarm Mode orchestrator type
|
||||
SwarmMode string = "SwarmMode"
|
||||
// OpenShift is the string constant for the OpenShift orchestrator type
|
||||
OpenShift string = "OpenShift"
|
||||
)
|
||||
|
||||
// the OSTypes supported by vlabs
|
||||
|
@ -89,3 +91,10 @@ const (
|
|||
// DefaultNetworkPolicyWindows defines the network policy to use by default for clusters with Windows agent pools
|
||||
DefaultNetworkPolicyWindows = "azure"
|
||||
)
|
||||
|
||||
const (
|
||||
// AgentPoolProfileRoleEmpty is the empty role
|
||||
AgentPoolProfileRoleEmpty AgentPoolProfileRole = ""
|
||||
// AgentPoolProfileRoleInfra is the infra role
|
||||
AgentPoolProfileRoleInfra AgentPoolProfileRole = "infra"
|
||||
)
|
||||
|
|
|
@ -40,6 +40,15 @@ type Properties struct {
|
|||
ServicePrincipalProfile *ServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"`
|
||||
CertificateProfile *CertificateProfile `json:"certificateProfile,omitempty"`
|
||||
AADProfile *AADProfile `json:"aadProfile,omitempty"`
|
||||
AzProfile *AzProfile `json:"azProfile,omitempty"`
|
||||
}
|
||||
|
||||
// AzProfile holds the azure context for where the cluster resides
|
||||
type AzProfile struct {
|
||||
TenantID string `json:"tenantId,omitempty"`
|
||||
SubscriptionID string `json:"subscriptionId,omitempty"`
|
||||
ResourceGroup string `json:"resourceGroup,omitempty"`
|
||||
Location string `json:"location,omitempty"`
|
||||
}
|
||||
|
||||
// ServicePrincipalProfile contains the client and secret used by the cluster for Azure Resource CRUD
|
||||
|
@ -157,6 +166,7 @@ type OrchestratorProfile struct {
|
|||
OrchestratorRelease string `json:"orchestratorRelease,omitempty"`
|
||||
OrchestratorVersion string `json:"orchestratorVersion,omitempty"`
|
||||
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
|
||||
OpenShiftConfig *OpenShiftConfig `json:"openshiftConfig,omitempty"`
|
||||
DcosConfig *DcosConfig `json:"dcosConfig,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -181,6 +191,8 @@ func (o *OrchestratorProfile) UnmarshalJSON(b []byte) error {
|
|||
o.OrchestratorType = Kubernetes
|
||||
case strings.EqualFold(orchestratorType, SwarmMode):
|
||||
o.OrchestratorType = SwarmMode
|
||||
case strings.EqualFold(orchestratorType, OpenShift):
|
||||
o.OrchestratorType = OpenShift
|
||||
default:
|
||||
return fmt.Errorf("OrchestratorType has unknown orchestrator: %s", orchestratorType)
|
||||
}
|
||||
|
@ -297,6 +309,16 @@ type DcosConfig struct {
|
|||
DcosProviderPackageID string `json:"dcosProviderPackageID,omitempty"` // repo url is the location of the build,
|
||||
}
|
||||
|
||||
// OpenShiftConfig holds configuration for OpenShift
|
||||
type OpenShiftConfig struct {
|
||||
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
|
||||
|
||||
// ClusterUsername and ClusterPassword are temporary before AAD
|
||||
// authentication is enabled, and will be removed subsequently.
|
||||
ClusterUsername string `json:"clusterUsername,omitempty"`
|
||||
ClusterPassword string `json:"clusterPassword,omitempty"`
|
||||
}
|
||||
|
||||
// MasterProfile represents the definition of the master cluster
|
||||
type MasterProfile struct {
|
||||
Count int `json:"count" validate:"required,eq=1|eq=3|eq=5"`
|
||||
|
@ -355,21 +377,22 @@ type Extension struct {
|
|||
|
||||
// AgentPoolProfile represents an agent pool definition
|
||||
type AgentPoolProfile struct {
|
||||
Name string `json:"name" validate:"required"`
|
||||
Count int `json:"count" validate:"required,min=1,max=100"`
|
||||
VMSize string `json:"vmSize" validate:"required"`
|
||||
OSDiskSizeGB int `json:"osDiskSizeGB,omitempty" validate:"min=0,max=1023"`
|
||||
DNSPrefix string `json:"dnsPrefix,omitempty"`
|
||||
OSType OSType `json:"osType,omitempty"`
|
||||
Ports []int `json:"ports,omitempty" validate:"dive,min=1,max=65535"`
|
||||
AvailabilityProfile string `json:"availabilityProfile"`
|
||||
StorageProfile string `json:"storageProfile" validate:"eq=StorageAccount|eq=ManagedDisks|len=0"`
|
||||
DiskSizesGB []int `json:"diskSizesGB,omitempty" validate:"max=4,dive,min=1,max=1023"`
|
||||
VnetSubnetID string `json:"vnetSubnetID,omitempty"`
|
||||
IPAddressCount int `json:"ipAddressCount,omitempty" validate:"min=0,max=256"`
|
||||
Distro Distro `json:"distro,omitempty"`
|
||||
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
|
||||
ImageRef *ImageReference `json:"imageReference,omitempty"`
|
||||
Name string `json:"name" validate:"required"`
|
||||
Count int `json:"count" validate:"required,min=1,max=100"`
|
||||
VMSize string `json:"vmSize" validate:"required"`
|
||||
OSDiskSizeGB int `json:"osDiskSizeGB,omitempty" validate:"min=0,max=1023"`
|
||||
DNSPrefix string `json:"dnsPrefix,omitempty"`
|
||||
OSType OSType `json:"osType,omitempty"`
|
||||
Ports []int `json:"ports,omitempty" validate:"dive,min=1,max=65535"`
|
||||
AvailabilityProfile string `json:"availabilityProfile"`
|
||||
StorageProfile string `json:"storageProfile" validate:"eq=StorageAccount|eq=ManagedDisks|len=0"`
|
||||
DiskSizesGB []int `json:"diskSizesGB,omitempty" validate:"max=4,dive,min=1,max=1023"`
|
||||
VnetSubnetID string `json:"vnetSubnetID,omitempty"`
|
||||
IPAddressCount int `json:"ipAddressCount,omitempty" validate:"min=0,max=256"`
|
||||
Distro Distro `json:"distro,omitempty"`
|
||||
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
|
||||
ImageRef *ImageReference `json:"imageReference,omitempty"`
|
||||
Role AgentPoolProfileRole `json:"role,omitempty"`
|
||||
|
||||
// subnet is internal
|
||||
subnet string
|
||||
|
@ -380,6 +403,9 @@ type AgentPoolProfile struct {
|
|||
Extensions []Extension `json:"extensions"`
|
||||
}
|
||||
|
||||
// AgentPoolProfileRole represents an agent role
|
||||
type AgentPoolProfileRole string
|
||||
|
||||
// AADProfile specifies attributes for AAD integration
|
||||
type AADProfile struct {
|
||||
// The client AAD application ID.
|
||||
|
|
|
@ -143,7 +143,19 @@ func (o *OrchestratorProfile) Validate(isUpdate bool) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
case OpenShift:
|
||||
// TODO: add appropriate additional validation logic
|
||||
version := common.RationalizeReleaseAndVersion(
|
||||
o.OrchestratorType,
|
||||
o.OrchestratorRelease,
|
||||
o.OrchestratorVersion,
|
||||
false)
|
||||
if version == "" {
|
||||
return fmt.Errorf("OrchestratorProfile is not able to be rationalized, check supported Release or Version")
|
||||
}
|
||||
if o.OpenShiftConfig == nil || o.OpenShiftConfig.ClusterUsername == "" || o.OpenShiftConfig.ClusterPassword == "" {
|
||||
return fmt.Errorf("ClusterUsername and ClusterPassword must both be specified")
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("OrchestratorProfile has unknown orchestrator: %s", o.OrchestratorType)
|
||||
}
|
||||
|
@ -167,8 +179,12 @@ func (o *OrchestratorProfile) Validate(isUpdate bool) error {
|
|||
}
|
||||
}
|
||||
|
||||
if o.OrchestratorType != Kubernetes && o.KubernetesConfig != nil {
|
||||
return fmt.Errorf("KubernetesConfig can be specified only when OrchestratorType is Kubernetes")
|
||||
if (o.OrchestratorType != Kubernetes && o.OrchestratorType != OpenShift) && o.KubernetesConfig != nil {
|
||||
return fmt.Errorf("KubernetesConfig can be specified only when OrchestratorType is Kubernetes or OpenShift")
|
||||
}
|
||||
|
||||
if o.OrchestratorType != OpenShift && o.OpenShiftConfig != nil {
|
||||
return fmt.Errorf("OpenShiftConfig can be specified only when OrchestratorType is OpenShift")
|
||||
}
|
||||
|
||||
if o.OrchestratorType != DCOS && o.DcosConfig != nil && (*o.DcosConfig != DcosConfig{}) {
|
||||
|
@ -405,12 +421,32 @@ func (a *Properties) Validate(isUpdate bool) error {
|
|||
}
|
||||
}
|
||||
|
||||
if a.OrchestratorProfile.OrchestratorType == OpenShift && agentPoolProfile.AvailabilityProfile != AvailabilitySet {
|
||||
return fmt.Errorf("Only AvailabilityProfile: AvailabilitySet is supported for Orchestrator 'OpenShift'")
|
||||
}
|
||||
|
||||
validRoles := []AgentPoolProfileRole{AgentPoolProfileRoleEmpty}
|
||||
if a.OrchestratorProfile.OrchestratorType == OpenShift {
|
||||
validRoles = append(validRoles, AgentPoolProfileRoleInfra)
|
||||
}
|
||||
var found bool
|
||||
for _, validRole := range validRoles {
|
||||
if agentPoolProfile.Role == validRole {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("Role %q is not supported for Orchestrator %s", agentPoolProfile.Role, a.OrchestratorProfile.OrchestratorType)
|
||||
}
|
||||
|
||||
/* this switch statement is left to protect newly added orchestrators until they support Managed Disks */
|
||||
if agentPoolProfile.StorageProfile == ManagedDisks {
|
||||
switch a.OrchestratorProfile.OrchestratorType {
|
||||
case DCOS:
|
||||
case Swarm:
|
||||
case Kubernetes:
|
||||
case OpenShift:
|
||||
case SwarmMode:
|
||||
default:
|
||||
return fmt.Errorf("HA volumes are currently unsupported for Orchestrator %s", a.OrchestratorProfile.OrchestratorType)
|
||||
|
@ -489,6 +525,19 @@ func (a *Properties) Validate(isUpdate bool) error {
|
|||
}
|
||||
}
|
||||
|
||||
switch a.OrchestratorProfile.OrchestratorType {
|
||||
case OpenShift:
|
||||
if a.AzProfile == nil || a.AzProfile.Location == "" ||
|
||||
a.AzProfile.ResourceGroup == "" || a.AzProfile.SubscriptionID == "" ||
|
||||
a.AzProfile.TenantID == "" {
|
||||
return fmt.Errorf("'azProfile' must be supplied in full for orchestrator '%v'", OpenShift)
|
||||
}
|
||||
default:
|
||||
if a.AzProfile != nil {
|
||||
return fmt.Errorf("'azProfile' is only supported by orchestrator '%v'", OpenShift)
|
||||
}
|
||||
}
|
||||
|
||||
for _, extension := range a.ExtensionProfiles {
|
||||
if extension.ExtensionParametersKeyVaultRef != nil {
|
||||
if e := validate.Var(extension.ExtensionParametersKeyVaultRef.VaultID, "required"); e != nil {
|
||||
|
|
|
@ -77,6 +77,26 @@ func Test_OrchestratorProfile_Validate(t *testing.T) {
|
|||
t.Errorf("should not have failed on version with v prefix")
|
||||
}
|
||||
|
||||
o = &OrchestratorProfile{
|
||||
OrchestratorType: OpenShift,
|
||||
OrchestratorVersion: "v1.0",
|
||||
}
|
||||
|
||||
if err := o.Validate(false); err == nil {
|
||||
t.Errorf("should have failed on old version")
|
||||
}
|
||||
if err := o.Validate(true); err != nil {
|
||||
t.Errorf("should not have failed on old version")
|
||||
}
|
||||
|
||||
o = &OrchestratorProfile{
|
||||
OrchestratorType: Kubernetes,
|
||||
OrchestratorVersion: "v1.9.0",
|
||||
OpenShiftConfig: &OpenShiftConfig{},
|
||||
}
|
||||
if err := o.Validate(false); err == nil {
|
||||
t.Errorf("should have failed on OpenShift config specified with non OpenShift orchestrator type")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_KubernetesConfig_Validate(t *testing.T) {
|
||||
|
|
|
@ -0,0 +1,105 @@
|
|||
package certgen
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"math/big"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/Azure/acs-engine/pkg/openshift/filesystem"
|
||||
)
|
||||
|
||||
// Config represents an OpenShift configuration
|
||||
type Config struct {
|
||||
ExternalMasterHostname string
|
||||
serial serial
|
||||
cas map[string]CertAndKey
|
||||
AuthSecret string
|
||||
EncSecret string
|
||||
Master *Master
|
||||
Bootstrap KubeConfig
|
||||
ClusterUsername string
|
||||
ClusterPassword string
|
||||
AzureConfig AzureConfig
|
||||
}
|
||||
|
||||
// AzureConfig represents the azure.conf configuration
|
||||
type AzureConfig struct {
|
||||
TenantID string
|
||||
SubscriptionID string
|
||||
AADClientID string
|
||||
AADClientSecret string
|
||||
ResourceGroup string
|
||||
Location string
|
||||
}
|
||||
|
||||
// Master represents an OpenShift master configuration
|
||||
type Master struct {
|
||||
Hostname string
|
||||
IPs []net.IP
|
||||
Port int16
|
||||
|
||||
certs map[string]CertAndKey
|
||||
etcdcerts map[string]CertAndKey
|
||||
kubeconfigs map[string]KubeConfig
|
||||
}
|
||||
|
||||
// CertAndKey is a certificate and key
|
||||
type CertAndKey struct {
|
||||
cert *x509.Certificate
|
||||
key *rsa.PrivateKey
|
||||
}
|
||||
|
||||
type serial struct {
|
||||
m sync.Mutex
|
||||
i int64
|
||||
}
|
||||
|
||||
func (s *serial) Get() *big.Int {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
|
||||
s.i++
|
||||
return big.NewInt(s.i)
|
||||
}
|
||||
|
||||
// WriteMaster writes the config files for a Master node to a Filesystem.
|
||||
func (c *Config) WriteMaster(fs filesystem.Filesystem) error {
|
||||
err := c.WriteMasterCerts(fs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.WriteMasterKeypair(fs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.WriteMasterKubeConfigs(fs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.WriteMasterFiles(fs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.WriteNode(fs)
|
||||
}
|
||||
|
||||
// WriteNode writes the config files for bootstrapping a node to a Filesystem.
|
||||
func (c *Config) WriteNode(fs filesystem.Filesystem) error {
|
||||
err := c.WriteBootstrapCerts(fs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.WriteBootstrapKubeConfig(fs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.WriteNodeFiles(fs)
|
||||
}
|
|
@ -0,0 +1,4 @@
|
|||
// Package certgen provides utilities related
|
||||
// to generating all the necessary artifacts
|
||||
// for an OpenShift deployment.
|
||||
package certgen
|
|
@ -0,0 +1,96 @@
|
|||
package certgen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/Azure/acs-engine/pkg/openshift/filesystem"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
// PrepareMasterFiles creates the shared authentication and encryption secrets
|
||||
func (c *Config) PrepareMasterFiles() error {
|
||||
b := make([]byte, 24)
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.AuthSecret = base64.StdEncoding.EncodeToString(b)
|
||||
|
||||
_, err = rand.Read(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.EncSecret = base64.StdEncoding.EncodeToString(b)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteMasterFiles writes the templated master config
|
||||
func (c *Config) WriteMasterFiles(fs filesystem.Filesystem) error {
|
||||
for _, name := range getAssets() {
|
||||
if !strings.HasPrefix(name, "master/") {
|
||||
continue
|
||||
}
|
||||
tb := assetMustExist(name)
|
||||
|
||||
t, err := template.New("template").Funcs(template.FuncMap{
|
||||
"QuoteMeta": regexp.QuoteMeta,
|
||||
"Bcrypt": func(password string) (string, error) {
|
||||
h, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
|
||||
return string(h), err
|
||||
},
|
||||
}).Parse(string(tb))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
err = t.Execute(b, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = fs.WriteFile(strings.TrimPrefix(name, "master/"), b.Bytes(), 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteNodeFiles writes the templated node config
|
||||
func (c *Config) WriteNodeFiles(fs filesystem.Filesystem) error {
|
||||
for _, name := range getAssets() {
|
||||
if !strings.HasPrefix(name, "node/") {
|
||||
continue
|
||||
}
|
||||
|
||||
tb := assetMustExist(name)
|
||||
|
||||
t, err := template.New("template").Funcs(template.FuncMap{
|
||||
"QuoteMeta": regexp.QuoteMeta,
|
||||
}).Parse(string(tb))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
err = t.Execute(b, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = fs.WriteFile(strings.TrimPrefix(name, "node/"), b.Bytes(), 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
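One design note on the template FuncMap above: QuoteMeta is exposed because hostnames get interpolated into regular expressions (for example the corsAllowedOrigins entries in master-config.yaml below), so any metacharacters must be escaped first. A small standalone sketch of that escaping, using an example hostname only:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    host := "master.example.cloudapp.azure.com"
    // QuoteMeta turns the dots into literal matches, so the pattern only
    // accepts the real hostname rather than any character in those positions.
    pattern := fmt.Sprintf(`(?i)//%s(:|\z)`, regexp.QuoteMeta(host))
    re := regexp.MustCompile(pattern)
    fmt.Println(re.MatchString("//master.example.cloudapp.azure.com:8443")) // true
    fmt.Println(re.MatchString("//masterXexampleXcloudappXazureXcom:8443")) // false
}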
@ -0,0 +1,284 @@
|
|||
package certgen
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/acs-engine/pkg/openshift/filesystem"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// KubeConfig represents a kubeconfig
|
||||
type KubeConfig struct {
|
||||
APIVersion string `yaml:"apiVersion,omitempty"`
|
||||
Kind string `yaml:"kind,omitempty"`
|
||||
Clusters []Cluster `yaml:"clusters,omitempty"`
|
||||
Contexts []Context `yaml:"contexts,omitempty"`
|
||||
CurrentContext string `yaml:"current-context,omitempty"`
|
||||
Preferences map[string]interface{} `yaml:"preferences,omitempty"`
|
||||
Users []User `yaml:"users,omitempty"`
|
||||
}
|
||||
|
||||
// Cluster represents a kubeconfig cluster
|
||||
type Cluster struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
Cluster ClusterInfo `yaml:"cluster,omitempty"`
|
||||
}
|
||||
|
||||
// ClusterInfo represents a kubeconfig clusterinfo
|
||||
type ClusterInfo struct {
|
||||
Server string `yaml:"server,omitempty"`
|
||||
CertificateAuthorityData string `yaml:"certificate-authority-data,omitempty"`
|
||||
}
|
||||
|
||||
// Context represents a kubeconfig context
|
||||
type Context struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
Context ContextInfo `yaml:"context,omitempty"`
|
||||
}
|
||||
|
||||
// ContextInfo represents a kubeconfig contextinfo
|
||||
type ContextInfo struct {
|
||||
Cluster string `yaml:"cluster,omitempty"`
|
||||
Namespace string `yaml:"namespace,omitempty"`
|
||||
User string `yaml:"user,omitempty"`
|
||||
}
|
||||
|
||||
// User represents a kubeconfig user
|
||||
type User struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
User UserInfo `yaml:"user,omitempty"`
|
||||
}
|
||||
|
||||
// UserInfo represents a kubeconfig userinfo
|
||||
type UserInfo struct {
|
||||
ClientCertificateData string `yaml:"client-certificate-data,omitempty"`
|
||||
ClientKeyData string `yaml:"client-key-data,omitempty"`
|
||||
}
|
||||
|
||||
// PrepareMasterKubeConfigs creates the master kubeconfigs
|
||||
func (c *Config) PrepareMasterKubeConfigs() error {
|
||||
endpoint := fmt.Sprintf("%s:%d", c.Master.Hostname, c.Master.Port)
|
||||
endpointName := strings.Replace(endpoint, ".", "-", -1)
|
||||
|
||||
externalEndpoint := fmt.Sprintf("%s:%d", c.ExternalMasterHostname, c.Master.Port)
|
||||
externalEndpointName := strings.Replace(externalEndpoint, ".", "-", -1)
|
||||
|
||||
localhostEndpoint := fmt.Sprintf("localhost:%d", c.Master.Port)
|
||||
localhostEndpointName := strings.Replace(localhostEndpoint, ".", "-", -1)
|
||||
|
||||
cacert, err := certAsBytes(c.cas["etc/origin/master/ca"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
admincert, err := certAsBytes(c.Master.certs["etc/origin/master/admin"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
adminkey, err := privateKeyAsBytes(c.Master.certs["etc/origin/master/admin"].key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mastercert, err := certAsBytes(c.Master.certs["etc/origin/master/openshift-master"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
masterkey, err := privateKeyAsBytes(c.Master.certs["etc/origin/master/openshift-master"].key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
aggregatorcert, err := certAsBytes(c.Master.certs["etc/origin/master/aggregator-front-proxy"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
aggregatorkey, err := privateKeyAsBytes(c.Master.certs["etc/origin/master/aggregator-front-proxy"].key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Master.kubeconfigs = map[string]KubeConfig{
|
||||
"etc/origin/master/admin.kubeconfig": {
|
||||
APIVersion: "v1",
|
||||
Kind: "Config",
|
||||
Clusters: []Cluster{
|
||||
{
|
||||
Name: externalEndpointName,
|
||||
Cluster: ClusterInfo{
|
||||
Server: fmt.Sprintf("https://%s", externalEndpoint),
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString(cacert),
|
||||
},
|
||||
},
|
||||
},
|
||||
Contexts: []Context{
|
||||
{
|
||||
Name: fmt.Sprintf("default/%s/system:admin", externalEndpointName),
|
||||
Context: ContextInfo{
|
||||
Cluster: externalEndpointName,
|
||||
Namespace: "default",
|
||||
User: fmt.Sprintf("system:admin/%s", externalEndpointName),
|
||||
},
|
||||
},
|
||||
},
|
||||
CurrentContext: fmt.Sprintf("default/%s/system:admin", externalEndpointName),
|
||||
Users: []User{
|
||||
{
|
||||
Name: fmt.Sprintf("system:admin/%s", externalEndpointName),
|
||||
User: UserInfo{
|
||||
ClientCertificateData: base64.StdEncoding.EncodeToString(admincert),
|
||||
ClientKeyData: base64.StdEncoding.EncodeToString(adminkey),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"etc/origin/master/aggregator-front-proxy.kubeconfig": {
|
||||
APIVersion: "v1",
|
||||
Kind: "Config",
|
||||
Clusters: []Cluster{
|
||||
{
|
||||
Name: localhostEndpointName,
|
||||
Cluster: ClusterInfo{
|
||||
Server: fmt.Sprintf("https://%s", localhostEndpoint),
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString(cacert),
|
||||
},
|
||||
},
|
||||
},
|
||||
Contexts: []Context{
|
||||
{
|
||||
Name: fmt.Sprintf("default/%s/aggregator-front-proxy", localhostEndpointName),
|
||||
Context: ContextInfo{
|
||||
Cluster: localhostEndpointName,
|
||||
Namespace: "default",
|
||||
User: fmt.Sprintf("aggregator-front-proxy/%s", localhostEndpointName),
|
||||
},
|
||||
},
|
||||
},
|
||||
CurrentContext: fmt.Sprintf("default/%s/aggregator-front-proxy", localhostEndpointName),
|
||||
Users: []User{
|
||||
{
|
||||
Name: fmt.Sprintf("aggregator-front-proxy/%s", localhostEndpointName),
|
||||
User: UserInfo{
|
||||
ClientCertificateData: base64.StdEncoding.EncodeToString(aggregatorcert),
|
||||
ClientKeyData: base64.StdEncoding.EncodeToString(aggregatorkey),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"etc/origin/master/openshift-master.kubeconfig": {
|
||||
APIVersion: "v1",
|
||||
Kind: "Config",
|
||||
Clusters: []Cluster{
|
||||
{
|
||||
Name: endpointName,
|
||||
Cluster: ClusterInfo{
|
||||
Server: fmt.Sprintf("https://%s", endpoint),
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString(cacert),
|
||||
},
|
||||
},
|
||||
},
|
||||
Contexts: []Context{
|
||||
{
|
||||
Name: fmt.Sprintf("default/%s/system:openshift-master", endpointName),
|
||||
Context: ContextInfo{
|
||||
Cluster: endpointName,
|
||||
Namespace: "default",
|
||||
User: fmt.Sprintf("system:openshift-master/%s", endpointName),
|
||||
},
|
||||
},
|
||||
},
|
||||
CurrentContext: fmt.Sprintf("default/%s/system:openshift-master", endpointName),
|
||||
Users: []User{
|
||||
{
|
||||
Name: fmt.Sprintf("system:openshift-master/%s", endpointName),
|
||||
User: UserInfo{
|
||||
ClientCertificateData: base64.StdEncoding.EncodeToString(mastercert),
|
||||
ClientKeyData: base64.StdEncoding.EncodeToString(masterkey),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrepareBootstrapKubeConfig creates the node bootstrap kubeconfig
|
||||
func (c *Config) PrepareBootstrapKubeConfig() error {
|
||||
ep := fmt.Sprintf("%s:%d", c.ExternalMasterHostname, c.Master.Port)
|
||||
epName := strings.Replace(ep, ".", "-", -1)
|
||||
|
||||
cacert, err := certAsBytes(c.cas["etc/origin/master/ca"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bootstrapCert, err := certAsBytes(c.Master.certs["etc/origin/master/node-bootstrapper"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bootstrapKey, err := privateKeyAsBytes(c.Master.certs["etc/origin/master/node-bootstrapper"].key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Bootstrap = KubeConfig{
|
||||
APIVersion: "v1",
|
||||
Kind: "Config",
|
||||
Clusters: []Cluster{
|
||||
{
|
||||
Name: epName,
|
||||
Cluster: ClusterInfo{
|
||||
Server: fmt.Sprintf("https://%s", ep),
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString(cacert),
|
||||
},
|
||||
},
|
||||
},
|
||||
Contexts: []Context{
|
||||
{
|
||||
Name: fmt.Sprintf("default/%s/system:serviceaccount:openshift-infra:node-bootstrapper", epName),
|
||||
Context: ContextInfo{
|
||||
Cluster: epName,
|
||||
Namespace: "default",
|
||||
User: fmt.Sprintf("system:serviceaccount:openshift-infra:node-bootstrapper/%s", epName),
|
||||
},
|
||||
},
|
||||
},
|
||||
CurrentContext: fmt.Sprintf("default/%s/system:serviceaccount:openshift-infra:node-bootstrapper", epName),
|
||||
Users: []User{
|
||||
{
|
||||
Name: fmt.Sprintf("system:serviceaccount:openshift-infra:node-bootstrapper/%s", epName),
|
||||
User: UserInfo{
|
||||
ClientCertificateData: base64.StdEncoding.EncodeToString(bootstrapCert),
|
||||
ClientKeyData: base64.StdEncoding.EncodeToString(bootstrapKey),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteMasterKubeConfigs writes the master kubeconfigs
|
||||
func (c *Config) WriteMasterKubeConfigs(fs filesystem.Filesystem) error {
|
||||
for filename, kubeconfig := range c.Master.kubeconfigs {
|
||||
b, err := yaml.Marshal(&kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = fs.WriteFile(filename, b, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteBootstrapKubeConfig writes the node bootstrap kubeconfig
|
||||
func (c *Config) WriteBootstrapKubeConfig(fs filesystem.Filesystem) error {
|
||||
b, err := yaml.Marshal(&c.Bootstrap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.WriteFile("etc/origin/node/bootstrap.kubeconfig", b, 0600)
|
||||
}
|
|
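For orientation, a minimal sketch of the YAML shape these helpers produce for a single-cluster kubeconfig; it assumes it sits alongside the KubeConfig types above and the gopkg.in/yaml.v2 import this file already has, and the cluster name and server are examples only.

// exampleKubeConfigYAML renders a minimal kubeconfig with the same keys
// that WriteMasterKubeConfigs and WriteBootstrapKubeConfig emit.
func exampleKubeConfigYAML() ([]byte, error) {
    kc := KubeConfig{
        APIVersion: "v1",
        Kind:       "Config",
        Clusters: []Cluster{{
            Name:    "example-cluster",
            Cluster: ClusterInfo{Server: "https://example-cluster:8443"},
        }},
        Contexts: []Context{{
            Name:    "default/example-cluster/admin",
            Context: ContextInfo{Cluster: "example-cluster", Namespace: "default", User: "admin/example-cluster"},
        }},
        CurrentContext: "default/example-cluster/admin",
        Users: []User{{
            Name: "admin/example-cluster",
            User: UserInfo{},
        }},
    }
    return yaml.Marshal(&kc)
}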
@ -0,0 +1,19 @@
|
|||
package certgen
|
||||
|
||||
// novalidate.go is split out of files.go to avoid static validation.
|
||||
// `make test-style` is failing non-deterministically (flaking) with
|
||||
// the following message:
|
||||
//
|
||||
// pkg/certgen/files.go:36:23:warning: AssetNames not declared by package templates (unused)
|
||||
|
||||
import (
|
||||
"github.com/Azure/acs-engine/pkg/openshift/certgen/templates"
|
||||
)
|
||||
|
||||
func getAssets() []string {
|
||||
return templates.AssetNames()
|
||||
}
|
||||
|
||||
func assetMustExist(name string) []byte {
|
||||
return templates.MustAsset(name)
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
ETCD_NAME={{ .Master.Hostname }}
|
||||
ETCD_LISTEN_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380
|
||||
ETCD_DATA_DIR=/var/lib/etcd/
|
||||
#ETCD_WAL_DIR=""
|
||||
#ETCD_SNAPSHOT_COUNT=10000
|
||||
ETCD_HEARTBEAT_INTERVAL=500
|
||||
ETCD_ELECTION_TIMEOUT=2500
|
||||
ETCD_LISTEN_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379
|
||||
#ETCD_MAX_SNAPSHOTS=5
|
||||
#ETCD_MAX_WALS=5
|
||||
#ETCD_CORS=
|
||||
|
||||
|
||||
#[cluster]
|
||||
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380
|
||||
ETCD_INITIAL_CLUSTER={{ .Master.Hostname }}=https://{{ (index .Master.IPs 0).String }}:2380
|
||||
ETCD_INITIAL_CLUSTER_STATE=new
|
||||
ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster-1
|
||||
#ETCD_DISCOVERY=
|
||||
#ETCD_DISCOVERY_SRV=
|
||||
#ETCD_DISCOVERY_FALLBACK=proxy
|
||||
#ETCD_DISCOVERY_PROXY=
|
||||
ETCD_ADVERTISE_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379
|
||||
#ETCD_STRICT_RECONFIG_CHECK="false"
|
||||
#ETCD_AUTO_COMPACTION_RETENTION="0"
|
||||
#ETCD_ENABLE_V2="true"
|
||||
ETCD_QUOTA_BACKEND_BYTES=4294967296
|
||||
|
||||
#[proxy]
|
||||
#ETCD_PROXY=off
|
||||
#ETCD_PROXY_FAILURE_WAIT="5000"
|
||||
#ETCD_PROXY_REFRESH_INTERVAL="30000"
|
||||
#ETCD_PROXY_DIAL_TIMEOUT="1000"
|
||||
#ETCD_PROXY_WRITE_TIMEOUT="5000"
|
||||
#ETCD_PROXY_READ_TIMEOUT="0"
|
||||
|
||||
#[security]
|
||||
ETCD_TRUSTED_CA_FILE=/etc/etcd/ca.crt
|
||||
ETCD_CLIENT_CERT_AUTH="true"
|
||||
ETCD_CERT_FILE=/etc/etcd/server.crt
|
||||
ETCD_KEY_FILE=/etc/etcd/server.key
|
||||
#ETCD_AUTO_TLS="false"
|
||||
ETCD_PEER_TRUSTED_CA_FILE=/etc/etcd/ca.crt
|
||||
ETCD_PEER_CLIENT_CERT_AUTH="true"
|
||||
ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt
|
||||
ETCD_PEER_KEY_FILE=/etc/etcd/peer.key
|
||||
#ETCD_PEER_AUTO_TLS="false"
|
||||
|
||||
#[logging]
|
||||
ETCD_DEBUG="False"
|
||||
|
||||
#[profiling]
|
||||
#ETCD_ENABLE_PPROF="false"
|
||||
#ETCD_METRICS="basic"
|
||||
#
|
||||
#[auth]
|
||||
#ETCD_AUTH_TOKEN="simple"
|
|
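A tiny sketch of how a template line like the ones above is evaluated: the (index .Master.IPs 0).String pipeline selects the master's first IP address. The hostname and address below are examples only.

package main

import (
    "net"
    "os"
    "text/template"
)

type exampleMaster struct {
    Hostname string
    IPs      []net.IP
}

func main() {
    tmpl := template.Must(template.New("etcd").Parse(
        "ETCD_NAME={{ .Master.Hostname }}\n" +
            "ETCD_LISTEN_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380\n"))
    // Example values only; the real ones come from certgen.Config.Master.
    data := struct{ Master exampleMaster }{exampleMaster{
        Hostname: "ocp-master-000000",
        IPs:      []net.IP{net.ParseIP("10.0.0.4")},
    }}
    if err := tmpl.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}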
@ -0,0 +1 @@
|
|||
{{ .ClusterUsername }}:{{ Bcrypt .ClusterPassword }}
|
|
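The single htpasswd line above is produced by the Bcrypt function registered in WriteMasterFiles; here is a self-contained sketch of that rendering, with example credentials only.

package main

import (
    "os"
    "text/template"

    "golang.org/x/crypto/bcrypt"
)

func main() {
    t := template.Must(template.New("htpasswd").Funcs(template.FuncMap{
        // Same helper as in WriteMasterFiles: hash the cluster password.
        "Bcrypt": func(password string) (string, error) {
            h, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
            return string(h), err
        },
    }).Parse("{{ .ClusterUsername }}:{{ Bcrypt .ClusterPassword }}\n"))

    // Example credentials only; the real values come from openShiftConfig.
    data := struct{ ClusterUsername, ClusterPassword string }{"demo", "demo"}
    if err := t.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}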
@ -0,0 +1,211 @@
|
|||
admissionConfig:
|
||||
pluginConfig:
|
||||
BuildDefaults:
|
||||
configuration:
|
||||
apiVersion: v1
|
||||
env: []
|
||||
kind: BuildDefaultsConfig
|
||||
resources:
|
||||
limits: {}
|
||||
requests: {}
|
||||
BuildOverrides:
|
||||
configuration:
|
||||
apiVersion: v1
|
||||
kind: BuildOverridesConfig
|
||||
PodPreset:
|
||||
configuration:
|
||||
apiVersion: v1
|
||||
disable: false
|
||||
kind: DefaultAdmissionConfig
|
||||
openshift.io/ImagePolicy:
|
||||
configuration:
|
||||
apiVersion: v1
|
||||
executionRules:
|
||||
- matchImageAnnotations:
|
||||
- key: images.openshift.io/deny-execution
|
||||
value: 'true'
|
||||
name: execution-denied
|
||||
onResources:
|
||||
- resource: pods
|
||||
- resource: builds
|
||||
reject: true
|
||||
skipOnResolutionFailure: true
|
||||
kind: ImagePolicyConfig
|
||||
aggregatorConfig:
|
||||
proxyClientInfo:
|
||||
certFile: aggregator-front-proxy.crt
|
||||
keyFile: aggregator-front-proxy.key
|
||||
apiLevels:
|
||||
- v1
|
||||
apiVersion: v1
|
||||
authConfig:
|
||||
requestHeader:
|
||||
clientCA: front-proxy-ca.crt
|
||||
clientCommonNames:
|
||||
- aggregator-front-proxy
|
||||
extraHeaderPrefixes:
|
||||
- X-Remote-Extra-
|
||||
groupHeaders:
|
||||
- X-Remote-Group
|
||||
usernameHeaders:
|
||||
- X-Remote-User
|
||||
controllerConfig:
|
||||
election:
|
||||
lockName: openshift-master-controllers
|
||||
serviceServingCert:
|
||||
signer:
|
||||
certFile: service-signer.crt
|
||||
keyFile: service-signer.key
|
||||
controllers: '*'
|
||||
corsAllowedOrigins:
|
||||
- (?i)//127\.0\.0\.1(:|\z)
|
||||
- (?i)//localhost(:|\z)
|
||||
- (?i)//{{ QuoteMeta (index .Master.IPs 0).String }}(:|\z)
|
||||
- (?i)//kubernetes\.default(:|\z)
|
||||
- (?i)//kubernetes\.default\.svc\.cluster\.local(:|\z)
|
||||
- (?i)//kubernetes(:|\z)
|
||||
- (?i)//{{ QuoteMeta .ExternalMasterHostname }}(:|\z)
|
||||
- (?i)//openshift\.default(:|\z)
|
||||
- (?i)//{{ QuoteMeta .Master.Hostname }}(:|\z)
|
||||
- (?i)//openshift\.default\.svc(:|\z)
|
||||
- (?i)//kubernetes\.default\.svc(:|\z)
|
||||
- (?i)//172\.30\.0\.1(:|\z)
|
||||
- (?i)//openshift\.default\.svc\.cluster\.local(:|\z)
|
||||
- (?i)//openshift(:|\z)
|
||||
dnsConfig:
|
||||
bindAddress: 0.0.0.0:8053
|
||||
bindNetwork: tcp4
|
||||
etcdClientInfo:
|
||||
ca: master.etcd-ca.crt
|
||||
certFile: master.etcd-client.crt
|
||||
keyFile: master.etcd-client.key
|
||||
urls:
|
||||
- https://{{ .Master.Hostname }}:2379
|
||||
etcdStorageConfig:
|
||||
kubernetesStoragePrefix: kubernetes.io
|
||||
kubernetesStorageVersion: v1
|
||||
openShiftStoragePrefix: openshift.io
|
||||
openShiftStorageVersion: v1
|
||||
imageConfig:
|
||||
format: TEMPIMAGEBASE-${component}:${version}
|
||||
latest: false
|
||||
kind: MasterConfig
|
||||
kubeletClientInfo:
|
||||
ca: ca-bundle.crt
|
||||
certFile: master.kubelet-client.crt
|
||||
keyFile: master.kubelet-client.key
|
||||
port: 10250
|
||||
kubernetesMasterConfig:
|
||||
apiServerArguments:
|
||||
runtime-config:
|
||||
- apis/settings.k8s.io/v1alpha1=true
|
||||
storage-backend:
|
||||
- etcd3
|
||||
storage-media-type:
|
||||
- application/vnd.kubernetes.protobuf
|
||||
cloud-provider:
|
||||
- "azure"
|
||||
cloud-config:
|
||||
- "/etc/azure/azure.conf"
|
||||
controllerArguments:
|
||||
cluster-signing-cert-file:
|
||||
- "/etc/origin/master/ca.crt"
|
||||
cluster-signing-key-file:
|
||||
- "/etc/origin/master/ca.key"
|
||||
cloud-provider:
|
||||
- "azure"
|
||||
cloud-config:
|
||||
- "/etc/azure/azure.conf"
|
||||
masterCount: 1
|
||||
masterIP: {{ (index .Master.IPs 0).String }}
|
||||
podEvictionTimeout: null
|
||||
proxyClientInfo:
|
||||
certFile: master.proxy-client.crt
|
||||
keyFile: master.proxy-client.key
|
||||
schedulerArguments: null
|
||||
schedulerConfigFile: /etc/origin/master/scheduler.json
|
||||
servicesNodePortRange: ''
|
||||
servicesSubnet: 172.30.0.0/16
|
||||
staticNodeNames: []
|
||||
masterClients:
|
||||
externalKubernetesClientConnectionOverrides:
|
||||
acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
|
||||
burst: 400
|
||||
contentType: application/vnd.kubernetes.protobuf
|
||||
qps: 200
|
||||
externalKubernetesKubeConfig: ''
|
||||
openshiftLoopbackClientConnectionOverrides:
|
||||
acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
|
||||
burst: 600
|
||||
contentType: application/vnd.kubernetes.protobuf
|
||||
qps: 300
|
||||
openshiftLoopbackKubeConfig: openshift-master.kubeconfig
|
||||
masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }}
|
||||
networkConfig:
|
||||
clusterNetworkCIDR: 10.128.0.0/14
|
||||
clusterNetworks:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostSubnetLength: 9
|
||||
externalIPNetworkCIDRs:
|
||||
- 0.0.0.0/0
|
||||
hostSubnetLength: 9
|
||||
networkPluginName: redhat/openshift-ovs-subnet
|
||||
serviceNetworkCIDR: 172.30.0.0/16
|
||||
oauthConfig:
|
||||
assetPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }}/console/
|
||||
grantConfig:
|
||||
method: auto
|
||||
identityProviders:
|
||||
- challenge: true
|
||||
login: true
|
||||
mappingMethod: claim
|
||||
name: htpasswd_auth
|
||||
provider:
|
||||
apiVersion: v1
|
||||
file: /etc/origin/master/htpasswd
|
||||
kind: HTPasswdPasswordIdentityProvider
|
||||
masterCA: ca-bundle.crt
|
||||
masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }}
|
||||
masterURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }}
|
||||
sessionConfig:
|
||||
sessionMaxAgeSeconds: 3600
|
||||
sessionName: ssn
|
||||
sessionSecretsFile: /etc/origin/master/session-secrets.yaml
|
||||
tokenConfig:
|
||||
accessTokenMaxAgeSeconds: 86400
|
||||
authorizeTokenMaxAgeSeconds: 500
|
||||
pauseControllers: false
|
||||
policyConfig:
|
||||
bootstrapPolicyFile: /etc/origin/master/policy.json
|
||||
openshiftInfrastructureNamespace: openshift-infra
|
||||
openshiftSharedResourcesNamespace: openshift
|
||||
projectConfig:
|
||||
defaultNodeSelector: node-role.kubernetes.io/compute=true
|
||||
projectRequestMessage: ''
|
||||
projectRequestTemplate: ''
|
||||
securityAllocator:
|
||||
mcsAllocatorRange: s0:/2
|
||||
mcsLabelsPerProject: 5
|
||||
uidAllocatorRange: 1000000000-1999999999/10000
|
||||
routingConfig:
|
||||
subdomain: TEMPROUTERIP.nip.io
|
||||
serviceAccountConfig:
|
||||
limitSecretReferences: false
|
||||
managedNames:
|
||||
- default
|
||||
- builder
|
||||
- deployer
|
||||
masterCA: ca-bundle.crt
|
||||
privateKeyFile: serviceaccounts.private.key
|
||||
publicKeyFiles:
|
||||
- serviceaccounts.public.key
|
||||
servingInfo:
|
||||
bindAddress: 0.0.0.0:{{ .Master.Port }}
|
||||
bindNetwork: tcp4
|
||||
certFile: master.server.crt
|
||||
clientCA: ca.crt
|
||||
keyFile: master.server.key
|
||||
maxRequestsInFlight: 500
|
||||
requestTimeoutSeconds: 3600
|
||||
volumeConfig:
|
||||
dynamicProvisioningEnabled: true
|
|
@ -0,0 +1,88 @@
|
|||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Policy",
|
||||
"predicates": [
|
||||
{
|
||||
"name": "NoVolumeZoneConflict"
|
||||
},
|
||||
{
|
||||
"name": "MaxEBSVolumeCount"
|
||||
},
|
||||
{
|
||||
"name": "MaxGCEPDVolumeCount"
|
||||
},
|
||||
{
|
||||
"name": "MaxAzureDiskVolumeCount"
|
||||
},
|
||||
{
|
||||
"name": "MatchInterPodAffinity"
|
||||
},
|
||||
{
|
||||
"name": "NoDiskConflict"
|
||||
},
|
||||
{
|
||||
"name": "GeneralPredicates"
|
||||
},
|
||||
{
|
||||
"name": "PodToleratesNodeTaints"
|
||||
},
|
||||
{
|
||||
"name": "CheckNodeMemoryPressure"
|
||||
},
|
||||
{
|
||||
"name": "CheckNodeDiskPressure"
|
||||
},
|
||||
{
|
||||
"name": "CheckVolumeBinding"
|
||||
},
|
||||
{
|
||||
"argument": {
|
||||
"serviceAffinity": {
|
||||
"labels": [
|
||||
"region"
|
||||
]
|
||||
}
|
||||
},
|
||||
"name": "Region"
|
||||
}
|
||||
],
|
||||
"priorities": [
|
||||
{
|
||||
"name": "SelectorSpreadPriority",
|
||||
"weight": 1
|
||||
},
|
||||
{
|
||||
"name": "InterPodAffinityPriority",
|
||||
"weight": 1
|
||||
},
|
||||
{
|
||||
"name": "LeastRequestedPriority",
|
||||
"weight": 1
|
||||
},
|
||||
{
|
||||
"name": "BalancedResourceAllocation",
|
||||
"weight": 1
|
||||
},
|
||||
{
|
||||
"name": "NodePreferAvoidPodsPriority",
|
||||
"weight": 10000
|
||||
},
|
||||
{
|
||||
"name": "NodeAffinityPriority",
|
||||
"weight": 1
|
||||
},
|
||||
{
|
||||
"name": "TaintTolerationPriority",
|
||||
"weight": 1
|
||||
},
|
||||
{
|
||||
"argument": {
|
||||
"serviceAntiAffinity": {
|
||||
"label": "zone"
|
||||
}
|
||||
},
|
||||
"name": "Zone",
|
||||
"weight": 2
|
||||
}
|
||||
]
|
||||
}
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: v1
|
||||
kind: SessionSecrets
|
||||
secrets:
|
||||
- authentication: "{{ .AuthSecret }}"
|
||||
encryption: "{{ .EncSecret }}"
|
|
@ -0,0 +1,111 @@
|
|||
#!/bin/bash -x
|
||||
|
||||
# TODO: do this, and more (registry console, asb), the proper way
|
||||
|
||||
# we get "dial tcp: lookup foo.eastus.cloudapp.azure.com on 10.0.0.11:53: read
|
||||
# udp 172.17.0.2:56662->10.0.0.11:53: read: no route to host" errors at
|
||||
# start-up: wait until these subside.
|
||||
while ! oc version &>/dev/null; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
oc patch project default -p '{"metadata":{"annotations":{"openshift.io/node-selector": ""}}}'
|
||||
|
||||
oc adm registry --images="$IMAGE_BASE-\${component}:\${version}" --selector='region=infra'
|
||||
|
||||
# Deploy the router reusing relevant parts from openshift-ansible
|
||||
ANSIBLE_ROLES_PATH=/usr/share/ansible/openshift-ansible/roles/ ansible-playbook -c local deploy-router.yml -i azure-local-master-inventory.yml
|
||||
|
||||
oc create -f - <<'EOF'
|
||||
kind: Project
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: openshift-web-console
|
||||
annotations:
|
||||
openshift.io/node-selector: ""
|
||||
EOF
|
||||
|
||||
oc process -f /usr/share/ansible/openshift-ansible/roles/openshift_web_console/files/console-template.yaml \
|
||||
-p API_SERVER_CONFIG="$(sed -e s/127.0.0.1/{{ .ExternalMasterHostname }}/g </usr/share/ansible/openshift-ansible/roles/openshift_web_console/files/console-config.yaml)" \
|
||||
-p NODE_SELECTOR='{"node-role.kubernetes.io/master":"true"}' \
|
||||
-p IMAGE="$IMAGE_BASE-web-console:v$VERSION" \
|
||||
| oc create -f -
|
||||
|
||||
oc create -f - <<'EOF'
|
||||
kind: Project
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kube-service-catalog
|
||||
annotations:
|
||||
openshift.io/node-selector: ""
|
||||
EOF
|
||||
|
||||
oc create secret generic -n kube-service-catalog apiserver-ssl \
|
||||
--from-file=tls.crt=/etc/origin/service-catalog/apiserver.crt \
|
||||
--from-file=tls.key=/etc/origin/service-catalog/apiserver.key
|
||||
|
||||
oc create secret generic -n kube-service-catalog service-catalog-ssl \
|
||||
--from-file=tls.crt=/etc/origin/service-catalog/apiserver.crt
|
||||
|
||||
oc create -f - <<EOF
|
||||
apiVersion: apiregistration.k8s.io/v1beta1
|
||||
kind: APIService
|
||||
metadata:
|
||||
name: v1beta1.servicecatalog.k8s.io
|
||||
spec:
|
||||
caBundle: $(base64 -w0 </etc/origin/service-catalog/ca.crt)
|
||||
group: servicecatalog.k8s.io
|
||||
groupPriorityMinimum: 20
|
||||
service:
|
||||
name: apiserver
|
||||
namespace: kube-service-catalog
|
||||
version: v1beta1
|
||||
versionPriority: 10
|
||||
EOF
|
||||
|
||||
oc project kube-service-catalog
|
||||
oc process -f /usr/share/ansible/openshift-ansible/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | oc create -f -
|
||||
oc project default
|
||||
oc process -f /usr/share/ansible/openshift-ansible/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml | oc create -f -
|
||||
oc auth reconcile -f /usr/share/ansible/openshift-ansible/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml
|
||||
oc adm policy add-scc-to-user hostmount-anyuid system:serviceaccount:kube-service-catalog:service-catalog-apiserver
|
||||
oc adm policy add-cluster-role-to-user admin system:serviceaccount:kube-service-catalog:default
|
||||
oc process -f service-catalog.yaml \
|
||||
-p CA_HASH="$(base64 -w0 </etc/origin/service-catalog/ca.crt | sha1sum | cut -d' ' -f1)" \
|
||||
-p ETCD_SERVER="$HOSTNAME" \
|
||||
-p IMAGE="$IMAGE_BASE-service-catalog:v$VERSION" \
|
||||
| oc create -f -
|
||||
oc rollout status -n kube-service-catalog daemonset apiserver
|
||||
|
||||
oc create -f - <<'EOF'
|
||||
kind: Project
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: openshift-template-service-broker
|
||||
annotations:
|
||||
openshift.io/node-selector: ""
|
||||
EOF
|
||||
|
||||
oc process -f /usr/share/ansible/openshift-ansible/roles/template_service_broker/files/apiserver-template.yaml \
|
||||
-p IMAGE="$IMAGE_BASE-template-service-broker:v$VERSION" \
|
||||
-p NODE_SELECTOR='{"region":"infra"}' \
|
||||
| oc create -f -
|
||||
oc process -f /usr/share/ansible/openshift-ansible/roles/template_service_broker/files/rbac-template.yaml | oc auth reconcile -f -
|
||||
|
||||
while true; do
|
||||
oc process -f /usr/share/ansible/openshift-ansible/roles/template_service_broker/files/template-service-broker-registration.yaml \
|
||||
-p CA_BUNDLE=$(base64 -w 0 </etc/origin/master/service-signer.crt) \
|
||||
| oc create -f - && break
|
||||
sleep 10
|
||||
done
|
||||
|
||||
MAJORVERSION="${VERSION%.*}"
|
||||
for file in /usr/share/ansible/openshift-ansible/roles/openshift_examples/files/examples/v$MAJORVERSION/db-templates/*.json \
|
||||
/usr/share/ansible/openshift-ansible/roles/openshift_examples/files/examples/v$MAJORVERSION/image-streams/*-rhel7.json \
|
||||
/usr/share/ansible/openshift-ansible/roles/openshift_examples/files/examples/v$MAJORVERSION/quickstart-templates/*.json \
|
||||
/usr/share/ansible/openshift-ansible/roles/openshift_examples/files/examples/v$MAJORVERSION/xpaas-streams/*.json \
|
||||
/usr/share/ansible/openshift-ansible/roles/openshift_examples/files/examples/v$MAJORVERSION/xpaas-templates/*.json; do
|
||||
oc create -n openshift -f $file
|
||||
done
|
||||
|
||||
# TODO: possibly wait here for convergence?
|
|
@ -0,0 +1,27 @@
|
|||
---
|
||||
localmaster:
|
||||
hosts:
|
||||
localhost
|
||||
vars:
|
||||
ansible_connection: local
|
||||
ansible_python_interpreter: /usr/bin/python2
|
||||
|
||||
oreg_url_master: 'TEMPIMAGEBASE-${component}:${version}'
|
||||
|
||||
# The value of TEMPROUTERIP will be substituted during the acs-engine ARM
|
||||
# deployment process with the correct IP address
|
||||
openshift_master_default_subdomain: 'TEMPROUTERIP.nip.io'
|
||||
|
||||
# FIXME
|
||||
# This should be type=infra, but we have to live with region=infra for now
|
||||
# because of legacy reasons
|
||||
openshift_router_selector: 'region=infra'
|
||||
openshift_deployment_type: 'openshift-enterprise'
|
||||
# NOTE: Do not define openshift_hosted_router_replicas so that the task file
|
||||
# router.yml inside the openshift_hosted role from openshift-ansible will
|
||||
# autopopulate it using the openshift_hosted_router_selector and querying
|
||||
# the number of infra nodes
|
||||
|
||||
openshift:
|
||||
common:
|
||||
config_base: /etc/origin/
|
|
@ -0,0 +1,9 @@
|
|||
- name: Deploy the local OpenShift Router
|
||||
hosts: localmaster
|
||||
gather_facts: false
|
||||
|
||||
tasks:
|
||||
- name: Import and use router task file from openshift-ansible openshift_hosted role
|
||||
import_role:
|
||||
name: openshift_hosted
|
||||
tasks_from: router.yml
|
|
@ -0,0 +1,187 @@
|
|||
apiVersion: v1
|
||||
kind: Template
|
||||
objects:
|
||||
- apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
app: apiserver
|
||||
name: apiserver
|
||||
namespace: ${NAMESPACE}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: apiserver
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
ca_hash: ${CA_HASH}
|
||||
labels:
|
||||
app: apiserver
|
||||
spec:
|
||||
serviceAccountName: service-catalog-apiserver
|
||||
nodeSelector:
|
||||
openshift-infra: apiserver
|
||||
containers:
|
||||
- args:
|
||||
- apiserver
|
||||
- --storage-type
|
||||
- etcd
|
||||
- --secure-port
|
||||
- "6443"
|
||||
- --etcd-servers
|
||||
- https://${ETCD_SERVER}:2379
|
||||
- --etcd-cafile
|
||||
- /etc/origin/master/master.etcd-ca.crt
|
||||
- --etcd-certfile
|
||||
- /etc/origin/master/master.etcd-client.crt
|
||||
- --etcd-keyfile
|
||||
- /etc/origin/master/master.etcd-client.key
|
||||
- -v
|
||||
- "10"
|
||||
- --cors-allowed-origins
|
||||
- localhost
|
||||
- --admission-control
|
||||
- KubernetesNamespaceLifecycle,DefaultServicePlan,ServiceBindingsLifecycle,ServicePlanChangeValidator,BrokerAuthSarCheck
|
||||
- --feature-gates
|
||||
- OriginatingIdentity=true
|
||||
image: ${IMAGE}
|
||||
command: ["/usr/bin/service-catalog"]
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: apiserver
|
||||
ports:
|
||||
- containerPort: 6443
|
||||
protocol: TCP
|
||||
resources: {}
|
||||
terminationMessagePath: /dev/termination-log
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/kubernetes-service-catalog
|
||||
name: apiserver-ssl
|
||||
readOnly: true
|
||||
- mountPath: /etc/origin/master
|
||||
name: etcd-host-cert
|
||||
readOnly: true
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
securityContext: {}
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: apiserver-ssl
|
||||
secret:
|
||||
defaultMode: 420
|
||||
secretName: apiserver-ssl
|
||||
items:
|
||||
- key: tls.crt
|
||||
path: apiserver.crt
|
||||
- key: tls.key
|
||||
path: apiserver.key
|
||||
- hostPath:
|
||||
path: /etc/origin/master
|
||||
name: etcd-host-cert
|
||||
- emptyDir: {}
|
||||
name: data-dir
|
||||
- kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: apiserver
|
||||
namespace: ${NAMESPACE}
|
||||
spec:
|
||||
ports:
|
||||
- name: secure
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: 6443
|
||||
selector:
|
||||
app: apiserver
|
||||
sessionAffinity: None
|
||||
- apiVersion: v1
|
||||
kind: Route
|
||||
metadata:
|
||||
name: apiserver
|
||||
namespace: ${NAMESPACE}
|
||||
spec:
|
||||
port:
|
||||
targetPort: secure
|
||||
tls:
|
||||
termination: passthrough
|
||||
to:
|
||||
kind: Service
|
||||
name: apiserver
|
||||
weight: 100
|
||||
wildcardPolicy: None
|
||||
- apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
app: controller-manager
|
||||
name: controller-manager
|
||||
namespace: ${NAMESPACE}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: controller-manager
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: controller-manager
|
||||
spec:
|
||||
serviceAccountName: service-catalog-controller
|
||||
nodeSelector:
|
||||
openshift-infra: apiserver
|
||||
containers:
|
||||
- env:
|
||||
- name: K8S_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
args:
|
||||
- controller-manager
|
||||
- --port
|
||||
- "8080"
|
||||
- -v
|
||||
- "5"
|
||||
- --leader-election-namespace
|
||||
- kube-service-catalog
|
||||
- --broker-relist-interval
|
||||
- "5m"
|
||||
- --feature-gates
|
||||
- OriginatingIdentity=true
|
||||
image: ${IMAGE}
|
||||
command: ["/usr/bin/service-catalog"]
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: controller-manager
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
protocol: TCP
|
||||
resources: {}
|
||||
terminationMessagePath: /dev/termination-log
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/kubernetes-service-catalog
|
||||
name: service-catalog-ssl
|
||||
readOnly: true
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
securityContext: {}
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: service-catalog-ssl
|
||||
secret:
|
||||
defaultMode: 420
|
||||
items:
|
||||
- key: tls.crt
|
||||
path: apiserver.crt
|
||||
secretName: apiserver-ssl
|
||||
parameters:
|
||||
- name: CA_HASH
|
||||
- name: ETCD_SERVER
|
||||
- name: NAMESPACE
|
||||
value: kube-service-catalog
|
||||
- name: IMAGE
|
|
@ -0,0 +1,46 @@
allowDisabledDocker: false
apiVersion: v1
dnsBindAddress: 127.0.0.1:53
dnsRecursiveResolvConf: /etc/origin/node/resolv.conf
dnsDomain: cluster.local
dnsIP: 172.17.0.1
dockerConfig:
  execHandlerName: ""
iptablesSyncPeriod: "30s"
imageConfig:
  format: TEMPIMAGEBASE-${component}:${version}
  latest: False
kind: NodeConfig
kubeletArguments:
  node-labels:
  - node-role.kubernetes.io/compute=true
  - region=primary
  cloud-provider:
  - "azure"
  cloud-config:
  - "/etc/azure/azure.conf"
masterClientConnectionOverrides:
  acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
  contentType: application/vnd.kubernetes.protobuf
  burst: 200
  qps: 100
masterKubeConfig: node.kubeconfig
networkPluginName: redhat/openshift-ovs-subnet
# networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
# deprecates networkPluginName above. The two should match.
networkConfig:
  mtu: 1450
  networkPluginName: redhat/openshift-ovs-subnet
podManifestConfig:
servingInfo:
  bindAddress: 0.0.0.0:10250
  certFile: server.crt
  clientCA: ca.crt
  keyFile: server.key
volumeDirectory: /var/lib/origin/openshift.local.volumes
proxyArguments:
  proxy-mode:
  - iptables
volumeConfig:
  localQuota:
    perFSGroup:

@ -0,0 +1,45 @@
allowDisabledDocker: false
apiVersion: v1
dnsBindAddress: 127.0.0.1:53
dnsRecursiveResolvConf: /etc/origin/node/resolv.conf
dnsDomain: cluster.local
dnsIP: 172.17.0.1
dockerConfig:
  execHandlerName: ""
iptablesSyncPeriod: "30s"
imageConfig:
  format: TEMPIMAGEBASE-${component}:${version}
  latest: False
kind: NodeConfig
kubeletArguments:
  node-labels:
  - region=infra
  cloud-provider:
  - "azure"
  cloud-config:
  - "/etc/azure/azure.conf"
masterClientConnectionOverrides:
  acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
  contentType: application/vnd.kubernetes.protobuf
  burst: 200
  qps: 100
masterKubeConfig: node.kubeconfig
networkPluginName: redhat/openshift-ovs-subnet
# networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
# deprecates networkPluginName above. The two should match.
networkConfig:
  mtu: 1450
  networkPluginName: redhat/openshift-ovs-subnet
podManifestConfig:
servingInfo:
  bindAddress: 0.0.0.0:10250
  certFile: server.crt
  clientCA: ca.crt
  keyFile: server.key
volumeDirectory: /var/lib/origin/openshift.local.volumes
proxyArguments:
  proxy-mode:
  - iptables
volumeConfig:
  localQuota:
    perFSGroup:

@ -0,0 +1,46 @@
allowDisabledDocker: false
apiVersion: v1
dnsBindAddress: 127.0.0.1:53
dnsRecursiveResolvConf: /etc/origin/node/resolv.conf
dnsDomain: cluster.local
dnsIP: 172.17.0.1
dockerConfig:
  execHandlerName: ""
iptablesSyncPeriod: "30s"
imageConfig:
  format: TEMPIMAGEBASE-${component}:${version}
  latest: False
kind: NodeConfig
kubeletArguments:
  node-labels:
  - node-role.kubernetes.io/master=true
  - openshift-infra=apiserver
  cloud-provider:
  - "azure"
  cloud-config:
  - "/etc/azure/azure.conf"
masterClientConnectionOverrides:
  acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
  contentType: application/vnd.kubernetes.protobuf
  burst: 200
  qps: 100
masterKubeConfig: node.kubeconfig
networkPluginName: redhat/openshift-ovs-subnet
# networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
# deprecates networkPluginName above. The two should match.
networkConfig:
  mtu: 1450
  networkPluginName: redhat/openshift-ovs-subnet
podManifestConfig:
servingInfo:
  bindAddress: 0.0.0.0:10250
  certFile: server.crt
  clientCA: ca.crt
  keyFile: server.key
volumeDirectory: /var/lib/origin/openshift.local.volumes
proxyArguments:
  proxy-mode:
  - iptables
volumeConfig:
  localQuota:
    perFSGroup:

@ -0,0 +1,7 @@
tenantId: {{ .AzureConfig.TenantID }}
subscriptionId: {{ .AzureConfig.SubscriptionID }}
aadClientId: {{ .AzureConfig.AADClientID }}
aadClientSecret: {{ .AzureConfig.AADClientSecret }}
aadTenantId: {{ .AzureConfig.TenantID }}
resourceGroup: {{ .AzureConfig.ResourceGroup }}
location: {{ .AzureConfig.Location }}

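The file above is a Go text/template for the node's Azure cloud-provider config (the `/etc/azure/azure.conf` referenced by the NodeConfig files). A minimal sketch of rendering it with the standard library, assuming a data struct shaped like the `.AzureConfig` fields the template references (the actual type used inside acs-engine is not shown in this diff):

```go
package main

import (
	"os"
	"text/template"
)

// azureConfig mirrors the fields referenced by the template above; the real
// struct name and its location inside acs-engine are assumptions.
type azureConfig struct {
	TenantID, SubscriptionID, AADClientID, AADClientSecret string
	ResourceGroup, Location                                string
}

const azureConfTemplate = `tenantId: {{ .AzureConfig.TenantID }}
subscriptionId: {{ .AzureConfig.SubscriptionID }}
aadClientId: {{ .AzureConfig.AADClientID }}
aadClientSecret: {{ .AzureConfig.AADClientSecret }}
aadTenantId: {{ .AzureConfig.TenantID }}
resourceGroup: {{ .AzureConfig.ResourceGroup }}
location: {{ .AzureConfig.Location }}
`

func main() {
	t := template.Must(template.New("azure.conf").Parse(azureConfTemplate))
	data := struct{ AzureConfig azureConfig }{
		AzureConfig: azureConfig{
			TenantID:        "00000000-0000-0000-0000-000000000000",
			SubscriptionID:  "11111111-1111-1111-1111-111111111111",
			AADClientID:     "client-id",
			AADClientSecret: "client-secret",
			ResourceGroup:   "my-rg",
			Location:        "eastus",
		},
	}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```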
@ -0,0 +1,2 @@
server=/in-addr.arpa/127.0.0.1
server=/cluster.local/127.0.0.1

@ -0,0 +1 @@
nameserver 168.63.129.16

@ -0,0 +1,4 @@
//go:generate go get -u github.com/go-bindata/go-bindata/...
//go:generate go-bindata -pkg templates master/... node/...

package templates

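The `go:generate` directives above embed the `master/...` and `node/...` template trees into the git-ignored `pkg/openshift/certgen/templates/bindata.go` (also excluded from linting later in this change). A sketch of how the embedded files would typically be read back, assuming the conventional accessors that go-bindata generates (`Asset`, `AssetNames`); the actual consumer code is not part of this diff:

```go
package main

// Illustrative only: go-bindata conventionally generates Asset/MustAsset/
// AssetNames into the target package; this sketch assumes that API.

import (
	"fmt"

	"github.com/Azure/acs-engine/pkg/openshift/certgen/templates"
)

func main() {
	for _, name := range templates.AssetNames() {
		b, err := templates.Asset(name)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d bytes\n", name, len(b))
	}
}
```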
@ -0,0 +1,502 @@
|
|||
package certgen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/acs-engine/pkg/openshift/filesystem"
|
||||
)
|
||||
|
||||
type authKeyID struct {
|
||||
KeyIdentifier []byte `asn1:"optional,tag:0"`
|
||||
AuthorityCertIssuer generalName `asn1:"optional,tag:1"`
|
||||
AuthorityCertSerialNumber *big.Int `asn1:"optional,tag:2"`
|
||||
}
|
||||
|
||||
type generalName struct {
|
||||
DirectoryName pkix.RDNSequence `asn1:"optional,explicit,tag:4"`
|
||||
}
|
||||
|
||||
func newCertAndKey(filename string, template, signingcert *x509.Certificate, signingkey *rsa.PrivateKey, etcdcaspecial, etcdclientspecial bool) (CertAndKey, error) {
|
||||
bits := 2048
|
||||
if etcdcaspecial {
|
||||
bits = 4096
|
||||
}
|
||||
|
||||
key, err := rsa.GenerateKey(rand.Reader, bits)
|
||||
if err != nil {
|
||||
return CertAndKey{}, err
|
||||
}
|
||||
|
||||
if signingcert == nil {
|
||||
// make it self-signed
|
||||
signingcert = template
|
||||
signingkey = key
|
||||
}
|
||||
|
||||
if etcdcaspecial {
|
||||
template.SubjectKeyId = intsha1(key.N)
|
||||
ext := pkix.Extension{
|
||||
Id: []int{2, 5, 29, 35},
|
||||
}
|
||||
var err error
|
||||
ext.Value, err = asn1.Marshal(authKeyID{
|
||||
AuthorityCertIssuer: generalName{DirectoryName: signingcert.Subject.ToRDNSequence()},
|
||||
AuthorityCertSerialNumber: signingcert.SerialNumber,
|
||||
})
|
||||
if err != nil {
|
||||
return CertAndKey{}, err
|
||||
}
|
||||
template.ExtraExtensions = append(template.Extensions, ext)
|
||||
template.MaxPathLenZero = true
|
||||
}
|
||||
|
||||
if etcdclientspecial {
|
||||
template.SubjectKeyId = intsha1(key.N)
|
||||
ext := pkix.Extension{
|
||||
Id: []int{2, 5, 29, 35},
|
||||
}
|
||||
var err error
|
||||
ext.Value, err = asn1.Marshal(authKeyID{
|
||||
KeyIdentifier: intsha1(signingkey.N),
|
||||
AuthorityCertIssuer: generalName{DirectoryName: signingcert.Subject.ToRDNSequence()},
|
||||
AuthorityCertSerialNumber: signingcert.SerialNumber,
|
||||
})
|
||||
if err != nil {
|
||||
return CertAndKey{}, err
|
||||
}
|
||||
template.ExtraExtensions = append(template.Extensions, ext)
|
||||
}
|
||||
|
||||
b, err := x509.CreateCertificate(rand.Reader, template, signingcert, key.Public(), signingkey)
|
||||
if err != nil {
|
||||
return CertAndKey{}, err
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate(b)
|
||||
if err != nil {
|
||||
return CertAndKey{}, err
|
||||
}
|
||||
|
||||
return CertAndKey{cert: cert, key: key}, nil
|
||||
}
|
||||
|
||||
func certAsBytes(cert *x509.Certificate) ([]byte, error) {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
err := pem.Encode(buf, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func writeCert(fs filesystem.Filesystem, filename string, cert *x509.Certificate) error {
|
||||
b, err := certAsBytes(cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fs.WriteFile(filename, b, 0666)
|
||||
}
|
||||
|
||||
func privateKeyAsBytes(key *rsa.PrivateKey) ([]byte, error) {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
err := pem.Encode(buf, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func writePrivateKey(fs filesystem.Filesystem, filename string, key *rsa.PrivateKey) error {
|
||||
b, err := privateKeyAsBytes(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fs.WriteFile(filename, b, 0600)
|
||||
}
|
||||
|
||||
func writePublicKey(fs filesystem.Filesystem, filename string, key *rsa.PublicKey) error {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
b, err := x509.MarshalPKIXPublicKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pem.Encode(buf, &pem.Block{Type: "PUBLIC KEY", Bytes: b})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fs.WriteFile(filename, buf.Bytes(), 0666)
|
||||
}
|
||||
|
||||
// PrepareMasterCerts creates the master certs
|
||||
func (c *Config) PrepareMasterCerts() error {
|
||||
if c.cas == nil {
|
||||
c.cas = map[string]CertAndKey{}
|
||||
}
|
||||
|
||||
if c.Master.certs == nil {
|
||||
c.Master.certs = map[string]CertAndKey{}
|
||||
}
|
||||
|
||||
if c.Master.etcdcerts == nil {
|
||||
c.Master.etcdcerts = map[string]CertAndKey{}
|
||||
}
|
||||
|
||||
ips := append([]net.IP{}, c.Master.IPs...)
|
||||
ips = append(ips, net.ParseIP("172.30.0.1"))
|
||||
|
||||
dns := []string{
|
||||
c.ExternalMasterHostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc",
|
||||
"kubernetes.default.svc.cluster.local", c.Master.Hostname, "openshift",
|
||||
"openshift.default", "openshift.default.svc",
|
||||
"openshift.default.svc.cluster.local",
|
||||
}
|
||||
for _, ip := range ips {
|
||||
dns = append(dns, ip.String())
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
cacerts := []struct {
|
||||
filename string
|
||||
template *x509.Certificate
|
||||
}{
|
||||
{
|
||||
filename: "etc/origin/master/ca",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: fmt.Sprintf("openshift-signer@%d", now.Unix())},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/front-proxy-ca",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: fmt.Sprintf("openshift-signer@%d", now.Unix())},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/frontproxy-ca",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: fmt.Sprintf("aggregator-proxy-car@%d", now.Unix())},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/master.etcd-ca",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: fmt.Sprintf("etcd-signer@%d", now.Unix())},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/service-signer",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: fmt.Sprintf("openshift-service-serving-signer@%d", now.Unix())},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/service-catalog/ca",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: "service-catalog-signer"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, cacert := range cacerts {
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: c.serial.Get(),
|
||||
NotBefore: now,
|
||||
NotAfter: now.AddDate(5, 0, 0),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageCertSign,
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: true,
|
||||
}
|
||||
template.Subject = cacert.template.Subject
|
||||
|
||||
certAndKey, err := newCertAndKey(cacert.filename, template, nil, nil, cacert.filename == "etc/origin/master/master.etcd-ca", false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.cas[cacert.filename] = certAndKey
|
||||
}
|
||||
|
||||
certs := []struct {
|
||||
filename string
|
||||
template *x509.Certificate
|
||||
signer string
|
||||
}{
|
||||
{
|
||||
filename: "etc/origin/master/admin",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{Organization: []string{"system:cluster-admins", "system:masters"}, CommonName: "system:admin"},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/aggregator-front-proxy",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: "aggregator-front-proxy"},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
},
|
||||
signer: "etc/origin/master/front-proxy-ca",
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/etcd.server",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: c.Master.IPs[0].String()},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
DNSNames: dns,
|
||||
IPAddresses: ips,
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/master.etcd-client",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: c.Master.Hostname},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
DNSNames: []string{c.Master.Hostname}, // TODO
|
||||
IPAddresses: []net.IP{c.Master.IPs[0]}, // TODO
|
||||
},
|
||||
signer: "etc/origin/master/master.etcd-ca",
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/master.kubelet-client",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{Organization: []string{"system:node-admins"}, CommonName: "system:openshift-node-admin"},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/master.proxy-client",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: "system:master-proxy"},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/master.server",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: c.Master.IPs[0].String()},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
DNSNames: dns,
|
||||
IPAddresses: ips,
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/openshift-aggregator",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: "system:openshift-aggregator"},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
},
|
||||
signer: "etc/origin/master/frontproxy-ca",
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/openshift-master",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{Organization: []string{"system:masters", "system:openshift-master"}, CommonName: "system:openshift-master"},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/master/node-bootstrapper",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: "system:serviceaccount:openshift-infra:node-bootstrapper"},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
},
|
||||
},
|
||||
{
|
||||
filename: "etc/origin/service-catalog/apiserver",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: "apiserver.kube-service-catalog"},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
DNSNames: []string{"apiserver.kube-service-catalog", "apiserver.kube-service-catalog.svc", "apiserver.kube-service-catalog.svc.cluster.local"},
|
||||
},
|
||||
signer: "etc/origin/service-catalog/ca",
|
||||
},
|
||||
// TODO: registry cert
|
||||
}
|
||||
|
||||
for _, cert := range certs {
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: c.serial.Get(),
|
||||
NotBefore: now,
|
||||
NotAfter: now.AddDate(2, 0, 0),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
template.Subject = cert.template.Subject
|
||||
template.ExtKeyUsage = cert.template.ExtKeyUsage
|
||||
template.DNSNames = cert.template.DNSNames
|
||||
template.IPAddresses = cert.template.IPAddresses
|
||||
|
||||
if cert.signer == "" {
|
||||
cert.signer = "etc/origin/master/ca"
|
||||
}
|
||||
|
||||
certAndKey, err := newCertAndKey(cert.filename, template, c.cas[cert.signer].cert, c.cas[cert.signer].key, false, cert.filename == "etc/origin/master/master.etcd-client")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Master.certs[cert.filename] = certAndKey
|
||||
}
|
||||
|
||||
etcdcerts := []struct {
|
||||
filename string
|
||||
template *x509.Certificate
|
||||
signer string
|
||||
}{
|
||||
{
|
||||
filename: "etc/etcd/peer",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: c.Master.Hostname},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
|
||||
DNSNames: []string{c.Master.Hostname}, // TODO
|
||||
IPAddresses: []net.IP{c.Master.IPs[0]}, // TODO
|
||||
},
|
||||
signer: "etc/origin/master/master.etcd-ca",
|
||||
},
|
||||
{
|
||||
filename: "etc/etcd/server",
|
||||
template: &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: c.Master.Hostname},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
DNSNames: []string{c.Master.Hostname}, // TODO
|
||||
IPAddresses: []net.IP{c.Master.IPs[0]}, // TODO
|
||||
},
|
||||
signer: "etc/origin/master/master.etcd-ca",
|
||||
},
|
||||
}
|
||||
|
||||
for _, cert := range etcdcerts {
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: c.serial.Get(),
|
||||
NotBefore: now,
|
||||
NotAfter: now.AddDate(5, 0, 0),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
template.Subject = cert.template.Subject
|
||||
template.ExtKeyUsage = cert.template.ExtKeyUsage
|
||||
template.DNSNames = cert.template.DNSNames
|
||||
template.IPAddresses = cert.template.IPAddresses
|
||||
|
||||
certAndKey, err := newCertAndKey(cert.filename, template, c.cas[cert.signer].cert, c.cas[cert.signer].key, false, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Master.etcdcerts[cert.filename] = certAndKey
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteMasterCerts writes the master certs
|
||||
func (c *Config) WriteMasterCerts(fs filesystem.Filesystem) error {
|
||||
for filename, ca := range c.cas {
|
||||
err := writeCert(fs, fmt.Sprintf("%s.crt", filename), ca.cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writePrivateKey(fs, fmt.Sprintf("%s.key", filename), ca.key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := writeCert(fs, "etc/origin/master/ca-bundle.crt", c.cas["etc/origin/master/ca"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writeCert(fs, "etc/origin/master/client-ca-bundle.crt", c.cas["etc/origin/master/ca"].cert) // TODO: confirm if needed
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writeCert(fs, "etc/etcd/ca.crt", c.cas["etc/origin/master/master.etcd-ca"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for filename, cert := range c.Master.certs {
|
||||
err := writeCert(fs, fmt.Sprintf("%s.crt", filename), cert.cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writePrivateKey(fs, fmt.Sprintf("%s.key", filename), cert.key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for filename, cert := range c.Master.etcdcerts {
|
||||
err := writeCert(fs, fmt.Sprintf("%s.crt", filename), cert.cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writePrivateKey(fs, fmt.Sprintf("%s.key", filename), cert.key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return fs.WriteFile("etc/origin/master/ca.serial.txt", []byte(fmt.Sprintf("%02X\n", c.serial.Get())), 0666)
|
||||
}
|
||||
|
||||
// WriteBootstrapCerts writes the node bootstrap certs
|
||||
func (c *Config) WriteBootstrapCerts(fs filesystem.Filesystem) error {
|
||||
err := writeCert(fs, "etc/origin/node/ca.crt", c.cas["etc/origin/master/ca"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writeCert(fs, "etc/origin/node/node-bootstrapper.crt", c.Master.certs["etc/origin/master/node-bootstrapper"].cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writePrivateKey(fs, "etc/origin/node/node-bootstrapper.key", c.Master.certs["etc/origin/master/node-bootstrapper"].key)
|
||||
}
|
||||
|
||||
// WriteMasterKeypair writes the master service account keypair
|
||||
func (c *Config) WriteMasterKeypair(fs filesystem.Filesystem) error {
|
||||
key, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writePrivateKey(fs, "etc/origin/master/serviceaccounts.private.key", key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writePublicKey(fs, "etc/origin/master/serviceaccounts.public.key", &key.PublicKey)
|
||||
}
|
||||
|
||||
func intsha1(n *big.Int) []byte {
|
||||
h := sha1.New()
|
||||
h.Write(n.Bytes())
|
||||
return h.Sum(nil)
|
||||
}
|
|
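A note on the hard-coded `Id: []int{2, 5, 29, 35}` used in `newCertAndKey` above: 2.5.29.35 is the OID of the X.509 Authority Key Identifier extension (RFC 5280, section 4.2.1.1). The code marshals it by hand via `authKeyID`, presumably so that the etcd CA and etcd client certificates also carry the authority certificate issuer and serial number, which `crypto/x509` does not emit on its own. A small reference sketch using only the standard library:

```go
package main

import (
	"encoding/asn1"
	"fmt"
)

// oidAuthorityKeyIdentifier is id-ce-authorityKeyIdentifier, the same value
// hard-coded as []int{2, 5, 29, 35} in newCertAndKey above.
var oidAuthorityKeyIdentifier = asn1.ObjectIdentifier{2, 5, 29, 35}

func main() {
	fmt.Println(oidAuthorityKeyIdentifier.String()) // prints "2.5.29.35"
}
```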
@ -0,0 +1,3 @@
// Package openshift provides utilities related
// to the OpenShift orchestrator.
package openshift

@ -0,0 +1,141 @@
package filesystem

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"syscall"
	"time"
)

var umask int

func init() {
	umask = syscall.Umask(0)
	syscall.Umask(umask)
}

// Filesystem provides methods which are runnable on a bare filesystem or a
// tar.gz file
type Filesystem interface {
	WriteFile(filename string, data []byte, perm os.FileMode) error
	Close() error
}

type filesystem struct {
	name string
}

var _ Filesystem = &filesystem{}

// NewFilesystem returns a Filesystem interface backed by a bare filesystem
func NewFilesystem(name string) (Filesystem, error) {
	err := os.RemoveAll(name)
	if err != nil {
		return nil, err
	}

	err = os.MkdirAll(name, 0777)
	if err != nil {
		return nil, err
	}

	return &filesystem{name}, nil
}

func (f *filesystem) mkdirAll(name string, perm os.FileMode) error {
	return os.MkdirAll(name, perm)
}

func (f *filesystem) WriteFile(filename string, data []byte, perm os.FileMode) error {
	err := f.mkdirAll(filepath.Dir(filepath.Join(f.name, filename)), 0777)
	if err != nil {
		return err
	}

	return ioutil.WriteFile(filepath.Join(f.name, filename), data, perm)
}

func (filesystem) Close() error {
	return nil
}

type tgzfile struct {
	gz   *gzip.Writer
	tw   *tar.Writer
	now  time.Time
	dirs map[string]struct{}
}

var _ Filesystem = &tgzfile{}

// NewTGZFile returns a Filesystem interface backed by a tar.gz file
func NewTGZFile(w io.Writer) (Filesystem, error) {
	gz := gzip.NewWriter(w)
	tw := &tgzfile{
		gz:   gz,
		tw:   tar.NewWriter(gz),
		now:  time.Now(),
		dirs: map[string]struct{}{},
	}

	return tw, nil
}

func (t *tgzfile) mkdirAll(name string, perm os.FileMode) error {
	parts := strings.Split(name, "/")
	for i := 1; i < len(parts); i++ {
		name = filepath.Join(parts[:i]...)
		if _, exists := t.dirs[name]; exists {
			continue
		}
		err := t.tw.WriteHeader(&tar.Header{
			Name:     name,
			Mode:     int64(int(perm) &^ umask),
			ModTime:  t.now,
			Typeflag: tar.TypeDir,
			Uname:    "root",
			Gname:    "root",
		})
		if err != nil {
			return err
		}
		t.dirs[name] = struct{}{}
	}
	return nil
}

func (t *tgzfile) WriteFile(filename string, data []byte, perm os.FileMode) error {
	err := t.mkdirAll(filepath.Dir(filename), 0777)
	if err != nil {
		return err
	}

	err = t.tw.WriteHeader(&tar.Header{
		Name:     filename,
		Mode:     int64(int(perm) &^ umask),
		Size:     int64(len(data)),
		ModTime:  t.now,
		Typeflag: tar.TypeReg,
		Uname:    "root",
		Gname:    "root",
	})
	if err != nil {
		return err
	}

	_, err = t.tw.Write(data)
	return err
}

func (t *tgzfile) Close() error {
	err := t.tw.Close()
	if err != nil {
		return err
	}
	return t.gz.Close()
}

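Putting the two new packages together: the certgen `Config` prepares and writes the master PKI, and the `Filesystem` abstraction decides whether that output lands on a bare directory or inside a tar.gz. A minimal usage sketch, assuming the `Config` type exposes the `Master.Hostname`, `Master.IPs`, and `ExternalMasterHostname` fields that `PrepareMasterCerts` reads (the full `Config` definition is not part of this diff, so this is hypothetical wiring rather than a confirmed public API):

```go
package main

import (
	"net"
	"os"

	"github.com/Azure/acs-engine/pkg/openshift/certgen"
	"github.com/Azure/acs-engine/pkg/openshift/filesystem"
)

func main() {
	// Hypothetical wiring; field names follow the usages visible in
	// PrepareMasterCerts above. The real Config presumably needs further
	// initialization (for example its serial-number source) that is set up
	// elsewhere in acs-engine.
	c := &certgen.Config{}
	c.ExternalMasterHostname = "openshift.example.com"
	c.Master.Hostname = "master-000000"
	c.Master.IPs = []net.IP{net.ParseIP("10.0.0.4")}

	if err := c.PrepareMasterCerts(); err != nil {
		panic(err)
	}

	// Write everything into a tar.gz instead of onto the local filesystem.
	out, err := os.Create("master-certs.tgz")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	fs, err := filesystem.NewTGZFile(out)
	if err != nil {
		panic(err)
	}
	if err := c.WriteMasterCerts(fs); err != nil {
		panic(err)
	}
	if err := c.WriteMasterKeypair(fs); err != nil {
		panic(err)
	}
	if err := fs.Close(); err != nil {
		panic(err)
	}
}
```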
@ -42,6 +42,8 @@ gometalinter \
  --exclude pkg/i18n/i18n.go \
  --exclude pkg/i18n/translations.go \
  --exclude pkg/acsengine/templates.go \
  --exclude pkg/openshift/certgen/templates/bindata.go \
  --exclude pkg/openshift/certgen/novalidate.go \
  ./... || exit_code=1

echo

@ -56,6 +58,8 @@ gometalinter \
  --deadline 60s \
  --exclude pkg/i18n/translations.go \
  --exclude pkg/acsengine/templates.go \
  --exclude pkg/openshift/certgen/templates/bindata.go \
  --exclude pkg/openshift/certgen/novalidate.go \
  ./... || exit_code=1

exit $exit_code