Remove DCOS and Swarm support (#92)

This commit is contained in:
Matt Boersma 2018-12-05 16:20:16 -07:00 коммит произвёл Tariq Ibrahim
Родитель 49ed37bef3
Коммит c9b815515e
141 изменённых файлов: 49 добавлений и 4294 удалений

2
.github/ISSUE_TEMPLATE.md поставляемый
Просмотреть файл

@ -27,7 +27,7 @@ might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->
**Orchestrator and version (e.g. Kubernetes, DC/OS, Swarm)**
**Kubernetes version**:
**What happened**:

Просмотреть файл

@ -14,8 +14,6 @@ config_updater:
label:
additional_labels:
- orchestrator/k8s
- orchestrator/dcos
- orchestrator/swarm
- DO-NOT-MERGE

Просмотреть файл

@ -1,242 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package cmd
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path"
"path/filepath"
"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/armhelpers"
"github.com/Azure/aks-engine/pkg/helpers"
"github.com/Azure/aks-engine/pkg/i18n"
"github.com/Azure/aks-engine/pkg/operations/dcosupgrade"
"github.com/leonelquinteros/gotext"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// Command name and help text for the dcos-upgrade subcommand.
const (
	dcosUpgradeName             = "dcos-upgrade"
	dcosUpgradeShortDescription = "Upgrade an existing DC/OS cluster"
	dcosUpgradeLongDescription  = "Upgrade an existing DC/OS cluster"
)

// dcosUpgradeCmd holds the flag values and derived state for a single
// invocation of the dcos-upgrade command.
type dcosUpgradeCmd struct {
	authArgs // embedded Azure authentication flags and state

	// user input
	resourceGroupName   string // --resource-group: resource group the cluster is deployed in
	deploymentDirectory string // --deployment-dir: location of the output from `generate`
	upgradeVersion      string // --upgrade-version: desired DC/OS version
	location            string // --location: region the cluster is deployed in
	sshPrivateKeyPath   string // --ssh-private-key-path: defaults to <deployment-dir>/id_rsa

	// derived
	containerService   *api.ContainerService      // parsed apimodel.json
	apiVersion         string                     // api model version reported by the loader
	currentDcosVersion string                     // orchestrator version currently recorded in the api model
	client             armhelpers.AKSEngineClient // Azure client constructed from authArgs
	locale             *gotext.Locale             // loaded i18n translations
	nameSuffix         string                     // suffix identifying this cluster's nodes in the resource group
	sshPrivateKey      []byte                     // contents of the file at sshPrivateKeyPath
}
// newDcosUpgradeCmd builds the cobra command for dcos-upgrade, registering
// its flags and the shared authentication flags, and wiring RunE to run().
func newDcosUpgradeCmd() *cobra.Command {
	upgrade := dcosUpgradeCmd{}

	command := &cobra.Command{
		Use:   dcosUpgradeName,
		Short: dcosUpgradeShortDescription,
		Long:  dcosUpgradeLongDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			return upgrade.run(cmd, args)
		},
	}

	flags := command.Flags()
	flags.StringVarP(&upgrade.location, "location", "l", "", "location the cluster is deployed in (required)")
	flags.StringVarP(&upgrade.resourceGroupName, "resource-group", "g", "", "the resource group where the cluster is deployed (required)")
	flags.StringVar(&upgrade.deploymentDirectory, "deployment-dir", "", "the location of the output from `generate` (required)")
	flags.StringVar(&upgrade.sshPrivateKeyPath, "ssh-private-key-path", "", "ssh private key path (default: <deployment-dir>/id_rsa)")
	flags.StringVar(&upgrade.upgradeVersion, "upgrade-version", "", "desired DC/OS version (required)")
	addAuthFlags(&upgrade.authArgs, flags)

	return command
}
// validate checks that all required flags were supplied, loads i18n
// translations, defaults and reads the ssh private key file, and validates
// the embedded auth arguments. On a missing flag it prints command usage
// and returns a descriptive error.
func (uc *dcosUpgradeCmd) validate(cmd *cobra.Command) error {
	log.Infoln("validating...")
	var err error
	uc.locale, err = i18n.LoadTranslations()
	if err != nil {
		return errors.Wrap(err, "error loading translation files")
	}
	if len(uc.resourceGroupName) == 0 {
		cmd.Usage()
		return errors.New("--resource-group must be specified")
	}
	if len(uc.location) == 0 {
		cmd.Usage()
		return errors.New("--location must be specified")
	}
	// Normalize the region string (see helpers.NormalizeAzureRegion).
	uc.location = helpers.NormalizeAzureRegion(uc.location)
	if len(uc.upgradeVersion) == 0 {
		cmd.Usage()
		return errors.New("--upgrade-version must be specified")
	}
	if len(uc.deploymentDirectory) == 0 {
		cmd.Usage()
		return errors.New("--deployment-dir must be specified")
	}
	// Default the private key path to <deployment-dir>/id_rsa when not given.
	if len(uc.sshPrivateKeyPath) == 0 {
		uc.sshPrivateKeyPath = filepath.Join(uc.deploymentDirectory, "id_rsa")
	}
	// Read the key now so a missing/unreadable file fails fast, before any
	// Azure calls are made.
	if uc.sshPrivateKey, err = ioutil.ReadFile(uc.sshPrivateKeyPath); err != nil {
		cmd.Usage()
		return errors.Wrap(err, "ssh-private-key-path must be specified")
	}
	if err = uc.authArgs.validateAuthArgs(); err != nil {
		return err
	}
	return nil
}
// loadCluster prepares the upgrade's goal state from existing artifacts:
// it builds the Azure client, ensures the resource group exists, parses
// apimodel.json from the deployment directory, verifies the requested
// upgrade version is a supported target, and extracts the cluster's name
// suffix from azuredeploy.json.
func (uc *dcosUpgradeCmd) loadCluster(cmd *cobra.Command) error {
	var err error
	if uc.client, err = uc.authArgs.getClient(); err != nil {
		return errors.Wrap(err, "Failed to get client")
	}
	ctx := context.Background()
	_, err = uc.client.EnsureResourceGroup(ctx, uc.resourceGroupName, uc.location, nil)
	if err != nil {
		return errors.Wrap(err, "Error ensuring resource group")
	}
	// load apimodel from the deployment directory
	apiModelPath := path.Join(uc.deploymentDirectory, "apimodel.json")
	if _, err = os.Stat(apiModelPath); os.IsNotExist(err) {
		return errors.Errorf("specified api model does not exist (%s)", apiModelPath)
	}
	apiloader := &api.Apiloader{
		Translator: &i18n.Translator{
			Locale: uc.locale,
		},
	}
	uc.containerService, uc.apiVersion, err = apiloader.LoadContainerServiceFromFile(apiModelPath, true, true, nil)
	if err != nil {
		return errors.Wrap(err, "error parsing the api model")
	}
	uc.currentDcosVersion = uc.containerService.Properties.OrchestratorProfile.OrchestratorVersion
	if uc.currentDcosVersion == uc.upgradeVersion {
		return errors.Errorf("already running DCOS %s", uc.upgradeVersion)
	}
	// The api model's location wins if both are set; they must agree.
	if len(uc.containerService.Location) == 0 {
		uc.containerService.Location = uc.location
	} else if uc.containerService.Location != uc.location {
		return errors.New("--location does not match api model location")
	}
	// get available upgrades for container service
	orchestratorInfo, err := api.GetOrchestratorVersionProfile(uc.containerService.Properties.OrchestratorProfile, false)
	if err != nil {
		return errors.Wrap(err, "error getting list of available upgrades")
	}
	// add the current version if upgrade has failed
	orchestratorInfo.Upgrades = append(orchestratorInfo.Upgrades, &api.OrchestratorProfile{
		OrchestratorType:    uc.containerService.Properties.OrchestratorProfile.OrchestratorType,
		OrchestratorVersion: uc.containerService.Properties.OrchestratorProfile.OrchestratorVersion})
	// validate desired upgrade version and set goal state
	found := false
	for _, up := range orchestratorInfo.Upgrades {
		if up.OrchestratorVersion == uc.upgradeVersion {
			uc.containerService.Properties.OrchestratorProfile.OrchestratorVersion = uc.upgradeVersion
			found = true
			break
		}
	}
	if !found {
		return errors.Errorf("upgrade to DCOS %s is not supported", uc.upgradeVersion)
	}
	// Read name suffix to identify nodes in the resource group that belong
	// to this cluster. Fail with a clear error rather than panicking when
	// the generated template is missing or malformed.
	templatePath := path.Join(uc.deploymentDirectory, "azuredeploy.json")
	contents, err := ioutil.ReadFile(templatePath)
	if err != nil {
		return errors.Wrapf(err, "error reading ARM template (%s)", templatePath)
	}
	var template interface{}
	if err = json.Unmarshal(contents, &template); err != nil {
		return errors.Wrapf(err, "error unmarshalling ARM template (%s)", templatePath)
	}
	templateMap, ok := template.(map[string]interface{})
	if !ok {
		return errors.Errorf("ARM template (%s) is not a JSON object", templatePath)
	}
	templateParameters, ok := templateMap["parameters"].(map[string]interface{})
	if !ok {
		return errors.Errorf("ARM template (%s) has no parameters object", templatePath)
	}
	nameSuffixParam, ok := templateParameters["nameSuffix"].(map[string]interface{})
	if !ok {
		return errors.Errorf("ARM template (%s) has no nameSuffix parameter", templatePath)
	}
	uc.nameSuffix, ok = nameSuffixParam["defaultValue"].(string)
	if !ok {
		return errors.Errorf("ARM template (%s) nameSuffix defaultValue is not a string", templatePath)
	}
	log.Infof("Name suffix: %s", uc.nameSuffix)
	return nil
}
// run is the RunE entry point for dcos-upgrade. It validates flags, loads
// the existing cluster state, performs the cluster upgrade, then serializes
// the updated api model back to apimodel.json in the deployment directory.
// NOTE: validation, load, and upgrade failures call log.Fatalf, which exits
// the process directly instead of returning the error to cobra.
func (uc *dcosUpgradeCmd) run(cmd *cobra.Command, args []string) error {
	err := uc.validate(cmd)
	if err != nil {
		log.Fatalf("error validating upgrade command: %v", err)
	}
	err = uc.loadCluster(cmd)
	if err != nil {
		log.Fatalf("error loading existing cluster: %v", err)
	}
	upgradeCluster := dcosupgrade.UpgradeCluster{
		Translator: &i18n.Translator{
			Locale: uc.locale,
		},
		Logger: log.NewEntry(log.New()),
		Client: uc.client,
	}
	if err = upgradeCluster.UpgradeCluster(uc.authArgs.SubscriptionID, uc.resourceGroupName, uc.currentDcosVersion,
		uc.containerService, uc.nameSuffix, uc.sshPrivateKey); err != nil {
		log.Fatalf("Error upgrading cluster: %v", err)
	}
	// Persist the upgraded api model (now carrying the new orchestrator
	// version) so subsequent commands see the cluster's current state.
	apiloader := &api.Apiloader{
		Translator: &i18n.Translator{
			Locale: uc.locale,
		},
	}
	b, err := apiloader.SerializeContainerService(uc.containerService, uc.apiVersion)
	if err != nil {
		return err
	}
	f := helpers.FileSaver{
		Translator: &i18n.Translator{
			Locale: uc.locale,
		},
	}
	return f.SaveFile(uc.deploymentDirectory, "apimodel.json", b)
}

Просмотреть файл

@ -1,167 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package cmd
import (
"io/ioutil"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// Ginkgo suite for the dcos-upgrade command. It covers two areas:
// command construction (flag registration) and table-driven validation of
// user input against the exact error messages validate() produces.
var _ = Describe("the upgrade command", func() {
	It("should create a DCOS upgrade command", func() {
		output := newDcosUpgradeCmd()
		// The command metadata must come from the package constants.
		Expect(output.Use).Should(Equal(dcosUpgradeName))
		Expect(output.Short).Should(Equal(dcosUpgradeShortDescription))
		Expect(output.Long).Should(Equal(dcosUpgradeLongDescription))
		// Every user-facing flag must be registered.
		Expect(output.Flags().Lookup("location")).NotTo(BeNil())
		Expect(output.Flags().Lookup("resource-group")).NotTo(BeNil())
		Expect(output.Flags().Lookup("deployment-dir")).NotTo(BeNil())
		Expect(output.Flags().Lookup("ssh-private-key-path")).NotTo(BeNil())
		Expect(output.Flags().Lookup("upgrade-version")).NotTo(BeNil())
	})
	It("should validate DCOS upgrade command", func() {
		r := &cobra.Command{}
		// A real temp file so the ssh-private-key read succeeds where a
		// valid path is supplied.
		privKey, err := ioutil.TempFile("", "id_rsa")
		Expect(err).To(BeNil())
		defer os.Remove(privKey.Name())
		// Table of validate() scenarios: each case omits or supplies one
		// input and states the exact expected error (nil for success).
		cases := []struct {
			uc                *dcosUpgradeCmd
			expectedErr       error
			hideLocalAzConfig bool // unset HOME so a local az config can't satisfy auth
		}{
			{
				// missing --resource-group
				uc: &dcosUpgradeCmd{
					resourceGroupName:   "",
					deploymentDirectory: "_output/test",
					upgradeVersion:      "1.8.9",
					location:            "centralus",
					sshPrivateKeyPath:   privKey.Name(),
					authArgs: authArgs{
						rawSubscriptionID: "99999999-0000-0000-0000-000000000000",
					},
				},
				expectedErr: errors.New("--resource-group must be specified"),
			},
			{
				// missing --location
				uc: &dcosUpgradeCmd{
					resourceGroupName:   "test",
					deploymentDirectory: "_output/test",
					upgradeVersion:      "1.8.9",
					location:            "",
					sshPrivateKeyPath:   privKey.Name(),
					authArgs: authArgs{
						rawSubscriptionID: "99999999-0000-0000-0000-000000000000",
					},
				},
				expectedErr: errors.New("--location must be specified"),
			},
			{
				// missing --upgrade-version
				uc: &dcosUpgradeCmd{
					resourceGroupName:   "test",
					deploymentDirectory: "_output/test",
					upgradeVersion:      "",
					location:            "southcentralus",
					sshPrivateKeyPath:   privKey.Name(),
					authArgs: authArgs{
						rawSubscriptionID: "99999999-0000-0000-0000-000000000000",
					},
				},
				expectedErr: errors.New("--upgrade-version must be specified"),
			},
			{
				// missing --deployment-dir
				uc: &dcosUpgradeCmd{
					resourceGroupName:   "test",
					deploymentDirectory: "",
					upgradeVersion:      "1.9.0",
					location:            "southcentralus",
					sshPrivateKeyPath:   privKey.Name(),
					authArgs: authArgs{
						rawSubscriptionID: "99999999-0000-0000-0000-000000000000",
					},
				},
				expectedErr: errors.New("--deployment-dir must be specified"),
			},
			{
				// duplicate of the previous case (kept as-is in the table)
				uc: &dcosUpgradeCmd{
					resourceGroupName:   "test",
					deploymentDirectory: "",
					upgradeVersion:      "1.9.0",
					location:            "southcentralus",
					sshPrivateKeyPath:   privKey.Name(),
					authArgs: authArgs{
						rawSubscriptionID: "99999999-0000-0000-0000-000000000000",
					},
				},
				expectedErr: errors.New("--deployment-dir must be specified"),
			},
			{
				// empty auth args: subscription id validation must fail
				uc: &dcosUpgradeCmd{
					resourceGroupName:   "test",
					deploymentDirectory: "_output/mydir",
					upgradeVersion:      "1.9.0",
					location:            "southcentralus",
					sshPrivateKeyPath:   privKey.Name(),
					authArgs:            authArgs{},
				},
				expectedErr:       errors.New("--subscription-id is required (and must be a valid UUID)"),
				hideLocalAzConfig: true,
			},
			{
				// no ssh key path: validate defaults it to
				// <deployment-dir>/id_rsa, which does not exist
				uc: &dcosUpgradeCmd{
					resourceGroupName:   "test",
					deploymentDirectory: "_output/mydir",
					upgradeVersion:      "1.9.0",
					location:            "southcentralus",
					authArgs:            authArgs{},
				},
				expectedErr: errors.New("ssh-private-key-path must be specified: open _output/mydir/id_rsa: no such file or directory"),
			},
			{
				// fully specified input: validation must succeed
				uc: &dcosUpgradeCmd{
					resourceGroupName:   "test",
					deploymentDirectory: "_output/mydir",
					upgradeVersion:      "1.9.0",
					location:            "southcentralus",
					sshPrivateKeyPath:   privKey.Name(),
					authArgs: authArgs{
						rawSubscriptionID:   "99999999-0000-0000-0000-000000000000",
						RawAzureEnvironment: "AzurePublicCloud",
						AuthMethod:          "device",
					},
				},
				expectedErr: nil,
			},
		}
		for _, c := range cases {
			if c.hideLocalAzConfig {
				// Temporarily unset HOME env var so local subscription won't override test config
				home := os.Getenv("HOME")
				os.Setenv("HOME", "")
				err = c.uc.validate(r)
				os.Setenv("HOME", home)
			} else {
				err = c.uc.validate(r)
			}
			// Compare error text exactly when an error is expected;
			// otherwise both actual and expected must be nil.
			if c.expectedErr != nil && err != nil {
				Expect(err.Error()).To(Equal(c.expectedErr.Error()))
			} else {
				Expect(err).To(BeNil())
				Expect(c.expectedErr).To(BeNil())
			}
		}
	})
})

Просмотреть файл

@ -24,8 +24,8 @@ import (
const (
rootName = "aks-engine"
rootShortDescription = "AKS-Engine deploys and manages container orchestrators in Azure"
rootLongDescription = "AKS-Engine deploys and manages Kubernetes, Swarm Mode, and DC/OS clusters in Azure"
rootShortDescription = "AKS-Engine deploys and manages Kubernetes clusters in Azure"
rootLongDescription = "AKS-Engine deploys and manages Kubernetes clusters in Azure"
)
var (
@ -64,7 +64,6 @@ func NewRootCmd() *cobra.Command {
rootCmd.AddCommand(newOrchestratorsCmd())
rootCmd.AddCommand(newUpgradeCmd())
rootCmd.AddCommand(newScaleCmd())
rootCmd.AddCommand(newDcosUpgradeCmd())
rootCmd.AddCommand(getCompletionCmd(rootCmd))
return rootCmd

Просмотреть файл

@ -16,7 +16,7 @@ func TestNewRootCmd(t *testing.T) {
if output.Use != rootName || output.Short != rootShortDescription || output.Long != rootLongDescription {
t.Fatalf("root command should have use %s equal %s, short %s equal %s and long %s equal to %s", output.Use, rootName, output.Short, rootShortDescription, output.Long, rootLongDescription)
}
expectedCommands := []*cobra.Command{getCompletionCmd(output), newDcosUpgradeCmd(), newDeployCmd(), newGenerateCmd(), newOrchestratorsCmd(), newScaleCmd(), newUpgradeCmd(), newVersionCmd()}
expectedCommands := []*cobra.Command{getCompletionCmd(output), newDeployCmd(), newGenerateCmd(), newOrchestratorsCmd(), newScaleCmd(), newUpgradeCmd(), newVersionCmd()}
rc := output.Commands()
for i, c := range expectedCommands {
if rc[i].Use != c.Use {

Просмотреть файл

@ -381,13 +381,6 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
if sc.agentPool.IsAvailabilitySets() {
addValue(parametersJSON, fmt.Sprintf("%sOffset", sc.agentPool.Name), highestUsedIndex+1)
}
case api.Swarm:
case api.SwarmMode:
case api.DCOS:
if sc.agentPool.IsAvailabilitySets() {
return errors.Errorf("scaling isn't supported for orchestrator %q, with availability sets", orchestratorInfo.OrchestratorType)
}
transformer.NormalizeForVMSSScaling(sc.logger, templateJSON)
}
random := rand.New(rand.NewSource(time.Now().UnixNano()))

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Builds Docker Enabled Clusters
# Microsoft Azure Kubernetes Engine - Builds Kubernetes Clusters
## Overview
@ -8,14 +8,11 @@ This cluster definition examples demonstrate how to create a customized Docker E
* [AKS Engine](acsengine.md) - shows you how to build and use the AKS engine to generate custom Docker enabled container clusters
* [Cluster Definition](clusterdefinition.md) - describes the components of the cluster definition file
* [DC/OS Walkthrough](dcos.md) - shows how to create a DC/OS enabled Docker cluster on Azure
* [Kubernetes Walkthrough](kubernetes.md) - shows how to create a Kubernetes enabled Docker cluster on Azure
* [Kubernetes Windows Walkthrough](kubernetes/windows.md) - shows how to create a hybrid Kubernetes Windows enabled Docker cluster on Azure.
* [Kubernetes with GPU support Walkthrough](kubernetes/gpu.md) - shows how to create a Kubernetes cluster with GPU support.
* [Kubernetes AAD integration Walkthrough](kubernetes/aad.md) - shows how to create a Kubernetes cluster with AAD as authentication provider.
* [Kubernetes Monitoring Walkthrough](kubernetes/monitoring.md) - shows how to set up monitoring of your Kubernetes cluster
* [Swarm Walkthrough](swarm.md) - shows how to create a Swarm enabled Docker cluster on Azure
* [Swarm Mode Walkthrough](swarmmode.md) - shows how to create a Swarm Mode cluster on Azure
* [Custom VNET](../examples/vnet) - shows how to use a custom VNET
* [Attached Disks](../examples/disks-storageaccount) - shows how to attach up to 4 disks per node
* [Managed Disks](../examples/disks-managed) (under private preview) - shows how to use managed disks

Просмотреть файл

@ -1,6 +1,6 @@
# Microsoft Azure Container Service Engine
# Microsoft Azure Kubernetes Engine
The Azure Container Service Engine (`aks-engine`) generates ARM (Azure Resource Manager) templates for Docker enabled clusters on Microsoft Azure with your choice of DCOS, [Kubernetes](kubernetes/deploy.md), or Swarm orchestrators. The input to aks-engine is a cluster definition file which describes the desired cluster, including orchestrator, features, and agents. The structure of the input files is very similar to the public API for Azure Container Service.
The Azure Kubernetes Engine (`aks-engine`) generates ARM (Azure Resource Manager) templates for Kubernetes clusters on Microsoft Azure. The input to aks-engine is a cluster definition file which describes the desired cluster, including orchestrator, features, and agents. The structure of the input files is very similar to the public API for Azure Kubernetes Service.
<a href="#install-aks-engine"></a>

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Cluster Definition
# Microsoft Azure Kubernetes Engine - Cluster Definition
## Cluster Definitions for apiVersion "vlabs"
@ -22,10 +22,7 @@ Here are the cluster definitions for apiVersion "vlabs":
Here are the valid values for the orchestrator types:
1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). [Older releases of DCOS 1.8 may be specified](../examples/dcos-releases).
2. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md).
3. `Swarm` - this represents the [Swarm orchestrator](swarm.md).
4. `Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md).
1. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md).
To learn more about supported orchestrators and versions, run the orchestrators command:
@ -529,7 +526,7 @@ We consider `kubeletConfig`, `controllerManagerConfig`, `apiServerConfig`, and `
| vnetCidr | no | Specifies the VNET cidr when using a custom VNET ([bring your own VNET examples](../examples/vnet)). This VNET cidr should include both the master and the agent subnets. |
| imageReference.name | no | The name of the Linux OS image. Needs to be used in conjunction with resourceGroup, below |
| imageReference.resourceGroup | no | Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above |
| distro | no | Specifies the masters' Linux distribution. Currently supported values are: `ubuntu`, `aks`, `aks-docker-engine` and `coreos` (CoreOS support is currently experimental - [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json)). For Azure Public Cloud, defaults to `aks` if undefined, unless GPU nodes are present, in which case it will default to `aks-docker-engine`. For Sovereign Clouds, the default is `ubuntu`. `aks` is a custom image based on `ubuntu` that comes with pre-installed software necessary for Kubernetes deployments (Azure Public Cloud only for now). **NOTE**: GPU nodes are currently incompatible with the default Moby container runtime provided in the `aks` image. Clusters containing GPU nodes will be set to use the `aks-docker-engine` distro which is functionally equivalent to `aks` with the exception of the docker distribution (see [GPU support Walkthrough](kubernetes/gpu.md) for details). Currently supported OS and orchestrator configurations -- `ubuntu` and `aks`: DCOS, Docker Swarm, Kubernetes; `coreos`: Kubernetes. [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json) |
| distro | no | Specifies the masters' Linux distribution. Currently supported values are: `ubuntu`, `aks`, `aks-docker-engine` and `coreos` (CoreOS support is currently experimental - [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json)). For Azure Public Cloud, defaults to `aks` if undefined, unless GPU nodes are present, in which case it will default to `aks-docker-engine`. For Sovereign Clouds, the default is `ubuntu`. `aks` is a custom image based on `ubuntu` that comes with pre-installed software necessary for Kubernetes deployments (Azure Public Cloud only for now). **NOTE**: GPU nodes are currently incompatible with the default Moby container runtime provided in the `aks` image. Clusters containing GPU nodes will be set to use the `aks-docker-engine` distro which is functionally equivalent to `aks` with the exception of the docker distribution (see [GPU support Walkthrough](kubernetes/gpu.md) for details). Currently supported OS and orchestrator configurations -- `ubuntu` and `aks`: Kubernetes; `coreos`: Kubernetes. [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json) |
| customFiles | no | The custom files to be provisioned to the master nodes. Defined as an array of json objects with each defined as `"source":"absolute-local-path", "dest":"absolute-path-on-masternodes"`.[See examples](../examples/customfiles) |
| availabilityProfile | no | Supported values are `AvailabilitySet` (default) and `VirtualMachineScaleSets` (still under development: upgrade not supported; requires Kubernetes clusters version 1.10+ and agent pool availabilityProfile must also be `VirtualMachineScaleSets`). When MasterProfile is using `VirtualMachineScaleSets`, to SSH into a master node, you need to use `ssh -p 50001` instead of port 22. |
| agentVnetSubnetId | only required when using custom VNET and when MasterProfile is using `VirtualMachineScaleSets` | Specifies the Id of an alternate VNET subnet for all the agent pool nodes. The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet)). When MasterProfile is using `VirtualMachineScaleSets`, this value should be the subnetId of the subnet for all agent pool nodes. |
@ -558,7 +555,7 @@ A cluster can have 0 to 12 agent pool profiles. Agent Pool Profiles are used for
| imageReference.name | no | The name of a a Linux OS image. Needs to be used in conjunction with resourceGroup, below |
| imageReference.resourceGroup | no | Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above |
| osType | no | Specifies the agent pool's Operating System. Supported values are `Windows` and `Linux`. Defaults to `Linux` |
| distro | no | Specifies the agent pool's Linux distribution. Currently supported values are: `ubuntu`, `aks`, `aks-docker-engine` and `coreos` (CoreOS support is currently experimental - [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json)). For Azure Public Cloud, defaults to `aks` if undefined, unless GPU nodes are present, in which case it will default to `aks-docker-engine`. For Sovereign Clouds, the default is `ubuntu`. `aks` is a custom image based on `ubuntu` that comes with pre-installed software necessary for Kubernetes deployments (Azure Public Cloud only for now). **NOTE**: GPU nodes are currently incompatible with the default Moby container runtime provided in the `aks` image. Clusters containing GPU nodes will be set to use the `aks-docker-engine` distro which is functionally equivalent to `aks` with the exception of the docker distribution (see [GPU support Walkthrough](kubernetes/gpu.md) for details). Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `coreos`: Kubernetes. [Example of CoreOS Master with Windows and Linux (CoreOS and Ubuntu) Agents](../examples/coreos/kubernetes-coreos-hybrid.json) |
| distro | no | Specifies the agent pool's Linux distribution. Currently supported values are: `ubuntu`, `aks`, `aks-docker-engine` and `coreos` (CoreOS support is currently experimental - [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json)). For Azure Public Cloud, defaults to `aks` if undefined, unless GPU nodes are present, in which case it will default to `aks-docker-engine`. For Sovereign Clouds, the default is `ubuntu`. `aks` is a custom image based on `ubuntu` that comes with pre-installed software necessary for Kubernetes deployments (Azure Public Cloud only for now). **NOTE**: GPU nodes are currently incompatible with the default Moby container runtime provided in the `aks` image. Clusters containing GPU nodes will be set to use the `aks-docker-engine` distro which is functionally equivalent to `aks` with the exception of the docker distribution (see [GPU support Walkthrough](kubernetes/gpu.md) for details). Currently supported OS and orchestrator configurations -- `ubuntu`: Kubernetes; `coreos`: Kubernetes. [Example of CoreOS Master with Windows and Linux (CoreOS and Ubuntu) Agents](../examples/coreos/kubernetes-coreos-hybrid.json) |
| acceleratedNetworkingEnabled | no | Use [Azure Accelerated Networking](https://azure.microsoft.com/en-us/blog/maximize-your-vm-s-performance-with-accelerated-networking-now-generally-available-for-both-windows-and-linux/) feature for Linux agents (You must select a VM SKU that supports Accelerated Networking). Defaults to `true` if the VM SKU selected supports Accelerated Networking |
| acceleratedNetworkingEnabledWindows | no | Use [Azure Accelerated Networking](https://azure.microsoft.com/en-us/blog/maximize-your-vm-s-performance-with-accelerated-networking-now-generally-available-for-both-windows-and-linux/) feature for Windows agents (You must select a VM SKU that supports Accelerated Networking). Defaults to `false` |
@ -662,7 +659,7 @@ format for `keyvaultSecretRef.vaultId`, can be obtained in cli, or found in the
## Cluster Definitions for apiVersion "2016-03-30"
Here are the cluster definitions for apiVersion "2016-03-30". This matches the api version of the Azure Container Service Engine.
Here are the cluster definitions for apiVersion "2016-03-30". This matches the api version of the Azure Kubernetes Engine.
### apiVersion

Просмотреть файл

@ -90,7 +90,7 @@
## "2016-03-30"版本apiVersion的集群定义文件
以下是"2016-03-30"版本apiVersion的集群定义文件这个版本的api和Azure Container Service Engine的一致。
以下是"2016-03-30"版本apiVersion的集群定义文件这个版本的api和Azure Kubernetes Engine的一致。
### apiVersion

Просмотреть файл

@ -1,210 +0,0 @@
# Porting a new DC/OS version to AKS Engine
## 1. Locate the official ARM Template
Go to `https://dcos.io/docs/X.X/administration/installing/cloud/azure/`, where `X.X` should be replaced by the version you are looking to port.
In the documentation, you will find the link to the ARM templates you are looking for.
The latest stable templates should be at `https://downloads.dcos.io/dcos/stable/azure.html`
Early Access at: `https://downloads.dcos.io/dcos/EarlyAccess/azure.html`
Etc.
## 2. Find the package GUIDs
Following the previous step, you should now have 3 ARM templates (1, 3 and 5 masters variants).
We now need to find the package GUID of each variant.
In each template you should find a string that looks like: `dcos-config--setup_<Some GUID>`, this GUID is what we are looking for.
Extract the GUIDs from the 3 different templates, and add them in `engine.go/getPackageGUID` for your specific DC/OS version.
In DC/OS 1.11 and greater, you should find the contents for `/etc/mesosphere/setup-flags/repository-url` in each of the 3 ARM templates (1, 3 and 5 masters variants).
Extract the GUIDs from the 3 different templates, and add them in `dcos/dcoscustomdataXXX.t` for your specific DC/OS version.
## 3. Extract the cloud-config data from the template
In one of the template (no matter which one), grab the data from the MasterVM.osProfile.customData.
If you remove the concat operation, you should end up with a big string of unescaped JSON.
Unescape it (for example using this [online tool](http://www.freeformatter.com/javascript-escape.html#ad-output)), and convert it to yaml (you can use [json2yaml](https://www.json2yaml.com/)).
You should now have a clean yaml.
## 4. Create and customize the custom data file.
under the `parts` directory, create a new file called `dcoscustomdataXXX.t` replacing `XXX` by the correct version number.
Paste the yaml from the previous step inside.
In the new file, under the `runcmd` section you should find 4 successive `curl` calls downloading some `.deb` packages followed by a bash script installing each one of them. This is handled by `parts\dcos\dcosprovision.sh` in AKS Engine, so make sure the dependencies didn't change and replace the `curl` and `bash` calls by a link to the script.
For example, in DC/OS 1.9:
```yaml
- curl -fLsSv --retry 20 -Y 100000 -y 60 -o /var/tmp/1.deb https://az837203.vo.msecnd.net/dcos-deps/libipset3_6.29-1_amd64.deb
- curl -fLsSv --retry 20 -Y 100000 -y 60 -o /var/tmp/2.deb https://az837203.vo.msecnd.net/dcos-deps/ipset_6.29-1_amd64.deb
- curl -fLsSv --retry 20 -Y 100000 -y 60 -o /var/tmp/3.deb https://az837203.vo.msecnd.net/dcos-deps/unzip_6.0-20ubuntu1_amd64.deb
- curl -fLsSv --retry 20 -Y 100000 -y 60 -o /var/tmp/4.deb https://az837203.vo.msecnd.net/dcos-deps/libltdl7_2.4.6-0.1_amd64.deb
- sed -i "s/^Port 22$/Port 22\nPort 2222/1" /etc/ssh/sshd_config
- service ssh restart
- bash -c "try=1;until dpkg -i /var/tmp/{1,2,3,4}.deb || ((try>9));do echo retry \$((try++));sleep
\$((try*try));done"
```
becomes
```yaml
- /opt/azure/containers/provision.sh
```
Additional modifications under `runcmd`:
* Replace every occurrence of the Package GUID (that we found in step 2) by `DCOSGUID`.
* the `content` of the cmd with path `/etc/mesosphere/setup-flags/late-config.yaml` should be modified to accept AKS Engine bindings instead of variable where needed (look at a previous custom data file for reference).
* At the very end of the file, replace
```yaml
- content: ''
path: "/etc/mesosphere/roles/master"
- content: ''
path: "/etc/mesosphere/roles/azure_master"
- content: ''
path: "/etc/mesosphere/roles/azure"
```
by
```yaml
- content: ''
path: /etc/mesosphere/roles/azure
- content: 'PROVISION_STR'
path: "/opt/azure/containers/provision.sh"
permissions: "0744"
owner: "root"
```
## 5. Adding the support of the new version inside to .go files
### pkg/engine/defaults.go
- Around line 30, add your `DCOSXXXBootstrapDownloadURL` variable (replace XXX with the version number), inside the `fmt.Sprintf()` function replace the second and third parameters with the version `EA, Stable, Beta, ...` and the commit hash.
> You can find the commit hash from the https://downloads.dcos.io/dcos/stable/X.XX.X/azure.html page.
Example for version 1.10
[https://downloads.dcos.io/dcos/stable/1.10.0/azure.html](https://downloads.dcos.io/dcos/stable/1.10.0/azure.html)
```
DCOS110BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, "stable", "e38ab2aa282077c8eb7bf103c6fff7b0f08db1a4"),
```
### pkg/engine/engine.go
- Around line 39, add `dcosCustomDataXXX = "dcos/dcoscustomdataXXX.t"` variable
Example for version 1.10:
```
dcosCustomData110 = "dcos/dcoscustomdata110.t"
```
- Around line 578, add the code case block for your version.
Example for version 1.10:
```
case api.DCOSRelease1Dot10:
dcosBootstrapURL = cloudSpecConfig.DCOSSpecConfig.DCOS110BootstrapDownloadURL
```
- Around line 1170, add your api case version.
Example for version 1.10:
```
case api.DCOSRelease1Dot10:
switch masterCount {
case 1:
return "c4ec6210f396b8e435177b82e3280a2cef0ce721"
case 3:
return "08197947cb57d479eddb077a429fa15c139d7d20"
case 5:
return "f286ad9d3641da5abb622e4a8781f73ecd8492fa"
}
```
> In the return function, paste the package GUID from the step 2 for each cases.
- Around line 1558, add your api case version.
Example for version 1.10:
```
case api.DCOSRelease1Dot10:
yamlFilename = dcosCustomData110
```
### pkg/engine/types.go
- Around line 40, add the type for your new version.
Example for version 1.10 :
```
DCOS110BootstrapDownloadURL string
```
### pkg/api/common/const.go
- Around line 59, declare a new const with your `DCOSRelease`
Example for version 1.10 :
```
// DCOSRelease1Dot10 is the major.minor string prefix for 1.10 versions of DCOS
DCOSRelease1Dot10 string = "1.10"
```
- Around line 72, add your `DCOSReleaseToVersion` in the map
Example for version 1.10 :
```
DCOSRelease1Dot10: "1.10.0",
```
### pkg/api/const.go
- Around line 76, add the const for your DCOS release
Example for version 1.10 :
```
// DCOSRelease1Dot10 is the major.minor string prefix for 1.10 versions of DCOS
DCOSRelease1Dot10 string = "1.10"
```
### pkg/api/convertertoapi.go
- Around line 572 and 601 (two places) add the case for your release
Example for version 1.10 :
```
case DCOSRelease1Dot10, DCOSRelease1Dot9, DCOSRelease1Dot8:
```
```
case DCOSRelease1Dot10, DCOSRelease1Dot9, DCOSRelease1Dot8, DCOSRelease1Dot7:
```
### pkg/api/v20170701/validate.go
- Around line 33, add the case for your release
Example for version 1.10 :
```
case common.DCOSRelease1Dot10:
```
### pkg/api/vlabs/validate.go
- Around line 37, add the case for your release
Example for version 1.10 :
```
case common.DCOSRelease1Dot10:
```
## Conclusion
We encourage you to look at previous PRs as examples, listed below:
- [Adding DC/OS 1.10 stable version support #1439](https://github.com/Azure/aks-engine/pull/1439/files)
- [setting dcos test to 1.9 (current default)](https://github.com/Azure/aks-engine/pull/1443)
- [[DC/OS] Set 1.9 as default DCOS version and upgrade Packages](https://github.com/Azure/aks-engine/pull/457)
- [[DC/OS] Add support for DCOS 1.9 EA](https://github.com/Azure/aks-engine/pull/360)
- [DCOS 1.8.8 Support](https://github.com/Azure/aks-engine/pull/278)

Просмотреть файл

@ -1,147 +0,0 @@
# Microsoft Azure Container Service Engine - DC/OS Walkthrough
### `Note:`
Support for DC/OS `1.11` and later continues in the forked project [dcos-engine](https://github.com/Azure/dcos-engine).
## Deployment
Here are the steps to deploy a simple DC/OS cluster:
1. [install aks-engine](acsengine.md#downloading-and-building-aks-engine)
2. [generate your ssh key](ssh.md#ssh-key-generation)
3. edit the [DC/OS example](../examples/dcos.json) and fill in the blank strings
4. [generate the template](acsengine.md#generate-templates)
5. [deploy the output azuredeploy.json and azuredeploy.parameters.json](acsengine.md#deploy-templates)
## Walkthrough
Once your DC/OS cluster has deployed you will have a resource group containing:
1. a set of 1,3, or 5 masters in a master specific availability set. Each master's SSH can be accessed via the public dns address at ports 2200..2204
2. a set of public agents in an Virtual Machine Scale Set (VMSS). The agent VMs can be accessed through a master. See [agent forwarding](ssh.md#key-management-and-agent-forwarding-with-windows-pageant) for an example of how to do this.
3. a set of private agents in an Virtual Machine Scale Set (VMSS).
The following image shows the architecture of a container service cluster with 3 masters, and 6 agents:
![Image of DC/OS container service on azure](images/dcos.png)
In the image above, you can see the following parts:
1. **Admin Router on port 80** - The admin router enables you to access all DC/OS services. For example, if you create an SSH tunnel to port 80 you can access the services on the following urls, you can see the DC/OS dashboard by browsing to <http://localhost/>
2. **Masters** - Masters run the DC/OS processes that schedule and manage workloads on the agent nodes.
3. **Public Agents** - Public agents, deployed in a VM scale set, are publicly accessible through the Azure Load Balancer to ports 80, 443, and 8080. Jobs can be assigned to public agents using role `slave_public`.
4. **Private Agents** - Private agents, deployed in a VM scale set, are not publicly accessible. Workloads are scheduled to private agents by default.
5. **Docker on port 2375** - The Docker engine runs containerized workloads and each Agent runs the Docker engine. DC/OS runs Docker workloads, and examples on how to do this are provided in the Marathon walkthrough sections of this readme.
All VMs are in the same VNET where the masters are on private subnet 172.16.0.0/24 and the agents are on the private subnet, 10.0.0.0/8, and fully accessible to each other.
## Create your First Three DC/OS Services: hello-world, Docker app, and Docker web app
This walk through is inspired by the wonderful digital ocean tutorial: https://www.digitalocean.com/community/tutorials/how-to-configure-a-production-ready-mesosphere-cluster-on-ubuntu-14-04. After completing this walkthrough you will know how to:
* access DC/OS dashboard for cluster health,
* deploy a simple hello-world app,
* deploy a simple docker app,
* look at logs of your workload,
* and deploy a simple web app publicly available to the world.
1. After successfully deploying the template write down the two output master and agent FQDNs (Fully Qualified Domain Name).
1. If using Powershell or CLI, the output parameters are in the OutputsString section named 'agentFQDN' and 'masterFQDN'
2. If using Portal, to get the output you need to:
1. navigate to "resource group"
2. click on the resource group you just created
3. then click on "Succeeded" under *last deployment*
4. then click on the "Microsoft.Template"
5. now you can copy the output FQDNs and sample SSH commands
![Image of docker scaling](images/findingoutputs.png)
2. Create an [SSH tunnel to port 80](ssh.md#create-port-80-tunnel-to-the-master) on the master FQDN.
3. browse to the DC/OS UI <http://localhost/>. This displays the main DC/OS dashboard:
4. The front page shows the DC/OS Dashboard:
1. Scroll down to see your CPU, Memory and Disk Allocation. This also shows you services, node, and component health.
![Image of the DC/OS dashboard](images/dcosdashboard.png)
2. On the left side click "Services"
![Image of DC/OS services on Azure](images/dcosservices.png)
3. start a long running service
1. click "Deploy Service"
2. type "myfirstapp" for the id
3. type `/bin/bash -c 'for i in {1..5}; do echo MyFirstApp $i; sleep 1; done'` for the command
4. scroll to bottom and click Deploy
![Image of Deploy New Service dialog](images/deployfirstapp.png)
5. you will notice the new app change state from not running to running
![Image of the new application status](images/dcos-newapp-status.png)
6. To run a Docker app browse back to Services, and click "Deploy Service" and set id to "/helloworld":
![Image of setting up docker application dialog 1](images/dcos-docker-helloworld1.png)
7. Click "Container Settings", type `hello-world` for image and click "Deploy"
![Image of setting up docker application dialog 2](images/dcos-docker-helloworld2.png)
8. Once deployed, click on the "helloworld" service, and you will see all the finished tasks:
![Image of helloworld tasks](images/dcos-docker-helloworld-tasks.png)
9. Click on the most recent finished tasks, and click "Logs" and you will see the "Hello from Docker!" message:
![Image of helloworld tasks](images/dcos-docker-helloworld-logs.png)
10. The next step is to deploy a docker web app accessible to the world. The public agents have a load balancer exposing port 80, 443, and 8080. On the DC/OS page, browse back to Services, and click "Deploy Service" and set id to "/simpleweb":
![Image of docker web app](images/dcos-simpleweb1.png)
11. On left, click "Container Settings" and container image "yeasy/simple-web". This is the image that will be downloaded from DockerHub
![Image of docker web app](images/dcos-simpleweb2.png)
12. Next on left, click "Network" and type in port 80. This is how you expose port 80 to the world.
![Image of docker web app](images/dcos-simpleweb3.png)
13. Next on left, click "Optional" and set role type "slave_public". This ensures the Docker web app is running on the public agent.
![Image of docker web app](images/dcos-simpleweb4.png)
14. Finally click deploy and watch the web app deploy. Once it goes to running state, open the FQDN retrieved in step 1 during deployment, and you will see the web app.
![Image of web app](images/simpleweb.png)
# DCOS upgrade
Starting from DC/OS 1.11, aks-engine deploys a bootstrap node as part of DC/OS cluster. This enables upgrade operation on an existing cluster.
To start the upgrade, run this following command:
```
aks-engine dcos-upgrade \
--subscription-id <Azure subscription ID> \
--resource-group <the resource group the cluster was deployed in> \
--location <the region the cluster was deployed in> \
--upgrade-version <desired DC/OS version> \
--deployment-dir <deployment directory produced by "aks-engine generate"> \
--ssh-private-key-path <path to ssh private key used in deployment>
```
The upgrade is an idempotent operation. If failed, it could be re-run and will pick the execution from the last successful checkpoint.
# Learning More
Here are recommended links to learn more about DC/OS:
1. [Azure DC/OS documentation](https://azure.microsoft.com/en-us/documentation/services/container-service/)
## DC/OS Community Documentation
1. [DC/OS Overview](https://dcos.io/docs/1.8/overview/) - provides overview of DC/OS, Architecture, Features, and Concepts.
2. [DC/OS Tutorials](https://docs.mesosphere.com/1.8/usage/tutorials/) - provides various tutorials for DC/OS.

Просмотреть файл

@ -1,6 +1,6 @@
# Acs-engine
The Azure Container Service Engine (aks-engine) is a command line tool that generates ARM (Azure Resource Manager) templates in order for one to deploy container-based clusters (like Kubernetes , DCOS, Openshift, Docker swarm) on the Azure platform.
The Azure Kubernetes Engine (aks-engine) is a command line tool that generates ARM (Azure Resource Manager) templates in order for one to deploy container-based clusters (like Kubernetes , DCOS, Openshift, Docker swarm) on the Azure platform.
This design document provides a brief and high-level overview of what aks-engine does internally to achieve deployment of containerized clusters. The scope of this document will be limited to the execution of aks-engine when creating Kubernetes clusters.

Просмотреть файл

@ -81,8 +81,8 @@ Unit tests may be run locally via `make test`.
### End-to-end Tests
End-to-end tests for the DCOS and Kubernetes orchestrators may be run
via `make test-{dcos,kubernetes}`. The test process can optionally
End-to-end tests for Kubernetes may be run
via `make test-kubernetes`. The test process can optionally
deploy and tear down a cluster as part of the test (this is enabled by default).
You'll need access to an Azure subscription, as well as at least the following
environment variables to be set:

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Extensions
# Microsoft Azure Kubernetes Engine - Extensions
Extensions in aks-engine provide an easy way for aks-engine users to add pre-packaged functionality into their cluster. For example, an extension could configure a monitoring solution on an AKS cluster. The user would not need to know the details of how to install the monitoring solution. Rather, the user would simply add the extension into the extensionProfiles section of the template.

Двоичные данные
docs/images/dcos-add-file-json.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 32 KiB

Двоичные данные
docs/images/dcos-create-service-from-reg.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 35 KiB

Двоичные данные
docs/images/dcos-create-service-json.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 32 KiB

Двоичные данные
docs/images/dcos-create-service.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 35 KiB

Двоичные данные
docs/images/dcos-docker-helloworld-logs.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 164 KiB

Двоичные данные
docs/images/dcos-docker-helloworld-tasks.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 59 KiB

Двоичные данные
docs/images/dcos-docker-helloworld1.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 44 KiB

Двоичные данные
docs/images/dcos-docker-helloworld2.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 58 KiB

Двоичные данные
docs/images/dcos-newapp-status.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 50 KiB

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 25 KiB

Двоичные данные
docs/images/dcos-running-service-from-reg.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 14 KiB

Двоичные данные
docs/images/dcos-simpleweb1.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 40 KiB

Двоичные данные
docs/images/dcos-simpleweb2.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 54 KiB

Двоичные данные
docs/images/dcos-simpleweb3.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 57 KiB

Двоичные данные
docs/images/dcos-simpleweb4.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 37 KiB

Двоичные данные
docs/images/dcos.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 19 KiB

Двоичные данные
docs/images/dcosdashboard.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 46 KiB

Двоичные данные
docs/images/dcosservices.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 51 KiB

Двоичные данные
docs/images/swarm.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 15 KiB

Двоичные данные
docs/images/swarmbrowser.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 22 KiB

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 78 KiB

Двоичные данные
docs/images/swarmmode-hybrid-docker-node-ls.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 116 KiB

Двоичные данные
docs/images/swarmmode-hybrid-linux-agents.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 37 KiB

Двоичные данные
docs/images/swarmmode-hybrid-service-ls.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 56 KiB

Двоичные данные
docs/images/swarmmode-hybrid-stack-deploy.png

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 57 KiB

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Kubernetes
# Microsoft Azure Kubernetes Engine - Kubernetes
* Create a Kubernetes Cluster
* [Linux](kubernetes/deploy.md) - Create your first Linux Kubernetes cluster

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Kubernetes AAD integration Walkthrough
# Microsoft Azure Kubernetes Engine - Kubernetes AAD integration Walkthrough
This walkthrough is to help you get started with Azure Active Directory (AAD) integration with an AKS Engine Kubernetes cluster.

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Using GPUs with Kubernetes
# Microsoft Azure Kubernetes Engine - Using GPUs with Kubernetes
If you created a Kubernetes cluster with one or multiple agent pool(s) whose VM size is `Standard_NC*` or `Standard_NV*` you can schedule GPU workload on your cluster.
The NVIDIA drivers are automatically installed on every GPU agent in your cluster, so you don't need to do that manually, unless you require a specific version of the drivers. Currently, the installed driver is version 396.26.
@ -97,4 +97,4 @@ We specify `nvidia.com/gpu: 1` or `alpha.kubernetes.io/nvidia-gpu: 1` in the res
## Known incompatibility with Moby
GPU nodes are currently incompatible with the default Moby container runtime provided in the default `aks` image. Clusters containing GPU nodes will be set to use Docker Engine instead of Moby.
GPU nodes are currently incompatible with the default Moby container runtime provided in the default `aks` image. Clusters containing GPU nodes will be set to use Docker Engine instead of Moby.

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine
# Microsoft Azure Kubernetes Engine
## Service Principals

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - SSH
# Microsoft Azure Kubernetes Engine - SSH
# SSH Key Management

Просмотреть файл

@ -1,85 +0,0 @@
# Microsoft Azure Container Service Engine - Swarm Walkthrough
## Deployment
Here are the steps to deploy a simple Swarm cluster:
1. [Install aks-engine](acsengine.md#downloading-and-building-aks-engine)
2. [Generate your SSH key](ssh.md#ssh-key-generation)
3. [Edit the Swarm example](../examples/swarm.json) and fill in the blank strings
4. [Generate the template](acsengine.md#generate-templates)
5. [Deploy the output azuredeploy.json and azuredeploy.parameters.json](../docs/acsengine.md#deploy-templates)
## Walkthrough
Once your Swarm cluster has been deployed you will have a resource group containing:
1. a set of 1,3, or 5 masters in a master availability set. Each master's SSH can be accessed via the public dns address at ports 2200..2204. First master's SSH can also be accessed via public dns address on port 22.
2. a set of agents in a VM scale set (VMSS). The agent VMs can be accessed through a master. See [agent forwarding](ssh.md#key-management-and-agent-forwarding-with-windows-pageant) for an example of how to do this.
The following image shows the architecture of a container service cluster with 3 masters, and 3 agents:
![Image of Swarm container service on azure](images/swarm.png)
All VMs are in the same VNET where the masters are on private subnet 172.16.0.0/24 and the agents are on the private subnet, 10.0.0.0/16, and fully accessible to each other.
## Create your First Two Swarm Docker Applications: hello-world, and Docker web app
After completing this walkthrough you will know how to:
* display information from Swarm,
* deploy a simple Docker hello-world app using docker-compose,
* and deploy a simple Docker web app publicly available to the world.
1. After successfully deploying the template write down the two output master and agent FQDNs (Fully Qualified Domain Name).
1. If using Powershell or CLI, the output parameters are the last values printed.
2. If using Portal, to get the output you need to:
1. navigate to "resource group"
2. click on the resource group you just created
3. then click on "Succeeded" under *last deployment*
4. then click on the "Microsoft.Template"
5. now you can copy the output FQDNs and sample SSH commands
![Image of docker scaling](images/findingoutputs.png)
2. SSH to port 2200 of the master FQDN. See [agent forwarding](ssh.md#key-management-and-agent-forwarding-with-windows-pageant) for an example of how to do this.
3. Set the DOCKER_HOST environment variable to `:2375`: e.g. ```export DOCKER_HOST=:2375```
4. Type `docker info` to see the status of the agent nodes.
![Image of docker info](images/dockerinfo.png)
5. Type `docker run -it hello-world` to see the hello-world test app run on one of the agents (the '-it' switches ensure output is displayed on your client)
6. Now let's create a simple web app and expose to the world. Start by using your favorite linux file editor to create a file named `docker-compose.yml` with the following contents:
```
web:
image: "yeasy/simple-web"
ports:
- "80:80"
restart: "always"
```
7. type `docker-compose up -d` to create the simple web server. This will take a few minutes to pull the image
8. once completed, type `docker ps` to see the running image.
![Image of docker ps](images/dockerps.png)
9. in your web browser hit the AGENTFQDN endpoint (**not the master FQDN**) you recorded in step #1 and you should see the following page, with a counter that increases on each refresh.
![Image of the web page](images/swarmbrowser.png)
10. You can now scale the web application. For example, if you have 3 agents, you can type `docker-compose scale web=3`, and this will scale to the rest of your agents. Note that in this example you can only scale up to the number of agents that you have since each container requires port 80, so if you deployed a single agent, you won't be able to scale up. The Azure load balancer will automatically pick up the new containers.
![Image of docker scaling](images/dockercomposescale.png)
# Learning More
Here are recommended links to learn more about Swarm, Docker, and Docker Compose:
1. [Docker](https://docs.docker.com/) - learn more through Docker documentation.
2. [Docker Swarm](https://docs.docker.com/swarm/overview/) - learn more about Docker Swarm.
3. [Docker Compose](https://docs.docker.com/compose/overview/) - Learn more about Docker Compose.

Просмотреть файл

@ -1,89 +0,0 @@
# Microsoft Azure Container Service Engine - Hybrid Swarm Mode Walkthrough
## Deployment
Here are the steps to deploy a Hybrid Swarm Mode cluster:
1. [Install aks-engine](acsengine.md#downloading-and-building-aks-engine)
2. [Generate your ssh key](ssh.md#ssh-key-generation)
3. [Edit the Hybrid Swarm Mode example](../examples/windows/swarmmode-hybrid.json) and fill in the blank strings
4. [Generate the template](acsengine.md#generate-templates)
5. [Deploy the output azuredeploy.json and azuredeploy.parameters.json](../README.md#deploy-templates)
## Walkthrough
After you edit the template with your values and deploy, you should have:
- 3 Linux masters
- 3 Linux agents
- 3 Windows agents (with Windows Server 2016 with Containers),
all in the same Swarm.
SSH into one of the masters (`ssh yourlinuxuser@masterfqdn.yourregion.cloudapp.azure.com -p 220x`, where x is the number of your master instance - 0,1,2 in the default case) and list all nodes: `docker node ls`. The output should be similar to this:
![](images/swarmmode-hybrid-docker-node-ls.png)
> NOTE - if you only see the Linux masters and agents, a working solution is to reimage the Windows agents scale set - that is restoring the VMs to the initial state and restart them. This will reapply all the steps in the installation, [mainly this one that installs the container host and joins the Swarm](https://github.com/Azure/aks-engine/blob/master/parts/swarm/Install-ContainerHost-And-Join-Swarm.ps1).
Now you can inspect one of the Windows agents with `docker node inspect <hostname or id of node>`:
![](images/swarmmode-hybrid-docker-node-inspect.png)
## Limitations
As the [Windows Server Containers documentation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/swarm-mode#limitations) states, at the moment there are a few limitations to Swarm Mode with Windows hosts.
The most important is that the routing mesh is not available for Windows Server at the moment, but you can publish ports on the host (`docker service create --publish mode=host`)
## Create your first hybrid deployment on the Swarm
Now that you have a functional Hybrid Swarm Mode cluster in Azure, it is time to deploy a hybrid application: a Python front-end that will run on Linux, with a Redis data store on Windows. Here is the stack file for our services:
```
version: "3"
services:
redis:
image: redis:3.2.100-nanoserver
deploy:
placement:
constraints: [node.platform.os == windows]
python-web:
image: radumatei/python-web
ports:
- "80:80"
```
> [The Python application can be found here](https://github.com/microsoft-dx/docker-lab/tree/master/apps/python-redis) and is very similar to the [Docker Compose one from the Official Docker Docs](https://docs.docker.com/compose/gettingstarted/)
On one of the masters create the file above and name it `hybrid-stack.yml`. In order to deploy it, execute: `docker stack deploy --compose-file hybrid-stack.yml python-redis`:
![](images/swarmmode-hybrid-stack-deploy.png)
Now if you execute `docker service ls` you should see the two services:
![](images/swarmmode-hybrid-service-ls.png)
[Since the Nanoserver Redis image](https://hub.docker.com/r/library/redis/) is around 340 MB, it will take a little to pull it, then start a container.
After the container started, we can go to the Linux agents FQDN (the one you setup when editing the `swarmmode-hybrid.json` file) and access it from a browser:
> Note that on the first run it might take a little while
![](images/swarmmode-hybrid-linux-agents.png)
## Learning more
Here are recommended links to learn more about Swarm Mode, Docker, and Docker Compose:
1. [Docker](https://docs.docker.com/) - learn more through Docker documentation.
2. [Docker Swarm Mode](https://docs.docker.com/engine/swarm/) - learn more about Docker Swarm Mode.
For hybrid swarms, you can check the following resources:
3. Use Docker Swarm to run a Windows+Linux containerized application - [Part 1](https://www.youtube.com/watch?v=ZfMV5JmkWCY&t=170s), [Part 2](https://www.youtube.com/watch?v=VbzwKbcC_Mg&t=406s), [Part 3](https://www.youtube.com/watch?v=I9oDD78E_1E&t=354s)
4. [The Hybrid Swarm: Running Windows and Linux Apps in one Docker Cluster - Talk by Elton Stoneman, Docker Captain](https://channel9.msdn.com/Events/DXPortugal/OSCAMP-Open-Source-Software-powered-by-Bright-Pixel/The-Hybrid-Swarm-Running-Windows-and-Linux-Apps-in-one-Docker-Cluster)

Просмотреть файл

@ -1,73 +0,0 @@
# Microsoft Azure Container Service Engine - Swarm Mode Walkthrough
## Deployment
Here are the steps to deploy a simple Swarm Mode cluster:
1. [Install aks-engine](acsengine.md#downloading-and-building-aks-engine)
2. [Generate your ssh key](ssh.md#ssh-key-generation)
3. [Edit the Swarm Mode example](../examples/swarmmode.json) and fill in the blank strings
4. [Generate the template](acsengine.md#generate-templates)
5. [Deploy the output azuredeploy.json and azuredeploy.parameters.json](../docs/acsengine.md#deploy-templates)
## Walkthrough
Once your Swarm Mode cluster has been deployed you will have a resource group containing:
1. a set of 1,3, or 5 masters in a master availability set. Each master's SSH can be accessed via the public dns address at ports 2200..2204. First master's SSH can also be accessed via public dns address on port 22.
2. a set of agents in a VM scale set (VMSS). The agent VMs can be accessed through a master. See [agent forwarding](ssh.md#key-management-and-agent-forwarding-with-windows-pageant) for an example of how to do this.
The following image shows the architecture of a container service cluster with 3 masters, and 3 agents:
![Image of Swarm container service on azure](images/swarm.png)
All VMs are in the same VNET where the masters are on private subnet 172.16.0.0/24 and the agents are on the private subnet, 10.0.0.0/16, and fully accessible to each other.
## Create your First Two Swarm Mode Docker services: hello-world, and Docker web app
After completing this walkthrough you will know how to:
* display information from Swarm Mode,
* deploy a simple Docker hello-world app using docker-compose,
* and deploy a simple Docker web app publicly available to the world.
1. After successfully deploying the template write down the two output master and agent FQDNs (Fully Qualified Domain Name).
1. If using Powershell or CLI, the output parameters are the last values printed.
2. If using Portal, to get the output you need to:
1. navigate to "resource group"
2. click on the resource group you just created
3. then click on "Succeeded" under *last deployment*
4. then click on the "Microsoft.Template"
5. now you can copy the output FQDNs and sample SSH commands
![Image of docker scaling](images/findingoutputs.png)
2. SSH to port 2200 of the master FQDN (or first master's SSH can also be accessed via public dns address on port 22.). See [agent forwarding](ssh.md#key-management-and-agent-forwarding-with-windows-pageant) for an example of how to do this.
3. Type `docker node ls` to view the list of nodes (and their status) in the Swarm.
![Image of docker node ls](images/dockernodels.png)
4. Type `docker run -it hello-world` to see the hello-world test app run on one of the agents (the '-it' switches ensure output is displayed on your client)
5. Now let's create a simple service in a swarm and expose it to the world. Type `docker service create --name fe --publish 80:80 yeasy/simple-web`
6. Once completed, type `docker service ps fe` to see the running service.
![Image of docker service ps](images/dockerserviceps.png)
7. In your web browser hit the AGENTFQDN endpoint (**not the master FQDN**) you recorded in step #1 and you should see the following page, with a counter that increases on each refresh.
![Image of the web page](images/swarmbrowser.png)
8. You can now scale the service. You can type `docker service scale fe=5`, and this will scale up the service to the desired number of replicas.
![Image of service scaling](images/dockerservicescale.png)
# Learning More
Here are recommended links to learn more about Swarm Mode, Docker, and Docker Compose:
1. [Docker](https://docs.docker.com/) - learn more through Docker documentation.
2. [Docker Swarm Mode](https://docs.docker.com/engine/swarm/) - learn more about Docker Swarm Mode.

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Builds Docker Enabled Clusters
# Microsoft Azure Kubernetes Engine - Builds Kubernetes Clusters
## Overview

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Provisioning of master node custom files
# Microsoft Azure Kubernetes Engine - Provisioning of master node custom files
## Overview

Просмотреть файл

@ -1,41 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2"
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 1,
"vmSize": "Standard_D2"
},
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,26 +0,0 @@
# DCOS Agent Node Attributes
Attributes can be defined for each agent node type.
The attributes can then be used with Marathon deployments as [placement constraints](https://mesosphere.github.io/marathon/docs/constraints.html).
Attributes can easily be defined in AgentPoolProfile property of the API Model
The definition below adds 2 attributes `"foo"` and `"att1"` to all nodes in the `agentprivate` pool.
```
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 3,
"vmSize": "Standard_D2_v2",
"customNodeLabels" : {
"foo" : "bar",
"att1" : "value1"
}
}
]
```
You can confirm the attributes on the Node Details:
![Node UI Details](images/dcosattributes.png)

Просмотреть файл

@ -1,45 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "xtentprivate",
"count": 3,
"vmSize": "Standard_D2_v2",
"customNodeLabels" : {
"foo" : "bar",
"attribute1" : "value1"
}
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Двоичный файл не отображается.

До

Ширина:  |  Высота:  |  Размер: 8.3 KiB

Просмотреть файл

@ -1,67 +0,0 @@
# Private Registry Support
ACS can deploy credentials for private registries to agent nodes in DC/OS clusters.
The credentials are specified in the orchestrator profile in the apimodel:
```
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS",
"dcosConfig" : {
"Registry" : "",
"RegistryUser" : "",
"RegistryPassword" : ""
}
},
```
The agent provisioning process will then create a tar archive containing a docker config as documented at: [Using a Private Docker Registry](https://docs.mesosphere.com/1.9/deploying-services/private-docker-registry/)
## Example
Let's provision a DC/OS cluster with credentials to an [Azure Container Registry](https://azure.microsoft.com/en-us/services/container-registry/) deployed to every agent node.
- First, [provision an Azure Container Registry](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-managed-get-started-portal).
- Enable Admin Access and note the registry credentials
<img src="../../docs/images/acrblade.png" alt="ACR Blade with Admin Access enabled" style="width: 50%; height: 50%;"/>
- Clone [aks-engine](http://github.com/azure/aks-engine) and [start the container with the dev environment](https://github.com/Azure/aks-engine/blob/master/docs/acsengine.md).
- Edit the API model to include the credentials
```
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS",
"registry" : "xtophregistry.azurecr.io",
"registryUser" : "xtophregistry",
"registryPassword" : "aN//=+l==Z+/A=3hXhA+mSX=rXwB/UgW"
},
```
- Run aks-engine to create ARM templates
```
./aks-engine generate examples/dcos-private-registry/dcos.json
```
- Deploy the cluster
```
az group create -l eastus -n cluster-rg
az group deployment create -g cluster-rg --template-file _output/dcoscluster/azuredeploy.json --parameters @_output/dcoscluster/azuredeploy.parameters.json
```
- Create a Service to deploy a container from the ACR
<img src="../../docs/images/dcos-create-service-from-reg.png" alt="Service Creation from Registry" style="width: 50%; height: 50%;"/>
- Add the credential path on the agent using the JSON editor
<img src="../../docs/images/dcos-create-service-json.png" alt="JSON editor with credential path" style="width: 50%; height: 50%;"/>
- See the Service running
<img src="../../docs/images/dcos-running-service-from-reg.png" alt="Running Service" style="width: 50%; height: 50%;"/>
- Check the credential deployment
<img src="../../docs/images/dcos-running-service-from-reg-files.png" alt="Running Service" style="width: 50%; height: 50%;"/>
## Limitations
- The API model currently only supports credentials for a single registry.
- Not tested with Kubernetes clusters
- Credentials have to be updated on each node

Просмотреть файл

@ -1,46 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS",
"dcosConfig" : {
"registry" : "",
"registryUser" : "",
"registryPassword" : ""
}
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 1,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,13 +0,0 @@
# Microsoft Azure Container Service Engine - DC/OS Versions
## Overview
This section provides example templates that enable creation of a Docker-enabled cluster with an older version of the DC/OS orchestrator.
Here are the release channels aks-engine is able to deploy:
1. DC/OS `1.8`. Access by specifying `"orchestratorVersion": "1.8.8"`.
2. DC/OS `1.9`. Access by specifying `"orchestratorVersion": "1.9.0"`.
3. DC/OS `1.10`. Access by specifying `"orchestratorVersion": "1.10.0"`.
Deploying and using [DC/OS](../../docs/dcos.md)

Просмотреть файл

@ -1,42 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS",
"orchestratorRelease": "1.10"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2s_v3"
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 2,
"vmSize": "Standard_D2s_v3"
},
{
"name": "agentpublic",
"count": 2,
"vmSize": "Standard_D2s_v3",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,42 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS",
"orchestratorRelease": "1.8"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2s_v3"
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 3,
"vmSize": "Standard_D2s_v3"
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2s_v3",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,42 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS",
"orchestratorRelease": "1.9"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2s_v3"
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 3,
"vmSize": "Standard_D2s_v3"
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2s_v3",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,41 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2s_v3"
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 2,
"vmSize": "Standard_D2s_v3"
},
{
"name": "agentpublic",
"count": 2,
"vmSize": "Standard_D2s_v3",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,7 +1,7 @@
# Microsoft Azure Container Service Engine - Managed Disks
# Microsoft Azure Kubernetes Engine - Managed Disks
## Overview
AKS Engine enables you to create customized Docker enabled cluster on Microsoft Azure with [managed disks](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview).
AKS Engine enables you to create customized Kubernetes cluster on Microsoft Azure with [managed disks](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview).
These examples are provided as a reference, note that managed disks is the default storage account type if none is specified.

Просмотреть файл

@ -1,39 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2s_v3"
},
"agentPoolProfiles": [
{
"name": "agentpublic",
"count": 6,
"vmSize": "Standard_D2s_v3",
"availabilityProfile": "AvailabilitySet",
"storageProfile": "ManagedDisks",
"diskSizesGB": [128, 128, 128, 128],
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1 +0,0 @@
MARATHON_JSON=marathon-slave-public.json

Просмотреть файл

@ -1,33 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agent128",
"count": 3,
"vmSize": "Standard_D2_v2",
"AvailabilityProfile": "VirtualMachineScaleSets",
"storageProfile": "ManagedDisks",
"diskSizesGB": [128, 128, 128, 128]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,34 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200
},
"agentPoolProfiles": [
{
"name": "agent128",
"count": 3,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"availabilityProfile": "AvailabilitySet",
"storageProfile": "ManagedDisks"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,40 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2s_v3",
"OSDiskSizeGB": 200
},
"agentPoolProfiles": [
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2s_v3",
"OSDiskSizeGB": 200,
"AvailabilityProfile": "VirtualMachineScaleSets",
"storageProfile": "ManagedDisks",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1 +0,0 @@
MARATHON_JSON=marathon-slave-public.json

Просмотреть файл

@ -1,47 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Swarm"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool1",
"count": 3,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet",
"storageProfile": "ManagedDisks",
"diskSizesGB": [128, 128, 128, 128]
},
{
"name": "agentpublic",
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet",
"storageProfile": "ManagedDisks",
"diskSizesGB": [128, 128, 128, 128],
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,47 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Swarm"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool1",
"count": 1,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "VirtualMachineScaleSets",
"storageProfile": "ManagedDisks",
"diskSizesGB": [128, 128, 128, 128]
},
{
"name": "agentpublic",
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"availabilityProfile": "VirtualMachineScaleSets",
"storageProfile": "ManagedDisks",
"diskSizesGB": [128, 128, 128, 128],
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,47 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Swarm"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool1",
"count": 3,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"availabilityProfile": "AvailabilitySet",
"storageProfile": "ManagedDisks"
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"availabilityProfile": "AvailabilitySet",
"storageProfile": "ManagedDisks",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,45 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Swarm"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool1",
"count": 3,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "VirtualMachineScaleSets",
"storageProfile": "ManagedDisks"
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "VirtualMachineScaleSets",
"storageProfile": "ManagedDisks",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,47 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "SwarmMode"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool1",
"count": 1,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"availabilityProfile": "AvailabilitySet",
"storageProfile": "ManagedDisks"
},
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"availabilityProfile": "AvailabilitySet",
"storageProfile": "ManagedDisks",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,45 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "SwarmMode"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool1",
"count": 1,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "VirtualMachineScaleSets",
"storageProfile": "ManagedDisks"
},
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "VirtualMachineScaleSets",
"storageProfile": "ManagedDisks",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,8 +1,8 @@
# Microsoft Azure Container Service Engine - Attached Disks
# Microsoft Azure Kubernetes Engine - Attached Disks
## Overview
AKS Engine enables you to create customized Docker enabled cluster on Microsoft Azure with attached disks.
AKS Engine enables you to create customized Kubernetes cluster on Microsoft Azure with attached disks.
The examples show you how to configure up to 4 attached disks. The disks can range from 1 to 1024 GB in size:

Просмотреть файл

@ -1,42 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"storageProfile": "StorageAccount"
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 1,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,49 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2s_v3",
"OSDiskSizeGB": 200
},
"agentPoolProfiles": [
{
"name": "agent128",
"count": 3,
"vmSize": "Standard_D2s_v3",
"OSDiskSizeGB": 200,
"availabilityProfile": "AvailabilitySet",
"storageProfile": "StorageAccount",
"diskSizesGB": [128, 128, 128, 128]
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2s_v3",
"dnsPrefix": "",
"availabilityProfile": "AvailabilitySet",
"storageProfile": "StorageAccount",
"diskSizesGB": [1],
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,49 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Swarm"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200
},
"agentPoolProfiles": [
{
"name": "agent128",
"count": 3,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"availabilityProfile": "AvailabilitySet",
"storageProfile": "StorageAccount",
"diskSizesGB": [128, 128, 128, 128]
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"availabilityProfile": "AvailabilitySet",
"storageProfile": "StorageAccount",
"diskSizesGB": [1],
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,37 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "SwarmMode"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"storageProfile": "StorageAccount"
},
"agentPoolProfiles": [
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,49 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "SwarmMode"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200
},
"agentPoolProfiles": [
{
"name": "agent128",
"count": 1,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"availabilityProfile": "AvailabilitySet",
"storageProfile": "StorageAccount",
"diskSizesGB": [128, 128, 128, 128]
},
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"availabilityProfile": "AvailabilitySet",
"storageProfile": "StorageAccount",
"diskSizesGB": [1],
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,53 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"extensions": [
{
"name": "hello-world-dcos",
"singleOrAll": "single"
}
]
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 3,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
},
"extensionProfiles": [
{
"name": "hello-world-dcos",
"version": "v1"
}
]
}
}

Просмотреть файл

@ -1,56 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"preProvisionExtension": {
"name": "hello-world",
"singleOrAll": "All"
}
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 3,
"vmSize": "Standard_D2_v2",
"preProvisionExtension": {
"name": "hello-world",
"singleOrAll": "All"
}
},
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
},
"extensionProfiles": [
{
"name": "hello-world",
"version": "v1",
"script": "hello.sh"
}
]
}
}

Просмотреть файл

@ -1,51 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "SwarmMode"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2",
"preProvisionExtension": {
"name": "hello-world",
"singleOrAll": "All"
}
},
"agentPoolProfiles": [
{
"name": "agentpublic",
"count": 1,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
],
"preProvisionExtension": {
"name": "hello-world",
"singleOrAll": "All"
}
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
},
"extensionProfiles": [
{
"name": "hello-world",
"version": "v1",
"script": "hello.sh"
}
]
}
}

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Kubernetes Upgrade
# Microsoft Azure Kubernetes Engine - Kubernetes Upgrade
## Overview

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Key vault referencing for k8s parameters
# Microsoft Azure Kubernetes Engine - Key vault referencing for k8s parameters
## Overview

Просмотреть файл

@ -1,8 +1,8 @@
# Microsoft Azure Container Service Engine - Key vault certificate deployment
# Microsoft Azure Kubernetes Engine - Key vault certificate deployment
## Overview
AKS Engine enables you to create customized Docker enabled cluster on Microsoft Azure with certs installed from key vault during deployment.
AKS Engine enables you to create customized Kubernetes cluster on Microsoft Azure with certs installed from key vault during deployment.
The examples show you how to configure installing a cert from keyvault. These certs are assumed to be in the secrets portion of your keyvault:

Просмотреть файл

@ -1,53 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentprivate",
"count": 3,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
},
"secrets":[
{
"sourceVault":{
"id":""
},
"vaultCertificates":[
{
"certificateUrl" :""
}
]
}
]
}
}
}

Просмотреть файл

@ -1,48 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Swarm"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
},
"secrets":[
{
"sourceVault":{
"id":""
},
"vaultCertificates":[
{
"certificateUrl" :""
}
]
}
]
}
}
}

Просмотреть файл

@ -1,59 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "SwarmMode"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agent128",
"count": 3,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet",
"storageProfile": "StorageAccount",
"diskSizesGB": [128, 128, 128, 128]
},
{
"name": "agentpublic",
"count": 3,
"vmSize": "Standard_D2_v2",
"dnsPrefix": "",
"availabilityProfile": "AvailabilitySet",
"storageProfile": "StorageAccount",
"diskSizesGB": [1],
"ports": [
80,
443,
8080
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
},
"secrets":[
{
"sourceVault":{
"id":""
},
"vaultCertificates":[
{
"certificateUrl" :""
}
]
}
]
}
}
}

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Kubernetes Features
# Microsoft Azure Kubernetes Engine - Kubernetes Features
## Overview

Просмотреть файл

@ -1,8 +1,8 @@
# Microsoft Azure Container Service Engine - Large Clusters
# Microsoft Azure Kubernetes Engine - Large Clusters
## Overview
AKS Engine enables you to create customized Docker enabled cluster on Microsoft Azure with 1200 nodes.
AKS Engine enables you to create customized Kubernetes cluster on Microsoft Azure with 1200 nodes.
The examples show you how to configure up to 12 agent pools with 100 nodes each:

Просмотреть файл

@ -1,97 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool0",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool1",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool2",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool3",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool4",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool5",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool6",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool7",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool8",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool9",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool10",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool11",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,85 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "DCOS"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool0",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool1",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool2",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool3",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool4",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool5",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool6",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool7",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool8",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool9",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool10",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool11",
"count": 100,
"vmSize": "Standard_D2_v2"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,97 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Swarm"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool0",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool1",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool2",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool3",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool4",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool5",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool6",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool7",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool8",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool9",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool10",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool11",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,85 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Swarm"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool0",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool1",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool2",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool3",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool4",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool5",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool6",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool7",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool8",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool9",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool10",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool11",
"count": 100,
"vmSize": "Standard_D2_v2"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,97 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "SwarmMode"
},
"masterProfile": {
"count": 3,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool0",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool1",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool2",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool3",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool4",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool5",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool6",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool7",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool8",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool9",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool10",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
},
{
"name": "agentpool11",
"count": 100,
"vmSize": "Standard_D2_v2",
"availabilityProfile": "AvailabilitySet"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,85 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "SwarmMode"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool0",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool1",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool2",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool3",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool4",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool5",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool6",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool7",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool8",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool9",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool10",
"count": 100,
"vmSize": "Standard_D2_v2"
},
{
"name": "agentpool11",
"count": 100,
"vmSize": "Standard_D2_v2"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}

Просмотреть файл

@ -1,10 +1,10 @@
# Microsoft Azure Container Service Engine - Network Plugin
# Microsoft Azure Kubernetes Engine - Network Plugin
There are 2 different Network Plugin options :
- Azure Container Networking (default)
- Kubenet
- Flannel (docs are //TODO)
- Flannel (docs are //TODO)
- Cilium (docs are //TODO)
## Azure Container Networking (default)

Просмотреть файл

@ -1,4 +1,4 @@
# Microsoft Azure Container Service Engine - Network Policy
# Microsoft Azure Kubernetes Engine - Network Policy
There are 2 different Network Policy options :

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше