Mirror of https://github.com/Azure/aks-engine.git

Rename to aks-engine

This commit is contained in:
Parent: 3e57d2887a
Commit: b1ffc7545c

@@ -1,7 +1,7 @@
 version: 2

 defaults: &defaults
-working_directory: /go/src/github.com/Azure/acs-engine
+working_directory: /go/src/github.com/Azure/aks-engine
 docker:
 - image: quay.io/deis/go-dev:v1.17.3
 environment:

@@ -59,7 +59,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 k8s-1.10-release-e2e:
 <<: *defaults
 steps:

@@ -85,7 +85,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 k8s-1.11-release-e2e:
 <<: *defaults
 steps:

@@ -111,7 +111,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 k8s-1.12-release-e2e:
 <<: *defaults
 steps:

@@ -137,7 +137,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 k8s-1.13-release-e2e:
 <<: *defaults
 steps:

@@ -163,7 +163,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 k8s-windows-1.10-release-e2e:
 <<: *defaults
 steps:

@@ -186,7 +186,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 k8s-windows-1.11-release-e2e:
 <<: *defaults
 steps:

@@ -209,7 +209,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 k8s-windows-1.12-release-e2e:
 <<: *defaults
 steps:

@@ -232,7 +232,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 k8s-windows-1.13-release-e2e:
 <<: *defaults
 steps:

@@ -255,7 +255,7 @@ jobs:
 command: make test-kubernetes
 no_output_timeout: "30m"
 - store_artifacts:
-path: /go/src/github.com/Azure/acs-engine/_logs
+path: /go/src/github.com/Azure/aks-engine/_logs
 workflows:
 version: 2
 build_and_test_pr:

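Every job in the CircleCI config above pulls in the shared block with `<<: *defaults`: a YAML anchor plus a merge key copies the anchored mapping into each job. A minimal sketch of that mechanism, assuming gopkg.in/yaml.v2's merge-key handling; the keys in the sample document are illustrative, not taken verbatim from the real config.

```go
// Minimal sketch (not part of this commit) of YAML anchor + merge-key
// resolution, the pattern the CircleCI config relies on.
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

const doc = `
defaults: &defaults
  working_directory: /go/src/github.com/Azure/aks-engine
job:
  <<: *defaults
  extra: true
`

func main() {
	var out map[string]map[string]interface{}
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	// The job inherits working_directory from the &defaults anchor.
	fmt.Println(out["job"]["working_directory"])
}
```
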
@@ -1,7 +1,7 @@
-./acs-engine
-./acs-engine.exe
+./aks-engine
+./aks-engine.exe
 ./_output
-./test/acs-engine-test/acs-engine-test
+./test/aks-engine-test/aks-engine-test
 ## autogenerated
 ./pkg/i18n/translations.go
-./pkg/acsengine/templates.go
+./pkg/engine/templates.go

@@ -8,7 +8,7 @@

 ---

-**What version of acs-engine?**:
+**What version of aks-engine?**:

 ---

@@ -1,5 +1,5 @@
-acs-engine.exe
-acs-engine
+aks-engine.exe
+aks-engine
 debug
 _output/
 _input/

@@ -7,7 +7,7 @@ _input/
 .DS_Store
 test/user.env
 user.env
-test/acs-engine-test/acs-engine-test
+test/aks-engine-test/aks-engine-test
 .editorconfig
 _dist/
 bin/

@@ -15,14 +15,14 @@ bin/
 .coverprofile

 test/junit/
-test/acs-engine-test/acs-engine-test.exe
+test/aks-engine-test/aks-engine-test.exe
 pkg/operations/junit.xml
 pkg/operations/kubernetesupgrade/junit.xml
-pkg/acsengine/templates.go
+pkg/engine/templates.go
 pkg/i18n/translations.go

 _logs/
-test/acs-engine-test/report/TestReport.json
+test/aks-engine-test/report/TestReport.json
 *.swp

 # I have no idea why these get generated when I run the e2e test

@@ -41,4 +41,4 @@ packer/sp.json
 .vs

 # exclude vendor directory from gitignore
-!/vendor/**/*
+!/vendor/**/*

@@ -4,7 +4,7 @@ Prow is a CI system that offers various features such as rich Github automation,
 and running tests in Jenkins or on a Kubernetes cluster. You can read more about
 Prow in [upstream docs][0].

-## acs-engine setup
+## aks-engine setup

 Deploy a new Kubernetes cluster (e.g. `az aks create -g acse-test-prow-ci -n prow`)

@@ -36,4 +36,4 @@ is also installed that takes care of merging pull requests that pass all tests
 and satisfy a set of label requirements.

 [0]: https://github.com/kubernetes/test-infra/tree/master/prow#prow
-[1]: https://docs.microsoft.com/en-us/azure/aks/ingress
+[1]: https://docs.microsoft.com/en-us/azure/aks/ingress

@@ -3,10 +3,10 @@ log_level: debug
 tide:
 # target_url: http://prow-ci-bot-ingress.eastus.cloudapp.azure.com/tide.html
 merge_method:
-Azure/acs-engine: squash
+Azure/aks-engine: squash
 queries:
 - repos:
-- Azure/acs-engine
+- Azure/aks-engine
 labels:
 - lgtm
 - approved

@@ -1,6 +1,6 @@
 approve:
 - repos:
-- Azure/acs-engine
+- Azure/aks-engine
 implicit_self_approve: true
 lgtm_acts_as_approve: true

@@ -21,7 +21,7 @@ label:
 plugins:
-Azure/acs-engine:
+Azure/aks-engine:
 - approve
 - assign
 - cat

@@ -1,6 +1,6 @@
 trigger: none

-# steps:
+# steps:
 # - create a VHD in Packer to normal storage account
 # - copy from Packer storage account to classic storage account using AzCopy
 # - generate SAS link from azure CLI

@@ -8,14 +8,14 @@ trigger: none
 phases:
 - phase: build_vhd
-queue:
+queue:
 name: Hosted Ubuntu 1604
 timeoutInMinutes: 120
 steps:
 - script: |
 docker run --rm \
--v ${PWD}:/go/src/github.com/Azure/acs-engine \
--w /go/src/github.com/Azure/acs-engine \
+-v ${PWD}:/go/src/github.com/Azure/aks-engine \
+-w /go/src/github.com/Azure/aks-engine \
 -e CLIENT_ID=${CLIENT_ID} \
 -e CLIENT_SECRET="$(CLIENT_SECRET)" \
 -e TENANT_ID=${TENANT_ID} \

@@ -33,8 +33,8 @@ phases:
 VHD_NAME="$(echo $OS_DISK_SAS | cut -d "/" -f 8 | cut -d "?" -f 1)" && \
 printf "COPY ME ----> ${CLASSIC_BLOB}/${VHD_NAME}?" | tee -a vhd-sas && \
 docker run --rm \
--v ${PWD}:/go/src/github.com/Azure/acs-engine \
--w /go/src/github.com/Azure/acs-engine \
+-v ${PWD}:/go/src/github.com/Azure/aks-engine \
+-w /go/src/github.com/Azure/aks-engine \
 -e CLIENT_ID=${CLIENT_ID} \
 -e CLIENT_SECRET="$(CLIENT_SECRET)" \
 -e TENANT_ID=${TENANT_ID} \

@@ -46,8 +46,8 @@ phases:
 displayName: Copying resource to Classic Storage Account
 - script: |
 docker run --rm \
--v ${PWD}:/go/src/github.com/Azure/acs-engine \
--w /go/src/github.com/Azure/acs-engine \
+-v ${PWD}:/go/src/github.com/Azure/aks-engine \
+-w /go/src/github.com/Azure/aks-engine \
 -e CLIENT_ID=${CLIENT_ID} \
 -e CLIENT_SECRET="$(CLIENT_SECRET)" \
 -e TENANT_ID=${TENANT_ID} \

@@ -1,6 +1,6 @@
 # Contributing Guidelines

-The Microsoft acs-engine project accepts contributions via GitHub pull requests. This document outlines the process to help get your contribution accepted.
+The Microsoft aks-engine project accepts contributions via GitHub pull requests. This document outlines the process to help get your contribution accepted.

 ## Contributor License Agreements

@@ -18,8 +18,8 @@ signed the CLA can be accepted into the repository.
 This is an open source project and as such no formal support is available. However, like all good open source projects we do offer "best effort" support through github issues.

 GitHub issues:
-- ACS-Engine: https://github.com/Azure/acs-engine/issues - file issues and PRs related to ACS-Engine
-- ACS: https://github.com/Azure/acs/issues - file issues and PRs related to Azure Container Service
+- AKS-Engine: https://github.com/Azure/aks-engine/issues - file issues and PRs related to AKS-Engine
+- AKS: https://github.com/Azure/AKS/issues - file issues and PRs related to Azure Kubernetes Service

 Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of.

@@ -33,11 +33,11 @@ specific upcoming bug or minor release, it would go into `2.2.1` or `2.3.0`.
 A milestone (and hence release) is considered done when all outstanding issues/PRs have been closed or moved to another milestone.

 ## Issues
-Issues are used as the primary method for tracking anything to do with the acs-engine project.
+Issues are used as the primary method for tracking anything to do with the aks-engine project.

 ### Issue Lifecycle
 The issue lifecycle is mainly driven by the core maintainers, but is good information for those
-contributing to acs-engine. All issue types follow the same general lifecycle. Differences are noted below.
+contributing to aks-engine. All issue types follow the same general lifecycle. Differences are noted below.
 1. Issue creation
 2. Triage
 - The maintainer in charge of triaging will apply the proper labels for the issue. This

@@ -24,10 +24,10 @@ RUN git clone https://github.com/akesterson/cmdarg.git /tmp/cmdarg \
 RUN git clone https://github.com/akesterson/shunit.git /tmp/shunit \
 && cd /tmp/shunit && make install && rm -rf /tmp/shunit

-WORKDIR /gopath/src/github.com/Azure/acs-engine
+WORKDIR /gopath/src/github.com/Azure/aks-engine

 # Cache vendor layer
-ADD . /gopath/src/github.com/Azure/acs-engine/
+ADD . /gopath/src/github.com/Azure/aks-engine/
 RUN make bootstrap

 # https://github.com/dotnet/core/blob/master/release-notes/download-archives/2.1.2-sdk-download.md

@@ -45,4 +45,3 @@ RUN apt-get update \
 && curl -L https://packages.microsoft.com/keys/microsoft.asc | apt-key add - \
 && apt-get update \
 && apt-get install azure-cli
-

Makefile (12 changed lines)

@@ -16,7 +16,7 @@ GO ?= go
 TAGS :=
 LDFLAGS :=
 BINDIR := $(CURDIR)/bin
-BINARIES := acs-engine
+BINARIES := aks-engine
 VERSION ?= $(shell git rev-parse HEAD)
 VERSION_SHORT ?= $(shell git rev-parse --short HEAD)
 GITTAG := $(shell git describe --exact-match --tags $(shell git log -n1 --pretty='%h') 2> /dev/null)

@@ -24,7 +24,7 @@ ifeq ($(GITTAG),)
 GITTAG := $(VERSION_SHORT)
 endif

-REPO_PATH := github.com/Azure/acs-engine
+REPO_PATH := github.com/Azure/aks-engine
 DEV_ENV_IMAGE := quay.io/deis/go-dev:v1.17.3
 DEV_ENV_WORK_DIR := /go/src/${REPO_PATH}
 DEV_ENV_OPTS := --rm -v ${CURDIR}:${DEV_ENV_WORK_DIR} -w ${DEV_ENV_WORK_DIR} ${DEV_ENV_VARS}

@@ -63,17 +63,17 @@ generate-azure-constants:
 .PHONY: build
 build: generate
 GOBIN=$(BINDIR) $(GO) install $(GOFLAGS) -ldflags '$(LDFLAGS)'
-cd test/acs-engine-test; go build $(GOFLAGS)
+cd test/aks-engine-test; go build $(GOFLAGS)

 build-binary: generate
-go build $(GOFLAGS) -v -ldflags "${LDFLAGS}" -o ${BINARY_DEST_DIR}/acs-engine .
+go build $(GOFLAGS) -v -ldflags "${LDFLAGS}" -o ${BINARY_DEST_DIR}/aks-engine .

 # usage: make clean build-cross dist VERSION=v0.4.0
 .PHONY: build-cross
 build-cross: build
 build-cross: LDFLAGS += -extldflags "-static"
 build-cross:
-CGO_ENABLED=0 gox -output="_dist/acs-engine-${GITTAG}-{{.OS}}-{{.Arch}}/{{.Dir}}" -osarch='$(TARGETS)' $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)'
+CGO_ENABLED=0 gox -output="_dist/aks-engine-${GITTAG}-{{.OS}}-{{.Arch}}/{{.Dir}}" -osarch='$(TARGETS)' $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)'

 .PHONY: build-windows-k8s
 build-windows-k8s:

@@ -101,7 +101,7 @@ clean:
 GIT_BASEDIR = $(shell git rev-parse --show-toplevel 2>/dev/null)
 ifneq ($(GIT_BASEDIR),)
-LDFLAGS += -X github.com/Azure/acs-engine/pkg/test.JUnitOutDir=${GIT_BASEDIR}/test/junit
+LDFLAGS += -X github.com/Azure/aks-engine/pkg/test.JUnitOutDir=${GIT_BASEDIR}/test/junit
 endif

 test: generate

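The `LDFLAGS += -X .../pkg/test.JUnitOutDir=...` change above is one reason the rename must reach into the Makefile at all: `-X` addresses a package variable by its full import path, so a stale `acs-engine` path would be silently ignored by the linker. A minimal sketch of the mechanism, with a stand-in variable since the real one lives in pkg/test:

```go
// Sketch of the linker -X mechanism used by the Makefile above. Build with:
//   go build -ldflags "-X main.JUnitOutDir=/tmp/junit" .
// The linker overwrites the variable's value at link time; if the import
// path in -X does not match the module path, the flag has no effect and
// the default survives.
package main

import "fmt"

// Stand-in for github.com/Azure/aks-engine/pkg/test.JUnitOutDir.
var JUnitOutDir = "test/junit (default)"

func main() {
	fmt.Println("JUnit output dir:", JUnitOutDir)
}
```
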
README.md (12 changed lines)

@@ -1,12 +1,12 @@
 # Microsoft Azure Container Service Engine - Builds Docker Enabled Clusters

-[![Coverage Status](https://codecov.io/gh/Azure/acs-engine/branch/master/graph/badge.svg)](https://codecov.io/gh/Azure/acs-engine)
-[![CircleCI](https://circleci.com/gh/Azure/acs-engine/tree/master.svg?style=svg)](https://circleci.com/gh/Azure/acs-engine/tree/master)
-[![GoDoc](https://godoc.org/github.com/Azure/acs-engine?status.svg)](https://godoc.org/github.com/Azure/acs-engine)
+[![Coverage Status](https://codecov.io/gh/Azure/aks-engine/branch/master/graph/badge.svg)](https://codecov.io/gh/Azure/aks-engine)
+[![CircleCI](https://circleci.com/gh/Azure/aks-engine/tree/master.svg?style=svg)](https://circleci.com/gh/Azure/aks-engine/tree/master)
+[![GoDoc](https://godoc.org/github.com/Azure/aks-engine?status.svg)](https://godoc.org/github.com/Azure/aks-engine)

 ## Overview

-The Azure Container Service Engine (`acs-engine`) generates ARM (Azure Resource Manager) templates for Docker enabled clusters on Microsoft Azure with your choice of DC/OS, Kubernetes, OpenShift, Swarm Mode, or Swarm orchestrators. The input to the tool is a cluster definition. The cluster definition (or apimodel) is very similar to (in many cases the same as) the ARM template syntax used to deploy a Microsoft Azure Container Service cluster.
+The Azure Container Service Engine (`aks-engine`) generates ARM (Azure Resource Manager) templates for Docker enabled clusters on Microsoft Azure with your choice of DC/OS, Kubernetes, OpenShift, Swarm Mode, or Swarm orchestrators. The input to the tool is a cluster definition. The cluster definition (or apimodel) is very similar to (in many cases the same as) the ARM template syntax used to deploy a Microsoft Azure Container Service cluster.

 The cluster definition file enables you to customize your Docker enabled cluster in many ways including:

@@ -44,7 +44,7 @@ These guides cover more advanced features to try out after you have built your f
 Follow the [developers guide](docs/developers.md) to set up your environment.

-To build acs-engine, run `make build`. If you are developing with a working [Docker environment](https://docs.docker.com/engine), you can also run `make dev` (or `makedev.ps1` on Windows) first to start a Docker container and run `make build` inside the container.
+To build aks-engine, run `make build`. If you are developing with a working [Docker environment](https://docs.docker.com/engine), you can also run `make dev` (or `makedev.ps1` on Windows) first to start a Docker container and run `make build` inside the container.

 Please follow these instructions before submitting a PR:

@@ -65,7 +65,7 @@ $ vim examples/kubernetes.json
 # insert your preferred, unique DNS prefix
 # insert your SSH public key

-$ ./acs-engine generate examples/kubernetes.json
+$ ./aks-engine generate examples/kubernetes.json
 ```

 This produces a new directory inside `_output/` that contains an ARM template for deploying Kubernetes into Azure. (In the case of Kubernetes, some additional needed assets are generated and placed in the output directory.)

@@ -2,7 +2,7 @@

 ## Overview

-The Microsoft Container Service Engine (`acs-engine`) converts a container cluster definition file into a set of ARM (Azure Resource Manager) templates. By deploying these templates on Azure, users can easily set up a Docker-based container service cluster there. Users are free to choose DC/OS, Kubernetes, or Swarm/Swarm Mode as the cluster orchestration engine. The cluster definition file uses the same syntax as ARM templates, and both can be used to deploy Azure Container Service.
+The Microsoft Container Service Engine (`aks-engine`) converts a container cluster definition file into a set of ARM (Azure Resource Manager) templates. By deploying these templates on Azure, users can easily set up a Docker-based container service cluster there. Users are free to choose DC/OS, Kubernetes, or Swarm/Swarm Mode as the cluster orchestration engine. The cluster definition file uses the same syntax as ARM templates, and both can be used to deploy Azure Container Service.

 The cluster definition file provides the following features:
 * A free choice of orchestration engine: DC/OS, Kubernetes, Swarm Mode, or Swarm

@@ -16,7 +16,7 @@

 ## Demo links

-* [ACS Engine](docs/acsengine.md) - demonstrates how to use the ACS engine to generate Docker-based container clusters
+* [AKS Engine](docs/acsengine.md) - demonstrates how to use the ACS engine to generate Docker-based container clusters
 * [Cluster Definition](docs/clusterdefinition.md) - describes the cluster definition file format in detail
 * [DC/OS Walkthrough](docs/dcos.md) - demonstrates how to use the ACS engine to create a DC/OS cluster on Azure
 * [Kubernetes Walkthrough](docs/kubernetes.md) - demonstrates how to use the ACS engine to create a Kubernetes cluster on Azure

@@ -6,7 +6,7 @@ package cmd_test
 import (
 "testing"

-. "github.com/Azure/acs-engine/pkg/test"
+. "github.com/Azure/aks-engine/pkg/test"
 )

 func TestCmd(t *testing.T) {

@@ -11,11 +11,11 @@ import (
 "path"
 "path/filepath"

-"github.com/Azure/acs-engine/pkg/api"
-"github.com/Azure/acs-engine/pkg/armhelpers"
-"github.com/Azure/acs-engine/pkg/helpers"
-"github.com/Azure/acs-engine/pkg/i18n"
-"github.com/Azure/acs-engine/pkg/operations/dcosupgrade"
+"github.com/Azure/aks-engine/pkg/api"
+"github.com/Azure/aks-engine/pkg/armhelpers"
+"github.com/Azure/aks-engine/pkg/helpers"
+"github.com/Azure/aks-engine/pkg/i18n"
+"github.com/Azure/aks-engine/pkg/operations/dcosupgrade"
 "github.com/leonelquinteros/gotext"
 "github.com/pkg/errors"

@@ -43,7 +43,7 @@ type dcosUpgradeCmd struct {
 containerService *api.ContainerService
 apiVersion string
 currentDcosVersion string
-client armhelpers.ACSEngineClient
+client armhelpers.AKSEngineClient
 locale *gotext.Locale
 nameSuffix string
 sshPrivateKey []byte

@@ -21,12 +21,12 @@ import (
 "encoding/json"

-"github.com/Azure/acs-engine/pkg/acsengine"
-"github.com/Azure/acs-engine/pkg/acsengine/transform"
-"github.com/Azure/acs-engine/pkg/api"
-"github.com/Azure/acs-engine/pkg/armhelpers"
-"github.com/Azure/acs-engine/pkg/helpers"
-"github.com/Azure/acs-engine/pkg/i18n"
+"github.com/Azure/aks-engine/pkg/api"
+"github.com/Azure/aks-engine/pkg/armhelpers"
+"github.com/Azure/aks-engine/pkg/engine"
+"github.com/Azure/aks-engine/pkg/engine/transform"
+"github.com/Azure/aks-engine/pkg/helpers"
+"github.com/Azure/aks-engine/pkg/i18n"
 "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
 "github.com/Azure/go-autorest/autorest/to"
 "github.com/pkg/errors"

@@ -63,7 +63,7 @@ type deployCmd struct {
 apiVersion string
 locale *gotext.Locale

-client armhelpers.ACSEngineClient
+client armhelpers.AKSEngineClient
 resourceGroup string
 random *rand.Rand
 location string

@@ -397,13 +397,13 @@ func (dc *deployCmd) validateApimodel() (*api.ContainerService, string, error) {
 }

 func (dc *deployCmd) run() error {
-ctx := acsengine.Context{
+ctx := engine.Context{
 Translator: &i18n.Translator{
 Locale: dc.locale,
 },
 }

-templateGenerator, err := acsengine.InitializeTemplateGenerator(ctx)
+templateGenerator, err := engine.InitializeTemplateGenerator(ctx)
 if err != nil {
 log.Fatalf("failed to initialize template generator: %s", err.Error())
 }

@@ -414,7 +414,7 @@ func (dc *deployCmd) run() error {
 os.Exit(1)
 }

-template, parameters, err := templateGenerator.GenerateTemplate(dc.containerService, acsengine.DefaultGeneratorCode, BuildTag)
+template, parameters, err := templateGenerator.GenerateTemplate(dc.containerService, engine.DefaultGeneratorCode, BuildTag)
 if err != nil {
 log.Fatalf("error generating template %s: %s", dc.apimodelPath, err.Error())
 os.Exit(1)

@@ -428,7 +28,7 @@ func (dc *deployCmd) run() error {
 log.Fatalf("error pretty printing template parameters: %s \n", err.Error())
 }

-writer := &acsengine.ArtifactWriter{
+writer := &engine.ArtifactWriter{
 Translator: &i18n.Translator{
 Locale: dc.locale,
 },

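The `run()` hunks above show the whole post-rename generation flow in one place: `pkg/acsengine` becomes `pkg/engine`, but the sequence is unchanged. A condensed sketch assembled only from calls visible in this diff, with error handling trimmed, the cluster definition assumed already loaded, and a literal standing in for the cmd package's `BuildTag`:

```go
// Condensed from the diff above: build a Context, initialize a template
// generator, then render the ARM template and its parameters.
package main

import (
	"fmt"
	"log"

	"github.com/Azure/aks-engine/pkg/api"
	"github.com/Azure/aks-engine/pkg/engine"
	"github.com/Azure/aks-engine/pkg/i18n"
)

// renderTemplate mirrors the deploy/generate commands' core path; the
// *api.ContainerService is assumed to come from a parsed apimodel.
func renderTemplate(cs *api.ContainerService) {
	ctx := engine.Context{Translator: &i18n.Translator{}}
	templateGenerator, err := engine.InitializeTemplateGenerator(ctx)
	if err != nil {
		log.Fatalf("failed to initialize template generator: %s", err)
	}
	// "v0.0.0" stands in for the cmd package's BuildTag variable.
	template, parameters, err := templateGenerator.GenerateTemplate(cs, engine.DefaultGeneratorCode, "v0.0.0")
	if err != nil {
		log.Fatalf("error generating template: %s", err)
	}
	fmt.Println(len(template), len(parameters))
}

func main() {} // renderTemplate is driven by the real CLI commands.
```
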
@@ -10,8 +10,8 @@ import (
 "os"

-"github.com/Azure/acs-engine/pkg/api"
-"github.com/Azure/acs-engine/pkg/armhelpers"
+"github.com/Azure/aks-engine/pkg/api"
+"github.com/Azure/aks-engine/pkg/armhelpers"
 "github.com/pkg/errors"
 "github.com/satori/go.uuid"
 "github.com/spf13/cobra"

@@ -60,13 +60,13 @@ const ExampleAPIModelWithoutServicePrincipalProfile = `{

 //mockAuthProvider implements AuthProvider and allows in particular to stub out getClient()
 type mockAuthProvider struct {
-getClientMock armhelpers.ACSEngineClient
+getClientMock armhelpers.AKSEngineClient
 *authArgs
 }

-func (provider *mockAuthProvider) getClient() (armhelpers.ACSEngineClient, error) {
+func (provider *mockAuthProvider) getClient() (armhelpers.AKSEngineClient, error) {
 if provider.getClientMock == nil {
-return &armhelpers.MockACSEngineClient{}, nil
+return &armhelpers.MockAKSEngineClient{}, nil
 }
 return provider.getClientMock, nil

@@ -241,7 +241,7 @@ func TestAutoSufixWithDnsPrefixInApiModel(t *testing.T) {
 containerService: cs,
 apiVersion: ver,

-client: &armhelpers.MockACSEngineClient{},
+client: &armhelpers.MockAKSEngineClient{},
 authProvider: &mockAuthProvider{
 authArgs: &authArgs{},
 },

@@ -285,7 +285,7 @@ func TestAPIModelWithoutServicePrincipalProfileAndClientIdAndSecretInCmd(t *test
 containerService: cs,
 apiVersion: ver,

-client: &armhelpers.MockACSEngineClient{},
+client: &armhelpers.MockAKSEngineClient{},
 authProvider: &mockAuthProvider{
 authArgs: &authArgs{},
 },

@@ -338,7 +338,7 @@ func TestAPIModelWithEmptyServicePrincipalProfileAndClientIdAndSecretInCmd(t *te
 containerService: cs,
 apiVersion: ver,

-client: &armhelpers.MockACSEngineClient{},
+client: &armhelpers.MockAKSEngineClient{},
 authProvider: &mockAuthProvider{
 authArgs: &authArgs{},
 },

@@ -384,7 +384,7 @@ func TestAPIModelWithoutServicePrincipalProfileAndWithoutClientIdAndSecretInCmd(
 containerService: cs,
 apiVersion: ver,

-client: &armhelpers.MockACSEngineClient{},
+client: &armhelpers.MockAKSEngineClient{},
 authProvider: &mockAuthProvider{
 authArgs: &authArgs{},
 },

@@ -421,7 +421,7 @@ func TestAPIModelWithEmptyServicePrincipalProfileAndWithoutClientIdAndSecretInCm
 containerService: cs,
 apiVersion: ver,

-client: &armhelpers.MockACSEngineClient{},
+client: &armhelpers.MockAKSEngineClient{},
 authProvider: &mockAuthProvider{
 authArgs: &authArgs{},
 },

@@ -473,7 +473,7 @@ func testAutodeployCredentialHandling(t *testing.T, useManagedIdentity bool, cli
 containerService: cs,
 apiVersion: ver,

-client: &armhelpers.MockACSEngineClient{},
+client: &armhelpers.MockAKSEngineClient{},
 authProvider: &mockAuthProvider{
 authArgs: &authArgs{},
 },

@@ -507,14 +507,14 @@ func testAutodeployCredentialHandling(t *testing.T, useManagedIdentity bool, cli

 func TestDeployCmdMergeAPIModel(t *testing.T) {
 d := &deployCmd{}
-d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+d.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 err := d.mergeAPIModel()
 if err != nil {
 t.Fatalf("unexpected error calling mergeAPIModel with no --set flag defined: %s", err.Error())
 }

 d = &deployCmd{}
-d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+d.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 d.set = []string{"masterProfile.count=3,linuxProfile.adminUsername=testuser"}
 err = d.mergeAPIModel()
 if err != nil {

@@ -522,7 +522,7 @@ func TestDeployCmdMergeAPIModel(t *testing.T) {
 }

 d = &deployCmd{}
-d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+d.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 d.set = []string{"masterProfile.count=3", "linuxProfile.adminUsername=testuser"}
 err = d.mergeAPIModel()
 if err != nil {

@@ -530,7 +530,7 @@ func TestDeployCmdMergeAPIModel(t *testing.T) {
 }

 d = &deployCmd{}
-d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+d.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 d.set = []string{"agentPoolProfiles[0].count=1"}
 err = d.mergeAPIModel()
 if err != nil {

@@ -540,10 +540,10 @@ func TestDeployCmdMergeAPIModel(t *testing.T) {

 func TestDeployCmdRun(t *testing.T) {
 d := &deployCmd{
-client: &armhelpers.MockACSEngineClient{},
+client: &armhelpers.MockAKSEngineClient{},
 authProvider: &mockAuthProvider{
 authArgs: &authArgs{},
-getClientMock: &armhelpers.MockACSEngineClient{},
+getClientMock: &armhelpers.MockAKSEngineClient{},
 },
 apimodelPath: "./this/is/unused.json",
 outputDirectory: "_test_output",

@@ -562,7 +562,7 @@ func TestDeployCmdRun(t *testing.T) {
 t.Fatalf("Invalid SubscriptionId in Test: %s", err)
 }

-d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+d.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 d.getAuthArgs().SubscriptionID = fakeSubscriptionID
 d.getAuthArgs().rawSubscriptionID = fakeRawSubscriptionID

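These test hunks show how many call sites the `ACSEngineClient` → `AKSEngineClient` rename touches; this commit updates them all in a single pass. A hypothetical alternative for staging such a rename when external consumers exist (not something this commit does) is a Go type alias, which keeps the old exported name compiling while callers migrate:

```go
// Hypothetical migration shim (NOT part of this commit): a type alias
// lets the old name keep compiling during a gradual rename.
package armhelpers

// AKSEngineClient is the renamed interface; the real one declares the
// Azure operations the cmd package needs (method set elided here).
type AKSEngineClient interface{}

// Deprecated: use AKSEngineClient.
type ACSEngineClient = AKSEngineClient
```
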
@@ -9,10 +9,10 @@ import (
 "os"
 "path"

-"github.com/Azure/acs-engine/pkg/acsengine"
-"github.com/Azure/acs-engine/pkg/acsengine/transform"
-"github.com/Azure/acs-engine/pkg/api"
-"github.com/Azure/acs-engine/pkg/i18n"
+"github.com/Azure/aks-engine/pkg/api"
+"github.com/Azure/aks-engine/pkg/engine"
+"github.com/Azure/aks-engine/pkg/engine/transform"
+"github.com/Azure/aks-engine/pkg/i18n"
 "github.com/leonelquinteros/gotext"
 "github.com/pkg/errors"
 log "github.com/sirupsen/logrus"

@@ -172,12 +172,12 @@ func (gc *generateCmd) loadAPIModel(cmd *cobra.Command, args []string) error {
 func (gc *generateCmd) run() error {
 log.Infoln(fmt.Sprintf("Generating assets into %s...", gc.outputDirectory))

-ctx := acsengine.Context{
+ctx := engine.Context{
 Translator: &i18n.Translator{
 Locale: gc.locale,
 },
 }
-templateGenerator, err := acsengine.InitializeTemplateGenerator(ctx)
+templateGenerator, err := engine.InitializeTemplateGenerator(ctx)
 if err != nil {
 log.Fatalf("failed to initialize template generator: %s", err.Error())
 }

@@ -187,7 +187,7 @@ func (gc *generateCmd) run() error {
 log.Fatalf("error in SetPropertiesDefaults template %s: %s", gc.apimodelPath, err.Error())
 os.Exit(1)
 }
-template, parameters, err := templateGenerator.GenerateTemplate(gc.containerService, acsengine.DefaultGeneratorCode, BuildTag)
+template, parameters, err := templateGenerator.GenerateTemplate(gc.containerService, engine.DefaultGeneratorCode, BuildTag)
 if err != nil {
 log.Fatalf("error generating template %s: %s", gc.apimodelPath, err.Error())
 os.Exit(1)

@@ -202,7 +202,7 @@ func (gc *generateCmd) run() error {
 }
 }

-writer := &acsengine.ArtifactWriter{
+writer := &engine.ArtifactWriter{
 Translator: &i18n.Translator{
 Locale: gc.locale,
 },

@@ -28,7 +28,7 @@ func TestGenerateCmdValidate(t *testing.T) {
 r := &cobra.Command{}

 // validate cmd with 1 arg
-err := g.validate(r, []string{"../pkg/acsengine/testdata/simple/kubernetes.json"})
+err := g.validate(r, []string{"../pkg/engine/testdata/simple/kubernetes.json"})
 if err != nil {
 t.Fatalf("unexpected error validating 1 arg: %s", err.Error())
 }

@@ -45,7 +45,7 @@ func TestGenerateCmdValidate(t *testing.T) {
 g = &generateCmd{}

 // validate cmd with more than 1 arg
-err = g.validate(r, []string{"../pkg/acsengine/testdata/simple/kubernetes.json", "arg1"})
+err = g.validate(r, []string{"../pkg/engine/testdata/simple/kubernetes.json", "arg1"})
 t.Logf(err.Error())
 if err == nil {
 t.Fatalf("expected error validating multiple args")

@@ -55,14 +55,14 @@ func TestGenerateCmdValidate(t *testing.T) {

 func TestGenerateCmdMergeAPIModel(t *testing.T) {
 g := &generateCmd{}
-g.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 err := g.mergeAPIModel()
 if err != nil {
 t.Fatalf("unexpected error calling mergeAPIModel with no --set flag defined: %s", err.Error())
 }

 g = &generateCmd{}
-g.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 g.set = []string{"masterProfile.count=3,linuxProfile.adminUsername=testuser"}
 err = g.mergeAPIModel()
 if err != nil {

@@ -70,7 +70,7 @@ func TestGenerateCmdMergeAPIModel(t *testing.T) {
 }

 g = &generateCmd{}
-g.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 g.set = []string{"masterProfile.count=3", "linuxProfile.adminUsername=testuser"}
 err = g.mergeAPIModel()
 if err != nil {

@@ -78,7 +78,7 @@ func TestGenerateCmdMergeAPIModel(t *testing.T) {
 }

 g = &generateCmd{}
-g.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 g.set = []string{"agentPoolProfiles[0].count=1"}
 err = g.mergeAPIModel()
 if err != nil {

@@ -87,7 +87,7 @@ func TestGenerateCmdMergeAPIModel(t *testing.T) {

 // test with an ssh key that contains == sign
 g = &generateCmd{}
-g.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 g.set = []string{"linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.secret=\"=#msRock5!t=\""}
 err = g.mergeAPIModel()
 if err != nil {

@@ -96,7 +96,7 @@ func TestGenerateCmdMergeAPIModel(t *testing.T) {

 // test with simple quote
 g = &generateCmd{}
-g.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 g.set = []string{"servicePrincipalProfile.secret='=MsR0ck5!t='"}
 err = g.mergeAPIModel()
 if err != nil {

@@ -108,12 +108,12 @@ func TestGenerateCmdMLoadAPIModel(t *testing.T) {
 g := &generateCmd{}
 r := &cobra.Command{}

-g.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
+g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
 g.set = []string{"agentPoolProfiles[0].count=1"}

-g.validate(r, []string{"../pkg/acsengine/testdata/simple/kubernetes.json"})
+g.validate(r, []string{"../pkg/engine/testdata/simple/kubernetes.json"})
 g.mergeAPIModel()
-err := g.loadAPIModel(r, []string{"../pkg/acsengine/testdata/simple/kubernetes.json"})
+err := g.loadAPIModel(r, []string{"../pkg/engine/testdata/simple/kubernetes.json"})
 if err != nil {
 t.Fatalf("unexpected error loading api model: %s", err.Error())
 }

@@ -6,8 +6,8 @@ package cmd
 import (
 "fmt"

-"github.com/Azure/acs-engine/pkg/api"
-"github.com/Azure/acs-engine/pkg/helpers"
+"github.com/Azure/aks-engine/pkg/api"
+"github.com/Azure/aks-engine/pkg/helpers"
 "github.com/spf13/cobra"
 )

cmd/root.go (24 changed lines)

@@ -9,10 +9,10 @@ import (
 "os"
 "path/filepath"

-"github.com/Azure/acs-engine/pkg/api"
-"github.com/Azure/acs-engine/pkg/api/vlabs"
-"github.com/Azure/acs-engine/pkg/armhelpers"
-"github.com/Azure/acs-engine/pkg/helpers"
+"github.com/Azure/aks-engine/pkg/api"
+"github.com/Azure/aks-engine/pkg/api/vlabs"
+"github.com/Azure/aks-engine/pkg/armhelpers"
+"github.com/Azure/aks-engine/pkg/helpers"
 "github.com/Azure/go-autorest/autorest/azure"
 "github.com/pkg/errors"
 uuid "github.com/satori/go.uuid"

@@ -23,9 +23,9 @@ import (
 )

 const (
-rootName = "acs-engine"
-rootShortDescription = "ACS-Engine deploys and manages container orchestrators in Azure"
-rootLongDescription = "ACS-Engine deploys and manages Kubernetes, OpenShift, Swarm Mode, and DC/OS clusters in Azure"
+rootName = "aks-engine"
+rootShortDescription = "AKS-Engine deploys and manages container orchestrators in Azure"
+rootLongDescription = "AKS-Engine deploys and manages Kubernetes, OpenShift, Swarm Mode, and DC/OS clusters in Azure"
 )

 var (

@@ -33,7 +33,7 @@ var (
 dumpDefaultModel bool
 )

-// NewRootCmd returns the root command for ACS-Engine.
+// NewRootCmd returns the root command for AKS-Engine.
 func NewRootCmd() *cobra.Command {
 rootCmd := &cobra.Command{
 Use: rootName,

@@ -90,7 +90,7 @@ func writeDefaultModel(out io.Writer) error {

 type authProvider interface {
 getAuthArgs() *authArgs
-getClient() (armhelpers.ACSEngineClient, error)
+getClient() (armhelpers.AKSEngineClient, error)
 }

 type authArgs struct {

@@ -193,7 +193,7 @@ func getCloudSubFromAzConfig(cloud string, f *ini.File) (uuid.UUID, error) {
 return uuid.FromString(sub.String())
 }

-func (authArgs *authArgs) getClient() (armhelpers.ACSEngineClient, error) {
+func (authArgs *authArgs) getClient() (armhelpers.AKSEngineClient, error) {
 var client *armhelpers.AzureClient
 env, err := azure.EnvironmentFromName(authArgs.RawAzureEnvironment)
 if err != nil {

@@ -226,12 +226,12 @@ func getCompletionCmd(root *cobra.Command) *cobra.Command {
 Short: "Generates bash completion scripts",
 Long: `To load completion run

-source <(acs-engine completion)
+source <(aks-engine completion)

 To configure your bash shell to load completions for each session, add this to your bashrc

 # ~/.bashrc or ~/.profile
-source <(acs-engine completion)
+source <(aks-engine completion)
 `,
 Run: func(cmd *cobra.Command, args []string) {
 root.GenBashCompletion(os.Stdout)

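The `getCompletionCmd` hunk above wires `aks-engine completion` to cobra's bash-completion generator. A self-contained sketch of the same pattern, with the command wiring being illustrative rather than copied from cmd/root.go:

```go
// Minimal reproduction of the completion subcommand pattern seen in
// cmd/root.go, using spf13/cobra's GenBashCompletion.
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "aks-engine"}
	root.AddCommand(&cobra.Command{
		Use:   "completion",
		Short: "Generates bash completion scripts",
		Run: func(cmd *cobra.Command, args []string) {
			// Emits a bash script; users then run: source <(aks-engine completion)
			root.GenBashCompletion(os.Stdout)
		},
	})
	_ = root.Execute()
}
```
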
cmd/scale.go (28 changed lines)

@@ -16,15 +16,15 @@ import (
 "strings"
 "time"

-"github.com/Azure/acs-engine/pkg/acsengine"
-"github.com/Azure/acs-engine/pkg/acsengine/transform"
-"github.com/Azure/acs-engine/pkg/api"
-"github.com/Azure/acs-engine/pkg/armhelpers"
-"github.com/Azure/acs-engine/pkg/armhelpers/utils"
-"github.com/Azure/acs-engine/pkg/helpers"
-"github.com/Azure/acs-engine/pkg/i18n"
-"github.com/Azure/acs-engine/pkg/openshift/filesystem"
-"github.com/Azure/acs-engine/pkg/operations"
+"github.com/Azure/aks-engine/pkg/api"
+"github.com/Azure/aks-engine/pkg/armhelpers"
+"github.com/Azure/aks-engine/pkg/armhelpers/utils"
+"github.com/Azure/aks-engine/pkg/engine"
+"github.com/Azure/aks-engine/pkg/engine/transform"
+"github.com/Azure/aks-engine/pkg/helpers"
+"github.com/Azure/aks-engine/pkg/i18n"
+"github.com/Azure/aks-engine/pkg/openshift/filesystem"
+"github.com/Azure/aks-engine/pkg/operations"
 "github.com/leonelquinteros/gotext"
 "github.com/pkg/errors"
 log "github.com/sirupsen/logrus"

@@ -47,7 +47,7 @@ type scaleCmd struct {
 apiVersion string
 apiModelPath string
 agentPool *api.AgentPoolProfile
-client armhelpers.ACSEngineClient
+client armhelpers.AKSEngineClient
 locale *gotext.Locale
 nameSuffix string
 agentPoolIndex int

@@ -271,7 +271,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {

 switch orchestratorInfo.OrchestratorType {
 case api.Kubernetes:
-kubeConfig, err := acsengine.GenerateKubeConfig(sc.containerService.Properties, sc.location)
+kubeConfig, err := engine.GenerateKubeConfig(sc.containerService.Properties, sc.location)
 if err != nil {
 return errors.Wrap(err, "failed to generate kube config")
 }

@@ -337,12 +337,12 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
 }
 }

-translator := acsengine.Context{
+translator := engine.Context{
 Translator: &i18n.Translator{
 Locale: sc.locale,
 },
 }
-templateGenerator, err := acsengine.InitializeTemplateGenerator(translator)
+templateGenerator, err := engine.InitializeTemplateGenerator(translator)
 if err != nil {
 return errors.Wrap(err, "failed to initialize template generator")
 }

@@ -354,7 +354,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
 log.Fatalf("error in SetPropertiesDefaults template %s: %s", sc.apiModelPath, err.Error())
 os.Exit(1)
 }
-template, parameters, err := templateGenerator.GenerateTemplate(sc.containerService, acsengine.DefaultGeneratorCode, BuildTag)
+template, parameters, err := templateGenerator.GenerateTemplate(sc.containerService, engine.DefaultGeneratorCode, BuildTag)
 if err != nil {
 return errors.Wrapf(err, "error generating template %s", sc.apiModelPath)
 }

@@ -1,6 +1,6 @@
-# English translations for acs-engine package.
+# English translations for aks-engine package.
 # Copyright (C) 2017
-# This file is distributed under the same license as the acs-engine package.
+# This file is distributed under the same license as the aks-engine package.
 # Jiangtian Li <jiangtianli@hotmail.com>, 2017.
 #
 msgid ""

@@ -12,12 +12,12 @@ import (
 "path"
 "time"

-"github.com/Azure/acs-engine/pkg/acsengine"
-"github.com/Azure/acs-engine/pkg/api"
-"github.com/Azure/acs-engine/pkg/armhelpers"
-"github.com/Azure/acs-engine/pkg/helpers"
-"github.com/Azure/acs-engine/pkg/i18n"
-"github.com/Azure/acs-engine/pkg/operations/kubernetesupgrade"
+"github.com/Azure/aks-engine/pkg/api"
+"github.com/Azure/aks-engine/pkg/armhelpers"
+"github.com/Azure/aks-engine/pkg/engine"
+"github.com/Azure/aks-engine/pkg/helpers"
+"github.com/Azure/aks-engine/pkg/i18n"
+"github.com/Azure/aks-engine/pkg/operations/kubernetesupgrade"
 "github.com/leonelquinteros/gotext"
 "github.com/pkg/errors"

@@ -44,7 +44,7 @@ type upgradeCmd struct {
 // derived
 containerService *api.ContainerService
 apiVersion string
-client armhelpers.ACSEngineClient
+client armhelpers.AKSEngineClient
 locale *gotext.Locale
 nameSuffix string
 agentPoolsToUpgrade []string

@@ -178,14 +178,14 @@ func (uc *upgradeCmd) loadCluster(cmd *cobra.Command) error {
 }
 }
 if !found {
-return errors.Errorf("Upgrading to version %s is not supported. To see a list of available upgrades, use 'acs-engine orchestrators --orchestrator kubernetes --version %s'", uc.upgradeVersion, uc.containerService.Properties.OrchestratorProfile.OrchestratorVersion)
+return errors.Errorf("Upgrading to version %s is not supported. To see a list of available upgrades, use 'aks-engine orchestrators --orchestrator kubernetes --version %s'", uc.upgradeVersion, uc.containerService.Properties.OrchestratorProfile.OrchestratorVersion)
 }

 // Read name suffix to identify nodes in the resource group that belong
 // to this cluster.
 // TODO: Also update to read namesuffix from the parameters file as
 // user could have specified a name suffix instead of using the default
-// value generated by ACS Engine
+// value generated by AKS Engine
 templatePath := path.Join(uc.deploymentDirectory, "azuredeploy.json")
 contents, _ := ioutil.ReadFile(templatePath)

@@ -227,7 +227,7 @@ func (uc *upgradeCmd) run(cmd *cobra.Command, args []string) error {
 StepTimeout: uc.timeout,
 }

-kubeConfig, err := acsengine.GenerateKubeConfig(uc.containerService.Properties, uc.location)
+kubeConfig, err := engine.GenerateKubeConfig(uc.containerService.Properties, uc.location)
 if err != nil {
 log.Fatalf("failed to generate kube config: %v", err) // TODO: cleanup
 }

@@ -6,7 +6,7 @@ package cmd
 import (
 "fmt"

-"github.com/Azure/acs-engine/pkg/helpers"
+"github.com/Azure/aks-engine/pkg/helpers"
 log "github.com/sirupsen/logrus"

 "github.com/spf13/cobra"

@@ -29,8 +29,8 @@ var (

 const (
 versionName = "version"
-versionShortDescription = "Print the version of ACS-Engine"
-versionLongDescription = "Print the version of ACS-Engine"
+versionShortDescription = "Print the version of AKS Engine"
+versionLongDescription = "Print the version of AKS Engine"
 )

 type versionInfo struct {

@@ -6,7 +6,7 @@ package cmd
 import (
 "fmt"

-"github.com/Azure/acs-engine/pkg/helpers"
+"github.com/Azure/aks-engine/pkg/helpers"
 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
 )

@@ -20,14 +20,14 @@ var _ = Describe("the version command", func() {
 Expect(output.Flags().Lookup("output")).NotTo(BeNil())
 })

-It("should print a json version of ACS-Engine", func() {
+It("should print a json version of AKS Engine", func() {
 output := getVersion("json")

 expectedOutput, _ := helpers.JSONMarshalIndent(version, "", " ", false)

 Expect(output).Should(Equal(string(expectedOutput)))
 })
-It("should print a humanized version of ACS-Engine", func() {
+It("should print a humanized version of AKS Engine", func() {
 output := getVersion("human")

 expectedOutput := fmt.Sprintf("Version: %s\nGitCommit: %s\nGitTreeState: %s",

@@ -38,7 +38,7 @@ var _ = Describe("the version command", func() {
 Expect(output).Should(Equal(expectedOutput))
 })

-It("should print a json version of ACS-Engine", func() {
+It("should print a json version of AKS Engine", func() {
 output := getVersion("json")

 expectedOutput, _ := helpers.JSONMarshalIndent(version, "", " ", false)

@@ -6,7 +6,7 @@ These cluster definition examples demonstrate how to create a customized Docker E

 ## User Guides

-* [ACS Engine](acsengine.md) - shows you how to build and use the ACS engine to generate custom Docker enabled container clusters
+* [AKS Engine](acsengine.md) - shows you how to build and use the AKS engine to generate custom Docker enabled container clusters
 * [Cluster Definition](clusterdefinition.md) - describes the components of the cluster definition file
 * [DC/OS Walkthrough](dcos.md) - shows how to create a DC/OS enabled Docker cluster on Azure
 * [Kubernetes Walkthrough](kubernetes.md) - shows how to create a Kubernetes enabled Docker cluster on Azure

@@ -1,17 +1,17 @@
-# ACS Engine code delivery guide
+# AKS Engine code delivery guide

-[![ACS Engine](https://azurecomcdn.azureedge.net/mediahandler/acomblog/media/Default/blog/a8f28783-3ddc-4081-a57d-6d97147467bf.png)](https://github.com/azure/acs-engine)
+[![AKS Engine](https://azurecomcdn.azureedge.net/mediahandler/acomblog/media/Default/blog/a8f28783-3ddc-4081-a57d-6d97147467bf.png)](https://github.com/azure/aks-engine)

-ACS Engine is an open source project to generate ARM (Azure Resource Manager) templates for DC/OS, Kubernetes, and Swarm Mode clusters on Microsoft Azure.
-This document provides guidelines for the acs-engine testing and continuous integration process.
+AKS Engine is an open source project to generate ARM (Azure Resource Manager) templates for DC/OS, Kubernetes, and Swarm Mode clusters on Microsoft Azure.
+This document provides guidelines for the aks-engine testing and continuous integration process.

 ## Development pipeline

-ACS Engine employs a CI system that incorporates a Jenkins server, configured to interact with the ACS Engine GitHub project.
-A recommended way to contribute to ACS Engine is to fork the github.com/Azure/acs-engine project
+AKS Engine employs a CI system that incorporates a Jenkins server, configured to interact with the AKS Engine GitHub project.
+A recommended way to contribute to AKS Engine is to fork the github.com/Azure/aks-engine project
 and create a separate branch (a feature branch) for the feature you are working on.

-The following steps constitute the ACS Engine delivery pipeline:
+The following steps constitute the AKS Engine delivery pipeline:

 1. Complete the current iteration of the code change, and check it into the feature branch
 2. Invoke unit test. Return to step (1) if failed.

@@ -20,7 +20,7 @@ The following steps constitute ACS Engine delivery pipeline
 ```
 3. Create a template. Return to step (1) if failed.
 ```sh
-$ acs-engine generate --api-model kubernetes.json
+$ aks-engine generate --api-model kubernetes.json
 ```
 4. Deploy the template in Azure. Return to step (1) if failed.
 ```sh

@@ -31,7 +31,7 @@ The following steps constitute ACS Engine delivery pipeline
 --template-file azuredeploy.json \
 --parameters @azuredeploy.parameters.json
 ```
-5. Create a pull request (PR) from the github.com/Azure/acs-engine portal.
+5. Create a pull request (PR) from the github.com/Azure/aks-engine portal.
 6. The PR triggers a Jenkins job that
 + applies the changes to the HEAD of the master branch
 + generates multiple ARM templates for different deployment scenarios

@@ -39,6 +39,6 @@ The following steps constitute ACS Engine delivery pipeline
 This test might take 20-40 minutes.
 If the test fails, review the logs. If the failure was caused by your code change, return to step (1).
 Sometimes the test might fail because of intermittent Azure issues, such as resource unavailability or provisioning timeout. In this case, manually trigger the Jenkins PR job from your GitHub PR page.
-7. The PR is reviewed by the members of the ACS Engine team. If changes are requested, return to step (1).
+7. The PR is reviewed by the members of the AKS Engine team. If changes are requested, return to step (1).
 8. Once the PR is approved and the Jenkins PR job has passed, the PR can be merged into the master branch.
 9. Once merged, another Jenkins job is triggered, to verify the integrity of the master branch. This job is similar to the PR job.

@ -1,28 +1,28 @@
|
|||
# Microsoft Azure Container Service Engine
|
||||
|
||||
The Azure Container Service Engine (`acs-engine`) generates ARM (Azure Resource Manager) templates for Docker enabled clusters on Microsoft Azure with your choice of DCOS, [Kubernetes](kubernetes/deploy.md), or Swarm orchestrators. The input to acs-engine is a cluster definition file which describes the desired cluster, including orchestrator, features, and agents. The structure of the input files is very similar to the public API for Azure Container Service.
|
||||
The Azure Container Service Engine (`aks-engine`) generates ARM (Azure Resource Manager) templates for Docker enabled clusters on Microsoft Azure with your choice of DCOS, [Kubernetes](kubernetes/deploy.md), or Swarm orchestrators. The input to aks-engine is a cluster definition file which describes the desired cluster, including orchestrator, features, and agents. The structure of the input files is very similar to the public API for Azure Container Service.
|
||||
|
||||
<a href="#install-acs-engine"></a>
|
||||
<a href="#install-aks-engine"></a>
|
||||
|
||||
## Install
|
||||
|
||||
Binary downloads for the latest version of acs-engine for are available [here](https://github.com/Azure/acs-engine/releases/latest). Download `acs-engine` for your operating system. Extract the binary and copy it to your `$PATH`.
|
||||
Binary downloads for the latest version of aks-engine for are available [here](https://github.com/Azure/aks-engine/releases/latest). Download `aks-engine` for your operating system. Extract the binary and copy it to your `$PATH`.
|
||||
|
||||
You can also choose to install acs-engine using [gofish](https://gofi.sh/#about), to do so execute the command `gofish install acs-engine` . You can install gofish following the [instructions](https://gofi.sh/#install) for your OS.
|
||||
You can also choose to install aks-engine using [gofish](https://gofi.sh/#about), to do so execute the command `gofish install aks-engine` . You can install gofish following the [instructions](https://gofi.sh/#install) for your OS.
|
||||
|
||||
If you would prefer to build `acs-engine` from source or you are interested in contributing to `acs-engine` see [building from source](#build-acs-engine-from-source) below.
|
||||
If you would prefer to build `aks-engine` from source or you are interested in contributing to `aks-engine` see [building from source](#build-aks-engine-from-source) below.
|
||||
|
||||
## Completion
|
||||
|
||||
`acs-engine` supports bash completion. To enable this, add the following to your `.bashrc` or `~/.profile`
|
||||
`aks-engine` supports bash completion. To enable this, add the following to your `.bashrc` or `~/.profile`
|
||||
|
||||
```bash
|
||||
source <(acs-engine completion)
|
||||
source <(aks-engine completion)
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
`acs-engine` reads a JSON [cluster definition](./clusterdefinition.md) and generates a number of files that may be submitted to Azure Resource Manager (ARM). The generated files include:
|
||||
`aks-engine` reads a JSON [cluster definition](./clusterdefinition.md) and generates a number of files that may be submitted to Azure Resource Manager (ARM). The generated files include:
|
||||
|
||||
1. **apimodel.json**: is an expanded version of the cluster definition provided to the generate command. All default or computed values will be expanded during the generate phase.
|
||||
2. **azuredeploy.json**: represents a complete description of all Azure resources required to fulfill the cluster definition from `apimodel.json`.
|
||||
|
@ -31,9 +31,9 @@ source <(acs-engine completion)
|
|||
|
||||
### Generate Templates
|
||||
|
||||
ACS Engine consumes a cluster definition which outlines the desired shape, size, and configuration of Kubernetes. There are a number of features that can be enabled through the cluster definition.
|
||||
AKS Engine consumes a cluster definition which outlines the desired shape, size, and configuration of Kubernetes. There are a number of features that can be enabled through the cluster definition.
|
||||
|
||||
See [ACS Engine The Long Way](kubernetes/deploy.md#acs-engine-the-long-way) for an example on generating templates by hand.
|
||||
See [AKS Engine The Long Way](kubernetes/deploy.md#aks-engine-the-long-way) for an example on generating templates by hand.
|
||||
|
||||
<a href="#deployment-usage"></a>
|
||||
|
||||
|
@ -81,36 +81,36 @@ New-AzureRmResourceGroupDeployment `
|
|||
|
||||
<a href="#build-from-source"></a>
|
||||
|
||||
## Build ACS Engine from Source
|
||||
## Build AKS Engine from Source
|
||||
|
||||
### Docker Development Environment
|
||||
|
||||
The easiest way to start hacking on `acs-engine` is to use a Docker-based environment. If you already have Docker installed then you can get started with a few commands.
|
||||
The easiest way to start hacking on `aks-engine` is to use a Docker-based environment. If you already have Docker installed then you can get started with a few commands.
|
||||
|
||||
* Windows (PowerShell): `.\scripts\devenv.ps1`
|
||||
* Linux/OSX (bash): `./scripts/devenv.sh`
|
||||
|
||||
This script mounts the `acs-engine` source directory as a volume into the Docker container, which means you can edit your source code in your favorite editor on your machine, while still being able to compile and test inside of the Docker container. This environment mirrors the environment used in the acs-engine continuous integration (CI) system.
|
||||
This script mounts the `aks-engine` source directory as a volume into the Docker container, which means you can edit your source code in your favorite editor on your machine, while still being able to compile and test inside of the Docker container. This environment mirrors the environment used in the aks-engine continuous integration (CI) system.
|
||||
|
||||
When the script `devenv.ps1` or `devenv.sh` completes, you will be left at a command prompt.
|
||||
|
||||
Run the following commands to pull the latest dependencies and build the `acs-engine` tool.
|
||||
Run the following commands to pull the latest dependencies and build the `aks-engine` tool.
|
||||
|
||||
```sh
|
||||
# install and download build dependencies
|
||||
make bootstrap
|
||||
# build the `acs-engine` binary
|
||||
# build the `aks-engine` binary
|
||||
make build
|
||||
```
|
||||
|
||||
The build process leaves the compiled `acs-engine` binary in the `bin` directory. Make sure everything completed successfully by running `bin/acs-engine` without any arguments:
|
||||
The build process leaves the compiled `aks-engine` binary in the `bin` directory. Make sure everything completed successfully by running `bin/aks-engine` without any arguments:
|
||||
|
||||
```sh
|
||||
$ ./bin/acs-engine
|
||||
ACS-Engine deploys and manages Kubernetes, OpenShift, Swarm Mode, and DC/OS clusters in Azure
|
||||
$ ./bin/aks-engine
|
||||
AKS Engine deploys and manages Kubernetes, OpenShift, Swarm Mode, and DC/OS clusters in Azure
|
||||
|
||||
Usage:
|
||||
acs-engine [command]
|
||||
aks-engine [command]
|
||||
|
||||
Available Commands:
|
||||
deploy Deploy an Azure Resource Manager template
|
||||
|
@ -119,22 +119,22 @@ Available Commands:
|
|||
orchestrators Display info about supported orchestrators
|
||||
scale Scale an existing Kubernetes cluster
|
||||
upgrade Upgrade an existing Kubernetes cluster
|
||||
version Print the version of ACS-Engine
|
||||
version Print the version of AKS Engine
|
||||
|
||||
Flags:
|
||||
--debug enable verbose debug logs
|
||||
-h, --help help for acs-engine
|
||||
-h, --help help for aks-engine
|
||||
|
||||
Use "acs-engine [command] --help" for more information about a command.
|
||||
Use "aks-engine [command] --help" for more information about a command.
|
||||
```
|
||||
|
||||
[Here is a reference to the information on Kubernetes cluster upgrade.](https://github.com/Azure/acs-engine/blob/master/examples/k8s-upgrade/README.md)
|
||||
[Here is a reference to the information on Kubernetes cluster upgrade.](https://github.com/Azure/aks-engine/blob/master/examples/k8s-upgrade/README.md)
|
||||
|
||||
[Here's a quick demo video showing the dev/build/test cycle with this setup.](https://www.youtube.com/watch?v=lc6UZmqxQMs)
|
||||
|
||||
## Building on Windows, OSX, and Linux
|
||||
|
||||
Building ACS Engine from source has a few requirements for each of the platforms. Download and install the pre-reqs for your platform, Windows, Linux, or Mac:
|
||||
Building AKS Engine from source has a few requirements on each platform. Download and install the prerequisites for your platform (Windows, Linux, or Mac):
|
||||
|
||||
### Prerequisites
|
||||
|
||||
|
@ -151,22 +151,22 @@ Setup steps:
|
|||
3. Add `c:\go\bin` and `c:\gopath\bin` to your PATH variables
|
||||
4. Click "new" and add new environment variable named `GOPATH` and set the value to `c:\gopath`
|
||||
|
||||
* Build acs-engine:
|
||||
* Build aks-engine:
|
||||
1. Type Windows key-R to open the run prompt
|
||||
2. Type `cmd` to open a command prompt
|
||||
3. Type `mkdir %GOPATH%` to create your gopath
|
||||
4. Type `cd %GOPATH%`
|
||||
5. Type `go get -d github.com/Azure/acs-engine` to download acs-engine from GitHub
|
||||
5. Type `go get -d github.com/Azure/aks-engine` to download aks-engine from GitHub
|
||||
6. Type `go get all` to get the supporting components
|
||||
7. Type `go get -u github.com/go-bindata/go-bindata/...`
|
||||
8. Type `cd %GOPATH%\src\github.com\Azure\acs-engine\pkg\acsengine`
|
||||
8. Type `cd %GOPATH%\src\github.com\Azure\aks-engine\pkg\acsengine`
|
||||
9. Type `go generate`
|
||||
10. Type `cd %GOPATH%\src\github.com\Azure\acs-engine\pkg\i18n`
|
||||
10. Type `cd %GOPATH%\src\github.com\Azure\aks-engine\pkg\i18n`
|
||||
11. Type `go generate`
|
||||
12. Type `cd %GOPATH%\src\github.com\Azure\acs-engine`
|
||||
12. Type `cd %GOPATH%\src\github.com\Azure\aks-engine`
|
||||
13. Type `go build` to build the project
|
||||
14. Type `go install` to install the project
|
||||
15. Run `acs-engine.exe` to see the command line parameters
|
||||
15. Run `aks-engine.exe` to see the command line parameters
|
||||
|
||||
### OS X and Linux
|
||||
|
||||
|
@ -181,10 +181,10 @@ Setup steps:
|
|||
```
|
||||
4. `source $HOME/.bash_profile`
|
||||
|
||||
Build acs-engine:
|
||||
Build aks-engine:
|
||||
|
||||
1. Type `go get github.com/Azure/acs-engine` to get the acs-engine Github project
|
||||
2. Type `cd $GOPATH/src/github.com/Azure/acs-engine` to change to the source directory
|
||||
1. Type `go get github.com/Azure/aks-engine` to get the aks-engine Github project
|
||||
2. Type `cd $GOPATH/src/github.com/Azure/aks-engine` to change to the source directory
|
||||
3. Type `make bootstrap` to install supporting components
|
||||
4. Type `make build` to build the project
|
||||
5. Type `./bin/acs-engine` to see the command line parameters
|
||||
5. Type `./bin/aks-engine` to see the command line parameters
|
||||
|
|
|
@ -1,34 +1,34 @@
|
|||
# Microsoft Azure Container Service Engine
|
||||
|
||||
The Microsoft Container Service Engine (`acs-engine`) converts a container cluster definition file into a set of ARM (Azure Resource Manager) templates. By deploying these templates on Azure, users can easily set up a Docker-based container service cluster on Azure. Users are free to choose the cluster orchestrator: DC/OS, Kubernetes, or Swarm/Swarm Mode. The cluster definition file uses the same syntax as ARM templates, and both can be used to deploy Azure Container Service.
|
||||
The Microsoft Container Service Engine (`aks-engine`) converts a container cluster definition file into a set of ARM (Azure Resource Manager) templates. By deploying these templates on Azure, users can easily set up a Docker-based container service cluster on Azure. Users are free to choose the cluster orchestrator: DC/OS, Kubernetes, or Swarm/Swarm Mode. The cluster definition file uses the same syntax as ARM templates, and both can be used to deploy Azure Container Service.
|
||||
|
||||
# Docker-based Deployment
|
||||
|
||||
The easiest way to get started with `acs-engine` is to use Docker. If your local machine has Docker installed, or Docker for Windows/Mac, you can use `acs-engine` directly without installing any other software.
|
||||
The easiest way to get started with `aks-engine` is to use Docker. If your local machine has Docker installed, or Docker for Windows/Mac, you can use `aks-engine` directly without installing any other software.
|
||||
|
||||
* Windows (PowerShell): `.\scripts\devenv.ps1`
|
||||
* Linux (bash): `./scripts/devenv.sh`
|
||||
|
||||
The script above mounts the `acs-engine` source directory into the Docker container. You can edit the source code in your favorite editor, and your changes can be compiled and tested directly inside the Docker container (the project's continuous integration system works the same way).
|
||||
The script above mounts the `aks-engine` source directory into the Docker container. You can edit the source code in your favorite editor, and your changes can be compiled and tested directly inside the Docker container (the project's continuous integration system works the same way).
|
||||
|
||||
```
|
||||
make bootstrap
|
||||
```
|
||||
|
||||
When `devenv.{ps1,sh}` finishes, you can check the corresponding logs in the container; finally, run the following to build the `acs-engine` tool:
|
||||
When `devenv.{ps1,sh}` finishes, you can check the corresponding logs in the container; finally, run the following to build the `aks-engine` tool:
|
||||
|
||||
```
|
||||
make build
|
||||
```
|
||||
|
||||
Once the project builds successfully, you can verify that `acs-engine` runs correctly with the following command:
|
||||
Once the project builds successfully, you can verify that `aks-engine` runs correctly with the following command:
|
||||
|
||||
```
|
||||
# ./bin/acs-engine
|
||||
ACS-Engine deploys and manages Kubernetes, OpenShift, Swarm Mode, and DC/OS clusters in Azure
|
||||
# ./bin/aks-engine
|
||||
AKS Engine deploys and manages Kubernetes, OpenShift, Swarm Mode, and DC/OS clusters in Azure
|
||||
|
||||
Usage:
|
||||
acs-engine [command]
|
||||
aks-engine [command]
|
||||
|
||||
Available Commands:
|
||||
deploy Deploy an Azure Resource Manager template
|
||||
|
@ -37,13 +37,13 @@ Available Commands:
|
|||
orchestrators Display info about supported orchestrators
|
||||
scale Scale an existing Kubernetes cluster
|
||||
upgrade Upgrade an existing Kubernetes cluster
|
||||
version Print the version of ACS-Engine
|
||||
version Print the version of AKS Engine
|
||||
|
||||
Flags:
|
||||
--debug enable verbose debug logs
|
||||
-h, --help help for acs-engine
|
||||
-h, --help help for aks-engine
|
||||
|
||||
Use "acs-engine [command] --help" for more information about a command.
|
||||
Use "aks-engine [command] --help" for more information about a command.
|
||||
```
|
||||
|
||||
[See this video for the detailed development, build, and test process](https://www.youtube.com/watch?v=lc6UZmqxQMs)
|
||||
|
@ -57,10 +57,10 @@ ACS Engine is cross-platform and runs on Windows, OS X, and Linux.
|
|||
Install dependencies:
|
||||
- Git for Windows. [Download and install here](https://git-scm.com/download/win)
|
||||
- Go for Windows. [Download and install here](https://golang.org/dl/), using the default installation.
|
||||
- Powershell
|
||||
- Powershell
|
||||
|
||||
Build steps:
|
||||
|
||||
Build steps:
|
||||
|
||||
1. Set up a working directory. This example assumes `c:\gopath` as the working directory:
|
||||
1. Press Windows + R to open the Run dialog
|
||||
2. Run `rundll32 sysdm.cpl,EditEnvironmentVariables` to open the system environment variables dialog
|
||||
|
@ -71,18 +71,18 @@ ACS Engine is cross-platform and runs on Windows, OS X, and Linux.
|
|||
2. Run `cmd` to open a command prompt
|
||||
3. Run `mkdir %GOPATH%`
|
||||
4. cd %GOPATH%
|
||||
5. Run `go get github.com/Azure/acs-engine` to fetch the latest ACS Engine code from GitHub
|
||||
5. Run `go get github.com/Azure/aks-engine` to fetch the latest AKS Engine code from GitHub
|
||||
6. Run `go get all` to install the required dependencies
|
||||
7. `cd %GOPATH%\src\github.com\Azure\acs-engine`
|
||||
7. `cd %GOPATH%\src\github.com\Azure\aks-engine`
|
||||
8. Run `go build` to build the project
|
||||
3. Run `acs-engine`; if you see the command-line usage help, the build succeeded.
|
||||
3. Run `aks-engine`; if you see the command-line usage help, the build succeeded.
|
||||
|
||||
## OS X
|
||||
|
||||
Install dependencies:
|
||||
- Go for OS X. [Download and install here](https://golang.org/dl/)
|
||||
|
||||
Setup steps:
|
||||
Setup steps:
|
||||
|
||||
1. Open a terminal and set the GOPATH environment variable:
|
||||
1. `mkdir $HOME/gopath`
|
||||
|
@ -93,11 +93,11 @@ ACS Engine is cross-platform and runs on Windows, OS X, and Linux.
|
|||
```
|
||||
3. Run `source $HOME/.sh_profile` to apply the configuration.
|
||||
2. Build ACS Engine:
|
||||
1. Run `go get github.com/Azure/acs-engine` to fetch the latest ACS Engine code from GitHub.
|
||||
1. Run `go get github.com/Azure/aks-engine` to fetch the latest AKS Engine code from GitHub.
|
||||
2. Run `go get all` to install the required dependencies
|
||||
3. `cd $GOPATH/src/github.com/Azure/acs-engine`
|
||||
3. `cd $GOPATH/src/github.com/Azure/aks-engine`
|
||||
4. Run `go build` to build the project
|
||||
3. Run `acs-engine`; if you see the command-line usage help, the build succeeded.
|
||||
3. Run `aks-engine`; if you see the command-line usage help, the build succeeded.
|
||||
|
||||
## Linux
|
||||
|
||||
|
@ -107,7 +107,7 @@ ACS Engine is cross-platform and runs on Windows, OS X, and Linux.
|
|||
- Run `sudo tar -C /usr/local -xzf go$VERSION.$OS-$ARCH.tar.gz` to extract the archive and replace the existing files.
|
||||
- `git`
|
||||
|
||||
Build steps:
|
||||
Build steps:
|
||||
|
||||
1. Set GOPATH:
|
||||
1. Run `mkdir $HOME/gopath` to create the gopath directory
|
||||
|
@ -118,11 +118,11 @@ ACS Engine is cross-platform and runs on Windows, OS X, and Linux.
|
|||
```
|
||||
3. Run `source $HOME/.profile` to apply the configuration.
|
||||
2. Build ACS Engine:
|
||||
1. Run `go get github.com/Azure/acs-engine` to fetch the latest ACS Engine code from GitHub.
|
||||
1. Run `go get github.com/Azure/aks-engine` to fetch the latest AKS Engine code from GitHub.
|
||||
2. Run `go get all` to install the required dependencies
|
||||
3. `cd $GOPATH/src/github.com/Azure/acs-engine`
|
||||
3. `cd $GOPATH/src/github.com/Azure/aks-engine`
|
||||
4. Run `go build` to build the project
|
||||
3. Run `acs-engine`; if you see the command-line usage help, the build succeeded.
|
||||
3. Run `aks-engine`; if you see the command-line usage help, the build succeeded.
|
||||
|
||||
|
||||
# Generating Templates
|
||||
|
@ -134,15 +134,15 @@ ACS Engine takes a JSON-format [cluster definition file](clusterdefinition.md) as input
|
|||
3. **azuredeploy.parameters.json** - the deployment parameters file, whose parameters can be customized
|
||||
4. **certificate and access config files** - some orchestrators, such as Kubernetes, need generated certificates; these certificate files and the kube config file that depends on them are stored in the same directory as the ARM templates
|
||||
|
||||
Note that when modifying an existing Docker container cluster, you should edit the `apimodel.json` file so that the new deployment does not affect the resources already in the cluster. For example, if a cluster does not have enough nodes, you can change the node count in `apimodel.json` and then rerun `acs-engine` with `apimodel.json` as input to generate new ARM templates. After deployment, the existing nodes in the cluster are unchanged and the new nodes join automatically.
|
||||
Note that when modifying an existing Docker container cluster, you should edit the `apimodel.json` file so that the new deployment does not affect the resources already in the cluster. For example, if a cluster does not have enough nodes, you can change the node count in `apimodel.json` and then rerun `aks-engine` with `apimodel.json` as input to generate new ARM templates. After deployment, the existing nodes in the cluster are unchanged and the new nodes join automatically.
|
||||
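A minimal sketch of that flow (the path is illustrative):

```sh
# Edit the agent pool "count" in apimodel.json, then regenerate the templates
vi _output/<clustername>/apimodel.json
./bin/aks-engine generate --api-model _output/<clustername>/apimodel.json
```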
|
||||
# Walkthrough
|
||||
|
||||
This walkthrough demonstrates how to use `acs-engine` by deploying a Kubernetes cluster, using [examples/kubernetes.json](../examples/kubernetes.json) as the cluster definition file.
|
||||
This walkthrough demonstrates how to use `aks-engine` by deploying a Kubernetes cluster, using [examples/kubernetes.json](../examples/kubernetes.json) as the cluster definition file.
|
||||
|
||||
1. First, prepare an [SSH public/private key pair](ssh.md#ssh-key-generation).
|
||||
2. Edit [examples/kubernetes.json](../examples/kubernetes.json) and fill in the required parameters.
|
||||
3. Run `./bin/acs-engine generate examples/kubernetes.json` to generate the templates in the _output/Kubernetes-UNIQUEID directory. (UNIQUEID is a hash of the master node's FQDN prefix.)
|
||||
3. Run `./bin/aks-engine generate examples/kubernetes.json` to generate the templates in the _output/Kubernetes-UNIQUEID directory. (UNIQUEID is a hash of the master node's FQDN prefix.)
|
||||
4. Deploy the cluster with `azuredeploy.json` and `azuredeploy.parameters.json` as described in the README: [deployment usage](../acsengine.md#deployment-usage). The sketch below puts these steps together.
|
||||
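A hedged sketch of the full flow (the resource group name and location are placeholders):

```sh
ssh-keygen -t rsa -f ~/.ssh/id_rsa                    # step 1: generate a key pair
# step 2: edit examples/kubernetes.json in your editor
./bin/aks-engine generate examples/kubernetes.json    # step 3: generate the templates
cd _output/Kubernetes-UNIQUEID                        # UNIQUEID is derived from the master FQDN prefix
az group create -n my-k8s-rg -l westus2               # step 4: deploy the templates
az group deployment create -g my-k8s-rg \
  --template-file azuredeploy.json --parameters "@azuredeploy.parameters.json"
```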
|
||||
# Deployment Methods
|
||||
|
|
|
@ -30,7 +30,7 @@ Here are the valid values for the orchestrator types:
|
|||
|
||||
To learn more about supported orchestrators and versions, run the orchestrators command:
|
||||
|
||||
```/bin/acs-engine orchestrators```
|
||||
```/bin/aks-engine orchestrators```
|
||||
|
||||
|
||||
### kubernetesConfig
|
||||
|
@ -45,7 +45,7 @@ To learn more about supported orchestrators and versions, run the orchestrators
|
|||
| clusterSubnet | no | The IP subnet used for allocating IP addresses for pod network interfaces. The subnet must be in the VNET address space. With Azure CNI enabled, the default value is 10.240.0.0/12. Without Azure CNI, the default value is 10.244.0.0/16. |
|
||||
| containerRuntime | no | The container runtime to use as a backend. The default is `docker`. The other options are `clear-containers`, `kata-containers`, and `containerd` |
|
||||
| controllerManagerConfig | no | Configure various runtime configuration for controller-manager. See `controllerManagerConfig` [below](#feat-controller-manager-config) |
|
||||
| customWindowsPackageURL | no | Configure custom windows Kubernetes release package URL for deployment on Windows that is generated by scripts/build-windows-k8s.sh. The format of this file is a zip file with multiple items (binaries, cni, infra container) in it. This setting will be depreciated in future release of acs-engine where the binaries will be pulled in the format of Kubernetes releases that only contain the kubernetes binaries. |
|
||||
| customWindowsPackageURL | no | Configure a custom Windows Kubernetes release package URL for deployment on Windows, generated by scripts/build-windows-k8s.sh. The format of this file is a zip file with multiple items (binaries, cni, infra container) in it. This setting will be deprecated in a future release of aks-engine, when the binaries will be pulled in the format of Kubernetes releases that contain only the kubernetes binaries. |
|
||||
| WindowsNodeBinariesURL | no | Windows Kubernetes Node binaries can be provided in the format of Kubernetes release (example: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#node-binaries-1). This setting allows overriding the binaries for custom builds. |
|
||||
| dnsServiceIP | no | IP address for kube-dns to listen on. If specified must be in the range of `serviceCidr` |
|
||||
| dockerBridgeSubnet | no | The specific IP and subnet used for allocating IP addresses for the docker bridge network created on the kubernetes master and agents. Default value is 172.17.0.1/16. This value is used to configure the docker daemon using the [--bip flag](https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0) |
|
||||
|
@ -182,7 +182,7 @@ Above you see custom configuration for both tiller and kubernetes-dashboard. Bot
|
|||
|
||||
See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ for more on Kubernetes resource limits.
|
||||
|
||||
Additionally above, we specified a custom docker image for tiller, let's say we want to build a cluster and test an alpha version of tiller in it. **Important note!** customizing the image is not sticky across upgrade/scale, to ensure that acs-engine always delivers a version-curated, known-working addon when moving a cluster to a new version. Considering all that, providing a custom image reference for an addon configuration should be considered for testing/development, but not for a production cluster. If you'd like to entirely customize one of the addons available, including across scale/upgrade operations, you may include in an addon's spec a gzip+base64-encoded (in that order) string of a Kubernetes yaml manifest. E.g.,
|
||||
Additionally above, we specified a custom docker image for tiller, let's say we want to build a cluster and test an alpha version of tiller in it. **Important note!** customizing the image is not sticky across upgrade/scale, to ensure that aks-engine always delivers a version-curated, known-working addon when moving a cluster to a new version. Considering all that, providing a custom image reference for an addon configuration should be considered for testing/development, but not for a production cluster. If you'd like to entirely customize one of the addons available, including across scale/upgrade operations, you may include in an addon's spec a gzip+base64-encoded (in that order) string of a Kubernetes yaml manifest. E.g.,
|
||||
|
||||
```
|
||||
"kubernetesConfig": {
|
||||
|
@ -198,14 +198,14 @@ Additionally above, we specified a custom docker image for tiller, let's say we
|
|||
|
||||
The reason for the unsightly gzip+base64 encoded input type is to optimize delivery payload, and to squash a human-maintainable yaml file representation into something that can be tightly pasted into a JSON string value without the arguably more unsightly carriage returns / whitespace that would be delivered with a literal copy/paste of a Kubernetes manifest.
|
||||
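As a sketch, one way to produce such a string (assuming GNU coreutils; `my-addon.yaml` is a hypothetical manifest file):

```sh
# gzip first, then base64-encode; -w 0 suppresses line wrapping so the
# output can be pasted directly into a JSON string value
gzip -c my-addon.yaml | base64 -w 0
```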
|
||||
Finally, the `addons.enabled` boolean property was omitted above; that's by design. If you specify a `containers` configuration, acs-engine assumes you're enabling the addon. The very first example above demonstrates a simple "enable this addon with default configuration" declaration.
|
||||
Finally, the `addons.enabled` boolean property was omitted above; that's by design. If you specify a `containers` configuration, aks-engine assumes you're enabling the addon. The very first example above demonstrates a simple "enable this addon with default configuration" declaration.
|
||||
|
||||
#### External Custom YAML scripts
|
||||
|
||||
External YAML scripts can be configured for these supported addons and for the manifest files for kube-scheduler, kube-controller-manager, cloud-controller-manager, kube-apiserver and PodSecurityPolicy. For addons, you will need to pass a _base64_-encoded string of the kubernetes addon YAML file that you wish to use to the `addons.Data` property. When `addons.Data` is provided with a value, the `containers` and `config` properties are required to be empty.
|
||||
|
||||
CAVEAT: Please note that this is an experimental feature. Since Addons.Data allows you to provide your own scripts, you face the risk of any unintended/undesirable consequences of the errors and failures from running that script.
|
||||
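Note that unlike the addon-spec customization described earlier (gzip, then base64), `addons.Data` expects a plain base64 string. A sketch, again assuming GNU coreutils and a hypothetical manifest file:

```sh
base64 -w 0 my-addon.yaml
```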
|
||||
|
||||
```
|
||||
"kubernetesConfig": {
|
||||
"addons": [
|
||||
|
@ -251,7 +251,7 @@ and to pass a custom pod security policy config, do the following:
|
|||
|
||||
See [here](https://kubernetes.io/docs/reference/generated/kubelet/) for a reference of supported kubelet options.
|
||||
|
||||
Below is a list of kubelet options that acs-engine will configure by default:
|
||||
Below is a list of kubelet options that aks-engine will configure by default:
|
||||
|
||||
| kubelet option | default value |
|
||||
| ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
|
@ -305,7 +305,7 @@ Below is a list of kubelet options that are _not_ currently user-configurable, e
|
|||
|
||||
See [here](https://kubernetes.io/docs/reference/generated/kube-controller-manager/) for a reference of supported controller-manager options.
|
||||
|
||||
Below is a list of controller-manager options that acs-engine will configure by default:
|
||||
Below is a list of controller-manager options that aks-engine will configure by default:
|
||||
|
||||
| controller-manager option | default value |
|
||||
| ------------------------------- | ------------------------------------------ |
|
||||
|
@ -350,7 +350,7 @@ Below is a list of controller-manager options that are _not_ currently user-conf
|
|||
|
||||
See [here](https://kubernetes.io/docs/reference/generated/cloud-controller-manager/) for a reference of supported controller-manager options.
|
||||
|
||||
Below is a list of cloud-controller-manager options that acs-engine will configure by default:
|
||||
Below is a list of cloud-controller-manager options that aks-engine will configure by default:
|
||||
|
||||
| controller-manager option | default value |
|
||||
| ------------------------------- | ------------- |
|
||||
|
@ -399,7 +399,7 @@ Or perhaps you want to customize/override the set of admission-control flags pas
|
|||
|
||||
See [here](https://kubernetes.io/docs/reference/generated/kube-apiserver/) for a reference of supported apiserver options.
|
||||
|
||||
Below is a list of apiserver options that acs-engine will configure by default:
|
||||
Below is a list of apiserver options that aks-engine will configure by default:
|
||||
|
||||
| apiserver option | default value |
|
||||
| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
|
@ -472,7 +472,7 @@ Below is a list of apiserver options that are _not_ currently user-configurable,
|
|||
|
||||
See [here](https://kubernetes.io/docs/reference/generated/kube-scheduler/) for a reference of supported kube-scheduler options.
|
||||
|
||||
Below is a list of scheduler options that acs-engine will configure by default:
|
||||
Below is a list of scheduler options that aks-engine will configure by default:
|
||||
|
||||
| kube-scheduler option | default value |
|
||||
| --------------------- | ------------------------------------------ |
|
||||
|
@ -502,7 +502,7 @@ We consider `kubeletConfig`, `controllerManagerConfig`, `apiServerConfig`, and `
|
|||
|
||||
#### jumpboxProfile
|
||||
|
||||
`jumpboxProfile` describes the settings for a jumpbox deployed via acs-engine to access a private cluster. It is a child property of `privateCluster`.
|
||||
`jumpboxProfile` describes the settings for a jumpbox deployed via aks-engine to access a private cluster. It is a child property of `privateCluster`.
|
||||
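For orientation, a hedged sketch of what such a configuration might look like, using fields documented in the table below (all values are placeholders):

```
"kubernetesConfig": {
  "privateCluster": {
    "enabled": true,
    "jumpboxProfile": {
      "name": "my-jumpbox",
      "vmSize": "Standard_D2s_v3",
      "osDiskSizeGB": 30,
      "username": "azureuser",
      "publicKey": "<your ssh public key>"
    }
  }
}
```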
|
||||
| Name | Required | Description |
|
||||
| -------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# Porting a new DC/OS version to ACS-Engine
|
||||
# Porting a new DC/OS version to AKS Engine
|
||||
|
||||
## 1. Locate the official ARM Template
|
||||
|
||||
|
@ -31,7 +31,7 @@ You should now have a clean yaml.
|
|||
Under the `parts` directory, create a new file called `dcoscustomdataXXX.t`, replacing `XXX` with the correct version number.
|
||||
Paste the yaml from the previous step inside.
|
||||
|
||||
In the new file, under the `runcmd` section you should find 4 sucessive `curl` calls downloading some `.deb` packages followed by a bash script installing each one of them. This is handled by `parts\dcos\dcosprovision.sh` in ACS-Engine, so make sure the dependencies didn't change and replace the `curl` and `bash` calls by a link to the script.
|
||||
In the new file, under the `runcmd` section you should find 4 successive `curl` calls downloading some `.deb` packages, followed by a bash script installing each one of them. This is handled by `parts\dcos\dcosprovision.sh` in AKS Engine, so make sure the dependencies didn't change, and replace the `curl` and `bash` calls with a link to the script.
|
||||
|
||||
For example, in DC/OS 1.9:
|
||||
```yaml
|
||||
|
@ -53,7 +53,7 @@ becomes
|
|||
|
||||
Additional modifications under `runcmd`:
|
||||
* Replace every occurrence of the Package GUID (that we found in step 2) with `DCOSGUID`.
|
||||
* the `content` of the cmd with path `/etc/mesosphere/setup-flags/late-config.yaml` should be modified to accept ACS-Engine bindings instead of variable where needed (look at a previous custom data file for reference).
|
||||
* the `content` of the cmd with path `/etc/mesosphere/setup-flags/late-config.yaml` should be modified to accept AKS Engine bindings instead of variables where needed (look at a previous custom data file for reference).
|
||||
* At the very end of the file, replace
|
||||
```yaml
|
||||
- content: ''
|
||||
|
@ -77,7 +77,7 @@ by
|
|||
|
||||
## 5. Adding support for the new version to the .go files
|
||||
|
||||
### pkg/acsengine/defaults.go
|
||||
### pkg/engine/defaults.go
|
||||
|
||||
- Around line 30, add your `DCOSXXXBootstrapDownloadURL` variable (replace XXX with the version number); inside the `fmt.Sprintf()` call, replace the second and third parameters with the release channel (`EA`, `Stable`, `Beta`, ...) and the commit hash.
|
||||
|
||||
|
@ -90,7 +90,7 @@ Example for version 1.10
|
|||
DCOS110BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, "stable", "e38ab2aa282077c8eb7bf103c6fff7b0f08db1a4"),
|
||||
```
|
||||
|
||||
### pkg/acsengine/engine.go
|
||||
### pkg/engine/engine.go
|
||||
|
||||
- Around line 39, add `dcosCustomDataXXX = "dcos/dcoscustomdataXXX.t"` variable
|
||||
|
||||
|
@ -132,7 +132,7 @@ Example for version 1.10:
|
|||
yamlFilename = dcosCustomData110
|
||||
```
|
||||
|
||||
### pkg/acsengine/types.go
|
||||
### pkg/engine/types.go
|
||||
|
||||
- Around line 40, add the type for your new version.
|
||||
|
||||
|
@ -203,8 +203,8 @@ case common.DCOSRelease1Dot10:
|
|||
|
||||
We encourage you to look at previous PRs as examples, listed below:
|
||||
|
||||
- [Adding DC/OS 1.10 stable version support #1439](https://github.com/Azure/acs-engine/pull/1439/files)
|
||||
- [setting dcos test to 1.9 (current default)](https://github.com/Azure/acs-engine/pull/1443)
|
||||
- [[DC/OS] Set 1.9 as default DCOS version and upgrade Packages](https://github.com/Azure/acs-engine/pull/457)
|
||||
- [[DC/OS] Add support for DCOS 1.9 EA](https://github.com/Azure/acs-engine/pull/360)
|
||||
- [DCOS 1.8.8 Support](https://github.com/Azure/acs-engine/pull/278)
|
||||
- [Adding DC/OS 1.10 stable version support #1439](https://github.com/Azure/aks-engine/pull/1439/files)
|
||||
- [setting dcos test to 1.9 (current default)](https://github.com/Azure/aks-engine/pull/1443)
|
||||
- [[DC/OS] Set 1.9 as default DCOS version and upgrade Packages](https://github.com/Azure/aks-engine/pull/457)
|
||||
- [[DC/OS] Add support for DCOS 1.9 EA](https://github.com/Azure/aks-engine/pull/360)
|
||||
- [DCOS 1.8.8 Support](https://github.com/Azure/aks-engine/pull/278)
|
||||
|
|
|
@ -1,16 +1,16 @@
|
|||
# Using a custom virtual network with Azure Container Service
|
||||
In this tutorial you are going to learn how to use [ACS Engine](https://github.com/Azure/acs-engine) to deploy a brand new cluster into an existing or pre-created virtual network.
|
||||
In this tutorial you are going to learn how to use [AKS Engine](https://github.com/Azure/aks-engine) to deploy a brand new cluster into an existing or pre-created virtual network.
|
||||
By doing this, you will be able to control the properties of the virtual network or integrate a new cluster into your existing infrastructure.
|
||||
|
||||
*Note: This article describes the procedure with Docker Swarm but it will work in the exact same way with the all the orchestrators available with ACS Engine: Docker Swarm, Kubernetes and DC/OS.*
|
||||
*Note: This article describes the procedure with Docker Swarm, but it works in exactly the same way with all the orchestrators available with AKS Engine: Docker Swarm, Kubernetes and DC/OS.*
|
||||
|
||||
*For Kubernetes, the cluster should be deployed in the same resource group as the virtual network and the service principal you use for the cluster needs permissions on the VNET resource's group too. Custom VNET for Kubernetes Windows cluster has a [known issue](https://github.com/Azure/acs-engine/issues/1767).*
|
||||
*For Kubernetes, the cluster should be deployed in the same resource group as the virtual network and the service principal you use for the cluster needs permissions on the VNET resource's group too. Custom VNET for Kubernetes Windows cluster has a [known issue](https://github.com/Azure/aks-engine/issues/1767).*
|
||||
|
||||
## Prerequisites
|
||||
You can run this walkthrough on OS X, Windows, or Linux.
|
||||
- You need an Azure subscription. If you don't have one, you can [sign up for an account](https://azure.microsoft.com/).
|
||||
- Install the [Azure CLI 2.0](/cli/azure/install-az-cli2).
|
||||
- Install the [ACS Engine](https://github.com/Azure/acs-engine/blob/master/docs/acsengine.md)
|
||||
- Install the [AKS Engine](https://github.com/Azure/aks-engine/blob/master/docs/acsengine.md)
|
||||
|
||||
## Create the virtual network
|
||||
*You need a virtual network before creating the new cluster. If you already have one, you can skip this step.*
|
||||
|
@ -78,12 +78,12 @@ az group deployment create -g acs-custom-vnet --name "CustomVNet" --template-fil
|
|||
Once the deployment is completed you should see the virtual network in the resource group.
|
||||
|
||||
|
||||
## Create the template for ACS Engine
|
||||
ACS Engine uses a JSON template in input and generates the ARM template and ARM parameters files in output.
|
||||
## Create the template for AKS Engine
|
||||
AKS Engine takes a JSON template as input and generates the ARM template and ARM parameters files as output.
|
||||
|
||||
Depending on the orchestrator you want to deploy, the number of agent pools, the machine sizes you want, and so on, this input template could differ from the one we are going to detail here.
|
||||
|
||||
There are a lot of examples available on the [ACS Engine GitHub](https://github.com/Azure/acs-engine/tree/master/examples) and you can find [one dedicated for virtual network](https://github.com/Azure/acs-engine/blob/master/examples/vnet/README.md).
|
||||
There are a lot of examples available on the [AKS Engine GitHub](https://github.com/Azure/aks-engine/tree/master/examples) and you can find [one dedicated for virtual network](https://github.com/Azure/aks-engine/blob/master/examples/vnet/README.md).
|
||||
|
||||
In this case, we are going to use the following template:
|
||||
|
||||
|
@ -140,10 +140,10 @@ As you can see, for all node pools definition (master or agents) you can use the
|
|||
*Note: Make sure the vnetSubnetId matches your subnet by filling in your **SUBSCRIPTION_ID**, **RESOURCE_GROUP_NAME**, and virtual network and subnet names. You also need to fill in the DNS prefix for all the public pools you want to create, provide an SSH key...*
|
||||
|
||||
## Generate the cluster Azure Resource Manager template
|
||||
Once your are ready with the cluster definition file, you can use ACS Engine to generate the ARM template that will be used to deploy the cluster on Azure:
|
||||
Once you are ready with the cluster definition file, you can use AKS Engine to generate the ARM template that will be used to deploy the cluster on Azure:
|
||||
|
||||
```bash
|
||||
acs-engine azuredeploy.swarm.clusterdefinition.json
|
||||
aks-engine azuredeploy.swarm.clusterdefinition.json
|
||||
```
|
||||
|
||||
This command will output three files:
|
||||
|
@ -155,12 +155,12 @@ wrote _output/Swarm-12652785/azuredeploy.parameters.json
|
|||
acsengine took 37.1384ms
|
||||
```
|
||||
|
||||
- apimodel.json: this is the cluster definition file you gave to ACS Engine
|
||||
- apimodel.json: this is the cluster definition file you gave to AKS Engine
|
||||
- azuredeploy.json: this is the Azure Resource Manager JSON template that you are going to use to deploy the cluster
|
||||
- azuredeploy.parameters.json: this is the parameters file that you are going to use to deploy the cluster
|
||||
|
||||
## Deploy the Azure Container Service cluster
|
||||
Now that you have generated the ARM templates and its parameters file using ACS Engine, you can use Azure CLI 2.0 to start the deployment of the cluster:
|
||||
Now that you have generated the ARM template and its parameters file using AKS Engine, you can use Azure CLI 2.0 to start the deployment of the cluster:
|
||||
|
||||
```bash
|
||||
az group deployment create -g acs-custom-vnet --name "ClusterDeployment" --template-file azuredeploy.json --parameters "@azuredeploy.parameters.json"
|
||||
|
|
|
@ -7,7 +7,7 @@ Support for DC/OS `1.11` and later continues in the forked project [dcos-engine]
|
|||
|
||||
Here are the steps to deploy a simple DC/OS cluster:
|
||||
|
||||
1. [install acs-engine](acsengine.md#downloading-and-building-acs-engine)
|
||||
1. [install aks-engine](acsengine.md#downloading-and-building-aks-engine)
|
||||
2. [generate your ssh key](ssh.md#ssh-key-generation)
|
||||
3. edit the [DC/OS example](../examples/dcos.json) and fill in the blank strings
|
||||
4. [generate the template](acsengine.md#generate-templates) (see the sketch below)
|
||||
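A sketch of steps 2 through 4 from the command line (paths are illustrative):

```sh
ssh-keygen -t rsa -f ~/.ssh/id_rsa            # step 2: generate an ssh key pair
# step 3: edit examples/dcos.json and fill in the blank strings
./bin/aks-engine generate examples/dcos.json  # step 4: generate the template
```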
|
@ -120,16 +120,16 @@ This walk through is inspired by the wonderful digital ocean tutorial: https://w
|
|||
|
||||
# DCOS upgrade
|
||||
|
||||
Starting from DC/OS 1.11, acs-engine deploys a bootstrap node as part of DC/OS cluster. This enables upgrade operation on an existing cluster.
|
||||
Starting from DC/OS 1.11, aks-engine deploys a bootstrap node as part of DC/OS cluster. This enables upgrade operation on an existing cluster.
|
||||
|
||||
To start the upgrade, run the following command:
|
||||
```
|
||||
acs-engine dcos-upgrade \
|
||||
aks-engine dcos-upgrade \
|
||||
--subscription-id <Azure subscription ID> \
|
||||
--resource-group <the resource group the cluster was deployed in> \
|
||||
--location <the region the cluster was deployed in> \
|
||||
--upgrade-version <desired DC/OS version> \
|
||||
--deployment-dir <deployment directory produced by "acs-engine generate"> \
|
||||
--deployment-dir <deployment directory produced by "aks-engine generate"> \
|
||||
--ssh-private-key-path <path to ssh private key used in deployment>
|
||||
```
|
||||
The upgrade is an idempotent operation. If it fails, it can be re-run and will resume execution from the last successful checkpoint.
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
# Acs-engine
|
||||
|
||||
The Azure Container Service Engine (acs-engine) is a command line tool that generates ARM (Azure Resource Manager) templates in order for one to deploy container-based clusters (like Kubernetes , DCOS, Openshift, Docker swarm) on the Azure platform.
|
||||
The Azure Container Service Engine (aks-engine) is a command-line tool that generates ARM (Azure Resource Manager) templates for deploying container-based clusters (such as Kubernetes, DC/OS, OpenShift, or Docker Swarm) on the Azure platform.
|
||||
|
||||
This design document provides a brief and high-level overview of what acs-engine does internally to achieve deployment of containerized clusters. The scope of this document will be limited to the execution of acs-engine when creating Kubernetes clusters.
|
||||
This design document provides a brief and high-level overview of what aks-engine does internally to achieve deployment of containerized clusters. The scope of this document will be limited to the execution of aks-engine when creating Kubernetes clusters.
|
||||
|
||||
## Architecture Diagram
|
||||
|
||||
![Alt text](../images/proposed-template-refactor.jpg "acs-engine architecture diagram")
|
||||
![Alt text](../images/proposed-template-refactor.jpg "aks-engine architecture diagram")
|
||||
|
||||
## Components
|
||||
|
||||
|
@ -231,7 +231,7 @@ Acs-engine interfaces with Azure Resource Manager (ARM) through the Azure Go SDK
|
|||
|
||||
### Kubernetes Client API
|
||||
|
||||
Acs-engine also performs kubernetes cluster management operations (kubectl) through the imported Kubernetes API libraries. The Client API calls are made during the scale and upgrade commands of acs-engine.
|
||||
Aks-engine also performs Kubernetes cluster management operations (kubectl) through the imported Kubernetes API libraries. The Client API calls are made during the scale and upgrade commands of aks-engine.
|
||||
|
||||
|
||||
Design challenges and proposals
|
||||
|
@ -244,7 +244,7 @@ We find that the current implementation of templating leads to challenges in ter
|
|||
|
||||
- There is no direct and intuitive mapping between the input apimodels and the ARM templates. The placeholder substitutions are performed at very specific areas in the template skeletons. It's hard to draw any generality from it and this makes it difficult to create the template JSONs purely through code as opposed to performing the placeholder substitutions.
|
||||
|
||||
- This also limits the capabilities of acs-engine as far as extensibility is concerned. If we were to introduce more changes and customizations, it would potentially entail modifying the template skeleton layouts. This would just add more complexity.
|
||||
- This also limits the capabilities of aks-engine as far as extensibility is concerned. If we were to introduce more changes and customizations, it would potentially entail modifying the template skeleton layouts. This would just add more complexity.
|
||||
|
||||
#### Possible Solutions
|
||||
|
||||
|
@ -258,7 +258,7 @@ _**Pros**_
|
|||
|
||||
- This will allow us to accommodate future ARM template customization more effectively, because we can express and maintain the variety of inter-dependent outputs natively, as first class data representations.
|
||||
|
||||
- Template validation can be done within the acs-engine layer itself. Currently, template validation can only be performed via the Azure GO SDK and this entails a network call.
|
||||
- Template validation can be done within the aks-engine layer itself. Currently, template validation can only be performed via the Azure Go SDK, and this entails a network call.
|
||||
|
||||
_**Cons/Challenges**_
|
||||
|
||||
|
@ -268,7 +268,7 @@ _**Cons/Challenges**_
|
|||
|
||||
**YAML-based templates**
|
||||
|
||||
We could also do away with our current JSON-based template skeletons and use YAML templating instead.
|
||||
We could also do away with our current JSON-based template skeletons and use YAML templating instead.
|
||||
|
||||
_**Pros**_
|
||||
|
||||
|
@ -281,4 +281,4 @@ _**Pros**_
|
|||
|
||||
_**Cons/Challenges**_
|
||||
|
||||
- While this method is easier to implement, the benefits of extensibility and maintainability are significantly lesser than that provided by a strong schema type.
|
||||
- While this method is easier to implement, the benefits for extensibility and maintainability are significantly smaller than those provided by a strong schema type.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# Developers Guide
|
||||
|
||||
This guide explains how to set up your environment for developing on
|
||||
acs-engine.
|
||||
aks-engine.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
|
@ -21,11 +21,11 @@ elegant and high-quality open source code so that our users will benefit.
|
|||
|
||||
Make sure you have read and understood the main CONTRIBUTING guide:
|
||||
|
||||
https://github.com/Azure/acs-engine/blob/master/CONTRIBUTING.md
|
||||
https://github.com/Azure/aks-engine/blob/master/CONTRIBUTING.md
|
||||
|
||||
### Structure of the Code
|
||||
|
||||
The code for the acs-engine project is organized as follows:
|
||||
The code for the aks-engine project is organized as follows:
|
||||
|
||||
- The individual programs are located in `cmd/`. Code inside of `cmd/`
|
||||
is not designed for library re-use.
|
||||
|
@ -46,9 +46,9 @@ home of the current development candidate. Releases are tagged.
|
|||
We accept changes to the code via GitHub Pull Requests (PRs). One
|
||||
workflow for doing this is as follows:
|
||||
|
||||
1. Use `go get` to clone the acs-engine repository: `go get github.com/Azure/acs-engine`
|
||||
1. Use `go get` to clone the aks-engine repository: `go get github.com/Azure/aks-engine`
|
||||
2. Fork that repository into your GitHub account
|
||||
3. Add your repository as a remote for `$GOPATH/github.com/Azure/acs-engine`
|
||||
3. Add your repository as a remote for `$GOPATH/github.com/Azure/aks-engine`
|
||||
4. Create a new working branch (`git checkout -b feat/my-feature`) and
|
||||
do your work on that branch.
|
||||
5. When you are ready for us to review, push your branch to GitHub, and
|
||||
|
@ -56,9 +56,9 @@ workflow for doing this is as follows:
|
|||
|
||||
### Third Party Dependencies
|
||||
|
||||
Third party dependencies reside locally inside the repository under the `vendor/` directory. We use [dep](https://github.com/golang/dep) to enforce our dependency graph, declared in [Gopkg.toml](https://github.com/Azure/acs-engine/blob/master/CONTRIBUTING.md) in the project root.
|
||||
Third party dependencies reside locally inside the repository under the `vendor/` directory. We use [dep](https://github.com/golang/dep) to enforce our dependency graph, declared in [Gopkg.toml](https://github.com/Azure/aks-engine/blob/master/CONTRIBUTING.md) in the project root.
|
||||
|
||||
If you wish to introduce a new third party dependency into `acs-engine`, please file an [issue](https://github.com/Azure/acs-engine/issues), and include the canonical VCS path (e.g., `github.com/Azure/azure-sdk-for-go`) along with either the desired release string expression to depend on (e.g., `~8.1.0`), or the commit hash to pin to a static commit (e.g., `4cdb38c072b86bf795d2c81de50784d9fdd6eb77`). A project maintainer will then own the effort to update the codebase with that dependency, including relevant updates to `Gopkg.toml` and `vendor/`.
|
||||
If you wish to introduce a new third party dependency into `aks-engine`, please file an [issue](https://github.com/Azure/aks-engine/issues), and include the canonical VCS path (e.g., `github.com/Azure/azure-sdk-for-go`) along with either the desired release string expression to depend on (e.g., `~8.1.0`), or the commit hash to pin to a static commit (e.g., `4cdb38c072b86bf795d2c81de50784d9fdd6eb77`). A project maintainer will then own the effort to update the codebase with that dependency, including relevant updates to `Gopkg.toml` and `vendor/`.
|
||||
|
||||
As a rule we want to distinguish dependency update PRs from feature/bug PRs; we may ask that feature/bug PRs which include updates to `vendor/` and/or contain any other dependency-related overhead be triaged into separate PRs that can be managed independently, pre-requisite dependency changes in one, and features/bugs in another. The objective of enforcing these distinctions is to help focus the PR review process, and to make manageable the difficult task of rationalizing a multitude of parallel PRs in flight, many of which may carry hard-to-reconcile dependency side-effects when aggressively updated with a fresh dependency graph as part of the PR payload.
|
||||
|
||||
|
@ -119,24 +119,24 @@ following environment variables:
|
|||
|
||||
Finally, you'll need to make sure that the apimodel.json corresponding to the
|
||||
pre-deployed cluster is available at `_output/$NAME.json`. If you previously
|
||||
used `acs-engine deploy` directly to deploy the cluster, you will need to run
|
||||
used `aks-engine deploy` directly to deploy the cluster, you will need to run
|
||||
`cp _output/$NAME/apimodel.json _output/$NAME.json`.
|
||||
|
||||
### Debugging
|
||||
|
||||
For acs-engine code debugging you can use [Delve](https://github.com/derekparker/delve) debugger.
|
||||
For aks-engine code debugging you can use [Delve](https://github.com/derekparker/delve) debugger.
|
||||
|
||||
#### CLI
|
||||
|
||||
Run command:
|
||||
```
|
||||
dlv debug github.com/Azure/acs-engine -- generate ~/Documents/azure/openshift.json
|
||||
dlv debug github.com/Azure/aks-engine -- generate ~/Documents/azure/openshift.json
|
||||
```
|
||||
|
||||
Test individual package and individual test:
|
||||
```
|
||||
dlv test github.com/Azure/acs-engine/pkg/acsengine
|
||||
dlv test github.com/Azure/acs-engine/pkg/acsengine -- -test.run ^TestNetworkPolicyDefaults$
|
||||
dlv test github.com/Azure/aks-engine/pkg/engine
|
||||
dlv test github.com/Azure/aks-engine/pkg/engine -- -test.run ^TestNetworkPolicyDefaults$
|
||||
```
|
||||
|
||||
#### Visual Studio Code
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
# Microsoft Azure Container Service Engine - Extensions
|
||||
|
||||
Extensions in acs-engine provide an easy way for acs-engine users to add pre-packaged functionality into their cluster. For example, an extension could configure a monitoring solution on an ACS cluster. The user would not need to know the details of how to install the monitoring solution. Rather, the user would simply add the extension into the extensionProfiles section of the template.
|
||||
Extensions in aks-engine provide an easy way for aks-engine users to add pre-packaged functionality into their cluster. For example, an extension could configure a monitoring solution on an AKS cluster. The user would not need to know the details of how to install the monitoring solution. Rather, the user would simply add the extension into the extensionProfiles section of the template.
|
||||
|
||||
# extensionProfiles
|
||||
|
||||
The extensionProfiles section contains the extensions that the cluster will install. The following illustrates a template with a hello-world-dcos extension.
|
||||
|
||||
``` javascript
|
||||
{
|
||||
{
|
||||
...
|
||||
"extensionProfiles": [
|
||||
{
|
||||
|
@ -35,7 +35,7 @@ You normally would not provide a rootURL. The extensions are normally loaded fr
|
|||
- Create a blob container called 'extensions'
|
||||
- Under 'extensions', create a folder called 'extension-one'
|
||||
- Under 'extension-one', create a folder called 'v1'
|
||||
- Under 'v1', upload your files (see Required Extension Files)
|
||||
- Under 'v1', upload your files (see Required Extension Files)
|
||||
- Set the rootURL to: 'https://mystorageaccount.blob.core.windows.net/'
|
||||
|
||||
# masterProfile
|
||||
|
@ -50,7 +50,7 @@ Extensions, in the current implementation run a script on a master node. The ext
|
|||
"osType": "Linux",
|
||||
"firstConsecutiveStaticIP": "10.240.255.5",
|
||||
"extensions": [
|
||||
{
|
||||
{
|
||||
"name": "hello-world-k8s",
|
||||
"singleOrAll": "single"
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ Or they can be referenced as a preprovision extension, this will run during clou
|
|||
"name": "hello-world",
|
||||
"singleOrAll": "All"
|
||||
}
|
||||
|
||||
|
||||
},
|
||||
"extensionProfiles": [
|
||||
{
|
||||
|
@ -92,7 +92,7 @@ Or they can be referenced as a preprovision extension, this will run during clou
|
|||
```
|
||||
|Name|Required|Description|
|
||||
|---|---|---|
|
||||
|name|yes|The name of the extension. This must match the name in the extensionProfiles|
|
||||
|name|yes|The name of the extension. This must match the name in the extensionProfiles|
|
||||
|
||||
# Required Extension Files
|
||||
|
||||
|
@ -120,7 +120,7 @@ The supported-orchestrators.json file is a simple one line file that contains th
|
|||
|
||||
The template.json file is a linked template that will be called by the main cluster deployment template and must adhere to all the rules of a normal ARM template. All the necessary parameters needed from the azuredeploy.json file must be passed into this template and defined appropriately.
|
||||
|
||||
Additional variables can be defined for use in creating additional resources. Additional resources can also be created. The key resource for installing the extension is the custom script extension.
|
||||
Additional variables can be defined for use in creating additional resources. Additional resources can also be created. The key resource for installing the extension is the custom script extension.
|
||||
|
||||
Modify the commandToExecute entry with the necessary command and parameters to install the desired extension, replacing EXTENSION-NAME with the name of the extension. The resource name of the custom script extension must match the name of the other custom script extension on the box, since two are not allowed; this is also why we use a linked deployment, so we can have the same resource twice and make this one depend on the other so that it always runs after the provision extension is done.
|
||||
|
||||
|
@ -130,7 +130,7 @@ The following is an example of the template.json file.
|
|||
{
|
||||
"$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
"parameters": {
|
||||
"apiVersionStorage": {
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
|
@ -167,21 +167,21 @@ The following is an example of the template.json file.
|
|||
}
|
||||
}
|
||||
},
|
||||
"variables": {
|
||||
"variables": {
|
||||
"singleQuote": "'",
|
||||
"sampleStorageAccountName": "[concat(uniqueString(concat(parameters('storageAccountBaseName'), 'sample')), 'aa')]"
|
||||
"initScriptUrl": "https://raw.githubusercontent.com/Azure/acs-engine/master/extensions/EXTENSION-NAME/v1/EXTENSION-NAME.sh"
|
||||
"initScriptUrl": "https://raw.githubusercontent.com/Azure/aks-engine/master/extensions/EXTENSION-NAME/v1/EXTENSION-NAME.sh"
|
||||
},
|
||||
"resources": [
|
||||
"resources": [
|
||||
{
|
||||
"apiVersion": "[parameters('apiVersionStorage')]",
|
||||
"dependsOn": [],
|
||||
"location": "[resourceGroup().location]",
|
||||
"name": "[variables('sampleStorageAccountName')]",
|
||||
"apiVersion": "[parameters('apiVersionStorage')]",
|
||||
"dependsOn": [],
|
||||
"location": "[resourceGroup().location]",
|
||||
"name": "[variables('sampleStorageAccountName')]",
|
||||
"properties": {
|
||||
"accountType": "Standard_LRS"
|
||||
},
|
||||
"type": "Microsoft.Storage/storageAccounts"
|
||||
},
|
||||
"type": "Microsoft.Storage/storageAccounts"
|
||||
}, {
|
||||
"apiVersion": "[parameters('apiVersionCompute')]",
|
||||
"dependsOn": [],
|
||||
|
@ -194,8 +194,8 @@ The following is an example of the template.json file.
|
|||
"typeHandlerVersion": "1.5",
|
||||
"autoUpgradeMinorVersion": true,
|
||||
"settings": {
|
||||
"fileUris": [
|
||||
"[variables('initScriptUrl')]"
|
||||
"fileUris": [
|
||||
"[variables('initScriptUrl')]"
|
||||
]
|
||||
},
|
||||
"protectedSettings": {
|
||||
|
@ -207,10 +207,10 @@ The following is an example of the template.json file.
|
|||
"outputs": { }
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
# Creating extension template-link.json
|
||||
|
||||
When acs-engine generates the azuredeploy.json file, this JSON snippet will be injected. This code calls the linked template (template.json) defined above.
|
||||
When aks-engine generates the azuredeploy.json file, this JSON snippet will be injected. This code calls the linked template (template.json) defined above.
|
||||
|
||||
Any parameters from the main azuredeploy.json file that are needed by template.json must be passed in via the parameters section. The parameter "extensionParameters" is an optional parameter that is passed in directly by the user in the **extensionProfiles** section as defined in an earlier section. This special parameter can be used to pass in information such as an activation key or access code (as an example). If the extension does not need this capability, this optional parameter can be deleted.
|
||||
|
||||
|
@ -229,7 +229,7 @@ Replace "**EXTENSION-NAME**" with the name of the extension.
|
|||
"properties": {
|
||||
"mode": "Incremental",
|
||||
"templateLink": {
|
||||
"uri": "https://raw.githubusercontent.com/Azure/acs-engine/master/extensions/EXTENSION-NAME/v1/template.json",
|
||||
"uri": "https://raw.githubusercontent.com/Azure/aks-engine/master/extensions/EXTENSION-NAME/v1/template.json",
|
||||
"contentVersion": "1.0.0.0"
|
||||
},
|
||||
"parameters": {
|
||||
|
@ -296,4 +296,4 @@ echo $(date) " - Script complete"
|
|||
- [hello-world-k8s](../extensions/hello-world-k8s/README.md)
|
||||
|
||||
# Known issues
|
||||
Kubernetes extensions that run after provisioning don't currently work if the VM needs to reboot for security reboots. this is a timing issue. the extension script is started before the vm reboots and it will be cutoff before it finishes but will still report success. I've tried to get the provision script to only finish as reboot happens and I haven't gotten that to work. An extension could work most of the time if it cancelled the restart at the start and checked if a restart was needed and scheduled one at the end of its work
|
||||
Kubernetes extensions that run after provisioning don't currently work if the VM needs to reboot for security updates. This is a timing issue: the extension script is started before the VM reboots and will be cut off before it finishes, but it will still report success. I've tried to get the provision script to finish only as the reboot happens, and I haven't gotten that to work. An extension could work most of the time if it cancelled the restart at the start, checked whether a restart was needed, and scheduled one at the end of its work.
|
||||
|
|
|
@ -1,18 +1,18 @@
|
|||
The steps listed on this page describe a way to modify a running Kubernetes cluster deployed with `acs-engine` on Azure. These steps are only tested with changes targeting actually Azure resources. Changes made to Kubernetes configuration are not tested yet.
|
||||
The steps listed on this page describe a way to modify a running Kubernetes cluster deployed with `aks-engine` on Azure. These steps have only been tested with changes targeting actual Azure resources. Changes made to the Kubernetes configuration are not tested yet.
|
||||
|
||||
## `generate` and `deploy`
|
||||
|
||||
These are the common steps (unless described otherwise) you'll have to run after modifying an existing `apimodel.json` file.
|
||||
|
||||
* Modify the apimodel.json file located in the `_output/<clustername>` folder
|
||||
* Run `acs-engine generate --api-model _output/<clustername>/apimodel.json`. This wil update the `azuredeploy*` files needed for the new ARM deployment. These files are also located in the `_output` folder.
|
||||
* Run `aks-engine generate --api-model _output/<clustername>/apimodel.json`. This will update the `azuredeploy*` files needed for the new ARM deployment. These files are also located in the `_output` folder.
|
||||
* Apply the changes by manually starting an ARM deployment. From within the `_output/<clustername>` run
|
||||
|
||||
az group deployment create --template-file azuredeploy.json --parameters azuredeploy.parameters.json --resource-group "<my-resource-group>"
|
||||
|
||||
To use the `az` CLI tools you have to login. More info can be found here: https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest
|
||||
|
||||
_Note: I use `az group deployment` instead of `acs-engine deploy` because the latter seems to assume you are deploying a new cluster and as a result overwriting you private ssh keys located in the _ouput folder_
|
||||
_Note: I use `az group deployment` instead of `aks-engine deploy` because the latter seems to assume you are deploying a new cluster, and as a result overwrites your private ssh keys located in the _output folder_
|
||||
|
||||
* Grab a coffee
|
||||
* Profit!
|
||||
|
@ -34,9 +34,9 @@ Add (or copy) an entry in the `agentPoolProfiles` array.
|
|||
|
||||
### Resizing a node pool
|
||||
|
||||
Use the `acs-engine scale` command
|
||||
Use the `aks-engine scale` command
|
||||
|
||||
acs-engine scale --location westeurope --subscription-id "xxx" --resource-group "<my-resource-group" \
|
||||
aks-engine scale --location westeurope --subscription-id "xxx" --resource-group "<my-resource-group>" \
|
||||
--deployment-dir ./_output/<clustername> --node-pool <nodepool name> --new-node-count <desired number of nodes> --master-FQDN <fqdn of the master lb>
|
||||
|
||||
**Remember to also update your original api-model.json file (used for the 1st deployment), or else you will end up with the original number of VMs after using the `generate` command described above**
|
||||
|
@ -47,4 +47,3 @@ Use the `acs-engine scale` command
|
|||
* `generate` and `deploy` (see above)
|
||||
|
||||
**Important: The default ARM deployment won't drain your Kubernetes nodes properly before 'rebooting' them. Please [drain](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) them manually before deploying the change**
|
||||
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
# Large Kubernetes Clusters with acs-engine
|
||||
# Large Kubernetes Clusters with aks-engine
|
||||
|
||||
## Background
|
||||
Starting from acs-engine v0.3.0, acs-engine supports using exponential cloud backoff that is a feature of Kubernetes v1.6.6 and newer. Cloud backoff allows Kubernetes nodes to backoff on HTTP 429 errors that are usually caused by exceeding Azure API limits.
|
||||
Starting from v0.3.0, aks-engine supports exponential cloud backoff, a feature of Kubernetes v1.6.6 and newer. Cloud backoff allows Kubernetes nodes to back off on HTTP 429 errors, which are usually caused by exceeding Azure API limits.
|
||||
|
||||
## To Use
|
||||
Declare your kubernetes cluster API model config as you normally would, with the following requirements:
|
||||
- You must be using at minimum the `v1.6.6` version of Kubernetes to have access to the `kubernetesConfig` configuration vectors exemplified in [examples/largeclusters/kubernetes.json](https://github.com/Azure/acs-engine/blob/master/examples/largeclusters/kubernetes.json). As long as you are using a version of acs-engine `v0.3.0` or newer, your kubernetes cluster specification will fulfill this minimum version requirement.
|
||||
- You must be using at minimum the `v1.6.6` version of Kubernetes to have access to the `kubernetesConfig` configuration vectors exemplified in [examples/largeclusters/kubernetes.json](https://github.com/Azure/aks-engine/blob/master/examples/largeclusters/kubernetes.json). As long as you are using a version of aks-engine `v0.3.0` or newer, your kubernetes cluster specification will fulfill this minimum version requirement.
|
||||
- We recommend the use of smaller pools (e.g., count of 20) over larger pools (e.g., count of 100); produce your desired total node count with lots of pools, as opposed to as few as possible.
|
||||
- We also recommend using large vmSize configurations to reduce node counts, where appropriate. Make sure you have a defensible infrastructure justification for more nodes in terms of node count (for example as of kubernetes 1.7 there is a 100 pods per node limit), instead of opting to use more powerful nodes. Doing so reduces cluster complexity, and azure resource administrative overhead. As Kubernetes excels in binpacking pods onto available instances, vertically scaling VM sizes (more CPU/RAM) is a better approach for expanding cluster capacity, if you are not approaching the pod-per-node limit.
|
||||
|
||||
|
@@ -46,4 +46,4 @@ The following configuration parameters are available in the `properties.orchestr
|
|||
"--route-reconciliation-period": "1m" // how often to reconcile cloudprovider-originating node routes
|
||||
}
|
||||
```
|
||||
The [examples/largeclusters/kubernetes.json](https://github.com/Azure/acs-engine/blob/master/examples/largeclusters/kubernetes.json) api model example suggests how you might opt into these large cluster features following the guidelines above.
|
||||
The [examples/largeclusters/kubernetes.json](https://github.com/Azure/aks-engine/blob/master/examples/largeclusters/kubernetes.json) api model example suggests how you might opt into these large cluster features following the guidelines above.
|
||||
|
|
|
@@ -5,8 +5,8 @@
|
|||
* [Windows](kubernetes/windows.md) - Create your first Windows Kubernetes cluster
|
||||
* [Kubernetes Next Steps](kubernetes/walkthrough.md) - You have successfully deployed a Kubernetes cluster, now what?
|
||||
* [Troubleshooting](kubernetes/troubleshooting.md) - Running into issues? Start here to troubleshoot Kubernetes.
|
||||
* [Features](kubernetes/features.md) - Guide to alpha, beta, and stable functionality in acs-engine.
|
||||
* [For Kubernetes Developers](kubernetes/k8s-developers.md) - Info for devs working on Kubernetes upstream and wanting to test using acs-engine.
|
||||
* [Features](kubernetes/features.md) - Guide to alpha, beta, and stable functionality in aks-engine.
|
||||
* [For Kubernetes Developers](kubernetes/k8s-developers.md) - Info for devs working on Kubernetes upstream and wanting to test using aks-engine.
|
||||
|
||||
## Known Issues
|
||||
|
||||
|
@@ -15,7 +15,7 @@
|
|||
Nodes might appear in the "NotReady" state for approx. 15 minutes if the master stops receiving updates from agents.
|
||||
This is a known upstream Kubernetes [issue #41916](https://github.com/kubernetes/kubernetes/issues/41916#issuecomment-312428731). A PR that fixes it is currently under review.
|
||||
|
||||
ACS-Engine partially mitigates this issue on Linux by detecting dead TCP connections more quickly via **net.ipv4.tcp_retries2=8**.
|
||||
AKS Engine partially mitigates this issue on Linux by detecting dead TCP connections more quickly via **net.ipv4.tcp_retries2=8**.
|
||||
|
||||
## Additional Kubernetes Resources
|
||||
|
||||
|
|
|
@@ -1,6 +1,6 @@
|
|||
# Microsoft Azure Container Service Engine - Kubernetes AAD integration Walkthrough
|
||||
|
||||
This is walkthrough is to help you get start with Azure Active Directory(AAD) integeration with an ACS-Engine Kubernetes cluster.
|
||||
This walkthrough will help you get started with Azure Active Directory (AAD) integration with an AKS Engine Kubernetes cluster.
|
||||
|
||||
[OpenID Connect](http://openid.net/connect/) is a simple identity layer built on top of the OAuth 2.0 protocol, and it is supported by both AAD and Kubernetes. Here we're going to use OpenID Connect as the communication protocol.
|
||||
|
||||
|
@@ -45,7 +45,7 @@ To sign in, use a web browser to open the page https://aka.ms/devicelogin and en
|
|||
You can now authenticate to the Kubernetes cluster, but you need to set up authorization as well.
|
||||
|
||||
#### Authorization
|
||||
With ACS-Engine, the cluster is locked down by default.
|
||||
With AKS Engine, the cluster is locked down by default.
|
||||
|
||||
This means that when you try to use your AAD account you will see something
|
||||
like:
|
||||
|
|
|
@@ -2,24 +2,24 @@
|
|||
|
||||
## Install Prerequisites
|
||||
|
||||
All the commands in this guide require both the Azure CLI and `acs-engine`. Follow the [installation instructions to download acs-engine before continuing](../acsengine.md#install-acs-engine) or [compile from source](../acsengine.md#build-from-source).
|
||||
All the commands in this guide require both the Azure CLI and `aks-engine`. Follow the [installation instructions to download aks-engine before continuing](../acsengine.md#install-aks-engine) or [compile from source](../acsengine.md#build-from-source).
|
||||
|
||||
For installation instructions see [the Azure CLI GitHub repository](https://github.com/Azure/azure-cli#installation) for the latest release.
|
||||
|
||||
## Overview
|
||||
|
||||
`acs-engine` reads a cluster definition which describes the size, shape, and configuration of your cluster. This guide takes the default configuration of one master and two Linux agents. If you would like to change the configuration, edit `examples/kubernetes.json` before continuing.
|
||||
`aks-engine` reads a cluster definition which describes the size, shape, and configuration of your cluster. This guide takes the default configuration of one master and two Linux agents. If you would like to change the configuration, edit `examples/kubernetes.json` before continuing.
|
||||
|
||||
The `acs-engine deploy` command automates creation of a Service Principal, Resource Group and SSH key for your cluster. If operators need more control or are interested in the individual steps see the ["Long Way" section below](#acs-engine-the-long-way).
|
||||
The `aks-engine deploy` command automates creation of a Service Principal, Resource Group and SSH key for your cluster. If operators need more control or are interested in the individual steps see the ["Long Way" section below](#aks-engine-the-long-way).
|
||||
|
||||
**NOTE:** ACS Engine creates a _cluster_; it _doesn't_ create an Azure Container Service resource. So clusters that you create using the `acs-engine` command (or ARM templates generated by the `acs-engine` command) won't show up as ACS resources, for example when you run `az acs list`. Think of `acs-engine` as the, er, engine which ACS uses to create clusters: you can use the same engine yourself, but ACS won't know about the results.
|
||||
**NOTE:** AKS Engine creates a _cluster_; it _doesn't_ create an Azure Container Service resource. So clusters that you create using the `aks-engine` command (or ARM templates generated by the `aks-engine` command) won't show up as AKS resources, for example when you run `az acs list`. Think of `aks-engine` as the, er, engine which AKS uses to create clusters: you can use the same engine yourself, but AKS won't know about the results.
|
||||
|
||||
After the cluster is deployed the upgrade and [scale](scale.md) commands can be used to make updates to your cluster.
|
||||
|
||||
## Gather Information
|
||||
|
||||
* The subscription in which you would like to provision the cluster. This is a uuid which can be found with `az account list -o table`.
|
||||
* Proper access rights within the subscription. Especially the right to create and assign service principals to applications ( see ACS Engine the Long Way, Step #2)
|
||||
* Proper access rights within the subscription, especially the right to create and assign service principals to applications (see AKS Engine the Long Way, Step #2)
|
||||
* A `dnsPrefix` which forms part of the hostname for your cluster (e.g. staging, prodwest, blueberry). The DNS prefix must be unique, so pick a random name.
|
||||
* A location to provision the cluster e.g. `westus2`.
|
||||
|
||||
|
@@ -34,10 +34,10 @@ Contoso Subscription AzureCloud 51ac25de-afdg-9201
|
|||
|
||||
For this example, the subscription id is `51ac25de-afdg-9201-d923-8d8e8e8e8e8e`, the DNS prefix is `contoso-apple`, and location is `westus2`.
|
||||
|
||||
Run `acs-engine deploy` with the appropriate arguments:
|
||||
Run `aks-engine deploy` with the appropriate arguments:
|
||||
|
||||
```sh
|
||||
$ acs-engine deploy --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
|
||||
$ aks-engine deploy --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
|
||||
--dns-prefix contoso-apple --location westus2 \
|
||||
--api-model examples/kubernetes.json
|
||||
|
||||
|
@@ -50,12 +50,12 @@ INFO[0034] Starting ARM Deployment (contoso-apple-1423145182). This will take so
|
|||
INFO[0393] Finished ARM Deployment (contoso-apple-1423145182).
|
||||
```
|
||||
|
||||
`acs-engine` will output Azure Resource Manager (ARM) templates, SSH keys, and a kubeconfig file in `_output/contoso-apple-59769a59` directory:
|
||||
`aks-engine` will output Azure Resource Manager (ARM) templates, SSH keys, and a kubeconfig file in the `_output/contoso-apple-59769a59` directory:
|
||||
|
||||
* `_output/contoso-apple-59769a59/azureuser_rsa`
|
||||
* `_output/contoso-apple-59769a59/kubeconfig/kubeconfig.westus2.json`
|
||||
|
||||
acs-engine generates kubeconfig files for each possible region. Access the new cluster by using the kubeconfig generated for the cluster's location. This example used `westus2`, so the kubeconfig is `_output/<clustername>/kubeconfig/kubeconfig.westus2.json`:
|
||||
aks-engine generates kubeconfig files for each possible region. Access the new cluster by using the kubeconfig generated for the cluster's location. This example used `westus2`, so the kubeconfig is `_output/<clustername>/kubeconfig/kubeconfig.westus2.json`:
|
||||
|
||||
```sh
|
||||
$ KUBECONFIG=_output/contoso-apple-59769a59/kubeconfig/kubeconfig.westus2.json kubectl cluster-info
|
||||
|
@@ -67,14 +67,14 @@ kubernetes-dashboard is running at https://contoso-apple-59769a59.westus2.clouda
|
|||
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
|
||||
```
|
||||
|
||||
Administrative note: By default, the directory where acs-engine stores cluster configuration (`_output/contoso-apple` above) won't be overwritten as a result of subsequent attempts to deploy a cluster using the same `--dns-prefix`) To re-use the same resource group name repeatedly, include the `--force-overwrite` command line option with your `acs-engine deploy` command. On a related note, include an `--auto-suffix` option to append a randomly generated suffix to the dns-prefix to form the resource group name, for example if your workflow requires a common prefix across multiple cluster deployments. Using the `--auto-suffix` pattern appends a compressed timestamp to ensure a unique cluster name (and thus ensure that each deployment's configuration artifacts will be stored locally under a discrete `_output/<resource-group-name>/` directory).
|
||||
Administrative note: By default, the directory where aks-engine stores cluster configuration (`_output/contoso-apple` above) won't be overwritten as a result of subsequent attempts to deploy a cluster using the same `--dns-prefix`. To re-use the same resource group name repeatedly, include the `--force-overwrite` command line option with your `aks-engine deploy` command. On a related note, include an `--auto-suffix` option to append a randomly generated suffix to the dns-prefix to form the resource group name, for example if your workflow requires a common prefix across multiple cluster deployments. Using the `--auto-suffix` pattern appends a compressed timestamp to ensure a unique cluster name (and thus ensures that each deployment's configuration artifacts will be stored locally under a discrete `_output/<resource-group-name>/` directory).
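A quick sketch of both flags in practice (arguments reuse the example values above):

```sh
# Reuse the same resource group name, overwriting the local _output artifacts
aks-engine deploy --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
  --dns-prefix contoso-apple --location westus2 \
  --api-model examples/kubernetes.json --force-overwrite

# Or append a generated suffix so each deployment gets a unique name and _output directory
aks-engine deploy --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
  --dns-prefix contoso-apple --location westus2 \
  --api-model examples/kubernetes.json --auto-suffix
```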
|
||||
|
||||
**Note**: If the cluster is using an existing VNET please see the [Custom VNET](features.md#feat-custom-vnet) feature documentation for additional steps that must be completed after cluster provisioning.
|
||||
|
||||
The deploy command lets you override any values under the properties tag (even in arrays) from the cluster definition file without having to update the file. You can use the `--set` flag to do that. For example:
|
||||
|
||||
```bash
|
||||
acs-engine deploy --resource-group "your-resource-group" \
|
||||
aks-engine deploy --resource-group "your-resource-group" \
|
||||
--location "westeurope" \
|
||||
--subscription-id "your-subscription-id" \
|
||||
--api-model "./apimodel.json" \
|
||||
|
@@ -88,7 +88,7 @@ acs-engine deploy --resource-group "your-resource-group" \
|
|||
|
||||
<a href="#the-long-way"></a>
|
||||
|
||||
## ACS Engine the Long Way
|
||||
## AKS Engine the Long Way
|
||||
|
||||
### Step 1: Generate an SSH Key
|
||||
|
||||
|
@@ -98,11 +98,11 @@ If you don't have an SSH key [cluster operators may generate a new one](../ssh.m
|
|||
|
||||
### Step 2: Create a Service Principal
|
||||
|
||||
Kubernetes clusters have integrated support for various cloud providers as core functionality. On Azure, acs-engine uses a Service Principal to interact with Azure Resource Manager (ARM). Follow the [instructions](../serviceprincipal.md) to create a new service principal and grant it the necessary IAM role to create Azure resources.
|
||||
Kubernetes clusters have integrated support for various cloud providers as core functionality. On Azure, aks-engine uses a Service Principal to interact with Azure Resource Manager (ARM). Follow the [instructions](../serviceprincipal.md) to create a new service principal and grant it the necessary IAM role to create Azure resources.
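As a hedged sketch of that flow with the Azure CLI (the Contributor role at subscription scope is one common choice; the linked instructions are authoritative):

```sh
# Create a service principal that can manage resources in the subscription
az ad sp create-for-rbac --role="Contributor" \
  --scopes="/subscriptions/51ac25de-afdg-9201-d923-8d8e8e8e8e8e"
```

Record the `appId` and `password` from the output; they become the service principal client ID and secret in the cluster definition.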
|
||||
|
||||
### Step 3: Edit your Cluster Definition
|
||||
|
||||
ACS Engine consumes a cluster definition which outlines the desired shape, size, and configuration of Kubernetes. There are a number of features that can be enabled through the cluster definition: check the `examples` directory for a number of... examples.
|
||||
AKS Engine consumes a cluster definition which outlines the desired shape, size, and configuration of Kubernetes. There are a number of features that can be enabled through the cluster definition: check the `examples` directory for a number of... examples.
|
||||
|
||||
Edit the [simple Kubernetes cluster definition](/examples/kubernetes.json) and fill out the required values:
|
||||
|
||||
|
@@ -119,18 +119,18 @@ Note: you can then use the `--set` option of the generate command to override va
|
|||
|
||||
The generate command takes a cluster definition and outputs a number of templates which describe your Kubernetes cluster. By default, `generate` will create a new directory named after your cluster nested in the `_output` directory. If my dnsPrefix was `larry` my cluster templates would be found in `_output/larry-`.
|
||||
|
||||
Run `acs-engine generate examples/kubernetes.json`
|
||||
Run `aks-engine generate examples/kubernetes.json`
|
||||
|
||||
The generate command lets you override values from the cluster definition file without having to update the file. You can use the `--set` flag to do that:
|
||||
|
||||
```sh
|
||||
acs-engine generate --set linuxProfile.adminUsername=myNewUsername,masterProfile.count=3 clusterdefinition.json
|
||||
aks-engine generate --set linuxProfile.adminUsername=myNewUsername,masterProfile.count=3 clusterdefinition.json
|
||||
```
|
||||
|
||||
The `--set` flag only supports JSON properties under `properties`. You can also work with arrays, like the following:
|
||||
|
||||
```sh
|
||||
acs-engine generate --set agentPoolProfiles[0].count=5,agentPoolProfiles[1].name=myPoolName clusterdefinition.json
|
||||
aks-engine generate --set agentPoolProfiles[0].count=5,agentPoolProfiles[1].name=myPoolName clusterdefinition.json
|
||||
```
|
||||
|
||||
### Step 5: Submit your Templates to Azure Resource Manager (ARM)
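As a minimal sketch, submitting the generated templates with the Azure CLI looks roughly like this (file names follow the conventional `generate` output layout described above; verify against your CLI version):

```sh
az group create --name contoso-apple --location westus2
az group deployment create \
  --name contoso-apple-deploy \
  --resource-group contoso-apple \
  --template-file _output/contoso-apple/azuredeploy.json \
  --parameters _output/contoso-apple/azuredeploy.parameters.json
```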
|
||||
|
@@ -160,7 +160,7 @@ k8s-master-22116803-0 XXXXXXXXXXXX southeastasia
|
|||
az vm show -g <resource group of cluster> -n <name of Master or agent VM> --query tags
|
||||
```
|
||||
|
||||
Sample JSON out of this command is shown below. This command can also be used to check the acs-engine version which was used to create the cluster
|
||||
Sample JSON output from this command is shown below. This command can also be used to check the aks-engine version that was used to create the cluster.
|
||||
|
||||
```json
|
||||
{
|
||||
|
|
|
@@ -15,7 +15,7 @@
|
|||
|
||||
## Managed Identity
|
||||
|
||||
Enabling Managed Identity configures acs-engine to include and use MSI identities for all interactions with the Azure Resource Manager (ARM) API.
|
||||
Enabling Managed Identity configures aks-engine to include and use MSI identities for all interactions with the Azure Resource Manager (ARM) API.
|
||||
|
||||
Instead of using a static service principal written to `/etc/kubernetes/azure.json`, Kubernetes will use a dynamic, time-limited token fetched from the MSI extension running on master and agent nodes. This support is currently alpha and requires Kubernetes v1.9.1 or newer.
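Opting in is a single switch in the cluster definition; a minimal sketch (field name per the cluster definition docs):

```json
"kubernetesConfig": {
  "useManagedIdentity": true
}
```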
|
||||
|
||||
|
@@ -39,7 +39,7 @@ By default, the cluster will be provisioned with [Role-Based Access Control](htt
|
|||
}
|
||||
```
|
||||
|
||||
See [cluster definition](https://github.com/Azure/acs-engine/blob/master/docs/clusterdefinition.md#kubernetesconfig) for further detail.
|
||||
See [cluster definition](https://github.com/Azure/aks-engine/blob/master/docs/clusterdefinition.md#kubernetesconfig) for further detail.
|
||||
|
||||
## Managed Disks
|
||||
|
||||
|
@@ -49,7 +49,7 @@ Related [upstream PR](https://github.com/kubernetes/kubernetes/pull/46360) for d
|
|||
|
||||
### Using Kubernetes Persistent Volumes
|
||||
|
||||
By default, each ACS-Engine cluster is bootstrapped with several StorageClass resources. This bootstrapping is handled by the addon-manager pod that creates resources defined under /etc/kubernetes/addons directory on master VMs.
|
||||
By default, each AKS Engine cluster is bootstrapped with several StorageClass resources. This bootstrapping is handled by the addon-manager pod that creates resources defined under /etc/kubernetes/addons directory on master VMs.
|
||||
|
||||
#### Non-managed Disks
|
||||
|
||||
|
@@ -105,7 +105,7 @@ spec:
|
|||
|
||||
## Using Azure integrated networking (CNI)
|
||||
|
||||
Kubernetes clusters are configured by default to use the [Azure CNI plugin](https://github.com/Azure/azure-container-networking) which provides an Azure native networking experience. Pods will receive IP addresses directly from the vnet subnet on which they're hosted. If the api model doesn't specify explicitly, acs-engine will automatically provide the following `networkPlugin` configuration in `kubernetesConfig`:
|
||||
Kubernetes clusters are configured by default to use the [Azure CNI plugin](https://github.com/Azure/azure-container-networking) which provides an Azure native networking experience. Pods will receive IP addresses directly from the vnet subnet on which they're hosted. If the api model doesn't specify explicitly, aks-engine will automatically provide the following `networkPlugin` configuration in `kubernetesConfig`:
|
||||
|
||||
```
|
||||
"kubernetesConfig": {
|
||||
|
@@ -148,7 +148,7 @@ When using Azure integrated networking the maxPods setting will be set to 30 by
|
|||
|
||||
Using the default configuration, Kubernetes allows communication between all
|
||||
Pods within a cluster. To ensure that Pods can only be accessed by authorized
|
||||
Pods, a policy enforcement is needed. To enable policy enforcement using Calico refer to the [cluster definition](https://github.com/Azure/acs-engine/blob/master/docs/clusterdefinition.md#kubernetesconfig) document under networkPolicy. There is also a reference cluster definition available [here](https://github.com/Azure/acs-engine/blob/master/examples/networkpolicy/kubernetes-calico.json).
|
||||
Pods, policy enforcement is needed. To enable policy enforcement using Calico, refer to the [cluster definition](https://github.com/Azure/aks-engine/blob/master/docs/clusterdefinition.md#kubernetesconfig) document under networkPolicy. There is also a reference cluster definition available [here](https://github.com/Azure/aks-engine/blob/master/examples/networkpolicy/kubernetes-calico.json).
|
||||
|
||||
This will deploy a Calico node controller to every instance of the cluster
|
||||
using a Kubernetes DaemonSet. After a successful deployment you should be able
|
||||
|
@@ -166,7 +166,7 @@ Per default Calico still allows all communication within the cluster. Using Kube
|
|||
|
||||
* [NetworkPolicy User Guide](https://kubernetes.io/docs/user-guide/networkpolicies/)
|
||||
* [NetworkPolicy Example Walkthrough](https://kubernetes.io/docs/getting-started-guides/network-policy/walkthrough/)
|
||||
* [Calico Kubernetes](https://github.com/Azure/acs-engine/blob/master/examples/networkpolicy)
|
||||
* [Calico Kubernetes](https://github.com/Azure/aks-engine/blob/master/examples/networkpolicy)
|
||||
|
||||
<a name="feat-cilium"></a>
|
||||
|
||||
|
@@ -175,9 +175,9 @@ Per default Calico still allows all communication within the cluster. Using Kube
|
|||
Using the default configuration, Kubernetes allows communication between all
|
||||
Pods within a cluster. To ensure that Pods can only be accessed by authorized
|
||||
Pods, policy enforcement is needed. To enable policy enforcement using Cilium, refer to the
|
||||
[cluster definition](https://github.com/Azure/acs-engine/blob/master/docs/clusterdefinition.md#kubernetesconfig)
|
||||
[cluster definition](https://github.com/Azure/aks-engine/blob/master/docs/clusterdefinition.md#kubernetesconfig)
|
||||
document under networkPolicy. There is also a reference cluster definition available
|
||||
[here](https://github.com/Azure/acs-engine/blob/master/examples/networkpolicy/kubernetes-cilium.json).
|
||||
[here](https://github.com/Azure/aks-engine/blob/master/examples/networkpolicy/kubernetes-cilium.json).
|
||||
|
||||
This will deploy a Cilium agent to every instance of the cluster
|
||||
using a Kubernetes DaemonSet. After a successful deployment you should be able
|
||||
|
@@ -197,22 +197,22 @@ you can define stricter policies. Good resources to get information about that a
|
|||
* [Cilium Network Policy Docs](https://cilium.readthedocs.io/en/latest/kubernetes/policy/#k8s-policy)
|
||||
* [NetworkPolicy User Guide](https://kubernetes.io/docs/user-guide/networkpolicies/)
|
||||
* [NetworkPolicy Example Walkthrough](https://kubernetes.io/docs/getting-started-guides/network-policy/walkthrough/)
|
||||
* [Cilium Kubernetes](https://github.com/Azure/acs-engine/blob/master/examples/networkpolicy)
|
||||
* [Cilium Kubernetes](https://github.com/Azure/aks-engine/blob/master/examples/networkpolicy)
|
||||
|
||||
<a name="feat-custom-vnet"></a>
|
||||
|
||||
## Custom VNET
|
||||
|
||||
*Note: Custom VNET for Kubernetes Windows cluster has a [known issue](https://github.com/Azure/acs-engine/issues/1767).*
|
||||
*Note: Custom VNET for Kubernetes Windows cluster has a [known issue](https://github.com/Azure/aks-engine/issues/1767).*
|
||||
|
||||
ACS Engine supports deploying into an existing VNET. Operators must specify the ARM path/id of Subnets for the `masterProfile` and any `agentPoolProfiles`, as well as the first IP address to use for static IP allocation in `firstConsecutiveStaticIP`. Please note that in any azure subnet, the first four and the last ip address is reserved and can not be used. Additionally, each pod now gets the IP address from the Subnet. As a result, enough IP addresses (equal to `ipAddressCount` for each node) should be available beyond `firstConsecutiveStaticIP`. By default, the `ipAddressCount` has a value of 31, 1 for the node and 30 for pods, (note that the number of pods can be changed via `KubeletConfig["--max-pods"]`). `ipAddressCount` can be changed if desired. Furthermore, to prevent source address NAT'ing within the VNET, we assign to the `vnetCidr` property in `masterProfile` the CIDR block that represents the usable address space in the existing VNET. Therefore, it is recommended to use a large subnet size such as `/16`.
|
||||
AKS Engine supports deploying into an existing VNET. Operators must specify the ARM path/id of Subnets for the `masterProfile` and any `agentPoolProfiles`, as well as the first IP address to use for static IP allocation in `firstConsecutiveStaticIP`. Please note that in any Azure subnet, the first four and the last IP addresses are reserved and cannot be used. Additionally, each pod now gets its IP address from the subnet. As a result, enough IP addresses (equal to `ipAddressCount` for each node) should be available beyond `firstConsecutiveStaticIP`. By default, `ipAddressCount` has a value of 31 (1 for the node and 30 for pods; note that the number of pods can be changed via `KubeletConfig["--max-pods"]`), and it can be changed if desired. Furthermore, to prevent source address NAT'ing within the VNET, we assign to the `vnetCidr` property in `masterProfile` the CIDR block that represents the usable address space in the existing VNET. Therefore, it is recommended to use a large subnet size such as `/16`.
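A hedged sketch of the relevant `masterProfile` fields (the subnet ID and addresses are illustrative placeholders; each `agentPoolProfiles` entry takes the same `vnetSubnetId` property):

```json
"masterProfile": {
  "count": 1,
  "dnsPrefix": "mycluster",
  "vmSize": "Standard_D2_v2",
  "vnetSubnetId": "/subscriptions/<sub-id>/resourceGroups/<vnet-rg>/providers/Microsoft.Network/virtualNetworks/<vnet-name>/subnets/<subnet-name>",
  "firstConsecutiveStaticIP": "10.239.255.239",
  "vnetCidr": "10.239.0.0/16"
}
```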
|
||||
|
||||
Depending upon the size of the VNET address space, it is possible during deployment to experience IP address assignment collisions between the required Kubernetes static IPs (one per master, plus one for the API server load balancer if there is more than one master) and the Azure CNI-assigned dynamic IPs (one for each NIC on the agent nodes). In practice, the larger the VNET, the less likely this is to happen; some detail, and then a guideline.
|
||||
|
||||
First, the detail:
|
||||
|
||||
* Azure CNI assigns dynamic IP addresses from the "beginning" of the subnet IP address space (specifically, it looks for available addresses starting at ".4" ["10.0.0.4" in a "10.0.0.0/24" network])
|
||||
* acs-engine will require a range of up to 16 unused IP addresses in multi-master scenarios (1 per master for up to 5 masters, and then the next 10 IP addresses immediately following the "last" master for headroom reservation, and finally 1 more for the load balancer immediately adjacent to the afore-described _n_ masters+10 sequence) to successfully scaffold the network stack for your cluster
|
||||
* aks-engine will require a range of up to 16 unused IP addresses in multi-master scenarios (1 per master for up to 5 masters, and then the next 10 IP addresses immediately following the "last" master for headroom reservation, and finally 1 more for the load balancer immediately adjacent to the afore-described _n_ masters+10 sequence) to successfully scaffold the network stack for your cluster
|
||||
|
||||
A guideline that will remove the danger of IP address allocation collision during deployment:
|
||||
|
||||
|
@@ -378,7 +378,7 @@ You can build a private Kubernetes cluster with no public IP addresses assigned
|
|||
}
|
||||
```
|
||||
|
||||
In order to access this cluster using kubectl commands, you will need a jumpbox in the same VNET (or onto a peer VNET that routes to the VNET). If you do not already have a jumpbox, you can use acs-engine to provision your jumpbox (see below) or create it manually. You can create a new jumpbox manually in the Azure Portal under "Create a resource > Compute > Ubuntu Server 16.04 LTS VM" or using the [az cli](https://docs.microsoft.com/en-us/cli/azure/vm?view=azure-cli-latest#az_vm_create). You will then be able to:
|
||||
In order to access this cluster using kubectl commands, you will need a jumpbox in the same VNET (or in a peered VNET that routes to the VNET). If you do not already have a jumpbox, you can use aks-engine to provision one (see below) or create it manually. You can create a new jumpbox manually in the Azure Portal under "Create a resource > Compute > Ubuntu Server 16.04 LTS VM" or using the [az cli](https://docs.microsoft.com/en-us/cli/azure/vm?view=azure-cli-latest#az_vm_create). You will then be able to:
|
||||
- install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the jumpbox
|
||||
- copy the kubeconfig artifact for the right region from the deployment directory to the jumpbox
|
||||
- run `export KUBECONFIG=<path to your kubeconfig>`
|
||||
|
@@ -386,7 +386,7 @@ In order to access this cluster using kubectl commands, you will need a jumpbox
|
|||
|
||||
Alternatively, you may also ssh into your nodes (given that your ssh key is on the jumpbox) and use the admin user kubeconfig on the cluster to run `kubectl` commands directly on the cluster. However, in the case of a multi-master private cluster, the connection will be refused when running commands on a master every time that master gets picked by the load balancer as it will be routing to itself (1 in 3 times for a 3 master cluster, 1 in 5 for 5 masters). This is expected behavior and therefore the method aforementioned of accessing nodes on the jumpbox using the `_output` directory kubeconfig is preferred.
|
||||
|
||||
To auto-provision a jumpbox with your acs-engine deployment use:
|
||||
To auto-provision a jumpbox with your aks-engine deployment use:
|
||||
|
||||
```
|
||||
"kubernetesConfig": {
|
||||
|
@@ -406,7 +406,7 @@ To auto-provision a jumpbox with your acs-engine deployment use:
|
|||
|
||||
## Azure Key Vault Data Encryption
|
||||
|
||||
Enabling Azure Key Vault Encryption configures acs-engine to create an Azure Key Vault in the same resource group as the Kubernetes cluster and configures Kubernetes to use a key from this Key Vault to encrypt and decrypt etcd data for the Kubernetes cluster.
|
||||
Enabling Azure Key Vault Encryption configures aks-engine to create an Azure Key Vault in the same resource group as the Kubernetes cluster and configures Kubernetes to use a key from this Key Vault to encrypt and decrypt etcd data for the Kubernetes cluster.
|
||||
|
||||
To enable this feature, add `encryptionWithExternalKms` in `kubernetesConfig` and `objectId` in `servicePrincipalProfile`:
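A minimal sketch using exactly the two fields named above (verify the precise property spelling against the cluster definition docs; the object ID is the service principal's AAD object ID):

```json
"kubernetesConfig": {
  "encryptionWithExternalKms": true
},
"servicePrincipalProfile": {
  "clientId": "<service-principal-client-id>",
  "secret": "<service-principal-secret>",
  "objectId": "<service-principal-object-id>"
}
```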
|
||||
|
||||
|
|
|
@@ -14,7 +14,7 @@ As illustrated on the figure above, we recommend to deploy the Kubernetes cluste
|
|||
|
||||
This document assumes that you are familiar with:
|
||||
|
||||
- Deploying Kubernetes cluster in a [custom VNET using ACS-Engine](../../examples/vnet/README.md)
|
||||
- Deploying a Kubernetes cluster in a [custom VNET using AKS Engine](../../examples/vnet/README.md)
|
||||
- Azure [VPN Gateway](https://azure.microsoft.com/en-us/services/vpn-gateway/) and/or [Azure Express Route](https://azure.microsoft.com/en-us/services/expressroute/)
|
||||
- Azure [Virtual Network Peering](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview)
|
||||
|
||||
|
@@ -27,15 +27,15 @@ The network topology must be well defined beforehand to enable peering between t
|
|||
### DNS
|
||||
|
||||
In a hybrid environment, you usually want to integrate with your on-premises DNS. There are two aspects to this. The first is registering the VMs forming the cluster, and using your local search domain when resolving other services. The second is getting the services running on Kubernetes to use the external DNS.
|
||||
To benefit the scaling capabilities of the cluster and to ensure resiliency to machine failure, every node configuration needs to be scripted and part of the initial template that acs-engine will deploy. To register the nodes in your DNS at startup, you need to define [an acs-engine extension](../extensions.md) that will run your [DNS registration script](https://github.com/tesharp/acs-engine/blob/register-dns-extension/extensions/register-dns/v1/register-dns.sh).
|
||||
To benefit from the scaling capabilities of the cluster and to ensure resiliency to machine failure, every node configuration needs to be scripted and made part of the initial template that aks-engine will deploy. To register the nodes in your DNS at startup, you need to define [an aks-engine extension](../extensions.md) that will run your [DNS registration script](https://github.com/tesharp/aks-engine/blob/register-dns-extension/extensions/register-dns/v1/register-dns.sh).
|
||||
|
||||
In addition, you might want cluster services to address URLs outside the cluster using your on-premises DNS. To achieve this, you need to configure KubeDNS to use your existing nameservers as upstream. [This setup is well documented on the Kubernetes blog](https://kubernetes.io/blog/2017/04/configuring-private-dns-zones-upstream-nameservers-kubernetes).
|
||||
|
||||
Note : There is some ongoing work to make this easier. See [acs-engine#2590](https://github.com/Azure/acs-engine/pull/2590)
|
||||
Note: there is some ongoing work to make this easier. See [aks-engine#2590](https://github.com/Azure/aks-engine/pull/2590).
|
||||
|
||||
### Private Cluster
|
||||
|
||||
By default, Kubernetes deployment with acs-engine expose the the admin api publicly (and securely). This can be avoided. Using peering with private/on-premise virtual network with ACS-Engine also allows you to create cloud-hosted [private cluster](features.md#private-cluster), with no endpoint exposed over the Internet.
|
||||
By default, a Kubernetes deployment with aks-engine exposes the admin API publicly (and securely). This can be avoided. Using peering with a private/on-premises virtual network, AKS Engine also allows you to create a cloud-hosted [private cluster](features.md#private-cluster) with no endpoint exposed over the Internet.
|
||||
|
||||
## Kubernetes Network
|
||||
|
||||
|
@@ -43,12 +43,12 @@ For your kubernetes cluster to communicate with your on-premise network, you wil
|
|||
|
||||
### Network
|
||||
|
||||
Deploying ACS-Engine on Azure, you have 3 options of network policy. Azure CNI, Kubenet, or Calico.
|
||||
When deploying AKS Engine on Azure, you have three network policy options: Azure CNI, Kubenet, or Calico.
|
||||
|
||||
#### Azure CNI
|
||||
|
||||
By default, ACS-Engine is using the [**azure cni** network policy](../../examples/networkpolicy/README.md#azure-container-networking-default) plugin. This has some advantages and some consequences that must be considered when defining the network where we deploy the cluster. CNI provides an integration with azure subnet IP addressing so that every pod created by kubernetes is assigned an IP address from the corresponding subnet.
|
||||
All IP addresses are pre-allocated at provisionning time. By default, [acs-engine will pre-allocate 128 IPs per node](https://github.com/Azure/azure-container-networking/blob/master/docs/acs.md#enabling-azure-vnet-plugins-for-an-acs-kubernetes-cluster) on the subnet.
|
||||
By default, AKS Engine uses the [**azure cni** network policy](../../examples/networkpolicy/README.md#azure-container-networking-default) plugin. This has some advantages, and some consequences that must be considered when defining the network where we deploy the cluster. CNI provides an integration with Azure subnet IP addressing, so that every pod created by Kubernetes is assigned an IP address from the corresponding subnet.
|
||||
All IP addresses are pre-allocated at provisioning time. By default, [aks-engine will pre-allocate 128 IPs per node](https://github.com/Azure/azure-container-networking/blob/master/docs/acs.md#enabling-azure-vnet-plugins-for-an-acs-kubernetes-cluster) on the subnet.
|
||||
While this can be configured, new addresses will not be allocated dynamically. That means that you need to anticipate and plan for the maximum number of IP addresses you will need for the maximum scale.
|
||||
|
||||
Consequences:
|
||||
|
@@ -155,7 +155,7 @@ Note: when using service without selector, you can't have any Kubernetes readine
|
|||
|
||||
## Cluster upgrades
|
||||
|
||||
As you may know, ACS-Engine proposes an [upgrade command](../../examples/k8s-upgrade/README.md). It is really important to understand that this uprade process is *not* fail-safe. Therefore in most cases, and especially with production clusters, a good practice consists of creating another Kubernetes cluster running the targeted version in another VNET and move the workloads into this new cluster. Once everything is tested and works as desired, set up the network redirection to this new environment.
|
||||
As you may know, AKS Engine provides an [upgrade command](../../examples/k8s-upgrade/README.md). It is really important to understand that this upgrade process is *not* fail-safe. Therefore, in most cases, and especially with production clusters, a good practice consists of creating another Kubernetes cluster running the targeted version in another VNET and moving the workloads into this new cluster. Once everything is tested and works as desired, set up the network redirection to this new environment.
|
||||
|
||||
If you plan everything correctly following the documentation above and use Kubernetes Services properly to address both in-cluster and outside services, everything should work fine.
|
||||
|
||||
|
|
|
@@ -1,8 +1,8 @@
|
|||
# For Kubernetes Developers
|
||||
|
||||
If you're working on Kubernetes upstream, you can use ACS Engine to test your build of Kubernetes in the Azure environment. The option that allows you to do this is `orchestratorProfile/kubernetesConfig/customHyperkubeImage`, which you should set to point to a Docker image containing your build of hyperkube.
|
||||
If you're working on Kubernetes upstream, you can use AKS Engine to test your build of Kubernetes in the Azure environment. The option that allows you to do this is `orchestratorProfile/kubernetesConfig/customHyperkubeImage`, which you should set to point to a Docker image containing your build of hyperkube.
|
||||
|
||||
The following instructions describe in more detail how to create the required Docker image and deploy it using ACS Engine (replace `dockerhubid` and `sometag` with your Docker Hub ID and a unique tag for your build):
|
||||
The following instructions describe in more detail how to create the required Docker image and deploy it using AKS Engine (replace `dockerhubid` and `sometag` with your Docker Hub ID and a unique tag for your build):
|
||||
|
||||
## In the Kubernetes repo
|
||||
|
||||
|
@@ -29,9 +29,9 @@ docker push dockerhubid/hyperkube-amd64:sometag
|
|||
|
||||
(It's convenient to put these steps into a script.)
|
||||
|
||||
## In the ACS repo
|
||||
## In the AKS Engine repo
|
||||
|
||||
* Open the ACS Engine input JSON (e.g. a file from the examples directory) and add the following to the `orchestratorProfile` section:
|
||||
* Open the AKS Engine input JSON (e.g. a file from the examples directory) and add the following to the `orchestratorProfile` section:
|
||||
|
||||
```
|
||||
"kubernetesConfig": {
|
||||
|
@@ -39,4 +39,4 @@ docker push dockerhubid/hyperkube-amd64:sometag
|
|||
}
|
||||
```
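Filled in with the placeholders from the text, the `orchestratorProfile` addition amounts to a sketch like this (the option name comes from the top of this page; replace the image reference with your own):

```json
"kubernetesConfig": {
  "customHyperkubeImage": "dockerhubid/hyperkube-amd64:sometag"
}
```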
|
||||
|
||||
* Run `./bin/acs-engine deploy --api-model the_json_file_you_just_edited.json ...` [as normal](deploy.md).
|
||||
* Run `./bin/aks-engine deploy --api-model the_json_file_you_just_edited.json ...` [as normal](deploy.md).
|
||||
|
|
|
@@ -14,7 +14,7 @@ There are five main options to monitor your cluster:
|
|||
|
||||
## Intro to Heapster
|
||||
|
||||
Monitoring your cluster in Kubernetes is powered by a component called [Heapster](https://github.com/kubernetes/Heapster/). Heapster is a pod that is responsible for aggregating monitoring data from across all the nodes and pods in your cluster. Heapster is necessary for viewing monitoring data in the Kubernetes dashboard as well as in Grafana. Heapster comes preinstalled on `acs-engine` deployments. To ensure that Heapster is set up in your cluster and is running:
|
||||
Monitoring your cluster in Kubernetes is powered by a component called [Heapster](https://github.com/kubernetes/Heapster/). Heapster is a pod that is responsible for aggregating monitoring data from across all the nodes and pods in your cluster. Heapster is necessary for viewing monitoring data in the Kubernetes dashboard as well as in Grafana. Heapster comes preinstalled on `aks-engine` deployments. To ensure that Heapster is set up in your cluster and is running:
|
||||
1. Ensure you have set up a [working kubernetes cluster](../kubernetes.md) and are able to use kubectl
|
||||
2. Run `kubectl get pods --namespace=kube-system`
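A quick way to check (the pod name prefix is the conventional one; the output shape is illustrative):

```sh
kubectl get pods --namespace=kube-system | grep heapster
# heapster-xxxxxxxxxx-xxxxx   2/2   Running   0   1d
```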
|
||||
|
||||
|
@@ -68,7 +68,7 @@ Once you have opened the UI, you can explore node stats (CPU, Memory, etc...) un
|
|||
|
||||
## Monitoring extension
|
||||
|
||||
A quick way to scaffold out cloud-native and open source monitoring components is to use the [acs-engine monitoring extension](https://github.com/Azure/acs-engine/tree/master/extensions/prometheus-grafana-k8s). For details on how to use the monitoring extension, please refer to the [extension documentation](https://github.com/Azure/acs-engine/tree/master/extensions/prometheus-grafana-k8s). By embedding the extension in your apimodel, the extension will do much of the work to create a monitoring solution in your cluster, which includes the following:
|
||||
A quick way to scaffold out cloud-native and open source monitoring components is to use the [aks-engine monitoring extension](https://github.com/Azure/aks-engine/tree/master/extensions/prometheus-grafana-k8s). For details on how to use the monitoring extension, please refer to the [extension documentation](https://github.com/Azure/aks-engine/tree/master/extensions/prometheus-grafana-k8s). By embedding the extension in your apimodel, the extension will do much of the work to create a monitoring solution in your cluster, which includes the following:
|
||||
|
||||
- [cAdvisor](https://github.com/google/cadvisor) daemon set to publish container metrics
|
||||
- [Prometheus](https://prometheus.io/) for metrics collection and storage
|
||||
|
|
|
@@ -2,7 +2,7 @@
|
|||
|
||||
## Install Pre-requisites
|
||||
|
||||
All the commands in this guide require both the Azure CLI and `acs-engine`. Follow the [installation instructions to download acs-engine before continuing](../acsengine.md#install-acs-engine) or [compile from source](../acsengine.md#build-from-source).
|
||||
All the commands in this guide require both the Azure CLI and `aks-engine`. Follow the [installation instructions to download aks-engine before continuing](../acsengine.md#install-aks-engine) or [compile from source](../acsengine.md#build-from-source).
|
||||
|
||||
For installation instructions see [the Azure CLI GitHub repository](https://github.com/Azure/azure-cli#installation) for the latest release.
|
||||
|
||||
|
@@ -11,12 +11,12 @@ This guide assumes you already have deployed a cluster using acs engine. For mor
|
|||
## Scale
|
||||
After a cluster has been deployed, you can interact with it further using the scale command. The scale command can add nodes to an existing node pool or remove them. Nodes will always be added to or removed from the end of the agent pool. Nodes will be cordoned and drained before deletion.
|
||||
|
||||
This guide will assume you have a cluster deployed and the output for the deployed cluster is stored at _output/mycluster. It will also assume there is a node pool named "agentpool1" in your cluster. ACS engine will default to storing the output at ./_output/dns-prefix from where the acs-engine command was ran.
|
||||
This guide will assume you have a cluster deployed and the output for the deployed cluster is stored at _output/mycluster. It will also assume there is a node pool named "agentpool1" in your cluster. AKS Engine will default to storing the output at ./_output/dns-prefix from where the aks-engine command was ran.
|
||||
|
||||
To scale the cluster you will run a command like:
|
||||
|
||||
```
|
||||
$ acs-engine scale --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
|
||||
$ aks-engine scale --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
|
||||
--resource-group mycluster --location westus2 \
|
||||
--deployment-dir _output/mycluster --new-node-count 5 \
|
||||
--node-pool agentpool1 --master-FQDN mycluster.westus2.cloudapp.azure.com
|
||||
|
@@ -30,7 +30,7 @@ This command will look in the deployment directory to find info about the clust
|
|||
|subscription-id|yes|The subscription id the cluster is deployed in.|
|
||||
|resource-group|yes|The resource group the cluster is deployed in.|
|
||||
|location|yes|The location the resource group is in.|
|
||||
|deployment-dir|yes|Relative path to the folder location for the output from the acs-engine deploy/generate command.|
|
||||
|deployment-dir|yes|Relative path to the folder location for the output from the aks-engine deploy/generate command.|
|
||||
|node-pool|depends|Required if there is more than one node pool. Which node pool should be scaled.|
|
||||
|new-node-count|yes|Desired number of nodes in the node pool.|
|
||||
|master-FQDN|depends|When scaling down a kuberentes cluster this is required. The master FDQN so that the nodes can be cordoned and drained before removal. This should be output as part of the create template or it can be found by looking at the public ip addresses in the resource group.|
|
||||
|master-FQDN|depends|Required when scaling down a Kubernetes cluster. The master FQDN, so that the nodes can be cordoned and drained before removal. This should be output as part of the create template, or it can be found by looking at the public IP addresses in the resource group.|
|
||||
|
|
|
@@ -2,7 +2,7 @@
|
|||
|
||||
## VMExtensionProvisioningError or VMExtensionProvisioningTimeout
|
||||
|
||||
The two above VMExtensionProvisioning— errors tell us that a vm in the cluster failed installing required application prerequisites after CRP provisioned the VM into the resource group. When acs-engine creates a new Kubernetes cluster, a series of shell scripts runs to install prereq's like docker, etcd, Kubernetes runtime, and various other host OS packages that support the Kubernetes application layer. *Usually* this indicates one of the following:
|
||||
The two VMExtensionProvisioning— errors above tell us that a VM in the cluster failed to install required application prerequisites after CRP provisioned the VM into the resource group. When aks-engine creates a new Kubernetes cluster, a series of shell scripts runs to install prerequisites like docker, etcd, the Kubernetes runtime, and various other host OS packages that support the Kubernetes application layer. *Usually* this indicates one of the following:
|
||||
|
||||
1. Something about the cluster configuration is pathological. For example, perhaps the cluster config includes a custom version of a particular software dependency that doesn't exist. Or, another example, for a cluster created inside a custom VNET (i.e., a user-provided, pre-existing VNET), perhaps that custom VNET does not have general outbound internet access, and so apt, docker pull, etc is not able to execute successfully.
|
||||
2. A transient Azure environmental error caused the shell script operation to timeout, or exceed its retry count. For example, the shell script may attempt to download a required package (e.g., etcd), and if the Azure networking environment for the newly provisioned vm is flaky for a period of time, then the shell script may retry several times, but eventually timeout and fail.
|
||||
|
@@ -15,7 +15,7 @@ For classification #2 above, the appropriate strategic response is to retry a fe
|
|||
|
||||
CSE stands for CustomScriptExtension, and is just a way of expressing: "a script that executes as part of the VM provisioning process, and that must exit 0 (i.e., successfully) in order for that VM provisioning process to succeed". Basically it's another way of expressing the VMExtensionProvisioning— concept above.
|
||||
|
||||
To summarize, the way that acs-engine implements Kubernetes on Azure is a collection of (1) Azure VM configuration + (2) shell script execution. Both are implemented as a single operational unit, and when #2 fails, we consider the entire VM provisioning operation to be a failure; more importantly, if only one VM in the cluster deployment fails, we consider the entire cluster operation to be a failure.
|
||||
To summarize, the way that aks-engine implements Kubernetes on Azure is a collection of (1) Azure VM configuration + (2) shell script execution. Both are implemented as a single operational unit, and when #2 fails, we consider the entire VM provisioning operation to be a failure; more importantly, if only one VM in the cluster deployment fails, we consider the entire cluster operation to be a failure.
|
||||
|
||||
### How To Debug CSE errors (Linux)
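As a hedged starting point, these are the conventional places to look on an affected node (log paths assumed from this tooling's provisioning scripts — verify on your node):

```sh
# On the failing node, over SSH:
sudo tail -n 100 /var/log/azure/cluster-provision.log   # CSE shell script output
sudo journalctl -u kubelet --no-pager | tail -n 50      # kubelet service logs
```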
|
||||
|
||||
|
@@ -67,7 +67,7 @@ There are two symptoms where you may need to debug Custom Script Extension error
|
|||
To get more logs, you need to connect to the Windows nodes using Remote Desktop - see [Connecting to Windows Nodes](#connecting-to-windows-nodes)
|
||||
|
||||
Once connected, check the following logs for errors:
|
||||
|
||||
|
||||
- `c:\Azure\CustomDataSetupScript.log`
|
||||
|
||||
#### Connecting to Windows nodes
|
||||
|
@@ -152,4 +152,4 @@ read and **write** permissions to the target Subscription.
|
|||
|
||||
`Nov 10 16:35:22 k8s-master-43D6F832-0 docker[3177]: E1110 16:35:22.840688 3201 kubelet_node_status.go:69] Unable to construct api.Node object for kubelet: failed to get external ID from cloud provider: autorest#WithErrorUnlessStatusCode: POST https://login.microsoftonline.com/72f988bf-86f1-41af-91ab-2d7cd011db47/oauth2/token?api-version=1.0 failed with 400 Bad Request: StatusCode=400`
|
||||
|
||||
[This documentation](../serviceprincipal.md) explains how to create/configure a service principal for an ACS-Engine Kubernetes cluster.
|
||||
[This documentation](../serviceprincipal.md) explains how to create/configure a service principal for an AKS Engine Kubernetes cluster.
|
||||
|
|
|
@@ -4,7 +4,7 @@ If you're trying to deploy Kubernetes with Windows the first time, be sure to ch
|
|||
|
||||
## Customizing Windows deployments
|
||||
|
||||
ACS-Engine allows a lot more customizations available in the [docs](../), but here are a few important ones you should know for Windows deployments. Each of these are extra parameters you can add into the ACS-Engine apimodel file (such as `kubernetes-windows.json` from the quick start) before running `acs-engine generate`.
|
||||
AKS Engine allows many more customizations, described in the [docs](../), but here are a few important ones you should know for Windows deployments. Each of these is an extra parameter you can add to the AKS Engine apimodel file (such as `kubernetes-windows.json` from the quick start) before running `aks-engine generate`.
|
||||
|
||||
### Changing the OS disk size
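A minimal sketch, assuming the standard `osDiskSizeGB` property on the pool profile (value illustrative):

```json
"agentPoolProfiles": [
  {
    "name": "windowspool2",
    "osType": "Windows",
    "osDiskSizeGB": 128
  }
]
```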
|
||||
|
||||
|
@@ -179,9 +179,9 @@ spec:
|
|||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /wwwcache
|
||||
command:
|
||||
command:
|
||||
- powershell.exe
|
||||
- -command
|
||||
- -command
|
||||
- "while ($true) { Start-Sleep -Seconds 10; Copy-Item -Path C:\\wwwcache\\iisstart.htm -Destination C:\\inetpub\\wwwroot\\iisstart.htm; }"
|
||||
|
||||
- name: servercore-container
|
||||
|
@@ -189,9 +189,9 @@ spec:
|
|||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /poddata
|
||||
command:
|
||||
command:
|
||||
- powershell.exe
|
||||
- -command
|
||||
- -command
|
||||
- "$i=0; while ($true) { Start-Sleep -Seconds 10; $msg = 'Hello from the servercore container, count is {0}' -f $i; Set-Content -Path C:\\poddata\\iisstart.htm -Value $msg; $i++; }"
|
||||
|
||||
nodeSelector:
|
||||
|
@@ -234,12 +234,12 @@ Get the Azure CNI build by running `C:\k\azurecni\bin\azure-vnet.exe --help`. It
|
|||
### Known Issues per Version
|
||||
|
||||
|
||||
ACS-Engine | Windows Server | Kubernetes | Azure CNI | Notes
|
||||
AKS Engine | Windows Server | Kubernetes | Azure CNI | Notes
|
||||
-----------|----------------|------------|-----------|----------
|
||||
V0.16.2 | Windows Server version 1709 (10.0.16299.____) | V1.9.7 | ? | DNS resolution is not configured
|
||||
V0.17.0 | Windows Server version 1709 | V1.10.2 | v1.0.4 | Acs-engine version 0.17 defaults to Windows Server version 1803. You can override it to use 1709 instead [here](#choosing-the-windows-server-version). Manual workarounds needed on Windows for DNS Server list, DNS search suffix
|
||||
V0.17.0 | Windows Server version 1803 (10.0.17134.1) | V1.10.2 | v1.0.4 | Manual workarounds needed on Windows for DNS Server list, DNS search suffix, and dropped packets
|
||||
v0.17.1 | Windows Server version 1709 | v1.10.3 | v1.0.4-1-gf0f090e | Manual workarounds needed on Windows for DNS Server list and DNS search suffix. This ACS-Engine version defaults to Windows Server version 1803, but you can override it to use 1709 instead [here](#choosing-the-windows-server-version)
|
||||
v0.17.1 | Windows Server version 1709 | v1.10.3 | v1.0.4-1-gf0f090e | Manual workarounds needed on Windows for DNS Server list and DNS search suffix. This AKS Engine version defaults to Windows Server version 1803, but you can override it to use 1709 instead [here](#choosing-the-windows-server-version)
|
||||
v0.18.3 | Windows Server version 1803 | v1.10.3 | v1.0.6 | Pods cannot resolve cluster DNS names
|
||||
v0.20.9 | Windows Server version 1803 | v1.10.6 | v1.0.11 | Pods cannot resolve cluster DNS names
|
||||
|
||||
|
@@ -249,12 +249,12 @@ v0.20.9 | Windows Server version 1803 | v1.10.6 | v1.0.11 | Pods cannot resolve
|
|||
|
||||
Affects: Windows Server version 1803 (10.0.17134.1)
|
||||
|
||||
Issues: https://github.com/Azure/acs-engine/issues/3037
|
||||
Issues: https://github.com/Azure/aks-engine/issues/3037
|
||||
|
||||
There is a problem with the “L2Tunnel” networking mode not forwarding packets correctly specific to Windows Server version 1803. Windows Server version 1709 is not affected.
|
||||
|
||||
Workarounds:
|
||||
**Fixes are still in development.** A Windows hotfix is needed, and will be deployed by ACS-Engine once it's ready. The hotfix will be removed later when it's in a future cumulative rollup.
|
||||
**Fixes are still in development.** A Windows hotfix is needed, and will be deployed by AKS Engine once it's ready. The hotfix will be removed later when it's in a future cumulative rollup.
|
||||
|
||||
|
||||
#### Pods cannot resolve public DNS names
|
||||
|
@@ -312,7 +312,7 @@ Workaround:
|
|||
|
||||
#### Pods cannot ping default route or internet IPs
|
||||
|
||||
Affects: All clusters deployed by acs-engine
|
||||
Affects: All clusters deployed by aks-engine
|
||||
|
||||
ICMP traffic is not routed between private Azure vNETs or to the internet.
|
||||
|
||||
|
@@ -337,4 +337,4 @@ If you see output that looks like the following, then you have **not** configure
|
|||
You may need to check to ensure the credentials were provided accurately, and that the configured Service Principal has
|
||||
read and **write** permissions to the target Subscription.
|
||||
|
||||
`Nov 10 16:35:22 k8s-master-43D6F832-0 docker[3177]: E1110 16:35:22.840688 3201 kubelet_node_status.go:69] Unable to construct api.Node object for kubelet: failed to get external ID from cloud provider: autorest#WithErrorUnlessStatusCode: POST https://login.microsoftonline.com/72f988bf-86f1-41af-91ab-2d7cd011db47/oauth2/token?api-version=1.0 failed with 400 Bad Request: StatusCode=400`
|
||||
`Nov 10 16:35:22 k8s-master-43D6F832-0 docker[3177]: E1110 16:35:22.840688 3201 kubelet_node_status.go:69] Unable to construct api.Node object for kubelet: failed to get external ID from cloud provider: autorest#WithErrorUnlessStatusCode: POST https://login.microsoftonline.com/72f988bf-86f1-41af-91ab-2d7cd011db47/oauth2/token?api-version=1.0 failed with 400 Bad Request: StatusCode=400`
|
||||
|
|
|
@@ -1,4 +1,4 @@
|
|||
# Microsoft ACS-Engine - Kubernetes Windows Walkthrough
|
||||
# Microsoft AKS Engine - Kubernetes Windows Walkthrough
|
||||
|
||||
<!-- TOC -->
|
||||
|
||||
|
@@ -10,7 +10,7 @@
|
|||
- [Create a Resource Group and Service Principal](#create-a-resource-group-and-service-principal)
|
||||
- [Create a Resource Group and Service Principal (Windows)](#create-a-resource-group-and-service-principal-windows)
|
||||
- [Create a Resource Group and Service Principal (Mac+Linux)](#create-a-resource-group-and-service-principal-maclinux)
|
||||
- [Create an acs-engine apimodel](#create-an-acs-engine-apimodel)
|
||||
- [Create an aks-engine apimodel](#create-an-aks-engine-apimodel)
|
||||
- [Filling out apimodel (Windows)](#filling-out-apimodel-windows)
|
||||
- [Filling out apimodel (Mac & Linux)](#filling-out-apimodel-mac--linux)
|
||||
- [Generate Azure Resource Manager template](#generate-azure-resource-manager-template)
|
||||
|
@@ -27,21 +27,21 @@ This guide will step through everything needed to build your first Kubernetes cl
|
|||
This guide will step through everything needed to build your first Kubernetes cluster and deploy a Windows web server on it. The steps include:
|
||||
|
||||
- Getting the right tools
|
||||
- Completing an ACS-Engine apimodel which describes what you want to deploy
|
||||
- Running ACS-Engine to generate Azure Resource Model templates
|
||||
- Completing an AKS Engine apimodel which describes what you want to deploy
|
||||
- Running AKS Engine to generate Azure Resource Manager templates
|
||||
- Deploying your first Kubernetes cluster with Windows Server nodes
|
||||
- Managing the cluster from your Windows machine
|
||||
- Deploying your first app on the cluster
|
||||
|
||||
All of these steps can be done from any OS platform, so some sections are split out by Windows, Mac or Linux to provide the most relevant samples and scripts. If you have a Windows machine but want to use the Linux tools - no problem! Set up the [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/about) and you can follow the Linux instructions on this page.
|
||||
|
||||
> Note: Windows support for Kubernetes is still in beta and under **active development**. If you run into problems, please be sure to check the [Troubleshooting](windows-details.md#troubleshooting) page and [active Windows issues](https://github.com/azure/acs-engine/issues?&q=is:issue+is:open+label:windows) in this repo, then help us by filing new issues for things that aren't already covered.
|
||||
> Note: Windows support for Kubernetes is still in beta and under **active development**. If you run into problems, please be sure to check the [Troubleshooting](windows-details.md#troubleshooting) page and [active Windows issues](https://github.com/azure/aks-engine/issues?&q=is:issue+is:open+label:windows) in this repo, then help us by filing new issues for things that aren't already covered.
|
||||
|
||||
### Install Needed Tools
|
||||
|
||||
This guide needs a few important tools, which are available on Windows, Mac, and Linux:
|
||||
|
||||
- ACS-Engine - used to generate the Azure Resource Manager (ARM) template to automatically deploy a Kubernetes cluster
|
||||
- AKS Engine - used to generate the Azure Resource Manager (ARM) template to automatically deploy a Kubernetes cluster
|
||||
- Azure CLI - used to log into Azure, create resource groups, and deploy a Kubernetes cluster from a template
|
||||
- Kubectl - "Kube control" tool used to manage Kubernetes clusters
|
||||
- SSH - A SSH public key is needed when you deploy a cluster. It's used to connect to the Linux VMs running the cluster if you need to do more management or troubleshooting later.
|
||||
|
@ -56,20 +56,20 @@ Once it's installed, make sure you can connect to Azure with it. Open a new Powe
> If you want other versions, check out the [official instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest). For more help, check out the Azure CLI [getting started](https://docs.microsoft.com/en-us/cli/azure/get-started-with-azure-cli?view=azure-cli-latest) page.
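
A quick sanity check that the CLI is installed and signed in (works the same on every platform):

```bash
# Log in interactively, then confirm which subscription is active.
az login
az account show -o table
```
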
##### ACS-Engine (Windows)
##### AKS Engine (Windows)

Windows support is evolving rapidly, so be sure to use the latest ACS-Engine version (v0.20 or later).
Windows support is evolving rapidly, so be sure to use the latest AKS Engine version (v0.20 or later).

1. Browse to the ACS-Engine [releases page](https://github.com/Azure/acs-engine/releases) on GitHub.
1. Browse to the AKS Engine [releases page](https://github.com/Azure/aks-engine/releases) on GitHub.

2. Find the latest version, and download the file ending in `-windows-amd64.zip`.

3. Extract the `acs-engine...-windows-amd64.zip` file to a working folder such as `c:\tools`
3. Extract the `aks-engine...-windows-amd64.zip` file to a working folder such as `c:\tools`

4. Check that it runs with `.\acs-engine.exe version`
4. Check that it runs with `.\aks-engine.exe version`

```none
PS C:\Users\patrick\acs-engine> .\acs-engine.exe version
PS C:\Users\patrick\aks-engine> .\aks-engine.exe version
Version: v0.20.6
GitCommit: 293adfda
GitTreeState: clean
```
@ -88,7 +88,7 @@ $oldPath = [Environment]::GetEnvironmentVariable('Path', [EnvironmentVariableTar
The latest release of Kubernetes Control (kubectl) is available on the [Kubernetes release page](https://kubernetes.io/docs/imported/release/notes/). Look for `kubernetes-client-windows-amd64.tar.gz` and download it.

Windows 10 version 1803 already includes `tar`, so extract the archive and move `kubectl.exe` to the same folder (such as `c:\tools`) that you put `acs-engine.exe` in. If you don't already have `tar`, then [busybox-w32](https://frippery.org/busybox/) is a good alternative. Download [busybox.exe](https://frippery.org/files/busybox/busybox.exe), then copy it to `c:\tools\tar.exe`. It must be named `tar.exe` for the next step to work.
Windows 10 version 1803 already includes `tar`, so extract the archive and move `kubectl.exe` to the same folder (such as `c:\tools`) that you put `aks-engine.exe` in. If you don't already have `tar`, then [busybox-w32](https://frippery.org/busybox/) is a good alternative. Download [busybox.exe](https://frippery.org/files/busybox/busybox.exe), then copy it to `c:\tools\tar.exe`. It must be named `tar.exe` for the next step to work.

```powershell
tar xvzf C:\Users\patrick\Downloads\kubernetes-client-windows-amd64.tar.gz
```
@ -116,7 +116,7 @@ If the file already exists, then you can skip forward to [Create a Resource Grou
If it does not exist, then run `ssh-keygen.exe`. Use the default file, and enter a passphrase if you wish to protect it. Be sure not to use an SSH key with a blank passphrase in production.

```powershell
PS C:\Users\patrick\acs-engine> ssh-keygen.exe
PS C:\Users\patrick\aks-engine> ssh-keygen.exe
Generating public/private rsa key pair.
Enter file in which to save the key (C:\Users\patrick/.ssh/id_rsa):
Created directory 'C:\Users\patrick/.ssh'.
```
@ -142,20 +142,20 @@ Most of the needed tools are available with [Homebrew](https://brew.sh/). Use it
Once you have those installed, make sure you can log into Azure. Open a new Terminal window, then run `az login`. It will have you log in to Azure in your web browser, then return to the command line and show "You have logged in. Now let us find all the subscriptions to which you have access..." along with the list of subscriptions.

##### ACS-Engine (Mac)
##### AKS Engine (Mac)

Windows support is evolving rapidly, so be sure to use the latest ACS-Engine version (v0.20 or later).
Windows support is evolving rapidly, so be sure to use the latest AKS Engine version (v0.20 or later).

1. Browse to the ACS-Engine [releases page](https://github.com/Azure/acs-engine/releases) on GitHub.
1. Browse to the AKS Engine [releases page](https://github.com/Azure/aks-engine/releases) on GitHub.

2. Find the latest version, and download the file ending in `-darwin-amd64.zip`.

3. Extract the `acs-engine...-darwin-amd64.zip` file to a folder in your path such as `/usr/local/bin`
3. Extract the `aks-engine...-darwin-amd64.zip` file to a folder in your path such as `/usr/local/bin`

4. Check that it runs with `acs-engine version`
4. Check that it runs with `aks-engine version`

```bash
$ acs-engine version
$ aks-engine version
Version: v0.20.6
GitCommit: 293adfda
GitTreeState: clean
```
@ -178,7 +178,7 @@ If the file doesn't exist, run `ssh-keygen` to create one.
#### Linux

These tools are included in most distributions. Use your typical package manager to make sure they're installed:

- `jq` - helpful JSON processor
- `curl` - to download files
@ -194,20 +194,20 @@ Packages for the `az` cli are available for most distributions. Please follow th
Now, make sure you can log into Azure. Open a new Terminal window, then run `az login`. It will have you log in to Azure in your web browser, then return to the command line and show "You have logged in. Now let us find all the subscriptions to which you have access..." along with the list of subscriptions.

##### ACS-Engine (Linux)
##### AKS Engine (Linux)

Windows support is evolving rapidly, so be sure to use the latest ACS-Engine version (v0.20 or later).
Windows support is evolving rapidly, so be sure to use the latest AKS Engine version (v0.20 or later).

1. Browse to the ACS-Engine [releases page](https://github.com/Azure/acs-engine/releases) on GitHub.
1. Browse to the AKS Engine [releases page](https://github.com/Azure/aks-engine/releases) on GitHub.

2. Find the latest version, and download the file ending in `-linux-amd64.zip`.

3. Extract the `acs-engine...-linux-amd64.zip` file to a folder in your path such as `/usr/local/bin`
3. Extract the `aks-engine...-linux-amd64.zip` file to a folder in your path such as `/usr/local/bin`

4. Check that it runs with `acs-engine version`
4. Check that it runs with `aks-engine version`

```bash
$ acs-engine version
$ aks-engine version
Version: v0.20.6
GitCommit: 293adfda
GitTreeState: clean
```
@ -251,14 +251,14 @@ If the file doesn't exist, run `ssh-keygen` to create one.
Now that we have the Azure CLI configured and an SSH key generated, it's time to create a resource group to hold the deployment.

ACS-Engine and Kubernetes also need access to deploy resources inside that resource group to build the cluster, as well as configure more resources such as Azure Load Balancers once the cluster is running. This is done using an Azure Service Principal. It's safest to create one with access just to the resource group so that once your deployment is deleted, the service principal can't be used to make other changes in your subscription.
AKS Engine and Kubernetes also need access to deploy resources inside that resource group to build the cluster, as well as configure more resources such as Azure Load Balancers once the cluster is running. This is done using an Azure Service Principal. It's safest to create one with access just to the resource group so that once your deployment is deleted, the service principal can't be used to make other changes in your subscription.
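
A minimal sketch of creating such a scoped Service Principal with the Azure CLI (subscription ID and group name are placeholders; the Mac+Linux section below uses the same command):

```bash
# Create a Service Principal that can only touch one resource group.
az ad sp create-for-rbac --role="Contributor" \
  --scopes="/subscriptions/<subscription-id>/resourceGroups/<group-name>"
```
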
#### Create a Resource Group and Service Principal (Windows)
`az group create --location <location> --name <name>` will create a group for you. Be sure to use a unique name for each cluster. If you need a list of available locations, run `az account list-locations -o table`.

```powershell
PS C:\Users\patrick\acs-engine> az group create --location westus2 --name k8s-win1
PS C:\Users\patrick\aks-engine> az group create --location westus2 --name k8s-win1
{
"id": "/subscriptions/df392461-0000-1111-2222-cd3aa2d911a6/resourceGroups/k8s-win1",
"location": "westus2",
@ -302,7 +302,7 @@ export SERVICEPRINCIPAL=$(az ad sp create-for-rbac --role="Contributor" --scopes
```

### Create an acs-engine apimodel
### Create an aks-engine apimodel
Multiple samples are available in this repo under [examples/windows](../../examples/windows/). This guide will use the [windows/kubernetes.json](../../examples/windows/kubernetes.json) sample to deploy 1 Linux VM to run Kubernetes services, and 2 Windows nodes to run your Windows containers.
@ -324,7 +324,7 @@ $windowsUser = "winuser"
$windowsPassword = "Cr4shOverride!"

# Download template
Invoke-WebRequest -UseBasicParsing https://raw.githubusercontent.com/Azure/acs-engine/master/examples/windows/kubernetes.json -OutFile kubernetes-windows.json
Invoke-WebRequest -UseBasicParsing https://raw.githubusercontent.com/Azure/aks-engine/master/examples/windows/kubernetes.json -OutFile kubernetes-windows.json

# Load template
$inJson = Get-Content .\kubernetes-windows.json | ConvertFrom-Json
@ -356,7 +356,7 @@ export DNSPREFIX="wink8s1"
export WINDOWSUSER="winuser"
export WINDOWSPASSWORD="Cr4shOverride!"

curl -L https://raw.githubusercontent.com/Azure/acs-engine/master/examples/windows/kubernetes.json -o kubernetes.json
curl -L https://raw.githubusercontent.com/Azure/aks-engine/master/examples/windows/kubernetes.json -o kubernetes.json

cat kubernetes.json | \
jq ".properties.masterProfile.dnsPrefix = \"$DNSPREFIX\"" | \
@ -369,10 +369,10 @@ jq ".properties.windowsProfile.adminUsername = \"$WINDOWSUSER\"" > kubernetes-wi
### Generate Azure Resource Manager template

Now that the ACS-Engine cluster definition is complete, generate the Azure templates with `acs-engine generate kubernetes-windows-complete.json`
Now that the AKS Engine cluster definition is complete, generate the Azure templates with `aks-engine generate kubernetes-windows-complete.json`
```none
acs-engine.exe generate kubernetes-windows-complete.json
aks-engine.exe generate kubernetes-windows-complete.json
INFO[0000] Generating assets into _output/plangk8swin1...
```
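
The generated template can then be submitted with the Azure CLI. A minimal sketch, assuming the `k8s-win1` resource group created earlier and the `plangk8swin1` output directory shown above:

```bash
# Deploy the generated ARM template and its parameters into the resource group.
az group deployment create \
  --name plangk8swin1-deploy \
  --resource-group k8s-win1 \
  --template-file _output/plangk8swin1/azuredeploy.json \
  --parameters _output/plangk8swin1/azuredeploy.parameters.json
```
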
@ -399,7 +399,7 @@ After several minutes, it will return the list of resources created in JSON. Loo
#### Check that the cluster is up

As mentioned earlier, `acs-engine generate` also creates Kubernetes configuration files under `_output/<dnsprefix>/kubeconfig`. There will be one per possible region, so find the one matching the region you deployed in.
As mentioned earlier, `aks-engine generate` also creates Kubernetes configuration files under `_output/<dnsprefix>/kubeconfig`. There will be one per possible region, so find the one matching the region you deployed in.

In the example above with `dnsprefix`=`plangk8swin1` and the `westus2` region, the filename would be `_output/plangk8swin1/kubeconfig/kubeconfig.westus2.json`.
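
A minimal sketch of pointing `kubectl` at that file and confirming the nodes are up (paths follow the example above):

```bash
# Use the generated kubeconfig for the region you deployed to.
export KUBECONFIG=_output/plangk8swin1/kubeconfig/kubeconfig.westus2.json

# All Linux and Windows nodes should eventually report Ready.
kubectl get nodes -o wide
```
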
@ -526,11 +526,11 @@ Once your Kubernetes cluster has been created you will have a resource group con
1. 1 master accessible by SSH on port 22 or kubectl on port 443

2. A set of Windows and/or Linux nodes. The Windows nodes can be accessed through an RDP SSH tunnel via the master node, following these steps: [Connecting to Windows Nodes](troubleshooting.md#connecting-to-windows-nodes).

![Image of Kubernetes cluster on azure with Windows](../images/kubernetes-windows.png)

These parts were all automatically created using the Azure Resource Manager template generated by ACS-Engine:
These parts were all automatically created using the Azure Resource Manager template generated by AKS Engine:

1. **Master Components** - The master runs the Kubernetes scheduler, api server, and controller manager. Port 443 is exposed for remote management with the kubectl cli.
2. **Linux Nodes** - the Kubernetes nodes run in an availability set. Azure load balancers are dynamically added to the cluster depending on exposed services.
@ -540,7 +540,7 @@ These parts were all automatically created using the Azure Resource Manager temp
## Next Steps

For more resources on Windows and ACS-Engine, continue reading:
For more resources on Windows and AKS Engine, continue reading:

- [Customizing Windows Deployments](windows-details.md#customizing-windows-deployments)
- [More Examples](windows-details.md#more-examples)
@ -2,22 +2,22 @@
## Install Pre-requisites

All the commands in this guide require both the Azure CLI and `acs-engine`. Follow the [installation instructions to download acs-engine before continuing](../acsengine.md#install-acs-engine) or [compile it from source](../acsengine.md#build-from-source).
All the commands in this guide require both the Azure CLI and `aks-engine`. Follow the [installation instructions to download aks-engine before continuing](../acsengine.md#install-aks-engine) or [compile it from source](../acsengine.md#build-from-source).

To install the Azure CLI, follow [the official documentation](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) for your operating system.

## Overview

[acs-engine](https://github.com/Azure/acs-engine/blob/master/docs/acsengine.md) reads a cluster definition (or api model) which describes the size, shape, and configuration of your cluster. This guide follows the default configuration of one master and two Linux nodes, where one node is used by the OpenShift internal infrastructure, and the other one is for end-user workloads (compute node). At least one of each node type is required for a working OpenShift cluster. In the openshift.json file, one agent pool specifies the number of infrastructure node(s); another is used to specify the number of compute node(s). If you would like to change these numbers, edit [examples/openshift.json](/examples/openshift.json) before continuing.
[aks-engine](https://github.com/Azure/aks-engine/blob/master/docs/acsengine.md) reads a cluster definition (or api model) which describes the size, shape, and configuration of your cluster. This guide follows the default configuration of one master and two Linux nodes, where one node is used by the OpenShift internal infrastructure, and the other one is for end-user workloads (compute node). At least one of each node type is required for a working OpenShift cluster. In the openshift.json file, one agent pool specifies the number of infrastructure node(s); another is used to specify the number of compute node(s). If you would like to change these numbers, edit [examples/openshift.json](/examples/openshift.json) before continuing.

The `acs-engine deploy` command automates creation of a Service Principal, Resource Group and SSH key for your cluster. If operators need more control or are interested in the individual steps see the ["Long Way" section below](#acs-engine-the-long-way).
The `aks-engine deploy` command automates creation of a Service Principal, Resource Group and SSH key for your cluster. If operators need more control or are interested in the individual steps see the ["Long Way" section below](#aks-engine-the-long-way).

## Preparing for the Deployment

In order to deploy OpenShift, you will need the following:

* The subscription and tenant ID in which you would like to provision the cluster. Both uuids can be found with `az account list -o json`, under the `id` and `tenantId` fields (see the sketch after this list).
* Proper access rights within the subscription, especially the right to create and assign service principals to applications (see ACS Engine the Long Way, Step #2).
* Proper access rights within the subscription, especially the right to create and assign service principals to applications (see AKS Engine the Long Way, Step #2).
* A `dnsPrefix` which forms part of the hostname for your cluster (e.g. staging, prodwest, blueberry). The DNS prefix must be unique in the given geographical location, so pick a random name.
* A location to provision the cluster e.g. `eastus`.
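
A minimal sketch of pulling both uuids out with `jq`, assuming the first entry returned by `az account list` is the subscription you want:

```bash
# Extract the subscription and tenant IDs for use in later commands.
export SUBSCRIPTION_ID=$(az account list -o json | jq -r '.[0].id')
export TENANT_ID=$(az account list -o json | jq -r '.[0].tenantId')
```
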
@ -43,12 +43,12 @@ $ az account list -o json
For this example, the subscription id is `5eca53b6-18b4-4d9b-a4d4-a45a1ff367c8`, the tenant id is `5a27b61b-1b6e-4be5-aa9d-0d5696076bb9`, the DNS prefix and resource group are `openshift-red`, and the location is `eastus`.

Before running the `acs-engine deploy` command, you must fill in all missing fields in the `examples/openshift.json` file. See the ["Long Way" section below](#acs-engine-the-long-way) for the description of required values.
Before running the `aks-engine deploy` command, you must fill in all missing fields in the `examples/openshift.json` file. See the ["Long Way" section below](#aks-engine-the-long-way) for the description of required values.

Now you can run `acs-engine deploy` with the appropriate arguments:
Now you can run `aks-engine deploy` with the appropriate arguments:

```
$ acs-engine deploy --subscription-id 5eca53b6-18b4-4d9b-a4d4-a45a1ff367c8 \
$ aks-engine deploy --subscription-id 5eca53b6-18b4-4d9b-a4d4-a45a1ff367c8 \
    --resource-group openshift-red --location eastus \
    --api-model examples/openshift.json
@ -56,7 +56,7 @@ INFO[0034] Starting ARM Deployment (openshift-red-1843927849). This will take so
INFO[0393] Finished ARM Deployment (openshift-red-1843927849).
```

As well as deploying the cluster, `acs-engine` will output Azure Resource Manager (ARM) templates, SSH keys (only if generated by `acs-engine`) and a node configuration in the `_output/openshift-red` directory:
As well as deploying the cluster, `aks-engine` will output Azure Resource Manager (ARM) templates, SSH keys (only if generated by `aks-engine`) and a node configuration in the `_output/openshift-red` directory:

* `_output/openshift-red/apimodel.json`
* `_output/openshift-red/azuredeploy.json`
@ -64,7 +64,7 @@ As well as deploying the cluster, `acs-engine` will output Azure Resource Manage
* `_output/openshift-red/master.tar.gz`
* `_output/openshift-red/node.tar.gz`

Administrative note: By default, the directory where acs-engine stores cluster configuration (`_output/openshift-red` above) won't be overwritten as a result of subsequent attempts to deploy a cluster using the same `--dns-prefix`. To re-use the same resource group name repeatedly, include the `--force-overwrite` command line option with your `acs-engine deploy` command.
Administrative note: By default, the directory where aks-engine stores cluster configuration (`_output/openshift-red` above) won't be overwritten as a result of subsequent attempts to deploy a cluster using the same `--dns-prefix`. To re-use the same resource group name repeatedly, include the `--force-overwrite` command line option with your `aks-engine deploy` command.

Bonus tip: include an `--auto-suffix` option to append a randomly generated suffix to the dns-prefix to form the resource group name, for example if your workflow requires a common prefix across multiple cluster deployments. Using the `--auto-suffix` pattern appends a compressed timestamp to ensure a unique cluster name (and thus ensure that each deployment's configuration artifacts will be stored locally under a discrete `_output/<resource-group-name>/` directory).
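
A minimal sketch combining one of these options with the deploy command shown earlier (the two flags address different workflows, so pick whichever fits yours):

```bash
# Re-deploying with the same dns-prefix requires --force-overwrite;
# alternatively, --auto-suffix generates a unique name per deployment.
aks-engine deploy --subscription-id 5eca53b6-18b4-4d9b-a4d4-a45a1ff367c8 \
  --resource-group openshift-red --location eastus \
  --api-model examples/openshift.json \
  --force-overwrite
```
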
@ -72,7 +72,7 @@ After couple of minutes, your OpenShift web console should be accessible at `htt
For next steps, see the [getting started documentation](https://docs.openshift.org/latest/getting_started/index.html) on the OpenShift website.

## ACS Engine the Long Way
## AKS Engine the Long Way

### Step 1: Generate an SSH Key
@ -86,7 +86,7 @@ The OpenShift cluster needs a Service Principal to interact with Azure Resource
### Step 3: Edit your Cluster Definition

ACS Engine consumes a cluster definition which outlines the desired shape, size, and configuration of OpenShift. There are a number of features that can be enabled through the cluster definition: check the `examples` directory for a number of examples.
AKS Engine consumes a cluster definition which outlines the desired shape, size, and configuration of OpenShift. There are a number of features that can be enabled through the cluster definition: check the `examples` directory for a number of examples.

Edit the [simple OpenShift cluster definition](/examples/openshift.json) and fill out the required values (every value with empty default `""` must be filled in):
@ -106,7 +106,7 @@ Edit the [simple OpenShift cluster definition](/examples/openshift.json) and fil
The generate command takes a cluster definition and outputs a template and parameters file which describes your OpenShift cluster. By default, `generate` will create a new directory named after your cluster nested in the `_output` directory. If my dnsPrefix was `openshift-red` my cluster templates would be found in `_output/openshift-red-`.

Run `acs-engine generate examples/openshift.json`
Run `aks-engine generate examples/openshift.json`

### Step 5: Submit your Templates to Azure Resource Manager (ARM)
@ -120,7 +120,7 @@ For next steps, see [getting started documentation](https://docs.openshift.org/l
## Custom VNET

ACS Engine supports deploying into an existing VNET. Operators must specify the ARM path/id of Subnets for the `masterProfile` and any `agentPoolProfiles`, as well as the master IP address in `firstConsecutiveStaticIP`. Note: Currently OpenShift clusters cannot be set up in the 172.30.0.0/16 range.
AKS Engine supports deploying into an existing VNET. Operators must specify the ARM path/id of Subnets for the `masterProfile` and any `agentPoolProfiles`, as well as the master IP address in `firstConsecutiveStaticIP`. Note: Currently OpenShift clusters cannot be set up in the 172.30.0.0/16 range.

To create a vnet and a subnet, for example:
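
A minimal sketch with the Azure CLI (names and the 10.x address space are illustrative placeholders; recall that 172.30.0.0/16 cannot be used):

```bash
# Create a VNET with a single subnet for the cluster.
az network vnet create \
  --resource-group openshift-red \
  --name openshift-vnet \
  --address-prefix 10.1.0.0/16 \
  --subnet-name openshift-subnet \
  --subnet-prefix 10.1.0.0/24
```
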
@ -1,14 +1,14 @@
# Planning Process

acs-engine features a lightweight process that emphasizes openness and ensures every community member can see project goals for the future.
aks-engine features a lightweight process that emphasizes openness and ensures every community member can see project goals for the future.

## The Role of Maintainers

[Maintainers][] lead the acs-engine project. Their duties include proposing the Roadmap, reviewing and integrating contributions and maintaining the vision of the project.
[Maintainers][] lead the aks-engine project. Their duties include proposing the Roadmap, reviewing and integrating contributions and maintaining the vision of the project.

## Open Roadmap

The [acs-engine Roadmap](roadmap.md) is a community document. While Maintainers propose the Roadmap, it gets discussed and refined in Release Planning Meetings.
The [aks-engine Roadmap](roadmap.md) is a community document. While Maintainers propose the Roadmap, it gets discussed and refined in Release Planning Meetings.

## Contributing to the Roadmap
@ -20,9 +20,9 @@ The Roadmap gets delivered progressively via the [Release Schedule][]. Releases
## Release Planning Meetings

Major decisions affecting the Roadmap are discussed during Release Planning Meetings on the first Thursday of each month, aligned with the [Release Schedule][] and monthly objectives for the Microsoft ACS team.
Major decisions affecting the Roadmap are discussed during Release Planning Meetings on the first Thursday of each month, aligned with the [Release Schedule][] and monthly objectives for the Microsoft AKS team.

Release Planning Meetings are not currently open to non-Microsoft contributors, but we may change this in the future.

[Maintainers]: https://github.com/Azure/acs-engine/blob/master/OWNERS
[Maintainers]: https://github.com/Azure/aks-engine/blob/master/OWNERS
[Release Schedule]: releases.md
@ -1,6 +1,6 @@
# Releases

acs-engine uses a [continuous delivery][] approach for creating releases. Every merged commit that passes
aks-engine uses a [continuous delivery][] approach for creating releases. Every merged commit that passes
testing results in a deliverable that can be given a [semantic version][] tag and shipped.

## Release as Needed
@ -8,33 +8,33 @@ testing results in a deliverable that can be given a [semantic version][] tag an
The master `git` branch of a project should always work. Only changes considered ready to be
released publicly are merged.

acs-engine depends on components that release new versions as often as needed. Fixing
aks-engine depends on components that release new versions as often as needed. Fixing
a high priority bug requires the project maintainer to create a new patch release.
Merging a backward-compatible feature implies a minor release.

By releasing often, each component release becomes a safe and routine event. This makes it faster
and easier for users to obtain specific fixes. Continuous delivery also reduces the work
necessary to release a product such as acs-engine, which depends on several external projects.
necessary to release a product such as aks-engine, which depends on several external projects.

"Components" applies not just to ACS projects, but also to development and release
"Components" applies not just to AKS projects, but also to development and release
tools, orchestrator versions (Kubernetes, DC/OS, Swarm), to Docker base images, and to other Azure
projects that do [semantic version][] releases.

## acs-engine Releases Each Month
## aks-engine Releases Each Month

acs-engine has a regular, public release cadence. From v0.1.0 onward, new acs-engine feature
aks-engine has a regular, public release cadence. From v0.1.0 onward, new aks-engine feature
releases arrive on the first Thursday of each month. Patch releases are created at any time,
as needed. GitHub milestones are used to communicate the content and timing of major and minor
releases, and longer-term planning is visible at [the Roadmap](roadmap.md).

acs-engine release timing is not linked to specific features. If a feature is merged before the
aks-engine release timing is not linked to specific features. If a feature is merged before the
release date, it is included in the next release.

See "[How to Release acs-engine](#how-to-release-acs-engine)" for more detail.
See "[How to Release aks-engine](#how-to-release-aks-engine)" for more detail.

## Semantic Versioning

acs-engine releases comply with [semantic versioning][semantic version], with the "public API" broadly
aks-engine releases comply with [semantic versioning][semantic version], with the "public API" broadly
defined as:

- REST, gRPC, or other API that is network-accessible
@ -44,16 +44,16 @@ defined as:
- Integration with Azure public APIs such as ARM

In general, changes to anything a user might reasonably link to, customize, or integrate with should
be backward-compatible, or else require a major release. acs-engine users can be confident that upgrading
be backward-compatible, or else require a major release. aks-engine users can be confident that upgrading
to a patch or to a minor release will not break anything.

## How to Release acs-engine
## How to Release aks-engine

This section leads a maintainer through creating an acs-engine release.
This section leads a maintainer through creating an aks-engine release.

### Step 1: Assemble Master Changelog
A change log is a file which contains a curated, chronologically ordered list of changes
for each version of acs-engine, which helps users and contributors see what notable changes
for each version of aks-engine, which helps users and contributors see what notable changes
have been made between each version of the project.
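
A minimal sketch of gathering candidate changelog entries from git history (the tag name is hypothetical):

```bash
# List every non-merge commit since the previous release tag.
git log --no-merges --oneline v0.1.0..HEAD
```
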
The CHANGELOG should be driven by release milestones defined on Github, which track specific deliverables and
@ -88,13 +88,13 @@ TBD
### Step 5: Let Everyone Know

Let the rest of the team know they can start blogging and tweeting about the new acs-engine release.
Let the rest of the team know they can start blogging and tweeting about the new aks-engine release.
Post a message to the #company channel on Slack. Include a link to the released chart and to the
master CHANGELOG:

```
@here acs-engine 0.1.0 is here!
Master CHANGELOG: https://github.com/Azure/acs-engine/CHANGELOG.md
@here aks-engine 0.1.0 is here!
Master CHANGELOG: https://github.com/Azure/aks-engine/CHANGELOG.md
```

You're done with the release. Nice job!
@ -4,7 +4,7 @@
Here are the steps to deploy a simple Swarm cluster:

1. [Install acs-engine](acsengine.md#downloading-and-building-acs-engine)
1. [Install aks-engine](acsengine.md#downloading-and-building-aks-engine)
2. [Generate your SSH key](ssh.md#ssh-key-generation)
3. [Edit the Swarm example](../examples/swarm.json) and fill in the blank strings
4. [Generate the template](acsengine.md#generate-templates)
@ -40,7 +40,7 @@ After completing this walkthrough you will know how to:
3. then click on "Succeeded" under *last deployment*
4. then click on the "Microsoft.Template"
5. now you can copy the output FQDNs and sample SSH commands

![Image of docker scaling](images/findingoutputs.png)

2. SSH to port 2200 of the master FQDN. See [agent forwarding](ssh.md#key-management-and-agent-forwarding-with-windows-pageant) for an example of how to do this.
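
A minimal sketch of that SSH command (the admin username and FQDN are placeholders from your cluster definition):

```bash
# Masters accept SSH on port 2200 behind the Azure load balancer.
ssh -p 2200 azureuser@<dnsprefix>.<region>.cloudapp.azure.com
```
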
@ -4,7 +4,7 @@
Here are the steps to deploy a Hybrid Swarm Mode cluster:

1. [Install acs-engine](acsengine.md#downloading-and-building-acs-engine)
1. [Install aks-engine](acsengine.md#downloading-and-building-aks-engine)
2. [Generate your ssh key](ssh.md#ssh-key-generation)
3. [Edit the Hybrid Swarm Mode example](../examples/windows/swarmmode-hybrid.json) and fill in the blank strings
4. [Generate the template](acsengine.md#generate-templates)
@ -22,7 +22,7 @@ SSH into one of the masters (`ssh yourlinuxuser@masterfqdn.yourregion.cloudapp.a
![](images/swarmmode-hybrid-docker-node-ls.png)

> NOTE - if you only see the Linux masters and agents, a working solution is to reimage the Windows agents scale set - that is, restoring the VMs to their initial state and restarting them. This will reapply all the steps in the installation, [mainly this one that installs the container host and joins the Swarm](https://github.com/Azure/acs-engine/blob/master/parts/swarm/Install-ContainerHost-And-Join-Swarm.ps1).
> NOTE - if you only see the Linux masters and agents, a working solution is to reimage the Windows agents scale set - that is, restoring the VMs to their initial state and restarting them. This will reapply all the steps in the installation, [mainly this one that installs the container host and joins the Swarm](https://github.com/Azure/aks-engine/blob/master/parts/swarm/Install-ContainerHost-And-Join-Swarm.ps1).

Now you can inspect one of the Windows agents with `docker node inspect <hostname or id of node>`:
@ -4,7 +4,7 @@
Here are the steps to deploy a simple Swarm Mode cluster:

1. [Install acs-engine](acsengine.md#downloading-and-building-acs-engine)
1. [Install aks-engine](acsengine.md#downloading-and-building-aks-engine)
2. [Generate your ssh key](ssh.md#ssh-key-generation)
3. [Edit the Swarm Mode example](../examples/swarmmode.json) and fill in the blank strings
4. [Generate the template](acsengine.md#generate-templates)
@ -40,7 +40,7 @@ After completing this walkthrough you will know how to:
3. then click on "Succeeded" under *last deployment*
4. then click on the "Microsoft.Template"
5. now you can copy the output FQDNs and sample SSH commands

![Image of docker scaling](images/findingoutputs.png)

2. SSH to port 2200 of the master FQDN (the first master's SSH endpoint is also reachable on port 22 via the public DNS address). See [agent forwarding](ssh.md#key-management-and-agent-forwarding-with-windows-pageant) for an example of how to do this.
@ -60,7 +60,7 @@ After completing this walkthrough you will know how to:
![Image of the web page](images/swarmbrowser.png)

8. You can now scale the service. You can type `docker service scale fe=5`, and this will scale up the service to the desired number of replicas.

![Image of service scaling](images/dockerservicescale.png)
@ -70,4 +70,4 @@ Here are recommended links to learn more about Swarm Mode, Docker, and Docker Co
1. [Docker](https://docs.docker.com/) - learn more through Docker documentation.

2. [Docker Swarm Mode](https://docs.docker.com/engine/swarm/) - learn more about Docker Swarm Mode.
@ -6,14 +6,14 @@ These cluster definition examples demonstrate how to create a customized Docker
## Walkthroughs

* [ACS Engine](../docs/acsengine.md) - shows you how to use the ACS Engine to generate custom Docker-enabled container clusters
* [AKS Engine](../docs/acsengine.md) - shows you how to use the AKS Engine to generate custom Docker-enabled container clusters
* [Cluster Definition](../docs/clusterdefinition.md) - describes the components of the cluster definition file
* [DC/OS Walkthrough](../docs/dcos.md) - shows how to create a DC/OS enabled Docker cluster on Azure
* [Kubernetes Walkthrough](../docs/kubernetes.md) - shows how to create a Kubernetes enabled Docker cluster on Azure
* [Swarm Walkthrough](../docs/swarm.md) - shows how to create a Swarm enabled Docker cluster on Azure
* [Swarm Mode Walkthrough](../docs/swarmmode.md) - shows how to create a Swarm Mode cluster on Azure
* [Custom VNET](vnet) - shows how to use a custom VNET
* [Attached Disks](disks-storageaccount) - shows how to attach up to 4 disks per node
* [Managed Disks](disks-managed) (under private preview) - shows how to use managed disks
* [Large Clusters](largeclusters) - shows how to create cluster sizes of up to 1200 nodes
* [Windows Clusters](windows) - shows how to create mixed Microsoft Windows and Linux Docker clusters on Microsoft Azure
@ -15,7 +15,7 @@ OUTPUT="_output/${INSTANCE_NAME}"
# allow nodes to run for a while before scaling
sleep 180

./bin/acs-engine scale \
./bin/aks-engine scale \
  --subscription-id ${SUBSCRIPTION_ID} \
  --deployment-dir ${OUTPUT} \
  --location ${LOCATION} \
@ -1,4 +1,4 @@
ACSE_POSTDEPLOY=examples/azure-cni/k8s-scale.sh
AKSE_POSTDEPLOY=examples/azure-cni/k8s-scale.sh

NEW_AGENT_NODE_COUNT=2
EXPECTED_NODE_COUNT=3
@ -1,4 +1,4 @@
ACSE_POSTDEPLOY=examples/azure-cni/k8s-scale.sh
AKSE_POSTDEPLOY=examples/azure-cni/k8s-scale.sh

NEW_AGENT_NODE_COUNT=3
EXPECTED_NODE_COUNT=4
@ -1,5 +1,5 @@
ACSE_PREDEPLOY=examples/vnet/k8s-vnet-azure-cni-predeploy.sh
ACSE_POSTDEPLOY=examples/azure-cni/k8s-scale.sh
AKSE_PREDEPLOY=examples/vnet/k8s-vnet-azure-cni-predeploy.sh
AKSE_POSTDEPLOY=examples/azure-cni/k8s-scale.sh

NEW_AGENT_NODE_COUNT=2
EXPECTED_NODE_COUNT=3
@ -1,5 +1,5 @@
ACSE_PREDEPLOY=examples/vnet/k8s-vnet-azure-cni-predeploy.sh
ACSE_POSTDEPLOY=examples/azure-cni/k8s-scale.sh
AKSE_PREDEPLOY=examples/vnet/k8s-vnet-azure-cni-predeploy.sh
AKSE_POSTDEPLOY=examples/azure-cni/k8s-scale.sh

NEW_AGENT_NODE_COUNT=3
EXPECTED_NODE_COUNT=4
@ -2,7 +2,7 @@
## Overview

ACS-Engine enables you to provision custom files to your master nodes. This can be used to put whichever files you want on your master nodes, at whichever path you want (and have permission to). A typical use case is adding extra configuration for native Kubernetes features, as in the [given example](../examples/customfiles/kubernetes-customfiles-podnodeselector.yaml).
AKS Engine enables you to provision custom files to your master nodes. This can be used to put whichever files you want on your master nodes, at whichever path you want (and have permission to). A typical use case is adding extra configuration for native Kubernetes features, as in the [given example](../examples/customfiles/kubernetes-customfiles-podnodeselector.yaml).

## Examples
@ -31,4 +31,4 @@ These two need to be provisioned to your master nodes in order for the api serve
}
```

This way, the files are provisioned to `/etc/kubernetes` on our master nodes and the apiserver boots up with those provisioned files defining the admission control.
@ -25,7 +25,7 @@ Let's provision a DC/OS cluster with credentials to an [Azure Container Registry
- Enable Admin Access and note the registry credentials
<img src="../../docs/images/acrblade.png" alt="ACR Blade with Admin Access enabled" style="width: 50%; height: 50%;"/>

- Clone [acs-engine](http://github.com/azure/acs-engine) and [start the container with the dev environment](https://github.com/Azure/acs-engine/blob/master/docs/acsengine.md).
- Clone [aks-engine](http://github.com/azure/aks-engine) and [start the container with the dev environment](https://github.com/Azure/aks-engine/blob/master/docs/acsengine.md).

- Edit the API model to include the credentials
```
@ -38,9 +38,9 @@ Let's provision a DC/OS cluster with credentials to an [Azure Container Registry
},
```

- Run acs-engine to create ARM templates
- Run aks-engine to create ARM templates
```
./acs-engine generate examples/dcos-private-registry/dcos.json
./aks-engine generate examples/dcos-private-registry/dcos.json
```

- Deploy the cluster
@ -64,4 +64,4 @@ az group deployment create -g cluster-rg --template-file _output/dcoscluster/azu
## Limitations
- The API model currently only supports credentials to a single registry.
- Not tested with Kubernetes clusters
- Credentials have to be updated on each node
@ -4,7 +4,7 @@
This section provides example templates that enable creation of Docker-enabled clusters with older versions of the DC/OS orchestrator.

Here are the release channels acs-engine is able to deploy:
Here are the release channels aks-engine is able to deploy:

1. DC/OS `1.8`. Access by specifying `"orchestratorVersion": "1.8.8"`.
2. DC/OS `1.9`. Access by specifying `"orchestratorVersion": "1.9.0"`.
@ -2,6 +2,6 @@
## Overview

ACS-Engine enables you to create a customized Docker-enabled cluster on Microsoft Azure with [managed disks](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview).
AKS Engine enables you to create a customized Docker-enabled cluster on Microsoft Azure with [managed disks](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview).

These examples are provided as a reference; note that managed disks are the default storage account type if none is specified.
@ -2,11 +2,11 @@
## Overview

ACS-Engine enables you to create a customized Docker-enabled cluster on Microsoft Azure with attached disks.
AKS Engine enables you to create a customized Docker-enabled cluster on Microsoft Azure with attached disks.

The examples show you how to configure up to 4 attached disks. The disks can range from 1 to 1024 GB in size:

1. **dcos.json** - deploying and using [DC/OS](../../docs/dcos.md)
2. **kubernetes.json** - deploying and using [Kubernetes](../../docs/kubernetes.md)
3. **swarm.json** - deploying and using [Swarm](../../docs/swarm.md)
4. **swarmmode.json** - deploying and using [Swarm Mode](../../docs/swarmmode.md)
@ -36,7 +36,7 @@
{
"name": "prometheus-grafana-k8s",
"version": "v1",
"rootURL": "https://raw.githubusercontent.com/Azure/acs-engine/master/"
"rootURL": "https://raw.githubusercontent.com/Azure/aks-engine/master/"
}
],
"servicePrincipalProfile": {
@ -4,20 +4,20 @@
This document describes how to upgrade the Kubernetes version for an existing cluster.

*acs-engine* supports Kubernetes version upgrades starting from the ``1.5`` release.
During the upgrade, *acs-engine* successively visits virtual machines that constitute the cluster (first the master nodes, then the agent nodes) and performs the following operations:
*aks-engine* supports Kubernetes version upgrades starting from the ``1.5`` release.
During the upgrade, *aks-engine* successively visits virtual machines that constitute the cluster (first the master nodes, then the agent nodes) and performs the following operations:
- cordon the node and drain existing workload
- delete the VM
- create a new VM and install the desired orchestrator version
- add the new VM to the cluster

*acs-engine* allows one subsequent minor version upgrade at a time, for example, from ``1.6.x`` to ``1.7.y``.
*aks-engine* allows one subsequent minor version upgrade at a time, for example, from ``1.6.x`` to ``1.7.y``.

For an upgrade that spans more than a single minor version, this operation should be called several times, each time advancing the minor version by one. For example, to upgrade from ``1.6.x`` to ``1.8.z`` one should first upgrade the cluster to ``1.7.y``, followed by upgrading it to ``1.8.z``.

To get the list of all available Kubernetes versions and upgrades, run the *orchestrators* command and specify the Kubernetes orchestrator type. The output is a JSON object:
```bash
./bin/acs-engine orchestrators --orchestrator Kubernetes
./bin/aks-engine orchestrators --orchestrator Kubernetes
```

```json
@ -75,7 +75,7 @@ To get the list of all available Kubernetes versions and upgrades, run the *orch
To get the information specific to the cluster, provide its current orchestrator version:
```bash
./bin/acs-engine orchestrators --orchestrator Kubernetes --version 1.7.8
./bin/aks-engine orchestrators --orchestrator Kubernetes --version 1.7.8
```

```json
@ -132,9 +132,9 @@ To get the information specific to the cluster, provide its current orchestrator
Once the desired Kubernetes version is finalized, call the *upgrade* command:
```bash
./bin/acs-engine upgrade \
./bin/aks-engine upgrade \
  --subscription-id <subscription id> \
  --deployment-dir <acs-engine output directory> \
  --deployment-dir <aks-engine output directory> \
  --location <resource group location> \
  --resource-group <resource group name> \
  --upgrade-version <desired Kubernetes version> \
@ -144,7 +144,7 @@ Once the desired Kubernetes version is finalized, call the *upgrade* command:
```

For example,
```bash
./bin/acs-engine upgrade \
./bin/aks-engine upgrade \
  --subscription-id xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx \
  --deployment-dir ./_output/test \
  --location westus \
@ -155,9 +155,9 @@ For example,
--client-secret xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
```

By its nature, the upgrade operation is long-running and could potentially fail for various reasons, such as a temporary lack of resources. In this case, rerun the command. The *upgrade* command is idempotent, and will pick up execution from the point where it failed.

[This directory](https://github.com/Azure/acs-engine/tree/master/examples/k8s-upgrade) contains the following files:
[This directory](https://github.com/Azure/aks-engine/tree/master/examples/k8s-upgrade) contains the following files:
- **README.md** - this file
- **k8s-upgrade.sh** - script invoking the upgrade operation
- **\*.json** - cluster definition examples for various orchestrator versions and configurations: Linux clusters, Windows clusters, hybrid clusters.
@ -12,7 +12,7 @@ fi
OUTPUT="_output/${INSTANCE_NAME}"

./bin/acs-engine upgrade \
./bin/aks-engine upgrade \
  --subscription-id ${SUBSCRIPTION_ID} \
  --deployment-dir ${OUTPUT} \
  --location ${LOCATION} \
@ -1,2 +1,2 @@
ACSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
EXPECTED_ORCHESTRATOR_VERSION=1.8.1
AKSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
EXPECTED_ORCHESTRATOR_VERSION=1.8.1
@ -1,2 +1,2 @@
ACSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
EXPECTED_ORCHESTRATOR_VERSION=1.8.9
AKSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
EXPECTED_ORCHESTRATOR_VERSION=1.8.9
@ -1,2 +1,2 @@
ACSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
AKSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
EXPECTED_ORCHESTRATOR_VERSION=1.8.9
@ -1,2 +1,2 @@
ACSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
AKSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
EXPECTED_ORCHESTRATOR_VERSION=1.8.9
@ -1,2 +1,2 @@
ACSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
AKSE_POSTDEPLOY=examples/k8s-upgrade/k8s-upgrade.sh
EXPECTED_ORCHESTRATOR_VERSION=1.9.4
@ -2,7 +2,7 @@
## Overview

ACS-Engine enables you to retrieve the following k8s deployment parameters from Microsoft Azure KeyVault:
AKS Engine enables you to retrieve the following k8s deployment parameters from Microsoft Azure KeyVault:

* certificateProfile
* apiServerCertificate
@ -2,7 +2,7 @@
## Overview

ACS-Engine enables you to create a customized Docker-enabled cluster on Microsoft Azure with certs installed from key vault during deployment.
AKS Engine enables you to create a customized Docker-enabled cluster on Microsoft Azure with certs installed from key vault during deployment.

The examples show you how to configure installing a cert from keyvault. These certs are assumed to be in the secrets portion of your keyvault:
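
Before filling out the cluster definition, a minimal sketch of confirming the cert is stored as a secret and grabbing its full identifier (vault and secret names are placeholders):

```bash
# List secrets in the vault, then print the identifier to reference
# from the cluster definition.
az keyvault secret list --vault-name <vault-name> -o table
az keyvault secret show --vault-name <vault-name> --name <cert-name> --query id
```
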
@ -22,8 +22,8 @@ The definition below adds 2 labels `"bar"` and `"baz"` to all nodes in the `firs
],
```

In addition to any custom node labels you may add, ACS Engine will add another label, `"agentpool"`, which identifies which Agent Pool the node belongs to.
In addition to any custom node labels you may add, AKS Engine will add another label, `"agentpool"`, which identifies which Agent Pool the node belongs to.

You can confirm the labels have been applied on the node by running `kubectl describe node <nodename>`:

![Node Labels](images/kubernetesnodelabels.png)
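
A couple of quick ways to inspect labels across all nodes at once (the pool name is a placeholder):

```bash
# Show every node with its full label set.
kubectl get nodes --show-labels

# Select only the nodes in a given agent pool via the agentpool label.
kubectl get nodes -l agentpool=<pool-name>
```
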
@ -2,7 +2,7 @@
## Overview

ACS-Engine enables you to create a customized Docker-enabled cluster on Microsoft Azure with 1200 nodes.
AKS Engine enables you to create a customized Docker-enabled cluster on Microsoft Azure with 1200 nodes.

The examples show you how to configure up to 12 agent pools with 100 nodes each:
@ -12,4 +12,4 @@ The examples show you how to configure up to 12 agent pools with 100 nodes each:
4. **swarm.json** - deploying and using [Swarm](../../docs/swarm.md)
5. **swarm-vmas.json** - this provides an example using availability sets instead of the default virtual machine scale sets. You will want to use availability sets if you want to dynamically attach/detach disks.
6. **swarmmode.json** - deploying and using [Swarm Mode](../../docs/swarmmode.md)
7. **swarmmode-vmas.json** - this provides an example using availability sets instead of the default virtual machine scale sets. You will want to use availability sets if you want to dynamically attach/detach disks.
@ -7,7 +7,7 @@ There are 2 different Network Policy options :
## Calico

The kubernetes-calico deployment template enables Calico networking and policies for the ACS-Engine cluster via `"networkPolicy": "calico"` being present inside the `kubernetesConfig`.
The kubernetes-calico deployment template enables Calico networking and policies for the AKS Engine cluster via `"networkPolicy": "calico"` being present inside the `kubernetesConfig`.

```json
"properties": {
```
@ -32,10 +32,10 @@ Once the template has been successfully deployed, following the [simple policy t
> Note: `ping` (ICMP) traffic is blocked on the cluster by default. Wherever `ping` is used in any tutorial, substitute testing access with something like `wget -q --timeout=5 google.com -O -` instead.

### Update guidance for clusters deployed by acs-engine releases prior to 0.17.0
### Update guidance for clusters deployed by aks-engine releases prior to 0.17.0
Clusters deployed with the calico networkPolicy enabled prior to `0.17.0` had calico `2.6.3` deployed, and a daemonset with an `updateStrategy` of `OnDelete`.

acs-engine releases starting with 0.17.0 now produce an addon manifest for calico in `/etc/kubernetes/addons/calico-daemonset.yaml` containing calico 3.1.x, and an `updateStrategy` of `RollingUpdate`. Due to breaking changes introduced by calico 3, one must first migrate through calico `2.6.5` or a later 2.6.x release in order to migrate to calico 3.1.x, as described in the [calico kubernetes upgrade documentation](https://docs.projectcalico.org/v3.1/getting-started/kubernetes/upgrade/). The acs-engine manifest for calico uses the [kubernetes API datastore, policy-only setup](https://docs.projectcalico.org/v3.1/getting-started/kubernetes/upgrade/upgrade#upgrading-an-installation-that-uses-the-kubernetes-api-datastore).
aks-engine releases starting with 0.17.0 now produce an addon manifest for calico in `/etc/kubernetes/addons/calico-daemonset.yaml` containing calico 3.1.x, and an `updateStrategy` of `RollingUpdate`. Due to breaking changes introduced by calico 3, one must first migrate through calico `2.6.5` or a later 2.6.x release in order to migrate to calico 3.1.x, as described in the [calico kubernetes upgrade documentation](https://docs.projectcalico.org/v3.1/getting-started/kubernetes/upgrade/). The aks-engine manifest for calico uses the [kubernetes API datastore, policy-only setup](https://docs.projectcalico.org/v3.1/getting-started/kubernetes/upgrade/upgrade#upgrading-an-installation-that-uses-the-kubernetes-api-datastore).

1. To update to `2.6.5+` in preparation of an upgrade to 3.1.x as specified, edit `/etc/kubernetes/addons/calico-daemonset.yaml` on a master node, replacing `calico/node:v3.1.1` with `calico/node:v2.6.10` and `calico/cni:v3.1.1` with `calico/cni:v2.0.6`. Run `kubectl apply -f /etc/kubernetes/addons/calico-daemonset.yaml`.
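
A minimal sketch of step 1 as shell commands on a master node, assuming the default manifest path and the exact image tags named above:

```bash
# Pin the calico images back to the 2.6.x migration versions.
sudo sed -i \
  -e 's|calico/node:v3.1.1|calico/node:v2.6.10|' \
  -e 's|calico/cni:v3.1.1|calico/cni:v2.0.6|' \
  /etc/kubernetes/addons/calico-daemonset.yaml

# Apply the updated daemonset.
kubectl apply -f /etc/kubernetes/addons/calico-daemonset.yaml
```
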
@ -61,7 +61,7 @@ If you have any customized calico resource manifests, you must also follow the [
|
|||
|
||||
## Cilium
|
||||
|
||||
The kubernetes-cilium deployment template enables Cilium networking and policies for the ACS-engine cluster via `"networkPolicy": "cilium"` or `"networkPlugin": "cilium"` being present inside the `kubernetesConfig`.
|
||||
The kubernetes-cilium deployment template enables Cilium networking and policies for the AKS Engine cluster via `"networkPolicy": "cilium"` or `"networkPlugin": "cilium"` being present inside the `kubernetesConfig`.
|
||||
|
||||
```json
|
||||
"properties": {
|
||||
|
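This hunk truncates its sample as well. Under the same assumptions as the Calico sketch above, the Cilium variant can be derived from that file:

```bash
# Sketch: reuse the minimal api model from the Calico example, switch the
# network policy to Cilium, then regenerate the ARM templates.
jq '.properties.orchestratorProfile.kubernetesConfig.networkPolicy = "cilium"' \
  kubernetes-calico.json > kubernetes-cilium.json
aks-engine generate kubernetes-cilium.json
```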
@@ -91,6 +91,6 @@ The kubernetes-cilium deployment template enables Cilium networking and policies

### Post installation

Once the template has been successfully deployed, following the [deploy the demo application](http://cilium.readthedocs.io/en/latest/gettingstarted/minikube/#step-2-deploy-the-demo-application) tutorial will provide a good foundation for L3/4 policy as well as more advanced Layer 7 inspection and routing. If you have [Istio](https://istio.io), you can try this [tutorial](http://cilium.readthedocs.io/en/latest/gettingstarted/istio/), where cilium is used side by side with Istio to enforce security policies in a Kubernetes deployment.

For the latest documentation on Cilium (including BPF and XDP reference guides), please refer to [the Cilium documentation](http://cilium.readthedocs.io/en/latest/).


@@ -2,7 +2,7 @@

There are numerous implementations of a service mesh which integrate with kubernetes, such as Istio, [Linkerd](http://linkerd.io), and [Conduit](https://conduit.io/). [This blog post](https://medium.com/microservices-in-practice/service-mesh-for-microservices-2953109a3c9a) explains some fundamentals behind what a service mesh is and why to use one.

Some service mesh implementations **may** benefit from or require additional [customizations to the kubernetes cluster itself](https://github.com/Azure/acs-engine/blob/master/docs/clusterdefinition.md).
Some service mesh implementations **may** benefit from or require additional [customizations to the kubernetes cluster itself](https://github.com/Azure/aks-engine/blob/master/docs/clusterdefinition.md).

## Istio

@@ -28,7 +28,7 @@ The main changes this configuration makes are adding these flags to the apiserver

#### Update istio.json

3. Ensure `orchestratorRelease` is 1.9+.
4. Update `--admission-control` to include `MutatingAdmissionWebhook,ValidatingAdmissionWebhook`.

**Note**: admission-controls need to be entered in the order defined in the kubernetes [docs](https://kubernetes.io/docs/admin/admission-controllers/#is-there-a-recommended-set-of-admission-controllers-to-use); a sketch of the resulting flag follows.

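A hedged sketch of the resulting flag in `istio.json`'s `kubernetesConfig` (the plugin ordering below follows the Kubernetes 1.9 recommended set; treat it as an assumption and verify against the linked docs):

```bash
# Sketch: set --admission-control in the api model with jq, keeping the
# recommended plugin order and including the two webhook plugins.
jq '.properties.orchestratorProfile.kubernetesConfig.apiServerConfig["--admission-control"]
    = "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"' \
  istio.json > istio-updated.json
```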
@@ -64,4 +64,4 @@ Once the template has been successfully deployed, then Istio can be installed vi

> Note: So far, the manual steps seem better maintained and more up-to-date than the helm chart.

After Istio has been installed, consider [walking through the various Tasks](https://istio.io/docs/tasks/), which use the Bookinfo example application.


@@ -1 +1 @@
ACSE_PREDEPLOY=examples/vnet/dual-subnet-vnet-predeploy.sh
AKSE_PREDEPLOY=examples/vnet/dual-subnet-vnet-predeploy.sh

@@ -1 +1 @@
ACSE_PREDEPLOY=examples/vnet/k8s-vnet-azure-cni-predeploy.sh
AKSE_PREDEPLOY=examples/vnet/k8s-vnet-azure-cni-predeploy.sh

@@ -1,73 +1,73 @@
{
  "apiVersion": "vlabs",
  "properties": {
    "orchestratorProfile": {
      "orchestratorType": "Kubernetes",
      "kubernetesConfig": {
        "networkPolicy": "calico"
      }
    },
    "masterProfile": {
      "count": 1,
      "dnsPrefix": "test",
      "vmSize": "Standard_D2_v3",
      "vnetSubnetId": "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME",
      "firstConsecutiveStaticIP": "10.239.255.239",
      "vnetCidr": "10.22.221.0/24",
      "preProvisionExtension": {
        "name": "register-dns",
        "singleOrAll": "All"
      }
    },
    "agentPoolProfiles": [
      {
        "name": "staging",
        "count": 2,
        "vmSize": "Standard_D2_v3",
        "vnetSubnetId": "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME",
        "availabilityProfile": "VirtualMachineScaleSets",
        "preProvisionExtension": {
          "name": "register-dns",
          "singleOrAll": "All"
        }
      },
      {
        "name": "production",
        "count": 2,
        "vmSize": "Standard_D2_v3",
        "vnetSubnetId": "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME",
        "availabilityProfile": "VirtualMachineScaleSets",
        "preProvisionExtension": {
          "name": "register-dns",
          "singleOrAll": "All"
        }
      }
    ],
    "linuxProfile": {
      "customNodesDNS": {
        "dnsServer": "10.239.255.255"
      },
      "adminUsername": "azureuser",
      "ssh": {
        "publicKeys": [
          {
            "keyData": ""
          }
        ]
      }
    },
    "extensionProfiles": [
      {
        "name": "register-dns",
        "version": "v1",
        "extensionParameters": "mydomain.com",
        "rootURL": "https://raw.githubusercontent.com/Azure/acs-engine/master/extensions/dnsupdate/",
        "rootURL": "https://raw.githubusercontent.com/Azure/aks-engine/master/extensions/dnsupdate/",
        "script": "register-dns.sh"
      }
    ],
    "servicePrincipalProfile": {
      "clientId": "",
      "secret": ""
    }
  }
}

@@ -1,2 +1,2 @@
ACSE_PREDEPLOY=examples/vnet/k8s-vnet-predeploy.sh
ACSE_POSTDEPLOY=examples/vnet/k8s-vnet-postdeploy.sh
AKSE_PREDEPLOY=examples/vnet/k8s-vnet-predeploy.sh
AKSE_POSTDEPLOY=examples/vnet/k8s-vnet-postdeploy.sh

@@ -1,2 +1,2 @@
ACSE_PREDEPLOY=examples/vnet/k8s-vnet-predeploy.sh
ACSE_POSTDEPLOY=examples/vnet/k8s-vnet-postdeploy.sh
AKSE_PREDEPLOY=examples/vnet/k8s-vnet-predeploy.sh
AKSE_POSTDEPLOY=examples/vnet/k8s-vnet-postdeploy.sh

@@ -1,2 +1,2 @@
ACSE_PREDEPLOY=examples/vnet/k8s-vnet-predeploy.sh
ACSE_POSTDEPLOY=examples/vnet/k8s-vnet-postdeploy.sh
AKSE_PREDEPLOY=examples/vnet/k8s-vnet-predeploy.sh
AKSE_POSTDEPLOY=examples/vnet/k8s-vnet-postdeploy.sh

@@ -1 +1 @@
ACSE_PREDEPLOY=examples/vnet/dual-subnet-vnet-predeploy.sh
AKSE_PREDEPLOY=examples/vnet/dual-subnet-vnet-predeploy.sh

@@ -59,7 +59,7 @@ metadata:
type: Opaque
data:
  wsid: `echo $wsid | base64 -w0`
  key: `echo $key | base64 -w0`
EOFSECRET

log 'done'

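The heredoc above hand-templates the secret manifest. An equivalent hedged shortcut (the secret name here is an assumption of this sketch, not necessarily the name the script uses) lets kubectl do the base64 encoding itself:

```bash
# Sketch: create the workspace-id/key secret directly instead of
# generating a manifest by hand.
kubectl create secret generic omsagent-secret \
  --from-literal=wsid="$wsid" \
  --from-literal=key="$key"
```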
@@ -85,7 +85,7 @@ spec:
        dockerProviderVersion: 10.0.0-22
    spec:
      containers:
      - name: omsagent
        image: "microsoft/oms"
        imagePullPolicy: Always
        env:

@@ -103,7 +103,7 @@ spec:
          privileged: true
        ports:
        - containerPort: 25225
          protocol: TCP
        - containerPort: 25224
          protocol: UDP
        volumeMounts:

@@ -111,10 +111,10 @@ spec:
          name: docker-sock
        - mountPath: /var/opt/microsoft/omsagent/state/containerhostname
          name: container-hostname
        - mountPath: /var/log
          name: host-log
      volumes:
      - name: docker-sock
        hostPath:
          path: /var/run/docker.sock
      - name: container-hostname

@@ -122,7 +122,7 @@ spec:
          path: /etc/hostname
      - name: host-log
        hostPath:
          path: /var/log
EOFDAEMONSET

log 'done'

@@ -137,7 +137,7 @@ deploy_yaml() {
  log 'Deploying oms agent secret - oms-agentsecret.yaml'
  kubectl create -f 'oms-agentsecret.yaml'

  log 'Deploying oms agent daemonset - oms-daemonset.yaml'
  kubectl create -f 'oms-daemonset.yaml'

  log 'done'

@@ -175,7 +175,7 @@ then
fi

log ''
log 'ACS-Engine - installing Microsoft OMS Agent (k8s)'
log 'AKS Engine - installing Microsoft OMS Agent (k8s)'
log '--------------------------------------------------'

install_script_dependencies

@@ -1,7 +1,7 @@
# prometheus-grafana Extension

This is the prometheus-grafana extension. Add this extension to the api model you pass as input into acs-engine as shown below to automatically enable prometheus and grafana in your new Kubernetes cluster.
This is the prometheus-grafana extension. Add this extension to the api model you pass as input into aks-engine as shown below to automatically enable prometheus and grafana in your new Kubernetes cluster.

```
{

@@ -22,7 +22,7 @@ This is the prometheus-grafana extension. Add this extension to the api model y
      "vmSize": "Standard_DS2_v2",
      "availabilityProfile": "AvailabilitySet",
      "extensions": [
        {
          "name": "prometheus-grafana-k8s"
        }
      ]

@@ -39,10 +39,10 @@ This is the prometheus-grafana extension. Add this extension to the api model y
      }
    },
    "extensionProfiles": [
      {
        "name": "prometheus-grafana-k8s",
        "version": "v1",
        "rootURL": "https://raw.githubusercontent.com/Azure/acs-engine/master/"
        "rootURL": "https://raw.githubusercontent.com/Azure/aks-engine/master/"
      }
    ],
    "servicePrincipalProfile": {
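A hedged way to graft both halves of that configuration onto an existing api model (the pool index, file names, and the jq approach are assumptions of this sketch):

```bash
# Sketch: attach the prometheus-grafana-k8s extension to the first agent
# pool and register the matching extension profile in one pass.
jq '.properties.agentPoolProfiles[0].extensions = [{"name": "prometheus-grafana-k8s"}]
    | .properties.extensionProfiles = [{
        "name": "prometheus-grafana-k8s",
        "version": "v1",
        "rootURL": "https://raw.githubusercontent.com/Azure/aks-engine/master/"
      }]' \
  kubernetes.json > kubernetes-monitoring.json
```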


@@ -287,8 +287,8 @@ ensure_k8s_namespace_exists() {
}

NAMESPACE=default
RAW_PROMETHEUS_CHART_VALS="https://raw.githubusercontent.com/Azure/acs-engine/master/extensions/prometheus-grafana-k8s/v1/prometheus_values.yaml"
CADVISOR_CONFIG_URL="https://raw.githubusercontent.com/Azure/acs-engine/master/extensions/prometheus-grafana-k8s/v1/cadvisor_daemonset.yml"
RAW_PROMETHEUS_CHART_VALS="https://raw.githubusercontent.com/Azure/aks-engine/master/extensions/prometheus-grafana-k8s/v1/prometheus_values.yaml"
CADVISOR_CONFIG_URL="https://raw.githubusercontent.com/Azure/aks-engine/master/extensions/prometheus-grafana-k8s/v1/cadvisor_daemonset.yml"

# retrieve and parse extension parameters
if [[ -n "$1" ]]; then

@@ -11,7 +11,7 @@ This extension will install Windows Server patches, including prerelease hotfixe
|-------------------|--------|----------------------|
|name |yes | windows-patches |
|version |yes | v1 |
|rootURL |optional| `https://raw.githubusercontent.com/Azure/acs-engine/master/` or any repo with the same extensions/... directory structure |
|rootURL |optional| `https://raw.githubusercontent.com/Azure/aks-engine/master/` or any repo with the same extensions/... directory structure |
|extensionParameters|yes | comma-delimited list of URIs enclosed with ' such as `'https://privateupdates.domain.ext/Windows10.0-KB999999-x64-InstallForTestingPurposesOnly.exe', 'https://privateupdates.domain.ext/Windows10.0-KB123456-x64-InstallForTestingPurposesOnly.exe'` |

## Example

@@ -33,7 +33,7 @@ This extension will install Windows Server patches, including prerelease hotfixe
    {
      "name": "windows-patches",
      "version": "v1",
      "rootURL": "https://raw.githubusercontent.com/Azure/acs-engine/master/",
      "rootURL": "https://raw.githubusercontent.com/Azure/aks-engine/master/",
      "extensionParameters": "'https://mypatches.blob.core.windows.net/hotfix3692/Windows10.0-KB999999-x64-InstallForTestingPurposesOnly.exe?sp=r&st=2018-08-17T00:25:01Z&se=2018-09-17T08:25:01Z&spr=https&sv=2017-11-09&sig=0000000000%3D&sr=b', 'http://download.windowsupdate.com/c/msdownload/update/software/secu/2018/08/windows10.0-kb4343909-x64_f931af6d56797388715fe3b0d97569af7aebdae6.msu'"
    }
  ]
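Because the single-quote-and-comma convention for `extensionParameters` is easy to get wrong, here is a hedged helper for assembling the value (the URIs are placeholders):

```bash
# Sketch: build the "'uri1', 'uri2'" extensionParameters string from an
# array of patch URIs; printf reuses the format for each array element.
uris=(
  "https://privateupdates.domain.ext/Windows10.0-KB999999-x64-InstallForTestingPurposesOnly.exe"
  "https://privateupdates.domain.ext/Windows10.0-KB123456-x64-InstallForTestingPurposesOnly.exe"
)
printf -v params "'%s', " "${uris[@]}"
echo "${params%, }"
```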

@@ -1,6 +1,6 @@
# Scanned and autogenerated by https://github.com/tonglil/labeler
---
repo: Azure/acs-engine
repo: Azure/aks-engine
labels:
- name: backlog
  color: e99695

main.go

@@ -4,7 +4,7 @@
package main

import (
    "github.com/Azure/acs-engine/cmd"
    "github.com/Azure/aks-engine/cmd"
    "github.com/mattn/go-colorable"
    log "github.com/sirupsen/logrus"
)

@@ -1,4 +1,4 @@
$REPO_PATH = "github.com/Azure/acs-engine"
$REPO_PATH = "github.com/Azure/aks-engine"
$DEV_ENV_IMAGE = "quay.io/deis/go-dev:v1.17.3"
$DEV_ENV_WORK_DIR = "/go/src/$REPO_PATH"

@@ -153,7 +153,7 @@ done

pullContainerImage "docker" "busybox"

# TODO: fetch supported k8s versions from an acs-engine command instead of hardcoding them here
# TODO: fetch supported k8s versions from an aks-engine command instead of hardcoding them here
K8S_VERSIONS="1.7.15 1.7.16 1.8.14 1.8.15 1.9.10 1.9.11 1.10.8 1.10.9 1.11.4 1.11.5 1.12.1 1.12.2"

for KUBERNETES_VERSION in ${K8S_VERSIONS}; do

@@ -1,7 +1,7 @@
#!/usr/bin/env bash

# This script originated at https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/health-monitor.sh
# and has been modified for acs-engine.
# and has been modified for aks-engine.

set -o nounset
set -o pipefail

@@ -15,7 +15,7 @@ container_runtime_monitoring() {
  if [[ "${CONTAINER_RUNTIME:-docker}" != "docker" ]]; then
    healthcheck_command="${crictl} pods"
  fi

  until timeout 60 ${healthcheck_command} > /dev/null; do
    if (( attempt == max_attempts )); then
      echo "Max attempt ${max_attempts} reached! Proceeding to monitor container runtime healthiness."

@@ -1,6 +1,6 @@
#!/bin/bash
# Mounting is done here instead of etcd because of bug https://bugs.launchpad.net/cloud-init/+bug/1692093
# Once the bug is fixed, replace the below with the cloud init changes replaced in https://github.com/Azure/acs-engine/pull/661.
# Once the bug is fixed, replace the below with the cloud init changes replaced in https://github.com/Azure/aks-engine/pull/661.
set -x
DISK=/dev/sdc
PARTITION=${DISK}1

@@ -25,4 +25,4 @@ then
  /sbin/mkfs.ext4 $PARTITION -L etcd_disk -F -E lazy_itable_init=1,lazy_journal_init=1
fi
mount $MOUNTPOINT
/bin/chown -R etcd:etcd /var/lib/etcddisk

@@ -296,7 +296,7 @@ ensureK8sControlPlane() {
    return
  fi
  wait_for_file 3600 1 $KUBECTL || exit $ERR_FILE_WATCH_TIMEOUT
  # workaround for 1.12 bug https://github.com/Azure/acs-engine/issues/3681
  # workaround for 1.12 bug https://github.com/Azure/aks-engine/issues/3681
  if [[ "${KUBERNETES_VERSION}" = 1.12.* ]]; then
    ensureKubelet
    retrycmd_if_failure 120 5 25 $KUBECTL 2>/dev/null cluster-info || ensureKubelet && retrycmd_if_failure 900 1 20 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT

@@ -4,11 +4,11 @@

.DESCRIPTION
Provisions VM as a Kubernetes agent.

The parameters passed in are required, and will vary per-deployment.

Notes on modifying this file:
- This file extension is PS1, but it is actually used as a template from pkg/acsengine/template_generator.go
- This file extension is PS1, but it is actually used as a template from pkg/engine/template_generator.go
- All of the lines that have braces in them will be modified. Please do not change them here, change them in the Go sources
- Single quotes are forbidden, they are reserved to delineate the different members for the ARM template concat() call
#>

@@ -48,11 +48,11 @@ param(
# These globals will not change between nodes in the same cluster, so they are not
# passed as powershell parameters

## Certificates generated by acs-engine
## Certificates generated by aks-engine
$global:CACertificate = "{{WrapAsParameter "caCertificate"}}"
$global:AgentCertificate = "{{WrapAsParameter "clientCertificate"}}"

## Download sources provided by acs-engine
## Download sources provided by aks-engine
$global:KubeBinariesPackageSASURL = "{{WrapAsParameter "kubeBinariesSASURL"}}"
$global:WindowsKubeBinariesURL = "{{WrapAsParameter "windowsKubeBinariesURL"}}"
$global:KubeBinariesVersion = "{{WrapAsParameter "kubeBinariesVersion"}}"

@@ -87,7 +87,7 @@ $global:LoadBalancerSku = "{{WrapAsVariable "loadBalancerSku"}}"
$global:ExcludeMasterFromStandardLB = "{{WrapAsVariable "excludeMasterFromStandardLB"}}"

# Windows defaults, not changed by acs-engine
# Windows defaults, not changed by aks-engine
$global:KubeDir = "c:\k"
$global:HNSModule = [Io.path]::Combine("$global:KubeDir", "hns.psm1")

@@ -154,9 +154,9 @@ try
    Write-Log "Download kubelet binaries and unzip"
    Get-KubePackage -KubeBinariesSASURL $global:KubeBinariesPackageSASURL

    # this overwrites the binaries that are downloaded from the custom package with binaries
    # from $global:WindowsKubeBinariesURL. The custom package has a few files that are necessary for future steps (nssm.exe)
    # this is a temporary workaround to get the binaries until we deprecate the
    # custom package and nssm.exe as defined in #3851.
    if ($global:WindowsKubeBinariesURL){
        Write-Log "Overwriting kube node binaries from $global:WindowsKubeBinariesURL"