test: remove obsolete e2e test scripts (#1768)

* test: rationalize e2e test scripts

* test: folks prefer up/down :(

* test: skip test foo

* chore: dep

* test: execute bit

* test: $(pwd)

* test: LF not CRLF

* test: prefer echo ${API_MODEL_INPUT}

* test: cleanup

* chore: echo

* test: whoops

* chore: upgrade timeout

* chore: jq

* chore: don’t assume jq

* test: skip logic

* test: correct skip logic

* test: lint

* test: debug and change git branch once

* test: simplify upgrade/scale

* test: need GINKGO_SKIP

* chore: focus on subsequent tests

* test: add new Windows image vars

* fix: missing $

* test: add Jenkinsfile

* chore: try just stage

* chore: no steps either

* chore: remove println

* chore: i heart groovy

* chore: quote

* chore: withenv

* chore: just echo

* chore: try steps

* chore: try node

* chore: will this run?

* chore: where am i

* chore: where is workspace

* chore: checkout scm

* chore: run the test

* chore: parameterized creds

* chore: credentials not as literal strings

* chore: try root environment

* chore: use the right names

* chore: in pipeline

* chore: back to withenv

* chore: withCredentials

* chore: different creds name

* chore: try other name

* chore: use actual cred names

* chore: scaffold multi-version pipeline

* chore: get rid of json for now

* chore: json parsing scaffolding

* chore: need ORCHESTRATOR_RELEASE

* chore: use DEPLOY_REGIONS, skip ls check

* test: delete wip Jenkinsfile

* test: remove wip cluster.json

* chore: remove cluster.sh
Jack Francis 2019-08-28 14:10:24 -07:00 committed by Azure Kubernetes Service Bot
Parent 3756d8e6e5
Commit 4f7c185225
61 changed files with 1 addition and 5065 deletions

Gopkg.lock (generated)
View file

@@ -54,14 +54,6 @@
revision = "7be49c3f43696c7ca8c9a76d6836703d83aea404"
version = "v1.1.1"
[[projects]]
digest = "1:d6613cccd218e927314d7f6a7a6e8169473b07db7e07810341cacd3bdca920ba"
name = "github.com/alexcesaro/statsd"
packages = ["."]
pruneopts = "NUT"
revision = "7fea3f0d2fab1ad973e641e51dba45443a311a90"
version = "v2.0.0"
[[projects]]
digest = "1:aba270497eb2d49f5cba6f4162d524b9a1195a24cbce8be20bf56a0051f47deb"
name = "github.com/blang/semver"
@@ -861,7 +853,6 @@
"github.com/Azure/go-autorest/autorest/date",
"github.com/Azure/go-autorest/autorest/to",
"github.com/Jeffail/gabs",
"github.com/alexcesaro/statsd",
"github.com/blang/semver",
"github.com/davecgh/go-spew/spew",
"github.com/fatih/structs",

View file

@@ -81,7 +81,6 @@ generate-azure-constants:
.PHONY: build
build: validate-dependencies generate
$(GO) build $(GOFLAGS) -ldflags '$(LDFLAGS)' -o $(BINDIR)/$(PROJECT)$(EXTENSION) $(REPO_PATH)
$(GO) build $(GOFLAGS) -o $(BINDIR)/aks-engine-test$(EXTENSION) $(REPO_PATH)/test/aks-engine-test
build-binary: generate
go build $(GOFLAGS) -v -ldflags "$(LDFLAGS)" -o $(BINARY_DEST_DIR)/aks-engine .
@@ -138,7 +137,7 @@ ginkgoBuild: generate
ginkgo build test/e2e/kubernetes
test: generate ginkgoBuild
ginkgo -skipPackage test/e2e/dcos,test/e2e/kubernetes -failFast -r .
ginkgo -skipPackage test/e2e/kubernetes -failFast -r .
.PHONY: test-style
test-style: validate-go validate-shell validate-copyright-headers
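
With the DC/OS suite gone, the `test` target now skips only the Kubernetes e2e package, which `ginkgoBuild` still compiles via `ginkgo build test/e2e/kubernetes`. For context, such a package is an ordinary Go test binary; a minimal sketch of a Ginkgo v1 suite follows (the spec names and placeholder assertion are illustrative assumptions, not the repo's actual e2e specs):

package kubernetes_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Suite entry point: `ginkgo build test/e2e/kubernetes` compiles this into
// a test binary that the Makefile's `test` target can run or skip.
func TestKubernetesE2E(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Kubernetes e2e suite")
}

var _ = Describe("cluster", func() {
	It("comes up", func() {
		// Placeholder assertion; real specs exercise a deployed cluster.
		Expect(true).To(BeTrue())
	})
})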

View file

@@ -1,80 +0,0 @@
{
"deployments": [
{
"cluster_definition": "windows/kubernetes-hybrid.json",
"location": "eastus"
},
{
"cluster_definition": "windows/kubernetes.json",
"location": "eastus"
},
{
"cluster_definition": "dcos-releases/dcos1.9.json",
"location": "eastus"
},
{
"cluster_definition": "disks-managed/dcos-preAttachedDisks-vmas.json",
"location": "eastus"
},
{
"cluster_definition": "disks-managed/dcos-vmss.json",
"location": "eastus"
},
{
"cluster_definition": "disks-managed/kubernetes-preAttachedDisks-vmas.json",
"location": "eastus"
},
{
"cluster_definition": "disks-managed/kubernetes-vmas.json",
"location": "eastus"
},
{
"cluster_definition": "disks-managed/swarm-preAttachedDisks-vmss.json",
"location": "southcentralus"
},
{
"cluster_definition": "disks-managed/swarmmode-vmas.json",
"location": "southcentralus"
},
{
"cluster_definition": "disks-managed/swarmmode-vmss.json",
"location": "southcentralus"
},
{
"cluster_definition": "disks-storageaccount/dcos.json",
"location": "westcentralus"
},
{
"cluster_definition": "disks-storageaccount/kubernetes.json",
"location": "westcentralus"
},
{
"cluster_definition": "disks-storageaccount/swarmmode.json",
"location": "southcentralus"
},
{
"cluster_definition": "networkpolicy/kubernetes-calico.json",
"location": "southcentralus"
},
{
"cluster_definition": "kubernetes-config/kubernetes-clustersubnet.json",
"location": "westus2"
},
{
"cluster_definition": "v20170131/swarmmode.json",
"location": "westus2"
},
{
"cluster_definition": "vnet/dcosvnet.json",
"location": "westus2"
},
{
"cluster_definition": "vnet/kubernetesvnet.json",
"location": "westus2"
},
{
"cluster_definition": "vnet/swarmmodevnet.json",
"location": "westus2"
}
]
}

View file

@@ -1,12 +0,0 @@
{
"deployments": [
{
"cluster_definition": "swarm.json",
"location": "westus2"
},
{
"cluster_definition": "swarmmode.json",
"location": "westcentralus"
}
]
}

View file

@@ -1,24 +0,0 @@
{
"deployments": [
{
"cluster_definition": "dcos-releases/dcos1.9.json",
"category": "version"
},
{
"cluster_definition": "disks-managed/dcos-preAttachedDisks-vmas.json",
"category": "managed-disk"
},
{
"cluster_definition": "disks-managed/dcos-vmss.json",
"category": "managed-disk"
},
{
"cluster_definition": "disks-storageaccount/dcos.json",
"category": "managed-disk"
},
{
"cluster_definition": "vnet/dcosvnet.json",
"category": "network"
}
]
}

View file

@@ -1,61 +0,0 @@
{
"deployments": [
{
"cluster_definition": "dcos-releases/dcos1.9.json",
"category": "version"
},
{
"cluster_definition": "disks-managed/dcos-preAttachedDisks-vmas.json",
"category": "managed-disk"
},
{
"cluster_definition": "disks-managed/dcos-vmss.json",
"category": "managed-disk"
},
{
"cluster_definition": "disks-managed/swarm-preAttachedDisks-vmss.json",
"category": "managed-disk"
},
{
"cluster_definition": "disks-managed/swarmmode-vmas.json",
"category": "managed-disk"
},
{
"cluster_definition": "disks-managed/swarmmode-vmss.json",
"category": "managed-disk"
},
{
"cluster_definition": "disks-storageaccount/dcos.json",
"category": "managed-disk"
},
{
"cluster_definition": "disks-storageaccount/swarmmode.json",
"category": "sa-disk"
},
{
"cluster_definition": "keyvaultcerts/swarmmode.json",
"category": "keyvault",
"location": "westus"
},
{
"cluster_definition": "networkpolicy/kubernetes-calico.json",
"category": "network"
},
{
"cluster_definition": "v20170131/swarmmode.json",
"category": "version"
},
{
"cluster_definition": "vnet/dcosvnet.json",
"category": "network"
},
{
"cluster_definition": "vnet/kubernetesvnet.json",
"category": "network"
},
{
"cluster_definition": "vnet/swarmmodevnet.json",
"category": "network"
}
]
}

View file

@@ -1,11 +0,0 @@
{
"deployments": [
{
"cluster_definition": "azure-cni/k8s-vnet-scaleup.json"
},
{
"cluster_definition": "azure-cni/k8s-vnet-scaledown.json"
}
]
}

View file

@@ -1,11 +0,0 @@
{
"deployments": [
{
"cluster_definition": "azure-cni/k8s-scaleup.json"
},
{
"cluster_definition": "azure-cni/k8s-scaledown.json"
}
]
}

View file

@@ -1,9 +0,0 @@
{
"deployments": [
{
"cluster_definition": "dcos.json",
"category": "canary",
"location": "eastus2euap"
}
]
}

View file

@@ -1,9 +0,0 @@
{
"deployments": [
{
"cluster_definition": "windows/kubernetes.json",
"category": "canary",
"location": "eastus2euap"
}
]
}

View file

@@ -1,9 +0,0 @@
{
"deployments": [
{
"cluster_definition": "kubernetes.json",
"category": "canary",
"location": "eastus2euap"
}
]
}

View file

@@ -1,9 +0,0 @@
{
"deployments": [
{
"cluster_definition": "swarmmode.json",
"category": "canary",
"location": "eastus2euap"
}
]
}

View file

@@ -1,124 +0,0 @@
{
"deployments": [
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "canadacentral"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "canadaeast"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "centralindia"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "westindia"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "southindia"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "centralus"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "eastus2"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "eastus"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "westcentralus"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "westus2"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "westus"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "southeastasia"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "koreacentral"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "japaneast"
},
{
"cluster_definition": "dcos-D2.json",
"category": "service-availability",
"location": "japanwest"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "northeurope"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "westeurope"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "uksouth"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "australiaeast"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "australiasoutheast"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "koreasouth"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "northcentralus"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "southcentralus"
},
{
"cluster_definition": "dcos.json",
"category": "service-availability",
"location": "ukwest"
}
]
}

View file

@@ -1,124 +0,0 @@
{
"deployments": [
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "canadacentral"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "canadaeast"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "centralindia"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "westindia"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "southindia"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "centralus"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "eastus2"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "eastus"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "westcentralus"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "westus2"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "westus"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "southeastasia"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "koreacentral"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "japaneast"
},
{
"cluster_definition": "windows/kubernetes-D2.json",
"category": "service-availability",
"location": "japanwest"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "northeurope"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "westeurope"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "uksouth"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "australiaeast"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "australiasoutheast"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "koreasouth"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "northcentralus"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "southcentralus"
},
{
"cluster_definition": "windows/kubernetes.json",
"category": "service-availability",
"location": "ukwest"
}
]
}

View file

@@ -1,124 +0,0 @@
{
"deployments": [
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "canadacentral"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "canadaeast"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "centralindia"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "westindia"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "southindia"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "centralus"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "eastus2"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "eastus"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "westcentralus"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "westus2"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "westus"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "southeastasia"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "koreacentral"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "japaneast"
},
{
"cluster_definition": "kubernetes-D2.json",
"category": "service-availability",
"location": "japanwest"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "northeurope"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "westeurope"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "uksouth"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "australiaeast"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "australiasoutheast"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "koreasouth"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "northcentralus"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "southcentralus"
},
{
"cluster_definition": "kubernetes.json",
"category": "service-availability",
"location": "ukwest"
}
]
}

View file

@@ -1,124 +0,0 @@
{
"deployments": [
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "canadacentral"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "canadaeast"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "centralindia"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "westindia"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "southindia"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "centralus"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "eastus2"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "eastus"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "westcentralus"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "westus2"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "westus"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "southeastasia"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "koreacentral"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "japaneast"
},
{
"cluster_definition": "swarmmode-D2.json",
"category": "service-availability",
"location": "japanwest"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "northeurope"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "westeurope"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "uksouth"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "australiaeast"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "australiasoutheast"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "koreasouth"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "northcentralus"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "southcentralus"
},
{
"cluster_definition": "swarmmode.json",
"category": "service-availability",
"location": "ukwest"
}
]
}

View file

@@ -1,17 +0,0 @@
{
"deployments": [
{
"cluster_definition": "k8s-upgrade/v1.7.7.json",
"location": "eastus"
},
{
"cluster_definition": "k8s-upgrade/v1.7.9.json",
"location": "centralus"
},
{
"cluster_definition": "k8s-upgrade/v1.8.4.json",
"location": "westus2"
}
]
}

View file

@@ -1,12 +0,0 @@
{
"deployments": [
{
"cluster_definition": "k8s-upgrade/v1.7.9-win.json",
"location": "eastus"
},
{
"cluster_definition": "k8s-upgrade/v1.7.9-hybrid.json",
"location": "westus"
}
]
}

View file

@@ -1,106 +0,0 @@
#!/usr/bin/env groovy
node("slave") {
withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'AZURE_CLI_SPN_AKS_TEST',
passwordVariable: 'SPN_PASSWORD', usernameVariable: 'SPN_USER']]) {
timestamps {
wrap([$class: 'AnsiColorBuildWrapper', 'colorMapName': 'XTerm']) {
env.GOPATH="${WORKSPACE}"
env.PATH="${env.PATH}:${env.GOPATH}/bin"
def clone_dir = "${env.GOPATH}/src/github.com/Azure/aks-engine"
env.HOME=clone_dir
def success = true
Integer timeoutInMinutes = TEST_TIMEOUT.toInteger()
dir(clone_dir) {
def img = null
try {
stage('Init') {
deleteDir()
checkout scm
img = docker.build('aks-engine-test', '--pull .')
}
}
catch(exc) {
echo "Exception ${exc}"
success = false
}
img.inside("-u root:root") {
String errorMsg = ""
def log_dir = pwd()+"/_logs"
try {
stage('Test') {
if(success) {
// Create log directory
sh("mkdir -p ${log_dir}")
// Create template, deploy and test
env.SERVICE_PRINCIPAL_CLIENT_ID="${SPN_USER}"
env.SERVICE_PRINCIPAL_CLIENT_SECRET="${SPN_PASSWORD}"
env.TENANT_ID="${TENANT_ID}"
env.SUBSCRIPTION_ID="${SUBSCRIPTION_ID}"
env.LOCATION = "${LOCATION}"
env.LOGFILE = "${log_dir}/${LOCATION}.log"
env.CLEANUP = "${CLEANUP}"
env.INSTANCE_NAME = "test-acs-ci-${ORCHESTRATOR}-${env.LOCATION}-${env.BUILD_NUM}"
env.INSTANCE_NAME_PREFIX = "test-acs-ci"
env.ORCHESTRATOR = "${ORCHESTRATOR}"
env.CLUSTER_DEFINITION="examples/${ORCHESTRATOR}.json"
env.CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID}"
env.CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET}"
script="test/cluster-tests/${ORCHESTRATOR}/test.sh"
def exists = fileExists script
if (exists) {
env.VALIDATE = script
} else {
echo 'Skip validation'
}
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh('./test/deploy.sh')
}
}
}
}
catch(exc) {
echo "Exception ${exc}"
success = false
errorMsg = "Please run \"make ci\" for verification"
}
archiveArtifacts(allowEmptyArchive: true, artifacts: "${log_dir}/**/*.log")
// Allow for future removal from the host
sh("chmod -R a+rwx ${WORKSPACE}")
if(!success) {
currentBuild.result = "FAILURE"
String to = "${SEND_TO}".trim()
if(errorMsg != "") {
if(to != "") {
to += ";"
}
to += emailextrecipients([[$class: 'CulpritsRecipientProvider']])
}
if(to != "") {
def url = "${env.BUILD_URL}\n\n"
for(String addr : to.tokenize('[ \t\n;,]+')) {
if(!addr.endsWith("@microsoft.com")) {
url = ""
}
}
gitCommit = sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
emailext(
to: to,
subject: "[AKS Engine is BROKEN] ${env.JOB_NAME} #${env.BUILD_NUM}",
body: "Commit: ${gitCommit}\n\n${url}${errorMsg}"
)
}
}
}
}
}
}
}
}

View file

@@ -1,134 +0,0 @@
#!/usr/bin/env groovy
node("slave") {
withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'AZURE_CLI_SPN_AKS_TEST',
passwordVariable: 'SPN_PASSWORD', usernameVariable: 'SPN_USER']]) {
timestamps {
wrap([$class: 'AnsiColorBuildWrapper', 'colorMapName': 'XTerm']) {
env.GOPATH="${WORKSPACE}"
env.PATH="${env.PATH}:${env.GOPATH}/bin"
def clone_dir = "${env.GOPATH}/src/github.com/Azure/aks-engine"
env.HOME=clone_dir
String sendTo = "${SEND_TO}".trim()
Integer timeoutInMinutes = STAGE_TIMEOUT.toInteger()
def autoclean="${AUTOCLEAN}"
dir(clone_dir) {
def img = null
stage('Init') {
deleteDir()
checkout scm
img = docker.build('aks-engine-test', '--pull .')
}
img.inside("-u root:root") {
def success = true
def junit_dir = "_junit"
def prefix = ""
try {
stage('Setup') {
// Set up Azure
sh("az login --service-principal -u ${SPN_USER} -p ${SPN_PASSWORD} --tenant ${TENANT_ID}")
sh("az account set --subscription ${SUBSCRIPTION_ID}")
env.SERVICE_PRINCIPAL_CLIENT_ID="${SPN_USER}"
env.SERVICE_PRINCIPAL_CLIENT_SECRET="${SPN_PASSWORD}"
env.TENANT_ID="${TENANT_ID}"
env.SUBSCRIPTION_ID="${SUBSCRIPTION_ID}"
env.CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID}"
env.CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET}"
// First check to see if var exists in context, then check for true-ness
// In Groovy, null and empty strings are false...
if(getBinding().hasVariable("CUSTOM_HYPERKUBE_SPEC") && CUSTOM_HYPERKUBE_SPEC) {
env.CUSTOM_HYPERKUBE_SPEC="${CUSTOM_HYPERKUBE_SPEC}"
}
sh("printf 'acs-features-test%x' \$(date '+%s') > INSTANCE_NAME_PREFIX")
prefix = readFile('INSTANCE_NAME_PREFIX').trim()
// Create report directory
sh("mkdir -p ${junit_dir}")
// Build and test aks-engine
sh('make ci')
}
def pairs = "${SCENARIOS_LOCATIONS}".tokenize('|')
for(i = 0; i < pairs.size(); i++) {
def pair = pairs[i].tokenize('[ \t\n]+')
if(pair.size() != 2) {
echo "Skipping '"+pairs[i]+"'"
continue
}
def subdir = pair[0]
def names = sh(returnStdout: true, script: "cd examples; ls ${subdir}/*.json").split("\\r?\\n")
env.LOCATION = pair[1]
for(j = 0; j< names.size(); j++) {
def name = names[j].trim()
env.CLUSTER_DEFINITION = pwd()+"/examples/${name}"
env.INSTANCE_NAME = "${prefix}-${i}-${j}"
env.RESOURCE_GROUP = "test-acs-${subdir}-${env.LOCATION}-${env.BUILD_NUM}-${i}-${j}"
env.DEPLOYMENT_NAME = "${env.RESOURCE_GROUP}"
env.ORCHESTRATOR = sh(returnStdout: true, script: './test/step.sh get_orchestrator_type').trim()
env.LOGFILE = pwd()+"/${junit_dir}/${name}.log"
env.CLEANUP = "y"
// Generate and deploy template, validate deployments
try {
stage(name) {
def scripts = ["generate_template.sh", "deploy_template.sh"]
if(env.ORCHESTRATOR == "dcos" || env.ORCHESTRATOR == "swarmmode" || env.ORCHESTRATOR == "kubernetes") {
scripts += "validate_deployment.sh"
}
for(k = 0; k < scripts.size(); k++) {
def script = scripts[k]
def test = "${name}.${script}"
sh("mkdir -p ${junit_dir}/${test}")
sh("cp ./test/shunit/${script} ${junit_dir}/${test}/t.sh")
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh("cd ${junit_dir}; shunit.sh -t ${test} > ${test}/junit.xml")
}
sh("grep 'failures=\"0\"' ${junit_dir}/${test}/junit.xml")
}
}
}
catch(exc) {
env.CLEANUP = autoclean
echo "Exception in [${name}] : ${exc}"
}
// Clean up
try {
sh('./test/step.sh cleanup')
}
catch(exc) {
echo "Exception ${exc}"
}
} // for (j = 0; j <files...
} // for (i = 0; i <subdirs...
// Generate reports
try {
junit("${junit_dir}/**/junit.xml")
archiveArtifacts(allowEmptyArchive: true, artifacts: "${junit_dir}/**/*.log")
if(currentBuild.result == "UNSTABLE") {
currentBuild.result = "FAILURE"
if(sendTo != "") {
emailext(
to: "${sendTo}",
subject: "[AKS Engine Jenkins Failure] ${env.JOB_NAME} #${env.BUILD_NUM}",
body: "${env.BUILD_URL}testReport")
}
}
}
catch(exc) {
echo "Exception ${exc}"
}
}
catch(exc) {
currentBuild.result = "FAILURE"
echo "Exception ${exc}"
}
// Allow for future removal from the host
sh("chmod -R a+rwx ${WORKSPACE}")
}
}
}
}
}
}

View file

@@ -1,174 +0,0 @@
#!/usr/bin/env groovy
node("slave") {
withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'AZURE_CLI_SPN_AKS_TEST',
passwordVariable: 'SPN_PASSWORD', usernameVariable: 'SPN_USER']]) {
timestamps {
wrap([$class: 'AnsiColorBuildWrapper', 'colorMapName': 'XTerm']) {
env.GOPATH="${WORKSPACE}"
env.PATH="${env.PATH}:${env.GOPATH}/bin"
def clone_dir = "${env.GOPATH}/src/github.com/Azure/aks-engine"
env.HOME=clone_dir
String locations_str = "${LOCATIONS}"
Integer scalingDelay = SCALE_CYCLE_DELAY_MIN.toInteger()
String sendTo = "${SEND_TO}".trim()
Integer timeoutInMinutes = STAGE_TIMEOUT.toInteger()
def autoclean="${AUTOCLEAN}"
if(locations_str.equals("all")) {
locations_str = "\
australiaeast \
brazilsouth \
canadacentral canadaeast \
centralindia southindia \
centralus eastus2 eastus northcentralus southcentralus westcentralus westus2 westus \
eastasia southeastasia \
koreacentral koreasouth \
japaneast japanwest \
northeurope westeurope \
uksouth ukwest"
}
def locations = locations_str.tokenize('[ \t\n]+')
dir(clone_dir) {
def img = null
stage('Init') {
deleteDir()
checkout scm
img = docker.build('aks-engine-test', '--pull .')
}
img.inside("-u root:root") {
def junit_dir = "_junit"
try {
String canonicalName = sh(returnStdout: true, script: 'echo "${CLUSTER_DEFINITION%.*}" | sed "s/\\//_/g"').trim()
stage('Setup') {
// Set up Azure
sh("az login --service-principal -u ${SPN_USER} -p ${SPN_PASSWORD} --tenant ${TENANT_ID}")
sh("az account set --subscription ${SUBSCRIPTION_ID}")
// Create report directory
sh("mkdir ${junit_dir}")
// Build and test aks-engine
sh('make ci')
// Create template
env.CLUSTER_DEFINITION = pwd()+"/examples/${CLUSTER_DEFINITION}"
env.ORCHESTRATOR = sh(returnStdout: true, script: './test/step.sh get_orchestrator_type').trim()
sh("printf 'acs-test%x' \$(date '+%s') > INSTANCE_NAME")
env.INSTANCE_NAME = readFile('INSTANCE_NAME').trim()
env.CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID}"
env.CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET}"
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh('./test/step.sh generate_template')
}
}
for (i = 0; i <locations.size(); i++) {
env.LOCATION = locations[i]
env.RESOURCE_GROUP = "test-acs-svc-${canonicalName}-${env.LOCATION}-${env.BUILD_NUM}"
env.DEPLOYMENT_NAME = "${env.RESOURCE_GROUP}"
env.LOGFILE = pwd()+"/${junit_dir}/${canonicalName}.${env.LOCATION}.log"
env.CLEANUP = "y"
def ok = true
// Deploy
try {
stage("${env.LOCATION} deploy") {
def test = "deploy-${env.LOCATION}"
sh("mkdir -p ${junit_dir}/${test}")
sh("cp ./test/shunit/deploy_template.sh ${junit_dir}/${test}/t.sh")
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh("cd ${junit_dir}; shunit.sh -t ${test} > ${test}/junit.xml")
}
sh("grep 'failures=\"0\"' ${junit_dir}/${test}/junit.xml")
}
}
catch(exc) {
env.CLEANUP = autoclean
echo "Exception in [deploy ${canonicalName}/${env.LOCATION}] : ${exc}"
ok = false
}
// Scale-up cycle
try {
def counts = "${AGENT_POOL_SIZES}".tokenize('[ \t\n]+')
for (i = 0; i <counts.size(); i++) {
env.AGENT_POOL_SIZE = counts[i]
stage("${env.LOCATION} #${env.AGENT_POOL_SIZE}") {
if(ok) {
sleep(scalingDelay*60)
def test = "scale-up-${env.AGENT_POOL_SIZE}-${env.LOCATION}"
sh("mkdir -p ${junit_dir}/${test}")
sh("cp ./test/shunit/scale_agent_pool.sh ${junit_dir}/${test}/t.sh")
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh("cd ${junit_dir}; shunit.sh -t ${test} > ${test}/junit.xml")
}
sh("grep 'failures=\"0\"' ${junit_dir}/${test}/junit.xml")
}
}
}
}
catch(exc) {
env.CLEANUP = autoclean
echo "Exception in [scale ${canonicalName}/${env.LOCATION}] : ${exc}"
ok = false
}
// Validate deployment
try {
stage("${env.LOCATION} validate") {
if(ok) {
env.EXPECTED_NODE_COUNT = sh(returnStdout: true, script: './test/step.sh get_node_count').trim()
env.EXPECTED_ORCHESTRATOR_VERSION = sh(returnStdout: true, script: './test/step.sh get_orchestrator_release').trim()
def test = "validate-${env.LOCATION}"
sh("mkdir -p ${junit_dir}/${test}")
sh("cp ./test/shunit/validate_deployment.sh ${junit_dir}/${test}/t.sh")
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh("cd ${junit_dir}; shunit.sh -t ${test} > ${test}/junit.xml")
}
sh("grep 'failures=\"0\"' ${junit_dir}/${test}/junit.xml")
}
else {
echo "Skipped verification for ${env.RESOURCE_GROUP}"
}
}
}
catch(exc) {
env.CLEANUP = autoclean
echo "Exception in [validate ${canonicalName}/${env.LOCATION}] : ${exc}"
}
// Clean up
try {
sh('./test/step.sh cleanup')
}
catch(exc) {
echo "Exception ${exc}"
}
} // for (i = 0; i <locations...
// Generate reports
try {
junit("${junit_dir}/**/junit.xml")
archiveArtifacts(allowEmptyArchive: true, artifacts: "${junit_dir}/**/*.log")
if(currentBuild.result == "UNSTABLE") {
currentBuild.result = "FAILURE"
if(sendTo != "") {
emailext(
to: "${sendTo}",
subject: "[AKS Engine Jenkins Failure] ${env.JOB_NAME} #${env.BUILD_NUM}",
body: "${env.BUILD_URL}testReport")
}
}
}
catch(exc) {
echo "Exception ${exc}"
}
}
catch(exc) {
currentBuild.result = "FAILURE"
echo "Exception ${exc}"
}
// Allow for future removal from the host
sh("chmod -R a+rwx ${WORKSPACE}")
}
}
}
}
}
}

View file

@@ -1,146 +0,0 @@
#!/usr/bin/env groovy
node("slave") {
withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'AZURE_CLI_SPN_AKS_TEST',
passwordVariable: 'SPN_PASSWORD', usernameVariable: 'SPN_USER']]) {
timestamps {
wrap([$class: 'AnsiColorBuildWrapper', 'colorMapName': 'XTerm']) {
env.GOPATH="${WORKSPACE}"
env.PATH="${env.PATH}:${env.GOPATH}/bin"
def clone_dir = "${env.GOPATH}/src/github.com/Azure/aks-engine"
env.HOME=clone_dir
String locations_str = "${LOCATIONS}"
String sendTo = "${SEND_TO}".trim()
Integer timeoutInMinutes = STAGE_TIMEOUT.toInteger()
def autoclean="${AUTOCLEAN}"
if(locations_str.equals("all")) {
locations_str = "\
australiaeast \
brazilsouth \
canadacentral canadaeast \
centralindia southindia \
centralus eastus2 eastus northcentralus southcentralus westcentralus westus2 westus \
eastasia southeastasia \
koreacentral koreasouth \
japaneast japanwest \
northeurope westeurope \
uksouth ukwest"
}
def locations = locations_str.tokenize('[ \t\n]+')
dir(clone_dir) {
def img = null
stage('Init') {
deleteDir()
checkout scm
img = docker.build('aks-engine-test', '--pull .')
}
img.inside("-u root:root") {
def junit_dir = "_junit"
try {
String canonicalName = sh(returnStdout: true, script: 'echo "${CLUSTER_DEFINITION%.*}" | sed "s/\\//_/g"').trim()
stage('Setup') {
// Set up Azure
sh("az login --service-principal -u ${SPN_USER} -p ${SPN_PASSWORD} --tenant ${TENANT_ID}")
sh("az account set --subscription ${SUBSCRIPTION_ID}")
// Create report directory
sh("mkdir ${junit_dir}")
// Build and test aks-engine
sh('make ci')
// Create template
env.CLUSTER_DEFINITION = "examples/${CLUSTER_DEFINITION}"
env.ORCHESTRATOR = sh(returnStdout: true, script: './test/step.sh get_orchestrator_type').trim()
sh("printf 'acs-test%x' \$(date '+%s') > INSTANCE_NAME")
env.INSTANCE_NAME = readFile('INSTANCE_NAME').trim()
env.CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID}"
env.CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET}"
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh('./test/step.sh generate_template')
}
}
for (i = 0; i <locations.size(); i++) {
env.LOCATION = locations[i]
env.RESOURCE_GROUP = "test-acs-svc-${canonicalName}-${env.LOCATION}-${env.BUILD_NUM}"
env.DEPLOYMENT_NAME = "${env.RESOURCE_GROUP}"
env.LOGFILE = pwd()+"/${junit_dir}/${canonicalName}.${env.LOCATION}.log"
env.CLEANUP = "y"
def ok = true
// Deploy
try {
stage("${env.LOCATION} deploy") {
def test = "deploy-${env.LOCATION}"
sh("mkdir -p ${junit_dir}/${test}")
sh("cp ./test/shunit/deploy_template.sh ${junit_dir}/${test}/t.sh")
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh("cd ${junit_dir}; shunit.sh -t ${test} > ${test}/junit.xml")
}
sh("grep 'failures=\"0\"' ${junit_dir}/${test}/junit.xml")
}
}
catch(exc) {
env.CLEANUP = autoclean
echo "Exception in [deploy ${canonicalName}/${env.LOCATION}] : ${exc}"
ok = false
}
// Verify deployment
try {
stage("${env.LOCATION} validate") {
if(ok) {
def test = "validate-${env.LOCATION}"
sh("mkdir -p ${junit_dir}/${test}")
sh("cp ./test/shunit/validate_deployment.sh ${junit_dir}/${test}/t.sh")
timeout(time: timeoutInMinutes, unit: 'MINUTES') {
sh("cd ${junit_dir}; shunit.sh -t ${test} > ${test}/junit.xml")
}
sh("grep 'failures=\"0\"' ${junit_dir}/${test}/junit.xml")
}
else {
echo "Skipped verification for ${env.RESOURCE_GROUP}"
}
}
}
catch(exc) {
env.CLEANUP = autoclean
echo "Exception in [validate ${canonicalName}/${env.LOCATION}] : ${exc}"
}
// Clean up
try {
sh('./test/step.sh cleanup')
}
catch(exc) {
echo "Exception ${exc}"
}
} // for (i = 0; i <locations...
// Generate reports
try {
junit("${junit_dir}/**/junit.xml")
archiveArtifacts(allowEmptyArchive: true, artifacts: "${junit_dir}/**/*.log")
if(currentBuild.result == "UNSTABLE") {
currentBuild.result = "FAILURE"
if(sendTo != "") {
emailext(
to: "${sendTo}",
subject: "[AKS Engine Jenkins Failure] ${env.JOB_NAME} #${env.BUILD_NUM}",
body: "${env.BUILD_URL}testReport")
}
}
}
catch(exc) {
echo "Exception ${exc}"
}
}
catch(exc) {
currentBuild.result = "FAILURE"
echo "Exception ${exc}"
}
// Allow for future removal from the host
sh("chmod -R a+rwx ${WORKSPACE}")
}
}
}
}
}
}

View file

@@ -1,197 +0,0 @@
{
"Errors": [
{
"name": "AzCliRunError",
"class": "AzCLI",
"regex": "_init__.py"
},
{
"name": "AzCliLoadError",
"class": "AzCLI",
"regex": "Error loading command module"
},
{
"name": "VMStartTimedOut",
"class": "Deployment",
"regex": "VMStartTimedOut"
},
{
"name": "OSProvisioningTimedOut",
"class": "Deployment",
"regex": "OSProvisioningTimedOut"
},
{
"name": "VMExtensionProvisioningError",
"class": "Deployment",
"regex": "VMExtensionProvisioningError"
},
{
"name": "VMExtensionProvisioningTimeout",
"class": "Deployment",
"regex": "VMExtensionProvisioningTimeout"
},
{
"name": "InternalExecutionError",
"class": "Deployment",
"regex": "InternalExecutionError"
},
{
"name": "SkuNotAvailable",
"class": "Deployment",
"regex": "SkuNotAvailable"
},
{
"name": "MaxStorageAccountsCountPerSubscriptionExceeded",
"class": "Deployment",
"regex": "MaxStorageAccountsCountPerSubscriptionExceeded"
},
{
"name": "ImageManagementOperationError",
"class": "Deployment",
"regex": "ImageManagementOperationError"
},
{
"name": "DiskProcessingError",
"class": "Deployment",
"regex": "DiskProcessingError"
},
{
"name": "DiskServiceInternalError",
"class": "Deployment",
"regex": "DiskServiceInternalError"
},
{
"name": "AllocationFailed",
"class": "Deployment",
"regex": "AllocationFailed"
},
{
"name": "NetworkingInternalOperationError",
"class": "Deployment",
"regex": "NetworkingInternalOperationError"
},
{
"name": "PlatformFaultDomainCount",
"class": "Deployment",
"regex": "platformFaultDomainCount"
},
{
"name": "K8sNodeNotReady",
"class": "Validation",
"regex": "K8S: gave up waiting for apiserver"
},
{
"name": "K8sUnexpectedVersion",
"class": "Validation",
"regex": "K8S: unexpected kubernetes version"
},
{
"name": "K8sContainerNotCreated",
"class": "Validation",
"regex": "K8S: gave up waiting for containers"
},
{
"name": "K8sPodNotRunning",
"class": "Validation",
"regex": "K8S: gave up waiting for running pods"
},
{
"name": "K8sKubeDnsNotRunning",
"class": "Validation",
"regex": "K8S: gave up waiting for kube-dns"
},
{
"name": "K8sDashboardNotRunning",
"class": "Validation",
"regex": "K8S: gave up waiting for kubernetes-dashboard"
},
{
"name": "K8sKubeProxyNotRunning",
"class": "Validation",
"regex": "K8S: gave up waiting for kube-proxy"
},
{
"name": "K8sProxyNotWorking",
"class": "Validation",
"regex": "K8S: gave up verifying proxy"
},
{
"name": "K8sLinuxDeploymentNotReady",
"class": "Validation",
"regex": "K8S-Linux: gave up waiting for deployment"
},
{
"name": "K8sWindowsDeploymentNotReady",
"class": "Validation",
"regex": "K8S-Windows: gave up waiting for deployment"
},
{
"name": "K8sLinuxNoExternalIP",
"class": "Validation",
"regex": "K8S-Linux: gave up waiting for loadbalancer to get an ingress ip"
},
{
"name": "K8sWindowsNoExternalIP",
"class": "Validation",
"regex": "K8S-Windows: gave up waiting for loadbalancer to get an ingress ip"
},
{
"name": "K8sLinuxNginxUnreachable",
"class": "Validation",
"regex": "K8S-Linux: failed to get expected response from nginx through the loadbalancer"
},
{
"name": "K8sWindowsSimpleWebUnreachable",
"class": "Validation",
"regex": "K8S-Windows: failed to get expected response from simpleweb through the loadbalancer"
},
{
"name": "K8sWindowsNoSimpleWebPodname",
"class": "Validation",
"regex": "K8S-Windows: failed to get expected pod name for simpleweb"
},
{
"name": "K8sWindowsNoSimpleWebOutboundInternet",
"class": "Validation",
"regex": "K8S-Windows: failed to get outbound internet connection inside simpleweb container"
},
{
"name": "DcosNodeNotReady",
"class": "Validation",
"regex": "gave up waiting for DCOS nodes"
},
{
"name": "DcosMarathonValidationFailed",
"class": "Validation",
"regex": "dcos/test.sh] marathon validation failed"
},
{
"name": "DcosMarathonNotAdded",
"class": "Validation",
"regex": "dcos/test.sh] gave up waiting for marathon to be added"
},
{
"name": "DcosMarathonLbNotInstalled",
"class": "Validation",
"regex": "Failed to install marathon-lb"
},
{
"name": "DockerCeNetworkNotReady",
"class": "Validation",
"regex": "DockerCE: gave up waiting for network to be created"
},
{
"name": "DockerCeServiceNotReady",
"class": "Validation",
"regex": "DockerCE: gave up waiting for service to be created"
},
{
"name": "DockerCeServiceUnreachable",
"class": "Validation",
"regex": "DockerCE: gave up waiting for service to be externally reachable"
}
]
}
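
Each entry in this catalog pairs an error name and class with a regex matched against test output; the deleted report package consumed it to label failures. A hedged sketch of that matching logic follows (the type and function names are assumptions for illustration, not the report package's actual API):

package main

import (
	"fmt"
	"regexp"
)

// ErrorDef mirrors the JSON keys in the catalog above.
type ErrorDef struct {
	Name  string `json:"name"`
	Class string `json:"class"`
	Regex string `json:"regex"`
}

// classify returns the first catalog entry whose regex matches the output.
func classify(output string, defs []ErrorDef) (ErrorDef, bool) {
	for _, d := range defs {
		if matched, _ := regexp.MatchString(d.Regex, output); matched {
			return d, true
		}
	}
	return ErrorDef{}, false
}

func main() {
	defs := []ErrorDef{{Name: "SkuNotAvailable", Class: "Deployment", Regex: "SkuNotAvailable"}}
	if d, ok := classify("Deployment failed: SkuNotAvailable in westus2", defs); ok {
		fmt.Printf("%s/%s\n", d.Class, d.Name)
	}
}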

View file

@@ -1,52 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package config
import (
"encoding/json"
"errors"
"io/ioutil"
)
// Deployment represents an AKS cluster deployment on Azure
type Deployment struct {
ClusterDefinition string `json:"cluster_definition"`
Location string `json:"location"`
TestCategory string `json:"category,omitempty"`
SkipValidation bool `json:"skip_validation,omitempty"`
}
// TestConfig represents a cluster config
type TestConfig struct {
Deployments []Deployment `json:"deployments"`
}
func (c *TestConfig) Read(data []byte) error {
return json.Unmarshal(data, c)
}
func (c *TestConfig) validate() error {
for _, d := range c.Deployments {
if d.ClusterDefinition == "" {
return errors.New("Cluster definition is not set")
}
}
return nil
}
// GetTestConfig parses a cluster config
func GetTestConfig(fname string) (*TestConfig, error) {
data, err := ioutil.ReadFile(fname)
if err != nil {
return nil, err
}
config := &TestConfig{}
if err = config.Read(data); err != nil {
return nil, err
}
if err = config.validate(); err != nil {
return nil, err
}
return config, nil
}
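
GetTestConfig is the entry point the test runner (main.go, below) used to load one of the deleted JSON manifests. A minimal illustrative caller, using the example config path named in main.go's usage text:

package main

import (
	"fmt"
	"log"

	"github.com/Azure/aks-engine/test/aks-engine-test/config"
)

func main() {
	// Parse a deployment manifest; validation rejects entries with an
	// empty cluster_definition, as config.validate() above shows.
	cfg, err := config.GetTestConfig("test/aks-engine-test/aks-engine-test.json")
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range cfg.Deployments {
		fmt.Println(d.ClusterDefinition, d.Location, d.TestCategory)
	}
}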

View file

@@ -1,45 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package config
import "testing"
func TestConfigParse(t *testing.T) {
testCfg := `
{"deployments":
[
{
"cluster_definition":"examples/kubernetes.json",
"location":"westus",
"skip_validation":true
},
{
"cluster_definition":"examples/dcos.json",
"location":"eastus",
"skip_validation":false
},
{
"cluster_definition":"examples/swarm.json",
"location":"southcentralus"
},
{
"cluster_definition":"examples/swarmmode.json",
"location":"westus2"
}
]
}
`
testConfig := TestConfig{}
if err := testConfig.Read([]byte(testCfg)); err != nil {
t.Fatal(err)
}
if err := testConfig.validate(); err != nil {
t.Fatal(err)
}
if len(testConfig.Deployments) != 4 {
t.Fatalf("Wrong number of deployments: %d instead of 4", len(testConfig.Deployments))
}
}

View file

@@ -1,633 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/aks-engine/pkg/helpers"
"github.com/Azure/aks-engine/test/aks-engine-test/config"
"github.com/Azure/aks-engine/test/aks-engine-test/metrics"
"github.com/Azure/aks-engine/test/aks-engine-test/promote"
"github.com/Azure/aks-engine/test/aks-engine-test/report"
"github.com/pkg/errors"
)
const (
script = "test/step.sh"
stepInitAzure = "set_azure_account"
stepCreateRG = "create_resource_group"
stepPredeploy = "predeploy"
stepGenerateTemplate = "generate_template"
stepDeployTemplate = "deploy_template"
stepPostDeploy = "postdeploy"
stepValidate = "validate"
stepCleanup = "cleanup"
testReport = "TestReport.json"
combinedReport = "CombinedReport.json"
metricsEndpoint = ":8125"
metricsNS = "AKSEngine"
metricError = "Error"
metricDeploymentDuration = "DeploymentDuration"
metricValidationDuration = "ValidationDuration"
)
const usage = `Usage:
aks-engine-test <options>
Options:
-c <configuration.json> : JSON file containing a list of deployment configurations.
Refer to aks-engine/test/aks-engine-test/aks-engine-test.json for examples
-d <aks-engine root directory>
-e <log-errors configuration file>
`
var (
logDir string
orchestratorRe *regexp.Regexp
enableMetrics bool
saName string
saKey string
sa promote.StorageAccount
subID string
rgPrefix string
orchestrator string
)
func init() {
orchestratorRe = regexp.MustCompile(`"orchestratorType": "(\S+)"`)
}
// ErrorStat represents an error status that will be reported
type ErrorStat struct {
errorInfo *report.ErrorInfo
testCategory string
count int64
}
// TestManager is object that contains test runner functions
type TestManager struct {
config *config.TestConfig
Manager *report.Manager
lock sync.Mutex
wg sync.WaitGroup
rootDir string
regions []string
}
// Run begins the test run process
func (m *TestManager) Run() error {
fmt.Printf("Randomizing regional tests against the following regions: %s\n", m.regions)
n := len(m.config.Deployments)
if n == 0 {
return nil
}
sa = promote.StorageAccount{
Name: saName,
Key: saKey,
}
// determine timeout
timeoutMin, err := strconv.Atoi(os.Getenv("STAGE_TIMEOUT_MIN"))
if err != nil {
return errors.Wrap(err, "Error [Atoi STAGE_TIMEOUT_MIN]")
}
timeout := time.Minute * time.Duration(timeoutMin)
usePromoteToFailure := os.Getenv("PROMOTE_TO_FAILURE") == "true"
promoteToFailureTestSuffix := os.Getenv("PROMOTE_TO_FAILURE_TEST_SUFFIX")
var retries int
if usePromoteToFailure {
fmt.Println("Using promote to failure to determine pass/fail")
} else {
// determine number of retries
retries, err = strconv.Atoi(os.Getenv("NUM_OF_RETRIES"))
if err != nil {
// Set default retries if not set
retries = 1
}
fmt.Printf("Will allow %d retries to determine pass/fail\n", retries)
}
// login to Azure
if _, _, err = m.runStep("init", stepInitAzure, os.Environ(), timeout); err != nil {
return err
}
// return values for tests
success := make([]bool, n)
rand.Seed(time.Now().UnixNano())
m.wg.Add(n)
for index, dep := range m.config.Deployments {
go func(index int, dep config.Deployment) {
defer m.wg.Done()
var promToFailInfo promote.DigitalSignalFilter
resMap := make(map[string]*ErrorStat)
if usePromoteToFailure {
testName := strings.Replace(dep.ClusterDefinition, "/", "-", -1)
if promoteToFailureTestSuffix != "" {
testName += fmt.Sprintf("-%s", promoteToFailureTestSuffix)
}
if dep.Location != "" {
testName += fmt.Sprintf("-%s", dep.Location)
}
errorInfo := m.testRun(dep, index, 0, timeout)
var failureStr string
if errorInfo != nil {
if errorStat, ok := resMap[errorInfo.ErrName]; !ok {
resMap[errorInfo.ErrName] = &ErrorStat{errorInfo: errorInfo, testCategory: dep.TestCategory, count: 1}
} else {
errorStat.count++
}
// For RecordTestRun
success[index] = false
failureStr = errorInfo.ErrName
// RecordTestRun QoS
sendRecordTestRun(sa, success[index], dep.Location, testName, dep.TestCategory, failureStr)
// RunPromoteToFailure
if isPromoteToFailureStep(errorInfo.Step) {
promToFailInfo = promote.DigitalSignalFilter{
TestName: testName,
TestType: metricsNS,
FailureStr: failureStr,
FailureCount: 1,
}
var result bool
result, err = promote.RunPromoteToFailure(sa, promToFailInfo)
if err != nil {
fmt.Printf("Got error from RunPromoteToFailure: %#v\n", err)
}
if result {
success[index] = false
} else {
success[index] = true
}
}
} else {
success[index] = true
failureStr = ""
// RecordTestRun QoS
sendRecordTestRun(sa, success[index], dep.Location, testName, dep.TestCategory, failureStr)
// RunPromoteToFailure
promToFailInfo = promote.DigitalSignalFilter{
TestName: testName,
TestType: metricsNS,
FailureStr: failureStr,
FailureCount: 0,
}
promote.RunPromoteToFailure(sa, promToFailInfo)
}
if success[index] {
fmt.Printf("Promote to Fail passed: SUCCESS [%s]\n", testName)
} else {
fmt.Printf("Promote to Fail did not pass: ERROR [%s]\n", testName)
}
} else {
for attempt := 0; attempt < retries; attempt++ {
errorInfo := m.testRun(dep, index, attempt, timeout)
// do not retry if successful
if errorInfo == nil {
success[index] = true
break
}
if errorStat, ok := resMap[errorInfo.ErrName]; !ok {
resMap[errorInfo.ErrName] = &ErrorStat{errorInfo: errorInfo, testCategory: dep.TestCategory, count: 1}
} else {
errorStat.count++
}
}
}
sendErrorMetrics(resMap, usePromoteToFailure)
}(index, dep)
}
m.wg.Wait()
//create reports
if err = m.Manager.CreateTestReport(fmt.Sprintf("%s/%s", logDir, testReport)); err != nil {
fmt.Printf("Failed to create %s: %v\n", testReport, err)
}
if err = m.Manager.CreateCombinedReport(fmt.Sprintf("%s/%s", logDir, combinedReport), testReport); err != nil {
fmt.Printf("Failed to create %s: %v\n", combinedReport, err)
}
// fail the test on error
for _, ok := range success {
if !ok {
return errors.New("Test failed")
}
}
return nil
}
func (m *TestManager) testRun(d config.Deployment, index, attempt int, timeout time.Duration) *report.ErrorInfo {
subID = os.Getenv("SUBSCRIPTION_ID")
rgPrefix = os.Getenv("RESOURCE_GROUP_PREFIX")
if rgPrefix == "" {
rgPrefix = "y"
fmt.Printf("RESOURCE_GROUP_PREFIX is not set. Using default '%s'\n", rgPrefix)
}
// Randomize region if no location was configured
if d.Location == "" {
randomIndex := rand.Intn(len(m.regions))
d.Location = m.regions[randomIndex]
}
testName := strings.TrimSuffix(d.ClusterDefinition, filepath.Ext(d.ClusterDefinition))
instanceName := fmt.Sprintf("acse-%d-%s-%s-%d-%d", rand.Intn(0x0ffffff), d.Location, os.Getenv("BUILD_NUM"), index, attempt)
resourceGroup := fmt.Sprintf("%s-%s-%s-%s-%d-%d", rgPrefix, strings.Replace(testName, "/", "-", -1), d.Location, os.Getenv("BUILD_NUM"), index, attempt)
logFile := fmt.Sprintf("%s/%s.log", logDir, resourceGroup)
validateLogFile := fmt.Sprintf("%s/validate-%s.log", logDir, resourceGroup)
// determine orchestrator
env := os.Environ()
env = append(env, fmt.Sprintf("CLUSTER_DEFINITION=examples/%s", d.ClusterDefinition))
cmd := exec.Command("test/step.sh", "get_orchestrator_type")
cmd.Env = env
out, err := cmd.Output()
if err != nil {
wrileLog(logFile, "Error [getOrchestrator %s] : %v", d.ClusterDefinition, err)
return report.NewErrorInfo(testName, "pretest", "OrchestratorTypeParsingError", "PreRun", d.Location)
}
orchestrator = strings.TrimSpace(string(out))
// update environment
env = append(env, fmt.Sprintf("LOCATION=%s", d.Location))
env = append(env, fmt.Sprintf("ORCHESTRATOR=%s", orchestrator))
env = append(env, fmt.Sprintf("INSTANCE_NAME=%s", instanceName))
env = append(env, fmt.Sprintf("DEPLOYMENT_NAME=%s", instanceName))
env = append(env, fmt.Sprintf("RESOURCE_GROUP=%s", resourceGroup))
// add scenario-specific environment variables
envFile := fmt.Sprintf("examples/%s.env", d.ClusterDefinition)
if _, err = os.Stat(envFile); err == nil {
var envHandle *os.File
envHandle, err = os.Open(envFile)
if err != nil {
wrileLog(logFile, "Error [open %s] : %v", envFile, err)
return report.NewErrorInfo(testName, "pretest", "FileAccessError", "PreRun", d.Location)
}
defer envHandle.Close()
fileScanner := bufio.NewScanner(envHandle)
for fileScanner.Scan() {
str := strings.TrimSpace(fileScanner.Text())
if match, _ := regexp.MatchString(`^\S+=\S+$`, str); match {
env = append(env, str)
}
}
}
var errorInfo *report.ErrorInfo
steps := []string{stepCreateRG, stepPredeploy, stepGenerateTemplate, stepDeployTemplate, stepPostDeploy}
// determine validation script
if !d.SkipValidation {
validate := fmt.Sprintf("test/cluster-tests/%s/test.sh", orchestrator)
if _, err = os.Stat(fmt.Sprintf("%s/%s", m.rootDir, validate)); err == nil {
env = append(env, fmt.Sprintf("VALIDATE=%s", validate))
steps = append(steps, stepValidate)
}
}
for _, step := range steps {
txt, duration, err := m.runStep(resourceGroup, step, env, timeout)
if err != nil {
errorInfo = m.Manager.Process(txt, step, testName, d.Location)
sendDurationMetrics(step, d.Location, duration, errorInfo.ErrName)
wrileLog(logFile, "Error [%s:%s] %v\nOutput: %s", step, resourceGroup, err, txt)
// check AUTOCLEAN flag: if set to 'n', don't remove deployment
if os.Getenv("AUTOCLEAN") == "n" {
env = append(env, "CLEANUP=n")
}
break
}
sendDurationMetrics(step, d.Location, duration, report.ErrSuccess)
wrileLog(logFile, txt)
if step == stepGenerateTemplate {
// set up extra environment variables available after template generation
validateLogFile = fmt.Sprintf("%s/validate-%s.log", logDir, resourceGroup)
env = append(env, fmt.Sprintf("LOGFILE=%s", validateLogFile))
cmd := exec.Command("test/step.sh", "get_orchestrator_version")
cmd.Env = env
out, err := cmd.Output()
if err != nil {
wrileLog(logFile, "Error [%s:%s] %v", "get_orchestrator_version", resourceGroup, err)
errorInfo = report.NewErrorInfo(testName, step, "OrchestratorVersionParsingError", "PreRun", d.Location)
break
}
env = append(env, fmt.Sprintf("EXPECTED_ORCHESTRATOR_VERSION=%s", strings.TrimSpace(string(out))))
cmd = exec.Command("test/step.sh", "get_node_count")
cmd.Env = env
out, err = cmd.Output()
if err != nil {
wrileLog(logFile, "Error [%s:%s] %v", "get_node_count", resourceGroup, err)
errorInfo = report.NewErrorInfo(testName, step, "NodeCountParsingError", "PreRun", d.Location)
break
}
nodesCount := strings.Split(strings.TrimSpace(string(out)), ":")
if len(nodesCount) != 3 {
wrileLog(logFile, "get_node_count: unexpected output '%s'", string(out))
errorInfo = report.NewErrorInfo(testName, step, "NodeCountParsingError", "PreRun", d.Location)
break
}
env = append(env, fmt.Sprintf("EXPECTED_NODE_COUNT=%s", nodesCount[0]))
env = append(env, fmt.Sprintf("EXPECTED_LINUX_AGENTS=%s", nodesCount[1]))
env = append(env, fmt.Sprintf("EXPECTED_WINDOWS_AGENTS=%s", nodesCount[2]))
}
}
// clean up
if txt, _, err := m.runStep(resourceGroup, stepCleanup, env, timeout); err != nil {
wrileLog(logFile, "Error: %v\nOutput: %s", err, txt)
}
if errorInfo == nil {
// do not keep logs for successful test
for _, fname := range []string{logFile, validateLogFile} {
if _, err := os.Stat(fname); !os.IsNotExist(err) {
if err = os.Remove(fname); err != nil {
fmt.Printf("Failed to remove %s : %v\n", fname, err)
}
}
}
}
return errorInfo
}
func isPromoteToFailureStep(step string) bool {
switch step {
case stepDeployTemplate:
return true
case stepValidate:
return true
case stepPostDeploy:
return true
default:
return false
}
}
func isValidEnv() bool {
valid := true
envVars := []string{
"SERVICE_PRINCIPAL_CLIENT_ID",
"SERVICE_PRINCIPAL_CLIENT_SECRET",
"TENANT_ID",
"SUBSCRIPTION_ID",
"CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID",
"CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET",
"STAGE_TIMEOUT_MIN"}
for _, envVar := range envVars {
if os.Getenv(envVar) == "" {
fmt.Printf("Must specify environment variable %s\n", envVar)
valid = false
}
}
return valid
}
func (m *TestManager) runStep(name, step string, env []string, timeout time.Duration) (string, time.Duration, error) {
// prevent ARM throttling
m.lock.Lock()
go func() {
time.Sleep(2 * time.Second)
m.lock.Unlock()
}()
start := time.Now()
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s %s", script, step))
cmd.Dir = m.rootDir
cmd.Env = env
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
if err := cmd.Start(); err != nil {
return "", time.Since(start), err
}
timer := time.AfterFunc(timeout, func() {
cmd.Process.Kill()
})
err := cmd.Wait()
timer.Stop()
now := time.Now().Format("15:04:05")
if err != nil {
fmt.Printf("ERROR [%s] [%s %s]\n", now, step, name)
return out.String(), time.Since(start), err
}
fmt.Printf("SUCCESS [%s] [%s %s]\n", now, step, name)
return out.String(), time.Since(start), nil
}
func wrileLog(fname string, format string, args ...interface{}) {
str := fmt.Sprintf(format, args...)
f, err := os.OpenFile(fname, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
fmt.Printf("Error [OpenFile %s] : %v\n", fname, err)
return
}
defer f.Close()
if _, err = f.Write([]byte(str)); err != nil {
fmt.Printf("Error [Write %s] : %v\n", fname, err)
}
}
func sendErrorMetrics(resMap map[string]*ErrorStat, usePromoteToFailure bool) {
if !enableMetrics {
return
}
for _, errorStat := range resMap {
var severity string
if usePromoteToFailure || errorStat.count > 1 {
severity = "Critical"
} else {
severity = "Intermittent"
}
category := errorStat.testCategory
if len(category) == 0 {
category = "generic"
}
// add metrics
dims := map[string]string{
"TestName": errorStat.errorInfo.TestName,
"TestCategory": category,
"Location": errorStat.errorInfo.Location,
"Error": errorStat.errorInfo.ErrName,
"Class": errorStat.errorInfo.ErrClass,
"Severity": severity,
}
err := metrics.AddMetric(metricsEndpoint, metricsNS, metricError, errorStat.count, dims)
if err != nil {
fmt.Printf("Failed to send metric: %v\n", err)
}
}
}
func sendDurationMetrics(step, location string, duration time.Duration, errorName string) {
if !enableMetrics {
return
}
var metricName string
switch step {
case stepDeployTemplate:
metricName = metricDeploymentDuration
case stepValidate:
metricName = metricValidationDuration
default:
return
}
durationSec := int64(duration / time.Second)
// add metrics
dims := map[string]string{
"Location": location,
"Error": errorName,
}
err := metrics.AddMetric(metricsEndpoint, metricsNS, metricName, durationSec, dims)
if err != nil {
fmt.Printf("Failed to send metric: %v\n", err)
}
}
func sendRecordTestRun(sa promote.StorageAccount, success bool, location, testName, testtype, failureStr string) {
testRecordQoS := promote.TestRunQos{
TimeStampUTC: time.Now(),
TestName: testName,
TestType: metricsNS,
SubscriptionID: subID,
ResourceGroup: rgPrefix,
Region: location,
Orchestrator: testtype,
Success: success,
FailureStr: failureStr,
}
promote.RecordTestRun(sa, testRecordQoS)
}
func mainInternal() error {
var configFile string
var rootDir string
var logErrorFile string
var err error
flag.StringVar(&configFile, "c", "", "deployment configurations")
flag.StringVar(&rootDir, "d", "", "aks-engine root directory")
flag.StringVar(&logErrorFile, "e", "", "logError config file")
flag.StringVar(&saName, "j", "", "SA Name")
flag.StringVar(&saKey, "k", "", "SA Key")
flag.Usage = func() {
fmt.Println(usage)
}
flag.Parse()
testManager := TestManager{}
// validate environment
if !isValidEnv() {
return errors.New("environment is not set")
}
// get test configuration
if configFile == "" {
return errors.New("test configuration is not provided")
}
testManager.config, err = config.GetTestConfig(configFile)
if err != nil {
return err
}
// get Jenkins build number
buildNum, err := strconv.Atoi(os.Getenv("BUILD_NUM"))
if err != nil {
fmt.Println("Warning: BUILD_NUM is not set or invalid. Assuming 0")
buildNum = 0
}
// set environment variable ENABLE_METRICS=y to enable sending the metrics (disabled by default)
if os.Getenv("ENABLE_METRICS") == "y" {
enableMetrics = true
}
// initialize report manager
testManager.Manager = report.New(os.Getenv("JOB_BASE_NAME"), buildNum, len(testManager.config.Deployments), logErrorFile)
// check root directory
if rootDir == "" {
return errors.New("aks-engine root directory is not provided")
}
testManager.rootDir = rootDir
if _, err = os.Stat(fmt.Sprintf("%s/%s", rootDir, script)); err != nil {
return err
}
// make logs directory
logDir = fmt.Sprintf("%s/_logs", rootDir)
os.RemoveAll(logDir)
if err = os.Mkdir(logDir, os.FileMode(0755)); err != nil {
return err
}
// set regions
regions := []string{}
for _, region := range helpers.GetAzureLocations() {
switch region {
case "eastus2euap": // initial deploy region for all RPs, known to be less stable
case "japanwest": // no D2V2 support
case "chinaeast": // private cloud
case "chinanorth": // private cloud
case "chinaeast2": // private cloud
case "chinanorth2": // private cloud
case "germanycentral": // Germany cloud
case "germanynortheast": // Germany cloud
case "usgovvirginia": // US Gov cloud
case "usgoviowa": // US Gov cloud
case "usgovarizona": // US Gov cloud
case "usgovtexas": // US Gov cloud
case "koreacentral": // TODO make sure our versions of azure-cli support this cloud
case "centraluseuap": // TODO determine why this region is flaky
case "brazilsouth": // canary region
case "francecentral": // not supported by sub
default:
regions = append(regions, region)
}
}
testManager.regions = regions
// seed random number generator
rand.Seed(time.Now().Unix())
// run tests
return testManager.Run()
}
func main() {
if err := mainInternal(); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
os.Exit(0)
}


@ -1,42 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package metrics
import (
"fmt"
"github.com/Azure/aks-engine/pkg/helpers"
"github.com/alexcesaro/statsd"
)
type mdmBucket struct {
Namespace string `json:"Namespace"`
Metric string `json:"Metric"`
Dims map[string]string `json:"Dims"`
}
// AddMetric adds the defined metric to a list of metrics to send to MDM
func AddMetric(endpoint, namespace, metric string, count int64, dims map[string]string) error {
bucket := mdmBucket{
Namespace: namespace,
Metric: metric,
Dims: dims}
data, err := helpers.JSONMarshal(bucket, false)
if err != nil {
return err
}
client, err := statsd.New(
statsd.Address(endpoint),
statsd.Network("udp"),
statsd.ErrorHandler(
func(err error) {
fmt.Println(err.Error())
}))
if err != nil {
return err
}
defer client.Close()
client.Count(string(data), count)
return nil
}
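// Example (illustrative; the endpoint is an assumed local statsd listener):
// a count of 2 for metric "errors" in namespace "e2e" is serialized as the
// JSON bucket above and sent as a single statsd count over UDP:
//
//   _ = AddMetric("localhost:8125", "e2e", "errors", 2,
//       map[string]string{"Location": "eastus"})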


@ -1,96 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package metrics
import (
"errors"
"net"
"sync"
"testing"
"time"
)
type testSession struct {
endpoint string
conn *net.UDPConn
timer *time.Timer
wg sync.WaitGroup
err error
}
func newSession() *testSession {
sess := &testSession{
endpoint: ":12345",
timer: time.NewTimer(time.Second),
}
sess.timer.Stop()
return sess
}
func (s *testSession) start() error {
addr, err := net.ResolveUDPAddr("udp", s.endpoint)
if err != nil {
return err
}
s.conn, err = net.ListenUDP("udp", addr)
if err != nil {
return err
}
s.wg.Add(1)
go s.run()
return nil
}
func (s *testSession) stop() error {
// allow up to 2 sec to complete session
s.timer.Reset(2 * time.Second)
s.wg.Wait()
s.conn.Close()
return s.err
}
func (s *testSession) run() {
defer s.wg.Done()
buffer := make([]byte, 1024)
for {
select {
case <-s.timer.C:
s.err = errors.New("no metrics message received before timeout")
return
default:
n, err := s.conn.Read(buffer)
if err != nil {
s.err = err
return
}
if n > 0 {
s.timer.Stop()
return
}
}
}
}
func TestMetric(t *testing.T) {
sess := newSession()
if err := sess.start(); err != nil {
t.Fatal(err)
}
dims := map[string]string{
"test": "myTest",
"location": "myLocation",
"error": "myError",
"errClass": "myErrorClass",
}
if err := AddMetric(sess.endpoint, "metricsNS", "metricName", 1, dims); err != nil {
t.Fatal(err)
}
if err := sess.stop(); err != nil {
t.Fatal(err)
}
}


@ -1,214 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package promote
import (
"fmt"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
)
const (
// ACSRPTests are the rp tests
ACSRPTests string = "ACSRpTests"
// AKSEngineTests are the AKS Engine tests
AKSEngineTests string = "AKSEngineTests"
// SimDemTests are the SimDem tests
SimDemTests string = "SimDemTests"
)
// StorageAccount is how we connect to storage
type StorageAccount struct {
Name string
Key string
}
const (
// Mesos is the string constant for MESOS orchestrator type
Mesos string = "Mesos"
// DCOS is the string constant for DCOS orchestrator type and defaults to DCOS188
DCOS string = "DCOS"
// Swarm is the string constant for the Swarm orchestrator type
Swarm string = "Swarm"
// Kubernetes is the string constant for the Kubernetes orchestrator type
Kubernetes string = "Kubernetes"
// SwarmMode is the string constant for the Swarm Mode orchestrator type
SwarmMode string = "SwarmMode"
// RecordTestRunTableName for storing RecordTestRun
RecordTestRunTableName string = "RecordTestRun"
// PromoteToFailureTableName for storing PromoteToFailure
PromoteToFailureTableName string = "PromoteToFailure"
)
// TestRunQos structure
type TestRunQos struct {
TimeStampUTC time.Time // partition key
TestName string // row key
TestType string
SubscriptionID string
ResourceGroup string
Region string
Orchestrator string
Success bool
FailureStr string
// DeploymentDurationInSeconds int
}
// DigitalSignalFilter represents a test failure signal used in promotion decisions
type DigitalSignalFilter struct {
TestName string // partition key
TestType string // row key
FailureStr string
FailureCount float64
}
// RecordTestRun procedure pushes all test result data to RecordTestRun Table
func RecordTestRun(sa StorageAccount, testRunQos TestRunQos) {
// fmt.Printf("record test run Qos to '%s': %v\n", sa.Name, testRunQos)
// get Azure Storage Client
var err error
var azureStoreClient storage.Client
if azureStoreClient, err = storage.NewBasicClient(sa.Name, sa.Key); err != nil {
fmt.Printf("FAIL to create azure storage basic client, Error: %s\n", err.Error())
return
}
// From storageClient, get Table Service Client
tsc := azureStoreClient.GetTableService()
table1 := tsc.GetTableReference(RecordTestRunTableName)
// Create Table if it does not exist
if err = table1.Create(30, storage.FullMetadata, nil); err != nil && !strings.Contains(err.Error(), "The table specified already exists") {
fmt.Printf("Failed to create table: %s, Error: %s\n", RecordTestRunTableName, err.Error())
return
}
// fmt.Printf("Table : %s is created\n", RecordTestRunTableName)
t := testRunQos.TimeStampUTC.Format("2006-01-02 15:04:05")
// Insert Entity Entry into Table
entity := table1.GetEntityReference(t, testRunQos.TestName)
props := map[string]interface{}{
"TestType": testRunQos.TestType,
"SubscriptionID": testRunQos.SubscriptionID,
"ResourceGroup": testRunQos.ResourceGroup,
"Region": testRunQos.Region,
"Orchestrator": testRunQos.Orchestrator,
"Success": testRunQos.Success,
"FailureStr": testRunQos.FailureStr,
}
entity.Properties = props
if err = entity.Insert(storage.FullMetadata, nil); err != nil {
fmt.Printf("Could not insert entity into table, Error: %v\n", err)
return
}
}
// RunPromoteToFailure increments the failure count for the given signal and
// returns true when the error is promoted to a failure, else false
func RunPromoteToFailure(sa StorageAccount, testRunPromToFail DigitalSignalFilter) (bool, error) {
// get Azure Storage Client
var err error
var azureStoreClient storage.Client
if azureStoreClient, err = storage.NewBasicClient(sa.Name, sa.Key); err != nil {
fmt.Printf("FAIL to create azure storage basic client, Error: %s\n", err.Error())
return false, err
}
// From azureStoreClient, get Table Service Client
tsc := azureStoreClient.GetTableService()
table1 := tsc.GetTableReference(PromoteToFailureTableName)
// Create Table if it does not exist
if err = table1.Create(30, storage.FullMetadata, nil); err != nil && !strings.Contains(err.Error(), "The table specified already exists") {
fmt.Printf("Failed to create table: %s, Error: %s\n", PromoteToFailureTableName, err.Error())
return false, err
}
// 1. Get the entity using partition key and row key
// 2. If it doesn't exist, create the new entity and exit as success
// 3. If it exists, increment the FailureCount
// 4. If FailureCount reaches 3 or more, promote to failure
entity := table1.GetEntityReference(testRunPromToFail.TestName, testRunPromToFail.TestType)
err = entity.Get(30, storage.FullMetadata, &storage.GetEntityOptions{
Select: []string{"FailureStr", "FailureCount"},
})
if err != nil {
if strings.Contains(err.Error(), "The specified resource does not exist") {
// Entity does not exist in Table
// Insert Entity into Table
err = insertEntity(table1, testRunPromToFail)
if err != nil {
fmt.Printf("Error inserting entity : %v\n", err)
return false, err
}
}
return false, err
}
existingFailureStr := entity.Properties["FailureStr"]
existingFailureCount := entity.Properties["FailureCount"]
if existingFailureStr != testRunPromToFail.FailureStr {
// Perform Update of this entity with testRunPromToFail.FailureStr and testRunPromToFail.FailureCount
updateEntity(entity, testRunPromToFail.FailureCount, testRunPromToFail.FailureStr)
return false, nil
}
if testRunPromToFail.FailureCount == 0 {
// Update the Entity with FailureCount 0
// Return False
updateEntity(entity, testRunPromToFail.FailureCount, testRunPromToFail.FailureStr)
fmt.Printf("Reset Failure Count for %s to : %v\n\n", testRunPromToFail.TestName, testRunPromToFail.FailureCount)
return false, nil
}
fmt.Printf("Existing Failure Count for %s : %v\n\n", testRunPromToFail.TestName, existingFailureCount)
newFailureCount := existingFailureCount.(float64) + testRunPromToFail.FailureCount
fmt.Printf("Incremented Failure Count for %s to : %v\n\n", testRunPromToFail.TestName, newFailureCount)
updateEntity(entity, newFailureCount, testRunPromToFail.FailureStr)
if newFailureCount >= 3 {
return true, nil
}
return false, nil
}
func insertEntity(table *storage.Table, testRunPromToFail DigitalSignalFilter) error {
// Insert Entity Entry into Table
entity := table.GetEntityReference(testRunPromToFail.TestName, testRunPromToFail.TestType)
props := map[string]interface{}{
"FailureStr": testRunPromToFail.FailureStr,
"FailureCount": testRunPromToFail.FailureCount,
}
entity.Properties = props
if err := entity.Insert(storage.FullMetadata, nil); err != nil {
fmt.Printf("Could not insert entity into table, Error: %v\n", err)
return err
}
return nil
}
func updateEntity(entity *storage.Entity, failureCount float64, failureStr string) {
props := map[string]interface{}{
"FailureStr": failureStr,
"FailureCount": failureCount,
}
entity.Properties = props
if err := entity.Update(false, nil); err != nil {
fmt.Printf("Error in Updating Entity : %v\n\n", err)
}
}
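// Example (illustrative; values are hypothetical):
//
//   sa := StorageAccount{Name: "myaccount", Key: "bXlrZXk="}
//   promoted, _ := RunPromoteToFailure(sa, DigitalSignalFilter{
//       TestName:     "k8s-eastus",
//       TestType:     AKSEngineTests,
//       FailureStr:   "DeploymentTimeout",
//       FailureCount: 1,
//   })
//   // promoted becomes true once the same FailureStr accumulates a count of 3 or more.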


@ -1,201 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package report
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"regexp"
"strconv"
"sync"
"time"
"github.com/Azure/aks-engine/pkg/helpers"
)
// ErrorInfo represents the CI error
type ErrorInfo struct {
TestName string
Step string
ErrName string
ErrClass string
Location string
}
// ErrorStat represents the aggregate error count and region
type ErrorStat struct {
Count int `json:"count"`
Locations map[string]int `json:"locations"`
}
// Manager represents the details about a build and errors in that build
type Manager struct {
lock sync.Mutex
JobName string `json:"job"`
BuildNum int `json:"build"`
Deployments int `json:"deployments"`
Errors int `json:"errors"`
StartTime time.Time `json:"startTime"`
Duration string `json:"duration"`
// Failure map: key=error, value=locations
Failures map[string]*ErrorStat `json:"failures"`
LogErrors logErrors `json:"-"`
}
type logErrors struct {
LogErrors []logError `json:"Errors"`
}
type logError struct {
Name string `json:"name"`
Class string `json:"class"`
Regex string `json:"regex"`
}
const (
// ErrClassDeployment represents an error during deployment
ErrClassDeployment = "Deployment"
// ErrClassValidation represents an error during validation (tests)
ErrClassValidation = "Validation"
// ErrClassAzcli represents an error with Azure CLI
ErrClassAzcli = "AzCLI"
// ErrClassNone represents absence of error
ErrClassNone = "None"
// ErrSuccess represents a successful test run
ErrSuccess = "Success"
// ErrUnknown represents an unknown error
ErrUnknown = "UnspecifiedError"
)
// New creates a new error report
func New(jobName string, buildNum int, nDeploys int, logErrorsFileName string) *Manager {
h := &Manager{}
h.JobName = jobName
h.BuildNum = buildNum
h.Deployments = nDeploys
h.Errors = 0
h.StartTime = time.Now().UTC()
h.Failures = make(map[string]*ErrorStat)
h.LogErrors = makeErrorList(logErrorsFileName)
return h
}
func makeErrorList(fileName string) logErrors {
dummy := logErrors{}
if fileName != "" {
file, e := ioutil.ReadFile(fileName)
if e != nil {
// do not exit the tests
fmt.Printf("ERROR: %v\n", e)
}
json.Unmarshal(file, &dummy)
}
return dummy
}
// Copy returns a deep copy of the Manager, including per-location failure counts
func (h *Manager) Copy() *Manager {
n := New(h.JobName, h.BuildNum, h.Deployments, "")
n.Errors = h.Errors
n.StartTime = h.StartTime
for e, f := range h.Failures {
locs := make(map[string]int)
for l, c := range f.Locations {
locs[l] = c
}
n.Failures[e] = &ErrorStat{Count: f.Count, Locations: locs}
}
return n
}
// Process matches txt against the configured log-error regexes, records the failure, and returns the corresponding ErrorInfo
func (h *Manager) Process(txt, step, testName, location string) *ErrorInfo {
for _, logErr := range h.LogErrors.LogErrors {
if match, _ := regexp.MatchString(logErr.Regex, txt); match {
h.addFailure(logErr.Name, map[string]int{location: 1})
return NewErrorInfo(testName, step, logErr.Name, logErr.Class, location)
}
}
h.addFailure(ErrUnknown, map[string]int{location: 1})
return NewErrorInfo(testName, step, ErrUnknown, ErrClassNone, location)
}
func (h *Manager) addFailure(key string, locations map[string]int) {
h.lock.Lock()
defer h.lock.Unlock()
cnt := 0
if failure, ok := h.Failures[key]; !ok {
locs := make(map[string]int)
for l, c := range locations {
locs[l] = c
cnt += c
}
h.Failures[key] = &ErrorStat{Count: cnt, Locations: locs}
} else {
for l, c := range locations {
cnt += c
if _, ok := failure.Locations[l]; !ok {
failure.Locations[l] = c
} else {
failure.Locations[l] += c
}
}
failure.Count += cnt
}
h.Errors += cnt
}
// CreateTestReport writes the report to filepath as indented JSON
func (h *Manager) CreateTestReport(filepath string) error {
h.Duration = time.Now().UTC().Sub(h.StartTime).String()
data, err := helpers.JSONMarshalIndent(h, "", " ", false)
if err != nil {
return err
}
file, err := os.OpenFile(filepath, os.O_CREATE|os.O_WRONLY, os.FileMode(0644))
if err != nil {
return err
}
defer file.Close()
_, err = file.Write(data)
return err
}
// CreateCombinedReport merges the most recent COMBINED_PAST_REPORTS reports into this one and writes the result to filepath
func (h *Manager) CreateCombinedReport(filepath, testReportFname string) error {
// "COMBINED_PAST_REPORTS" is the number of recent reports in the combined report
reports, err := strconv.Atoi(os.Getenv("COMBINED_PAST_REPORTS"))
if err != nil || reports <= 0 {
return nil
}
combinedReport := h.Copy()
for i := 1; i <= reports; i++ {
data, err := ioutil.ReadFile(fmt.Sprintf("%s/%d/%s/%s",
os.Getenv("JOB_BUILD_ROOTDIR"), h.BuildNum-i, os.Getenv("JOB_BUILD_SUBDIR"), testReportFname))
if err != nil {
break
}
testReport := &Manager{}
if err := json.Unmarshal(data, &testReport); err != nil {
break
}
combinedReport.StartTime = testReport.StartTime
combinedReport.Deployments += testReport.Deployments
for e, f := range testReport.Failures {
combinedReport.addFailure(e, f.Locations)
}
}
return combinedReport.CreateTestReport(filepath)
}
// NewErrorInfo constructs an ErrorInfo from its fields
func NewErrorInfo(testName, step, errName, errClass, location string) *ErrorInfo {
return &ErrorInfo{TestName: testName, Step: step, ErrName: errName, ErrClass: errClass, Location: location}
}
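// Example (illustrative; the arguments are hypothetical):
//
//   mgr := New("nightly", 42, 8, "aks-engine-errors.json")
//   errInfo := mgr.Process(stepOutput, "deploy", "k8s-eastus", "eastus")
//   fmt.Println(errInfo.ErrName, errInfo.ErrClass)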


@ -1,41 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package report
import (
"encoding/json"
"io/ioutil"
"testing"
)
func TestReportParse(t *testing.T) {
jobName := "TestJob"
buildNum := 1
nDeploys := 4
fileName := "../aks-engine-errors.json"
dummy := New(jobName, buildNum, nDeploys, fileName)
txt := "Error loading command module"
step := "step"
testName := "dummyTest"
d := "westus"
_ = dummy.Process(txt, step, testName, d)
testReport := "TestReport.json"
if err := dummy.CreateTestReport(testReport); err != nil {
t.Fatal(err)
}
raw, err := ioutil.ReadFile(testReport)
if err != nil {
t.Fatal(err)
}
h := &Manager{}
json.Unmarshal(raw, &h)
if len(h.LogErrors.LogErrors) != 0 {
t.Fatalf("Expected LogErrors to be empty, instead it is of size %d", len(h.LogErrors.LogErrors))
}
}


@ -1,35 +0,0 @@
#!/usr/bin/env groovy
node("slave") {
withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'AZURE_CLI_SPN_AKS_TEST',
passwordVariable: 'SPN_PASSWORD', usernameVariable: 'SPN_USER']]) {
timestamps {
wrap([$class: 'AnsiColorBuildWrapper', 'colorMapName': 'XTerm']) {
def jobname = "${JOBNAME}"
def tests = [:]
def pairs = "${ORCHESTRATOR_LOCATION}".tokenize('|')
for(i = 0; i < pairs.size(); i++) {
def pair = pairs[i].tokenize('[ \t\n]+')
if(pair.size() != 2) {
echo "Skipping '"+pairs[i]+"'"
continue
}
def orchestrator = pair[0]
def location = pair[1]
def name = "${orchestrator}-${location}"
tests[name] = {
stage(name) {
build job: jobname,
parameters:
[[$class: 'StringParameterValue', name: 'ORCHESTRATOR', value: orchestrator],
[$class: 'StringParameterValue', name: 'LOCATION', value: location]]
}
}
}
parallel tests
}
}
}
}
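// Illustrative parameter format (assumed): ORCHESTRATOR_LOCATION holds
// '|'-separated "<orchestrator> <location>" pairs, e.g.
//   kubernetes eastus|swarmmode westus2
// Each valid pair becomes one parallel downstream build of ${JOBNAME}.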


@ -1,27 +0,0 @@
#!/bin/bash
####################################################
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
####################################################
set -eu -o pipefail
set -x
git init .
git clean -dfx
git reset --hard
git config --local user.name 'AKS Bot'
git config --local user.email 'aks-bot@microsoft.com'
git fetch --tags https://github.com/${REPO_OWNER}/${REPO_NAME} master +refs/pull/${PULL_NUMBER}/head:refs/pr/${PULL_NUMBER}
git checkout -B test "${PULL_BASE_SHA}"
git merge --no-ff -m "Merge +refs/pull/${PULL_NUMBER}/head:refs/pr/${PULL_NUMBER}" "${PULL_PULL_SHA}"
echo "----------------------------------------------------------"
env
echo "----------------------------------------------------------"


@ -1,46 +0,0 @@
{
"id": "web",
"container": {
"type": "DOCKER",
"docker": {
"image": "yeasy/simple-web",
"network": "HOST",
"forcePullImage": false,
"privileged": false
}
},
"instances": 3,
"cpus": 0.1,
"mem": 65,
"cmd": null,
"disk": 0,
"executor": null,
"fetch": null,
"constraints": null,
"acceptedResourceRoles": [
"slave_public"
],
"user": null,
"env": null,
"healthChecks": [{
"protocol": "HTTP",
"path": "/",
"portIndex": 0,
"timeoutSeconds": 10,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"maxConsecutiveFailures": 10
}],
"portDefinitions": [
{
"protocol": "Tcp",
"port": 0,
"name": "80"
}
],
"labels":{
"HAPROXY_GROUP":"external",
"HAPROXY_0_VHOST":"{agentFQDN}",
"HAPROXY_0_MODE":"http"
}
}


@ -1,31 +0,0 @@
{
"id": "web",
"container": {
"type": "DOCKER",
"docker": {
"image": "yeasy/simple-web",
"network": "BRIDGE",
"portMappings": [
{ "hostPort": 0, "containerPort": 80, "servicePort": 10000 }
],
"forcePullImage":true
}
},
"instances": 3,
"cpus": 0.1,
"mem": 65,
"healthChecks": [{
"protocol": "HTTP",
"path": "/",
"portIndex": 0,
"timeoutSeconds": 10,
"gracePeriodSeconds": 10,
"intervalSeconds": 2,
"maxConsecutiveFailures": 10
}],
"labels":{
"HAPROXY_GROUP":"external",
"HAPROXY_0_VHOST":"{agentFQDN}",
"HAPROXY_0_MODE":"http"
}
}


@ -1,126 +0,0 @@
#!/bin/bash
####################################################
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
####################################################
# do not use 'set -e'
# -o pipefail
set -x
source "$DIR/../utils.sh"
ENV_FILE="${CLUSTER_DEFINITION}.env"
if [ -e "${ENV_FILE}" ]; then
source "${ENV_FILE}"
fi
MARATHON_JSON="${MARATHON_JSON:-marathon.json}"
remote_exec="ssh -i "${SSH_KEY}" -o ConnectTimeout=30 -o StrictHostKeyChecking=no azureuser@${INSTANCE_NAME}.${LOCATION}.cloudapp.azure.com -p2200"
agentFQDN="${INSTANCE_NAME}0.${LOCATION}.cloudapp.azure.com"
remote_cp="scp -i "${SSH_KEY}" -P 2200 -o StrictHostKeyChecking=no"
function teardown {
${remote_exec} ./dcos marathon app remove /web
}
###### Check node count
function check_node_count() {
log "Checking node count"
count=20
while (( $count > 0 )); do
log " ... counting down $count"
node_count=$(${remote_exec} curl -s http://localhost:1050/system/health/v1/nodes | jq '.nodes | length')
[ $? -eq 0 ] && [ ! -z "$node_count" ] && [ $node_count -eq ${EXPECTED_NODE_COUNT} ] && log "Successfully got $EXPECTED_NODE_COUNT nodes" && break
sleep 30; count=$((count-1))
done
if (( $node_count != ${EXPECTED_NODE_COUNT} )); then
log "gave up waiting for DCOS nodes: $node_count available, ${EXPECTED_NODE_COUNT} expected"
exit 1
fi
}
check_node_count
log "Downloading dcos"
${remote_exec} curl -O https://dcos-mirror.azureedge.net/binaries/cli/linux/x86-64/dcos-1.10/dcos
if [[ "$?" != "0" ]]; then log "Failed to download dcos"; exit 1; fi
log "Setting dcos permissions"
${remote_exec} chmod a+x ./dcos
if [[ "$?" != "0" ]]; then log "Failed to chmod dcos"; exit 1; fi
log "Configuring dcos"
${remote_exec} ./dcos cluster setup http://localhost:80
if [[ "$?" != "0" ]]; then log "Failed to configure dcos"; exit 1; fi
log "Copying marathon.json"
${remote_cp} "${DIR}/${MARATHON_JSON}" azureuser@${INSTANCE_NAME}.${LOCATION}.cloudapp.azure.com:marathon.json
if [[ "$?" != "0" ]]; then log "Failed to copy marathon.json"; exit 1; fi
# feed agentFQDN to marathon.json
log "Configuring marathon.json"
${remote_exec} sed -i "s/{agentFQDN}/${agentFQDN}/g" marathon.json
if [[ "$?" != "0" ]]; then log "Failed to configure marathon.json"; exit 1; fi
log "Adding marathon app"
count=20
while (( $count > 0 )); do
log " ... counting down $count"
${remote_exec} ./dcos marathon app list | grep /web
retval=$?
if [[ $retval -eq 0 ]]; then log "Marathon App successfully installed" && break; fi
${remote_exec} ./dcos marathon app add marathon.json
retval=$?
if [[ "$retval" == "0" ]]; then break; fi
sleep 15; count=$((count-1))
done
if [[ $retval -ne 0 ]]; then log "gave up waiting for marathon to be added"; exit 1; fi
# only need to teardown if app added successfully
trap teardown EXIT
log "Validating marathon app"
count=0
while [[ ${count} -lt 25 ]]; do
count=$((count+1))
log " ... cycle $count"
running=$(${remote_exec} ./dcos marathon app show /web | jq .tasksRunning)
if [[ "${running}" == "3" ]]; then
log "Found 3 running tasks"
break
fi
sleep ${count}
done
if [[ "${running}" != "3" ]]; then
log "marathon validation failed"
${remote_exec} ./dcos marathon app show /web
${remote_exec} ./dcos marathon app list
exit 1
fi
# install marathon-lb
${remote_exec} ./dcos package install marathon-lb --yes
if [[ "$?" != "0" ]]; then log "Failed to install marathon-lb"; exit 1; fi
# curl simpleweb through external haproxy
log "Checking Service"
count=20
while true; do
log " ... counting down $count"
rc=$(curl -sI --max-time 60 "http://${agentFQDN}" | head -n1 | cut -d$' ' -f2)
[[ "$rc" -eq "200" ]] && log "Successfully hitting simpleweb through external haproxy http://${agentFQDN}" && break
if [[ "${count}" -le 1 ]]; then
log "failed to get expected response from nginx through the loadbalancer: Error $rc"
exit 1
fi
sleep 15; count=$((count-1))
done


@ -1,183 +0,0 @@
#!/bin/bash
function test_linux_deployment() {
###### Testing an nginx deployment
log "Testing deployments"
k create namespace ${namespace}
NGINX="docker.io/library/nginx:latest"
IMAGE="${NGINX}" # default to the library image unless we're in TEST_ACR mode
if [[ "${TEST_ACR}" == "y" ]]; then
# force it to pull from ACR
IMAGE="${ACR_REGISTRY}/test/nginx:latest"
# wait for acr
wait
# TODO: how to do this without polluting user home dir?
docker login --username="${SERVICE_PRINCIPAL_CLIENT_ID}" --password="${SERVICE_PRINCIPAL_CLIENT_SECRET}" "${ACR_REGISTRY}"
docker pull "${NGINX}"
docker tag "${NGINX}" "${IMAGE}"
docker push "${IMAGE}"
fi
k run --image="${IMAGE}" nginx --namespace=${namespace} --overrides='{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}'
count=12
while (( $count > 0 )); do
log " ... counting down $count"
running=$(k get pods --namespace=${namespace} | grep nginx | grep Running | wc | awk '{print $1}')
if (( ${running} == 1 )); then break; fi
sleep 5; count=$((count-1))
done
if (( ${running} != 1 )); then
log "K8S-Linux: gave up waiting for deployment"
k get all --namespace=${namespace}
exit 1
fi
k expose deployments/nginx --type=LoadBalancer --namespace=${namespace} --port=80
log "Checking Service External IP"
count=60
external_ip=""
while (( $count > 0 )); do
log " ... counting down $count"
external_ip=$(k get svc --namespace ${namespace} nginx --template="{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}" || echo "")
[[ -n "${external_ip}" ]] && break
sleep 10; count=$((count-1))
done
if [[ -z "${external_ip}" ]]; then
log "K8S-Linux: gave up waiting for loadbalancer to get an ingress ip"
exit 1
fi
log "Checking Service"
count=5
success="n"
while (( $count > 0 )); do
log " ... counting down $count"
ret=$(curl -f --max-time 60 "http://${external_ip}" | grep 'Welcome to nginx!' || echo "curl_error")
if [[ $ret =~ .*'Welcome to nginx!'.* ]]; then
success="y"
break
fi
sleep 5; count=$((count-1))
done
if [[ "${success}" != "y" ]]; then
log "K8S-Linux: failed to get expected response from nginx through the loadbalancer"
exit 1
fi
}
function test_windows_deployment() {
###### Testing a simpleweb windows deployment
log "Testing Windows deployments"
log "Creating simpleweb service"
k apply -f "$DIR/simpleweb-windows.yaml"
count=90
while (( $count > 0 )); do
log " ... counting down $count"
running=$(k get pods --namespace=default | grep win-webserver | grep Running | wc | awk '{print $1}')
if (( ${running} == 1 )); then break; fi
sleep 10; count=$((count-1))
done
if (( ${running} != 1 )); then
log "K8S-Windows: gave up waiting for deployment"
k get all --namespace=default
exit 1
fi
log "Checking Service External IP"
count=60
external_ip=""
while (( $count > 0 )); do
log " ... counting down $count"
external_ip=$(k get svc --namespace default win-webserver --template="{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}" || echo "")
[[ -n "${external_ip}" ]] && break
sleep 10; count=$((count-1))
done
if [[ -z "${external_ip}" ]]; then
log "K8S-Windows: gave up waiting for loadbalancer to get an ingress ip"
exit 1
fi
log "Checking Service"
count=5
success="n"
while (( $count > 0 )); do
log " ... counting down $count"
ret=$(curl -f --max-time 60 "http://${external_ip}" | grep 'Windows Container Web Server' || echo "curl_error")
if [[ $ret =~ .*'Windows Container Web Server'.* ]]; then
success="y"
break
fi
sleep 10; count=$((count-1))
done
if [[ "${success}" != "y" ]]; then
log "K8S-Windows: failed to get expected response from simpleweb through the loadbalancer"
exit 1
fi
log "Checking outbound connection"
count=10
while (( $count > 0 )); do
log " ... counting down $count"
winpodname=$(k get pods --namespace=default | grep win-webserver | awk '{print $1}')
[[ -n "${winpodname}" ]] && break
sleep 10; count=$((count-1))
done
if [[ -z "${winpodname}" ]]; then
log "K8S-Windows: failed to get expected pod name for simpleweb"
exit 1
fi
log "query DNS"
count=0 # disabled while outbound connection bug is present
success="y" # disabled while outbound connection bug is present
while (( $count > 0 )); do
log " ... counting down $count"
query=$(k exec $winpodname -- powershell nslookup www.bing.com)
if echo ${query} | grep -q "DNS request timed out" && echo ${query} | grep -q "UnKnown"; then
success="y"
break
fi
sleep 10; count=$((count-1))
done
# temporarily disable breaking on errors to allow the retry
set +e
log "curl external website"
count=0 # disabled while outbound connection bug is present
success="y" # disabled while outbound connection bug is present
while (( $count > 0 )); do
log " ... counting down $count"
# curl without getting status first and see the response. getting status sometimes has the problem to hang
# and it doesn't repro when running k from the node
k exec $winpodname -- powershell iwr -UseBasicParsing -TimeoutSec 60 www.bing.com
statuscode=$(k exec $winpodname -- powershell iwr -UseBasicParsing -TimeoutSec 60 www.bing.com | grep StatusCode)
if [[ ${statuscode} != "" ]] && [[ $(echo ${statuscode} | grep 200 | awk '{print $3}' | tr -d '\r') -eq "200" ]]; then
log "got 200 status code"
log "${statuscode}"
success="y"
break
fi
log "curl failed, retrying..."
ipconfig=$(k exec $winpodname -- powershell ipconfig /all)
log "$ipconfig"
# TODO: reduce sleep time when outbound connection delay is fixed
sleep 100; count=$((count-1))
done
set -e
if [[ "${success}" != "y" ]]; then
nslookup=$(k exec $winpodname -- powershell nslookup www.bing.com)
log "$nslookup"
log "getting the last 50 events to check timeout failure"
hdr=$(k get events | head -n 1)
log "$hdr"
evt=$(k get events | tail -n 50)
log "$evt"
log "K8S-Windows: failed to get outbound internet connection inside simpleweb container"
exit 1
else
log "outbound connection succeeded!"
fi
}


@ -1,38 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: win-webserver
labels:
app: win-webserver
spec:
ports:
# the port that this service should serve on
- port: 80
targetPort: 80
selector:
app: win-webserver
type: LoadBalancer
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: win-webserver
name: win-webserver
spec:
replicas: 1
template:
metadata:
labels:
app: win-webserver
name: win-webserver
spec:
containers:
- name: windowswebserver
image: microsoft/windowsservercore:1803
command:
- powershell.exe
- -command
- "<#code used from https://gist.github.com/wagnerandrade/5424431#> ; $$listener = New-Object System.Net.HttpListener ; $$listener.Prefixes.Add('http://*:80/') ; $$listener.Start() ; $$callerCounts = @{} ; Write-Host('Listening at http://*:80/') ; while ($$listener.IsListening) { ;$$context = $$listener.GetContext() ;$$requestUrl = $$context.Request.Url ;$$clientIP = $$context.Request.RemoteEndPoint.Address ;$$response = $$context.Response ;Write-Host '' ;Write-Host('> {0}' -f $$requestUrl) ; ;$$count = 1 ;$$k=$$callerCounts.Get_Item($$clientIP) ;if ($$k -ne $$null) { $$count += $$k } ;$$callerCounts.Set_Item($$clientIP, $$count) ;$$ip=(Get-NetAdapter | Get-NetIpAddress); $$header='<html><body><H1>Windows Container Web Server</H1>' ;$$callerCountsString='' ;$$callerCounts.Keys | % { $$callerCountsString+='<p>IP {0} callerCount {1} ' -f $$ip[1].IPAddress,$$callerCounts.Item($$_) } ;$$footer='</body></html>' ;$$content='{0}{1}{2}' -f $$header,$$callerCountsString,$$footer ;Write-Output $$content ;$$buffer = [System.Text.Encoding]::UTF8.GetBytes($$content) ;$$response.ContentLength64 = $$buffer.Length ;$$response.OutputStream.Write($$buffer, 0, $$buffer.Length) ;$$response.Close() ;$$responseStatus = $$response.StatusCode ;Write-Host('< {0}' -f $$responseStatus) } ; "
nodeSelector:
beta.kubernetes.io/os: windows


@ -1,219 +0,0 @@
#!/bin/bash
####################################################
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
####################################################
# exit on errors
set -e
# exit on unbound variables
set -u
# verbose logging
set -x
source "$DIR/../utils.sh"
source "$DIR/k8s-utils.sh"
ENV_FILE="${CLUSTER_DEFINITION}.env"
if [ -e "${ENV_FILE}" ]; then
source "${ENV_FILE}"
fi
EXPECTED_NODE_COUNT="${EXPECTED_NODE_COUNT:-4}"
EXPECTED_LINUX_AGENTS="${EXPECTED_LINUX_AGENTS:-3}"
EXPECTED_WINDOWS_AGENTS="${EXPECTED_WINDOWS_AGENTS:-0}"
EXPECTED_DNS="${EXPECTED_DNS:-2}"
EXPECTED_DASHBOARD="${EXPECTED_DASHBOARD:-1}"
EXPECTED_RESCHEDULER="${EXPECTED_RESCHEDULER:-0}"
EXPECTED_ORCHESTRATOR_VERSION="${EXPECTED_ORCHESTRATOR_VERSION:-}"
KUBE_PROXY_COUNT=$((EXPECTED_NODE_COUNT-$EXPECTED_WINDOWS_AGENTS))
# set TEST_ACR to "y" for ACR testing
TEST_ACR="${TEST_ACR:-n}"
namespace="namespace-${RANDOM}"
log "Running test in namespace: ${namespace}"
trap teardown EXIT
function teardown {
k get all --all-namespaces || echo "teardown error"
k get nodes || echo "teardown error"
k get namespaces || echo "teardown error"
k delete namespaces ${namespace} || echo "teardown error"
}
# TODO: cleanup the loops more
# TODO: the wc|awk business can just be kubectl with an output format and wc -l
###### Deploy ACR
if [[ "${TEST_ACR}" == "y" ]]; then
ACR_NAME="${INSTANCE_NAME//[-._]/}1"
ACR_REGISTRY="${ACR_NAME}-microsoft.azurecr.io" # fix this for non-ms tenant users
if ! az acr show --resource-group "${RESOURCE_GROUP}" --name "${ACR_NAME}" ; then
az acr create --location "${LOCATION}" --resource-group "${RESOURCE_GROUP}" --name "${ACR_NAME}" &
fi
fi
###### Check node count
function check_node_count() {
log "Checking node count"
count=120
while (( $count > 0 )); do
log " ... counting down $count"
node_count=$(k get nodes --no-headers | grep -v NotReady | grep Ready | wc | awk '{print $1}')
if (( ${node_count} == ${EXPECTED_NODE_COUNT} )); then break; fi
sleep 5; count=$((count-1))
done
if (( $node_count != ${EXPECTED_NODE_COUNT} )); then
log "K8S: gave up waiting for apiserver / node counts"; exit 1
fi
}
check_node_count
###### Validate Kubernetes version
kubernetes_version=$(k version --short)
DASHBOARD_PORT=80
if [[ ${kubernetes_version} == *"Server Version: v1.9."* ]]; then
DASHBOARD_PORT=443
fi
log "Setting dashboard port to ${DASHBOARD_PORT}"
log "Checking Kubernetes version. Expected: ${EXPECTED_ORCHESTRATOR_VERSION}"
if [ -n "${EXPECTED_ORCHESTRATOR_VERSION}" ]; then
if [[ ${kubernetes_version} != *"Server Version: v${EXPECTED_ORCHESTRATOR_VERSION}"* ]]; then
log "K8S: unexpected kubernetes version:\n${kubernetes_version}"; exit 1
fi
fi
###### Wait for no more container creating
log "Checking containers being created"
count=60
while (( $count > 0 )); do
log " ... counting down $count"
creating_count=$(k get pods --all-namespaces --no-headers | grep 'ContainerCreating' | wc | awk '{print $1}')
if (( ${creating_count} == 0 )); then break; fi
sleep 5; count=$((count-1))
done
if (( ${creating_count} != 0 )); then
log "K8S: gave up waiting for containers"; exit 1
fi
###### Check existence and status of essential pods
# we test other essential pods (kube-dns, kube-proxy) separately
pods="heapster kube-addon-manager kube-apiserver kube-controller-manager kube-scheduler tiller"
if (( ${EXPECTED_RESCHEDULER} != 0 )); then
pods="$pods rescheduler"
fi
log "Checking $pods"
count=60
while (( $count > 0 )); do
for pod in $pods; do
if k get pods --all-namespaces | grep $pod | grep -q Running; then
log "... $pod is Running"
pods=${pods/$pod/}
fi
done
if [ -z "$(echo $pods | tr -d '[:space:]')" ]; then
break
fi
sleep 5; count=$((count-1))
done
if [ -n "$(echo $pods | tr -d '[:space:]')" ]; then
log "K8S: gave up waiting for running pods [$pods]"; exit 1
fi
###### Check for Kube-DNS
log "Checking Kube-DNS"
count=60
while (( $count > 0 )); do
log " ... counting down $count"
running=$(k get pods --namespace=kube-system | grep kube-dns | grep Running | wc | awk '{print $1}')
if (( ${running} == ${EXPECTED_DNS} )); then break; fi
sleep 5; count=$((count-1))
done
if (( ${running} != ${EXPECTED_DNS} )); then
log "K8S: gave up waiting for kube-dns"; exit 1
fi
###### Check for Kube-Dashboard
if (( ${EXPECTED_DASHBOARD} != 0 )); then
log "Checking Kube-Dashboard"
count=60
while (( $count > 0 )); do
log " ... counting down $count"
running=$(k get pods --namespace=kube-system | grep kubernetes-dashboard | grep Running | wc | awk '{print $1}')
if (( ${running} == ${EXPECTED_DASHBOARD} )); then break; fi
sleep 5; count=$((count-1))
done
if (( ${running} != ${EXPECTED_DASHBOARD} )); then
log "K8S: gave up waiting for kubernetes-dashboard"; exit 1
fi
else
log "Expecting no dashboard"
fi
###### Check for Kube-Proxys
log "Checking Kube-Proxys"
count=60
while (( $count > 0 )); do
log " ... counting down $count"
running=$(k get pods --namespace=kube-system | grep kube-proxy | grep Running | wc | awk '{print $1}')
if (( ${running} == ${KUBE_PROXY_COUNT} )); then break; fi
sleep 5; count=$((count-1))
done
if (( ${running} != ${KUBE_PROXY_COUNT} )); then
log "K8S: gave up waiting for kube-proxy"; exit 1
fi
if ! [ $EXPECTED_WINDOWS_AGENTS -gt 0 ] ; then
if (( ${EXPECTED_DASHBOARD} != 0 )); then
# get master public hostname
master=$(k config view | grep server | cut -f 3- -d "/" | tr -d " ")
# get dashboard port
port=$(k get svc --namespace=kube-system | grep dashboard | awk '{print $4}' | sed -n 's/^'${DASHBOARD_PORT}':\(.*\)\/TCP$/\1/p')
# get internal IPs of the nodes
ips=$(k get nodes --all-namespaces -o yaml | grep -B 1 InternalIP | grep address | awk '{print $3}')
for ip in $ips; do
log "Probing IP address ${ip}"
count=60
success="n"
while (( $count > 0 )); do
log " ... counting down $count"
ret=$(ssh -i "${OUTPUT}/id_rsa" -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "azureuser@${master}" "curl --max-time 60 http://${ip}:${port}" || echo "curl_error")
if [[ ! $ret =~ .*curl_error.* ]]; then
success="y"
break
fi
if (( $count < 2 )); then
log $ret
fi
sleep 5; count=$((count-1))
done
if [[ "${success}" == "n" ]]; then
log "K8S: gave up verifying proxy"; exit 1
fi
done
fi
fi
if [ $EXPECTED_LINUX_AGENTS -gt 0 ] ; then
test_linux_deployment
fi
if [ $EXPECTED_WINDOWS_AGENTS -gt 0 ] ; then
test_windows_deployment
fi
check_node_count


@ -1,43 +0,0 @@
#!/bin/bash
####################################################
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
####################################################
# do not use 'set -e'
set -x
set -u
source "$DIR/../utils.sh"
SSH="ssh -i ${SSH_KEY} -o ConnectTimeout=30 -o StrictHostKeyChecking=no -p2200 azureuser@${INSTANCE_NAME}.${LOCATION}.cloudapp.azure.com"
deploy="docker -H :2375 run -d -p 80:80 yeasy/simple-web"
wait_duration=10
total_loops=30
while true; do
# || true is used to suppress the failure like "Error response from daemon: No elected primary cluster manager"
# it should be gone after a few retries
containerId="$($SSH $deploy 2>/dev/null )" || true
[[ ! -z $containerId ]] && [[ "$(echo $containerId | grep '[0-9a-z]\{64\}')" ]] && log "container deployed! containerId is $containerId" && break
log "Validation: Expected to get containerId. $(($total_loops*$wait_duration)) seconds remain"
sleep $wait_duration
total_loops=$((total_loops-1))
if [ $total_loops -eq 0 ]; then
log "Swarm validation failed: timeout"; exit 1;
fi
done
result=$($SSH curl localhost:2375/containers/json | jq "[.[].Id==\"$containerId\"] | any")
if [ "$result" != "true" ]; then
log "Swarm validation failed: container not found"; exit 1;
fi
log "Swarm validation completed"


@ -1,81 +0,0 @@
#!/bin/bash
####################################################
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
####################################################
# do not use 'set -e'
set -x
set -u
source "$DIR/../utils.sh"
ssh_args="-i ${SSH_KEY} -o ConnectTimeout=30 -o StrictHostKeyChecking=no -p2200 azureuser@${INSTANCE_NAME}.${LOCATION}.cloudapp.azure.com"
function teardown {
ssh ${ssh_args} docker service rm nginx
sleep 10
ssh ${ssh_args} docker network rm network
}
trap teardown EXIT
log "Starting swarmmode deployment validation in ${LOCATION}"
sleep 30
log "Creating network"
wait=10
count=12
args="${ssh_args} docker network create --driver overlay --subnet 10.0.9.0/24 --opt encrypted network"
while (( $count > 0 )); do
log " ... counting down $count"
timeout -k 60s -s KILL 60s ssh $args
retval=$?
if [[ "$retval" == "0" ]]; then break; fi
sleep $wait
count=$((count-1))
done
if [[ "$retval" != "0" ]]; then
log "DockerCE: gave up waiting for network to be created"
exit 1
fi
log "Creating service"
wait=5
count=12
args="${ssh_args} docker service create --replicas 3 --name nginx --network network --publish 80:80 nginx"
while (( $count > 0 )); do
log " ... counting down $count"
ssh $args
retval=$?
if [[ "$retval" == "0" ]]; then break; fi
sleep $wait
count=$((count-1))
done
if [[ "$retval" != "0" ]]; then
log "DockerCE: gave up waiting for service to be created"
exit 1
fi
sleep 10
log "Testing service"
wait=5
count=12
while (( $count > 0 )); do
log " ... counting down $count"
curl --fail "http://${INSTANCE_NAME}0.${LOCATION}.cloudapp.azure.com:80/"
retval=$?
if [[ "$retval" == "0" ]]; then break; fi
sleep $wait
count=$((count-1))
done
if [[ "$retval" != "0" ]]; then
log "DockerCE: gave up waiting for service to be externally reachable"
exit 1
fi


@ -1,14 +0,0 @@
#!/bin/bash
function log {
local message="$1"
local caller
caller="$(caller 0)"
now=$(date +"%D %T %Z")
if [[ -n "${LOGFILE:-}" ]]; then
echo "[${now}] [${caller}] ${message}" | tee -a ${LOGFILE}
else
echo "[${now}] [${caller}] ${message}"
fi
}
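# Example (illustrative output): log "hello" emits a line like
#   [08/28/19 14:10:24 UTC] [12 main ./validate.sh] hello
# and also appends it to ${LOGFILE} when that variable is set.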


@ -1,219 +0,0 @@
#!/bin/bash
####################################################
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
####################################################
ROOT="${DIR}/.."
# see: https://github.com/stedolan/jq/issues/105 & https://github.com/stedolan/jq/wiki/FAQ#general-questions
function jqi() { filename="${1}"; jqexpr="${2}"; jq "${jqexpr}" "${filename}" > "${filename}.tmp" && mv "${filename}.tmp" "${filename}"; }
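# Example (illustrative): bump the master count in place; jq cannot write to
# the file it is reading, hence the tmp-file dance above:
#   jqi "${FINAL_CLUSTER_DEFINITION}" '.properties.masterProfile.count = 3'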
function generate_template() {
# Check pre-requisites
[[ -n "${INSTANCE_NAME:-}" ]] || (echo "Must specify INSTANCE_NAME" >&2 && exit 1)
[[ -n "${CLUSTER_DEFINITION:-}" ]] || (echo "Must specify CLUSTER_DEFINITION" >&2 && exit 1)
[[ -n "${SERVICE_PRINCIPAL_CLIENT_ID:-}" ]] || [[ -n "${CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID:-}" ]] || (echo "Must specify SERVICE_PRINCIPAL_CLIENT_ID" >&2 && exit 1)
[[ -n "${SERVICE_PRINCIPAL_CLIENT_SECRET:-}" ]] || [[ -n "${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET:-}" ]] || (echo "Must specify SERVICE_PRINCIPAL_CLIENT_SECRET" >&2 && exit 1)
[[ -n "${OUTPUT:-}" ]] || (echo "Must specify OUTPUT" >&2 && exit 1)
# Set output directory
mkdir -p "${OUTPUT}"
# Prep SSH Key
ssh-keygen -b 2048 -t rsa -f "${OUTPUT}/id_rsa" -q -N ""
ssh-keygen -y -f "${OUTPUT}/id_rsa" > "${OUTPUT}/id_rsa.pub"
SSH_KEY_DATA="$(cat "${OUTPUT}/id_rsa.pub")"
export SSH_KEY_DATA
# Allow different credentials for cluster vs the deployment
export CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID:-${SERVICE_PRINCIPAL_CLIENT_ID}}"
export CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET="${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET:-${SERVICE_PRINCIPAL_CLIENT_SECRET}}"
# Form the final cluster_definition file
export FINAL_CLUSTER_DEFINITION="${OUTPUT}/clusterdefinition.json"
cp "${CLUSTER_DEFINITION}" "${FINAL_CLUSTER_DEFINITION}"
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.masterProfile.dnsPrefix = \"${INSTANCE_NAME}\""
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.agentPoolProfiles |= map(if .name==\"agentpublic\" then .dnsPrefix = \"${INSTANCE_NAME}0\" else . end)"
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.linuxProfile.ssh.publicKeys[0].keyData = \"${SSH_KEY_DATA}\""
k8sServicePrincipal=$(jq 'getpath(["properties","servicePrincipalProfile"])' ${FINAL_CLUSTER_DEFINITION})
if [[ "${k8sServicePrincipal}" != "null" ]]; then
apiVersion=$(get_api_version)
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.servicePrincipalProfile.clientId = \"${CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID}\""
if [[ ${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET} =~ /subscription.* ]]; then
vaultID=$(echo $CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET | awk -F"/secrets/" '{print $1}')
secretName=$(echo $CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET | awk -F"/secrets/" '{print $2}')
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.servicePrincipalProfile.keyvaultSecretRef.vaultID = \"${vaultID}\""
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.servicePrincipalProfile.keyvaultSecretRef.secretName = \"${secretName}\""
else
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.servicePrincipalProfile.secret = \"${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET}\""
fi
fi
secrets=$(jq 'getpath(["properties","linuxProfile","secrets"])' ${FINAL_CLUSTER_DEFINITION})
if [[ "${secrets}" != "null" ]]; then
[[ -n "${CERT_KEYVAULT_ID:-}" ]] || (echo "Must specify CERT_KEYVAULT_ID" >&2 && exit 1)
[[ -n "${CERT_SECRET_URL:-}" ]] || (echo "Must specify CERT_SECRET_URL" >&2 && exit 1)
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.linuxProfile.secrets[0].sourceVault.id = \"${CERT_KEYVAULT_ID}\""
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.linuxProfile.secrets[0].vaultCertificates[0].certificateUrl = \"${CERT_SECRET_URL}\""
fi
secrets=$(jq 'getpath(["properties","windowsProfile","secrets"])' ${FINAL_CLUSTER_DEFINITION})
if [[ "${secrets}" != "null" ]]; then
[[ -n "${CERT_KEYVAULT_ID:-}" ]] || (echo "Must specify CERT_KEYVAULT_ID" >&2 && exit 1)
[[ -n "${CERT_SECRET_URL:-}" ]] || (echo "Must specify CERT_SECRET_URL" >&2 && exit 1)
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.windowsProfile.secrets[0].sourceVault.id = \"${CERT_KEYVAULT_ID}\""
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.windowsProfile.secrets[0].vaultCertificates[0].certificateUrl = \"${CERT_SECRET_URL}\""
jqi "${FINAL_CLUSTER_DEFINITION}" ".properties.windowsProfile.secrets[0].vaultCertificates[0].certificateStore = \"My\""
fi
# Generate template
"${DIR}/../bin/aks-engine" generate --output-directory "${OUTPUT}" "${FINAL_CLUSTER_DEFINITION}" --debug
# Fill in custom hyperkube spec, if it was set
if [[ -n "${CUSTOM_HYPERKUBE_SPEC:-}" ]]; then
# TODO: plumb hyperkube into the apimodel
jqi "${OUTPUT}/azuredeploy.parameters.json" ".parameters.kubernetesHyperkubeSpec.value = \"${CUSTOM_HYPERKUBE_SPEC}\""
fi
}
function set_azure_account() {
# Check pre-requisites
[[ -n "${SUBSCRIPTION_ID:-}" ]] || (echo "Must specify SUBSCRIPTION_ID" >&2 && exit 1)
[[ -n "${TENANT_ID:-}" ]] || (echo "Must specify TENANT_ID" >&2 && exit 1)
[[ -n "${SERVICE_PRINCIPAL_CLIENT_ID:-}" ]] || (echo "Must specify SERVICE_PRINCIPAL_CLIENT_ID" >&2 && exit 1)
[[ -n "${SERVICE_PRINCIPAL_CLIENT_SECRET:-}" ]] || (echo "Must specify SERVICE_PRINCIPAL_CLIENT_SECRET" >&2 && exit 1)
command -v k || (echo "k must be on PATH" >&2 && exit 1)
command -v az || (echo "az must be on PATH" >&2 && exit 1)
# Login to Azure-Cli
az login --service-principal \
--username "${SERVICE_PRINCIPAL_CLIENT_ID}" \
--password "${SERVICE_PRINCIPAL_CLIENT_SECRET}" \
--tenant "${TENANT_ID}" &>/dev/null
az account set --subscription "${SUBSCRIPTION_ID}"
}
function create_resource_group() {
[[ -n "${LOCATION:-}" ]] || (echo "Must specify LOCATION" >&2 && exit 1)
[[ -n "${RESOURCE_GROUP:-}" ]] || (echo "Must specify RESOURCE_GROUP" >&2 && exit 1)
# Create resource group if doesn't exist
az group show --name="${RESOURCE_GROUP}" || [ $? -eq 3 ] && echo "will create resource group ${RESOURCE_GROUP}" || exit 1
az group create --name="${RESOURCE_GROUP}" --location="${LOCATION}" --tags "type=${RESOURCE_GROUP_TAG_TYPE:-}" "now=$(date +%s)" "job=${JOB_BASE_NAME:-}" "buildno=${BUILD_NUM:-}"
sleep 3 # TODO: investigate why this is needed (eventual consistency in ARM)
}
function deploy_template() {
# Check pre-requisites
[[ -n "${DEPLOYMENT_NAME:-}" ]] || (echo "Must specify DEPLOYMENT_NAME" >&2 && exit 1)
[[ -n "${LOCATION:-}" ]] || (echo "Must specify LOCATION" >&2 && exit 1)
[[ -n "${RESOURCE_GROUP:-}" ]] || (echo "Must specify RESOURCE_GROUP" >&2 && exit 1)
[[ -n "${OUTPUT:-}" ]] || (echo "Must specify OUTPUT" >&2 && exit 1)
command -v k || (echo "k must be on PATH" >&2 && exit 1)
command -v az || (echo "az must be on PATH" >&2 && exit 1)
create_resource_group
# Deploy the template
az group deployment create \
--name "${DEPLOYMENT_NAME}" \
--resource-group "${RESOURCE_GROUP}" \
--template-file "${OUTPUT}/azuredeploy.json" \
--parameters "@${OUTPUT}/azuredeploy.parameters.json"
}
function scale_agent_pool() {
# Check pre-requisites
[[ -n "${AGENT_POOL_SIZE:-}" ]] || (echo "Must specify AGENT_POOL_SIZE" >&2 && exit 1)
[[ -n "${DEPLOYMENT_NAME:-}" ]] || (echo "Must specify DEPLOYMENT_NAME" >&2 && exit 1)
[[ -n "${LOCATION:-}" ]] || (echo "Must specify LOCATION" >&2 && exit 1)
[[ -n "${RESOURCE_GROUP:-}" ]] || (echo "Must specify RESOURCE_GROUP" >&2 && exit 1)
[[ -n "${OUTPUT:-}" ]] || (echo "Must specify OUTPUT" >&2 && exit 1)
command -v az || (echo "az must be on PATH" >&2 && exit 1)
APIMODEL="${OUTPUT}/apimodel.json"
DEPLOYMENT_PARAMS="${OUTPUT}/azuredeploy.parameters.json"
for poolname in $(jq '.properties.agentPoolProfiles[].name' "${APIMODEL}" | tr -d '\"'); do
offset=$(jq "getpath([\"parameters\", \"${poolname}Count\", \"value\"])" ${DEPLOYMENT_PARAMS})
echo "$poolname : offset=$offset count=$AGENT_POOL_SIZE"
jqi "${DEPLOYMENT_PARAMS}" ".${poolname}Count.value = $AGENT_POOL_SIZE"
jqi "${DEPLOYMENT_PARAMS}" ".${poolname}Offset.value = $offset"
done
az group deployment create \
--name "${DEPLOYMENT_NAME}" \
--resource-group "${RESOURCE_GROUP}" \
--template-file "${OUTPUT}/azuredeploy.json" \
--parameters "@${OUTPUT}/azuredeploy.parameters.json"
}
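# Example (illustrative): with AGENT_POOL_SIZE=5 and a pool "agentpool1" whose
# previous count was 3, the redeploy runs with agentpool1Count=5 and
# agentpool1Offset=3, so only the new VM indexes are created.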
function get_node_count() {
[[ -n "${CLUSTER_DEFINITION:-}" ]] || (echo "Must specify CLUSTER_DEFINITION" >&2 && exit 1)
count=$(jq '.properties.masterProfile.count' ${CLUSTER_DEFINITION})
linux_agents=0
windows_agents=0
nodes=$(jq -r '.properties.agentPoolProfiles[].count' ${CLUSTER_DEFINITION})
osTypes=$(jq -r '.properties.agentPoolProfiles[].osType' ${CLUSTER_DEFINITION})
nArr=( $nodes )
oArr=( $osTypes )
indx=0
for n in "${nArr[@]}"; do
count=$((count+n))
if [ "${oArr[$indx]}" = "Windows" ]; then
windows_agents=$((windows_agents+n))
else
linux_agents=$((linux_agents+n))
fi
indx=$((indx+1))
done
echo "${count}:${linux_agents}:${windows_agents}"
}
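# Example (illustrative): 3 masters plus a 3-node Linux pool and a 2-node
# Windows pool prints "8:3:2" (total:linux:windows).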
function get_orchestrator_type() {
[[ -n "${CLUSTER_DEFINITION:-}" ]] || (echo "Must specify CLUSTER_DEFINITION" >&2 && exit 1)
orchestratorType=$(jq -r 'getpath(["properties","orchestratorProfile","orchestratorType"])' ${CLUSTER_DEFINITION} | tr '[:upper:]' '[:lower:]')
echo $orchestratorType
}
function get_orchestrator_version() {
[[ -n "${CLUSTER_DEFINITION:-}" ]] || (echo "Must specify CLUSTER_DEFINITION" >&2 && exit 1)
orchestratorVersion=$(jq -r 'getpath(["properties","orchestratorProfile","orchestratorVersion"])' ${CLUSTER_DEFINITION})
if [[ "$orchestratorVersion" == "null" ]]; then
orchestratorVersion=""
fi
echo $orchestratorVersion
}
function get_api_version() {
[[ -n "${CLUSTER_DEFINITION:-}" ]] || (echo "Must specify CLUSTER_DEFINITION" >&2 && exit 1)
apiVersion=$(jq -r 'getpath(["apiVersion"])' ${CLUSTER_DEFINITION})
if [[ "$apiVersion" == "null" ]]; then
apiVersion=""
fi
echo $apiVersion
}
function cleanup() {
if [[ "${CLEANUP:-}" == "y" ]]; then
az group delete --no-wait --name="${RESOURCE_GROUP}" --yes || true
fi
}


@ -1,102 +0,0 @@
#!/usr/bin/env bash
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
###############################################################################
set -e
set -u
set -o pipefail
ROOT="${DIR}/.."
# Usage:
#
# Manual user usage (Specific name):
# export INSTANCE_NAME=official-jenkins-infra
# ./scripts/deploy.sh ./examples/kubernetes.json
#
# Manual user usage (Lots of rapid fire):
# In this mode, the user can repeat the same deploy
# command blindly and get new clusters each time.
# unset INSTANCE_NAME
# vim ./test/user.env (add your stuff)
# ./scripts/deploy.sh ./examples/kubernetes.json
# sleep 1
# ./scripts/deploy.sh ./examples/kubernetes.json
#
# Prow:
# export PULL_NUMBER=...
# export VALIDATE=<script path>
# export CLUSTER_DEFINITION=examples/kubernetes.json
# ./scripts/deploy.sh
# Load any user set environment
if [[ -f "${ROOT}/test/user.env" ]]; then
source "${ROOT}/test/user.env"
fi
# Ensure Cluster Definition
if [[ -z "${CLUSTER_DEFINITION:-}" ]]; then
if [[ -z "${1:-}" ]]; then echo "You must specify a parameterized apimodel.json clusterdefinition" >&2; exit 1; fi
CLUSTER_DEFINITION="${1}"
fi
# Set Instance Name for PR or random run
if [[ -n "${PULL_NUMBER:-}" ]]; then
INSTANCE_NAME="${JOB_NAME}-${PULL_NUMBER}-$(printf "%x" $(date '+%s'))"
export INSTANCE_NAME
# if we're running a pull request, assume we want to cleanup unless the user specified otherwise
if [[ -z "${CLEANUP:-}" ]]; then
export CLEANUP="y"
fi
else
INSTANCE_NAME_DEFAULT="${INSTANCE_NAME_PREFIX}-$(printf "%x" $(date '+%s'))"
export INSTANCE_NAME_DEFAULT
export INSTANCE_NAME="${INSTANCE_NAME:-${INSTANCE_NAME_DEFAULT}}"
fi
# Let the example json.env file set any env vars it may need ahead of time
# (For example, the `managed-identity/kubernetes.json.env` sets env vars for a
# custom MSI-compatible build of Kubernetes, as well as the SP cred values.)
ENV_FILE="${CLUSTER_DEFINITION}.env"
if [ -e "${ENV_FILE}" ]; then
source "${ENV_FILE}"
fi
# Set extra parameters
export OUTPUT="${ROOT}/_output/${INSTANCE_NAME}"
export RESOURCE_GROUP="${INSTANCE_NAME}"
export DEPLOYMENT_NAME="${INSTANCE_NAME}"
source "${ROOT}/test/common.sh"
# Set custom dir so we don't clobber global 'az' config
AZURE_CONFIG_DIR="$(mktemp -d)"
export AZURE_CONFIG_DIR
trap 'rm -rf ${AZURE_CONFIG_DIR}' EXIT
make -C "${ROOT}" ci
generate_template
set_azure_account
trap cleanup EXIT
deploy_template
if [[ -z "${VALIDATE:-}" ]]; then
exit 0
fi
export SSH_KEY="${OUTPUT}/id_rsa"
export KUBECONFIG="${OUTPUT}/kubeconfig/kubeconfig.${LOCATION}.json"
"${ROOT}/${VALIDATE}"
echo "post-test..."


@ -1,30 +0,0 @@
#!/bin/bash
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
###############################################################################
set -e
set -o pipefail
ROOT="${DIR}/.."
# Check pre-requisites
[[ -n "${SERVICE_PRINCIPAL_CLIENT_ID:-}" ]] || (echo "Must specify SERVICE_PRINCIPAL_CLIENT_ID" >&2 && exit 1)
[[ -n "${SERVICE_PRINCIPAL_CLIENT_SECRET:-}" ]] || (echo "Must specify SERVICE_PRINCIPAL_CLIENT_SECRET" >&2 && exit 1)
[[ -n "${TENANT_ID:-}" ]] || (echo "Must specify TENANT_ID" >&2 && exit 1)
[[ -n "${SUBSCRIPTION_ID:-}" ]] || (echo "Must specify SUBSCRIPTION_ID" >&2 && exit 1)
[[ -n "${CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID:-}" ]] || (echo "Must specify CLUSTER_SERVICE_PRINCIPAL_CLIENT_ID" >&2 && exit 1)
[[ -n "${CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET:-}" ]] || (echo "Must specify CLUSTER_SERVICE_PRINCIPAL_CLIENT_SECRET" >&2 && exit 1)
[[ -n "${STAGE_TIMEOUT_MIN:-}" ]] || (echo "Must specify STAGE_TIMEOUT_MIN" >&2 && exit 1)
[[ -n "${TEST_CONFIG:-}" ]] || (echo "Must specify TEST_CONFIG" >&2 && exit 1)
make bootstrap build
${ROOT}/test/aks-engine-test/aks-engine-test -c ${TEST_CONFIG} -d ${ROOT} -e ${LOGERROR_CONFIG:-${ROOT}/test/aks-engine-test/aks-engine-errors.json} -j ${SA_NAME} -k ${SA_KEY}


@@ -1,20 +0,0 @@
{
"deployments": [
{
"cluster_definition": "disks-managed/dcos-preAttachedDisks-vmas.json",
"location": "eastus"
},
{
"cluster_definition": "disks-managed/dcos-vmss.json",
"location": "eastus"
},
{
"cluster_definition": "disks-storageaccount/dcos.json",
"location": "eastus"
},
{
"cluster_definition": "vnet/dcosvnet.json",
"location": "westus2"
}
]
}


@@ -1,36 +0,0 @@
{
"deployments": [
{
"cluster_definition": "windows/kubernetes-hybrid.json",
"location": "eastus"
},
{
"cluster_definition": "windows/kubernetes.json",
"location": "eastus"
},
{
"cluster_definition": "disks-managed/kubernetes-preAttachedDisks-vmas.json",
"location": "westus2"
},
{
"cluster_definition": "disks-managed/kubernetes-vmas.json",
"location": "eastus"
},
{
"cluster_definition": "disks-storageaccount/kubernetes.json",
"location": "westus2"
},
{
"cluster_definition": "networkpolicy/kubernetes-calico.json",
"location": "eastus"
},
{
"cluster_definition": "kubernetes-config/kubernetes-clustersubnet.json",
"location": "eastus"
},
{
"cluster_definition": "vnet/kubernetesvnet.json",
"location": "westus2"
}
]
}


@@ -1,12 +0,0 @@
{
"deployments": [
{
"cluster_definition": "swarm.json",
"location": "westus2"
},
{
"cluster_definition": "disks-managed/swarm-preAttachedDisks-vmss.json",
"location": "westus2"
}
]
}


@@ -1,24 +0,0 @@
{
"deployments": [
{
"cluster_definition": "disks-managed/swarmmode-vmas.json",
"location": "westus2"
},
{
"cluster_definition": "disks-managed/swarmmode-vmss.json",
"location": "westus2"
},
{
"cluster_definition": "disks-storageaccount/swarmmode.json",
"location": "westus2"
},
{
"cluster_definition": "v20170131/swarmmode.json",
"location": "westus2"
},
{
"cluster_definition": "vnet/swarmmodevnet.json",
"location": "westus2"
}
]
}


@@ -1,48 +0,0 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package fake
import (
"github.com/Azure/aks-engine/pkg/i18n"
"github.com/leonelquinteros/gotext"
)
// This file is used to generate the test translation file
// 1. go-xgettext -o i18ntestinput.pot --keyword=translator.T --keyword-plural=translator.NT --msgid-bugs-address="" --sort-output test/i18n/i18ntestinput.go
// 2. go-xgettext -o i18ntestinput.err.pot --keyword=translator.Errorf --keyword-plural=translator.NErrorf --msgid-bugs-address="" --sort-output test/i18n/i18ntestinput.go
// 3. sed '1,18d' i18ntestinput.err.pot >> i18ntestinput.pot
// 4. msginit -l en_US -o i18ntestinput.po -i i18ntestinput.pot
// 5. Modify i18ntestinput.po using poedit as necessary
// Or msgfmt -c -v -o i18ntestinput.mo i18ntestinput.po
// 6. for d in "en_US"; do cp i18ntestinput.mo translations/test/$d/LC_MESSAGES/acsengine.mo; cp i18ntestinput.po translations/test/$d/LC_MESSAGES/acsengine.po; done
// 7. rm i18ntestinput.*
var (
locale = gotext.NewLocale("d", "l")
translator = &i18n.Translator{
Locale: locale,
}
world = "World"
resource = "Foo"
)
func aloha() {
translator.T("Aloha")
}
func foo() {
translator.T("Hello %s", world)
}
func bar() {
translator.NT("There is %d parameter in resource %s", "There are %d parameters in resource %s", 9, 9, resource)
}
func file() error {
return translator.Errorf("File not exists")
}
func api() error {
return translator.NErrorf("There is %d error in the api model", "There are %d errors in the api model", 3, 3)
}

0
test/junit/.gitignore vendored


@@ -1,11 +0,0 @@
#!/usr/bin/env bash
source "${HOME}/test/common.sh"
function shunittest_deploy_template {
set -eux -o pipefail
export OUTPUT="${HOME}/_output/${INSTANCE_NAME}"
deploy_template
}


@@ -1,11 +0,0 @@
#!/usr/bin/env bash
source "${HOME}/test/common.sh"
function shunittest_generate_template {
set -eux -o pipefail
export OUTPUT="${HOME}/_output/${INSTANCE_NAME}"
generate_template
}


@@ -1,11 +0,0 @@
#!/usr/bin/env bash
source "${HOME}/test/common.sh"
function shunittest_scale_agent_pool {
set -eux -o pipefail
export OUTPUT="${HOME}/_output/${INSTANCE_NAME}"
scale_agent_pool
}


@@ -1,27 +0,0 @@
#!/usr/bin/env bash
function shunittest_validate_deployment {
set -eux -o pipefail
export OUTPUT="${HOME}/_output/${INSTANCE_NAME}"
export SSH_KEY="${OUTPUT}/id_rsa"
if [[ "${ORCHESTRATOR}" == "kubernetes" ]]; then
export KUBECONFIG="${OUTPUT}/kubeconfig/kubeconfig.${LOCATION}.json"
nodes=$(${HOME}/test/step.sh get_node_count)
IFS=':' read -r -a narr <<< "${nodes}"
export EXPECTED_NODE_COUNT=${narr[0]}
export EXPECTED_LINUX_AGENTS=${narr[1]}
export EXPECTED_WINDOWS_AGENTS=${narr[2]}
EXPECTED_ORCHESTRATOR_VERSION=$(${HOME}/test/step.sh get_orchestrator_release)
export EXPECTED_ORCHESTRATOR_VERSION
fi
script="${HOME}/test/cluster-tests/${ORCHESTRATOR}/test.sh"
if [ -x "${script}" ]; then
"${script}"
else
echo "${script}: not an executable or no such file"
exit 1
fi
}


@@ -1,81 +0,0 @@
#!/usr/bin/env bash
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
###############################################################################
set -e
set -u
set -o pipefail
ROOT="${DIR}/.."
source "${ROOT}/test/common.sh"
case $1 in
set_azure_account)
set_azure_account
;;
create_resource_group)
create_resource_group
;;
predeploy)
AKSE_PREDEPLOY=${AKSE_PREDEPLOY:-}
if [ -n "${AKSE_PREDEPLOY}" ] && [ -x "${AKSE_PREDEPLOY}" ]; then
"${AKSE_PREDEPLOY}"
fi
;;
postdeploy)
AKSE_POSTDEPLOY=${AKSE_POSTDEPLOY:-}
if [ -n "${AKSE_POSTDEPLOY}" ] && [ -x "${AKSE_POSTDEPLOY}" ]; then
"${AKSE_POSTDEPLOY}"
fi
;;
generate_template)
export OUTPUT="${ROOT}/_output/${INSTANCE_NAME}"
generate_template
;;
deploy_template)
export OUTPUT="${ROOT}/_output/${INSTANCE_NAME}"
deploy_template
;;
get_node_count)
export OUTPUT="${ROOT}/_output/${INSTANCE_NAME}"
get_node_count
;;
get_orchestrator_type)
get_orchestrator_type
;;
get_orchestrator_release)
get_orchestrator_release
;;
validate)
export OUTPUT="${ROOT}/_output/${INSTANCE_NAME}"
export SSH_KEY="${OUTPUT}/id_rsa"
if [ "${ORCHESTRATOR}" = "kubernetes" ]; then
export KUBECONFIG="${OUTPUT}/kubeconfig/kubeconfig.${LOCATION}.json"
fi
"${ROOT}/test/cluster-tests/${ORCHESTRATOR}/test.sh"
;;
cleanup)
export CLEANUP="${CLEANUP:-y}"
cleanup
;;
esac

20
vendor/github.com/alexcesaro/statsd/LICENSE generated vendored

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Alexandre Cesaro
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

270
vendor/github.com/alexcesaro/statsd/conn.go generated vendored

@@ -1,270 +0,0 @@
package statsd
import (
"io"
"math/rand"
"net"
"strconv"
"sync"
"time"
)
type conn struct {
// Fields settable with options at Client's creation.
addr string
errorHandler func(error)
flushPeriod time.Duration
maxPacketSize int
network string
tagFormat TagFormat
mu sync.Mutex
// Fields guarded by the mutex.
closed bool
w io.WriteCloser
buf []byte
rateCache map[float32]string
}
func newConn(conf connConfig, muted bool) (*conn, error) {
c := &conn{
addr: conf.Addr,
errorHandler: conf.ErrorHandler,
flushPeriod: conf.FlushPeriod,
maxPacketSize: conf.MaxPacketSize,
network: conf.Network,
tagFormat: conf.TagFormat,
}
if muted {
return c, nil
}
var err error
c.w, err = dialTimeout(c.network, c.addr, 5*time.Second)
if err != nil {
return c, err
}
// When using UDP do a quick check to see if something is listening on the
// given port to return an error as soon as possible.
if c.network[:3] == "udp" {
for i := 0; i < 2; i++ {
_, err = c.w.Write(nil)
if err != nil {
_ = c.w.Close()
c.w = nil
return c, err
}
}
}
// To prevent a buffer overflow, add some capacity to the buffer to allow for
// an additional metric.
c.buf = make([]byte, 0, c.maxPacketSize+200)
if c.flushPeriod > 0 {
go func() {
ticker := time.NewTicker(c.flushPeriod)
for range ticker.C {
c.mu.Lock()
if c.closed {
ticker.Stop()
c.mu.Unlock()
return
}
c.flush(0)
c.mu.Unlock()
}
}()
}
return c, nil
}
func (c *conn) metric(prefix, bucket string, n interface{}, typ string, rate float32, tags string) {
c.mu.Lock()
l := len(c.buf)
c.appendBucket(prefix, bucket, tags)
c.appendNumber(n)
c.appendType(typ)
c.appendRate(rate)
c.closeMetric(tags)
c.flushIfBufferFull(l)
c.mu.Unlock()
}
func (c *conn) gauge(prefix, bucket string, value interface{}, tags string) {
c.mu.Lock()
l := len(c.buf)
// To set a gauge to a negative value we must first set it to 0.
// https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges
if isNegative(value) {
c.appendBucket(prefix, bucket, tags)
c.appendGauge(0, tags)
}
c.appendBucket(prefix, bucket, tags)
c.appendGauge(value, tags)
c.flushIfBufferFull(l)
c.mu.Unlock()
}
func (c *conn) appendGauge(value interface{}, tags string) {
c.appendNumber(value)
c.appendType("g")
c.closeMetric(tags)
}
func (c *conn) unique(prefix, bucket string, value string, tags string) {
c.mu.Lock()
l := len(c.buf)
c.appendBucket(prefix, bucket, tags)
c.appendString(value)
c.appendType("s")
c.closeMetric(tags)
c.flushIfBufferFull(l)
c.mu.Unlock()
}
func (c *conn) appendByte(b byte) {
c.buf = append(c.buf, b)
}
func (c *conn) appendString(s string) {
c.buf = append(c.buf, s...)
}
func (c *conn) appendNumber(v interface{}) {
switch n := v.(type) {
case int:
c.buf = strconv.AppendInt(c.buf, int64(n), 10)
case uint:
c.buf = strconv.AppendUint(c.buf, uint64(n), 10)
case int64:
c.buf = strconv.AppendInt(c.buf, n, 10)
case uint64:
c.buf = strconv.AppendUint(c.buf, n, 10)
case int32:
c.buf = strconv.AppendInt(c.buf, int64(n), 10)
case uint32:
c.buf = strconv.AppendUint(c.buf, uint64(n), 10)
case int16:
c.buf = strconv.AppendInt(c.buf, int64(n), 10)
case uint16:
c.buf = strconv.AppendUint(c.buf, uint64(n), 10)
case int8:
c.buf = strconv.AppendInt(c.buf, int64(n), 10)
case uint8:
c.buf = strconv.AppendUint(c.buf, uint64(n), 10)
case float64:
c.buf = strconv.AppendFloat(c.buf, n, 'f', -1, 64)
case float32:
c.buf = strconv.AppendFloat(c.buf, float64(n), 'f', -1, 32)
}
}
func isNegative(v interface{}) bool {
switch n := v.(type) {
case int:
return n < 0
case uint:
return n < 0
case int64:
return n < 0
case uint64:
return n < 0
case int32:
return n < 0
case uint32:
return n < 0
case int16:
return n < 0
case uint16:
return n < 0
case int8:
return n < 0
case uint8:
return n < 0
case float64:
return n < 0
case float32:
return n < 0
}
return false
}
func (c *conn) appendBucket(prefix, bucket string, tags string) {
c.appendString(prefix)
c.appendString(bucket)
if c.tagFormat == InfluxDB {
c.appendString(tags)
}
c.appendByte(':')
}
func (c *conn) appendType(t string) {
c.appendByte('|')
c.appendString(t)
}
func (c *conn) appendRate(rate float32) {
if rate == 1 {
return
}
if c.rateCache == nil {
c.rateCache = make(map[float32]string)
}
c.appendString("|@")
if s, ok := c.rateCache[rate]; ok {
c.appendString(s)
} else {
s = strconv.FormatFloat(float64(rate), 'f', -1, 32)
c.rateCache[rate] = s
c.appendString(s)
}
}
func (c *conn) closeMetric(tags string) {
if c.tagFormat == Datadog {
c.appendString(tags)
}
c.appendByte('\n')
}
func (c *conn) flushIfBufferFull(lastSafeLen int) {
if len(c.buf) > c.maxPacketSize {
c.flush(lastSafeLen)
}
}
// flush flushes the first n bytes of the buffer.
// If n is 0, the whole buffer is flushed.
func (c *conn) flush(n int) {
if len(c.buf) == 0 {
return
}
if n == 0 {
n = len(c.buf)
}
// Trim the last \n, StatsD does not like it.
_, err := c.w.Write(c.buf[:n-1])
c.handleError(err)
if n < len(c.buf) {
copy(c.buf, c.buf[n:])
}
c.buf = c.buf[:len(c.buf)-n]
}
func (c *conn) handleError(err error) {
if err != nil && c.errorHandler != nil {
c.errorHandler(err)
}
}
// Stubbed out for testing.
var (
dialTimeout = net.DialTimeout
now = time.Now
randFloat = rand.Float32
)
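
The negative-gauge handling above is visible through the public Client API. A minimal sketch, assuming a local StatsD daemon on the default port; the bucket name is invented for illustration:

package main

import "github.com/alexcesaro/statsd"

func main() {
	c, err := statsd.New() // defaults: ":8125" over UDP
	if err != nil {
		return // the returned Client is muted; nothing would be sent
	}
	defer c.Close()
	// An absolute negative gauge is preceded by a reset to 0, producing
	// "queue.depth:0|g\nqueue.depth:-5|g" on the wire.
	c.Gauge("queue.depth", -5)
}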

29
vendor/github.com/alexcesaro/statsd/doc.go generated vendored

@@ -1,29 +0,0 @@
/*
Package statsd is a simple and efficient StatsD client.
Options
Use options to configure the Client: target host/port, sampling rate, tags, etc.
Whenever you want to use different options (e.g. other tags, different sampling
rate), you should use the Clone() method of the Client.
When cloning a Client, the same connection is reused, so this is much
cheaper and more efficient than creating another Client using New().
Internals
Client's methods buffer metrics. The buffer is flushed when either:
- the background goroutine flushes the buffer (every 100ms by default)
- the buffer is full (1440 bytes by default so that IP packets are not
fragmented)
The background goroutine can be disabled using the FlushPeriod(0) option.
Buffering can be disabled using the MaxPacketSize(0) option.
StatsD homepage: https://github.com/etsy/statsd
*/
package statsd
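
For context, a minimal usage sketch of the options and buffering described above; the daemon address, prefix, flush period, and bucket name are assumptions for illustration:

package main

import (
	"log"
	"time"

	"github.com/alexcesaro/statsd"
)

func main() {
	c, err := statsd.New(
		statsd.Address("127.0.0.1:8125"),         // default is ":8125"
		statsd.Prefix("aks"),                     // buckets become "aks.<name>"
		statsd.FlushPeriod(200*time.Millisecond), // default is 100ms
		statsd.ErrorHandler(func(e error) { log.Println(e) }),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close() // flushes any buffered metrics

	c.Increment("deploys") // buffered, then sent as "aks.deploys:1|c"
}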

250
vendor/github.com/alexcesaro/statsd/options.go generated vendored

@@ -1,250 +0,0 @@
package statsd
import (
"bytes"
"strings"
"time"
)
type config struct {
Conn connConfig
Client clientConfig
}
type clientConfig struct {
Muted bool
Rate float32
Prefix string
Tags []tag
}
type connConfig struct {
Addr string
ErrorHandler func(error)
FlushPeriod time.Duration
MaxPacketSize int
Network string
TagFormat TagFormat
}
// An Option represents an option for a Client. It must be used as an
// argument to New() or Client.Clone().
type Option func(*config)
// Address sets the address of the StatsD daemon.
//
// By default, ":8125" is used. This option is ignored in Client.Clone().
func Address(addr string) Option {
return Option(func(c *config) {
c.Conn.Addr = addr
})
}
// ErrorHandler sets the function called when an error happens when sending
// metrics (e.g. the StatsD daemon is not listening anymore).
//
// By default, these errors are ignored. This option is ignored in
// Client.Clone().
func ErrorHandler(h func(error)) Option {
return Option(func(c *config) {
c.Conn.ErrorHandler = h
})
}
// FlushPeriod sets how often the Client's buffer is flushed. If p is 0, the
// goroutine that periodically flushes the buffer is not launched and the buffer
// is only flushed when it is full.
//
// By default, the flush period is 100 ms. This option is ignored in
// Client.Clone().
func FlushPeriod(p time.Duration) Option {
return Option(func(c *config) {
c.Conn.FlushPeriod = p
})
}
// MaxPacketSize sets the maximum packet size in bytes sent by the Client.
//
// By default, it is 1440 to avoid IP fragmentation. This option is ignored in
// Client.Clone().
func MaxPacketSize(n int) Option {
return Option(func(c *config) {
c.Conn.MaxPacketSize = n
})
}
// Network sets the network (udp, tcp, etc) used by the client. See the
// net.Dial documentation (https://golang.org/pkg/net/#Dial) for the available
// network options.
//
// By default, network is udp. This option is ignored in Client.Clone().
func Network(network string) Option {
return Option(func(c *config) {
c.Conn.Network = network
})
}
// Mute sets whether the Client is muted. All methods of a muted Client do
// nothing and return immediately.
//
// This option can be used in Client.Clone() only if the parent Client is not
// muted. The clones of a muted Client are always muted.
func Mute(b bool) Option {
return Option(func(c *config) {
c.Client.Muted = b
})
}
// SampleRate sets the sample rate of the Client. It allows sending the metrics
// less often which can be useful for performance intensive code paths.
func SampleRate(rate float32) Option {
return Option(func(c *config) {
c.Client.Rate = rate
})
}
// Prefix appends the prefix that will be used in every bucket name.
//
// Note that when used in a clone, the prefix of the parent Client is not
// replaced but is prepended to the given prefix.
func Prefix(p string) Option {
return Option(func(c *config) {
c.Client.Prefix += strings.TrimSuffix(p, ".") + "."
})
}
// TagFormat represents the format of tags sent by a Client.
type TagFormat uint8
// TagsFormat sets the format of tags.
func TagsFormat(tf TagFormat) Option {
return Option(func(c *config) {
c.Conn.TagFormat = tf
})
}
// Tags appends the given tags to the tags sent with every metric. If a tag
// already exists, it is replaced.
//
// The tags must be set as key-value pairs. If the number of tags is not even,
// Tags panics.
//
// If the format of tags has not been set using the TagsFormat option, the tags
// will be ignored.
func Tags(tags ...string) Option {
if len(tags)%2 != 0 {
panic("statsd: Tags only accepts an even number of arguments")
}
return Option(func(c *config) {
if len(tags) == 0 {
return
}
newTags := make([]tag, len(tags)/2)
for i := 0; i < len(tags)/2; i++ {
newTags[i] = tag{K: tags[2*i], V: tags[2*i+1]}
}
for _, newTag := range newTags {
exists := false
for _, oldTag := range c.Client.Tags {
if newTag.K == oldTag.K {
exists = true
oldTag.V = newTag.V
}
}
if !exists {
c.Client.Tags = append(c.Client.Tags, tag{
K: newTag.K,
V: newTag.V,
})
}
}
})
}
type tag struct {
K, V string
}
func joinTags(tf TagFormat, tags []tag) string {
if len(tags) == 0 || tf == 0 {
return ""
}
join := joinFuncs[tf]
return join(tags)
}
func splitTags(tf TagFormat, tags string) []tag {
if len(tags) == 0 || tf == 0 {
return nil
}
split := splitFuncs[tf]
return split(tags)
}
const (
// InfluxDB tag format.
// See https://influxdb.com/blog/2015/11/03/getting_started_with_influx_statsd.html
InfluxDB TagFormat = iota + 1
// Datadog tag format.
// See http://docs.datadoghq.com/guides/metrics/#tags
Datadog
)
var (
joinFuncs = map[TagFormat]func([]tag) string{
// InfluxDB tag format: ,tag1=payroll,region=us-west
// https://influxdb.com/blog/2015/11/03/getting_started_with_influx_statsd.html
InfluxDB: func(tags []tag) string {
var buf bytes.Buffer
for _, tag := range tags {
_ = buf.WriteByte(',')
_, _ = buf.WriteString(tag.K)
_ = buf.WriteByte('=')
_, _ = buf.WriteString(tag.V)
}
return buf.String()
},
// Datadog tag format: |#tag1:value1,tag2:value2
// http://docs.datadoghq.com/guides/dogstatsd/#datagram-format
Datadog: func(tags []tag) string {
buf := bytes.NewBufferString("|#")
first := true
for _, tag := range tags {
if first {
first = false
} else {
_ = buf.WriteByte(',')
}
_, _ = buf.WriteString(tag.K)
_ = buf.WriteByte(':')
_, _ = buf.WriteString(tag.V)
}
return buf.String()
},
}
splitFuncs = map[TagFormat]func(string) []tag{
InfluxDB: func(s string) []tag {
s = s[1:]
pairs := strings.Split(s, ",")
tags := make([]tag, len(pairs))
for i, pair := range pairs {
kv := strings.Split(pair, "=")
tags[i] = tag{K: kv[0], V: kv[1]}
}
return tags
},
Datadog: func(s string) []tag {
s = s[2:]
pairs := strings.Split(s, ",")
tags := make([]tag, len(pairs))
for i, pair := range pairs {
kv := strings.Split(pair, ":")
tags[i] = tag{K: kv[0], V: kv[1]}
}
return tags
},
}
)
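
Since Tags are ignored until a tag format is chosen (see TagsFormat above), a short sketch assuming InfluxDB-style tags; the tag keys and values are invented for illustration:

package main

import "github.com/alexcesaro/statsd"

func main() {
	c, err := statsd.New(
		statsd.TagsFormat(statsd.InfluxDB),
		statsd.Tags("region", "us-west", "env", "test"), // key/value pairs
	)
	if err != nil {
		return
	}
	defer c.Close()
	// With InfluxDB formatting the tags join the bucket name, so this
	// sends "requests,region=us-west,env=test:1|c".
	c.Count("requests", 1)
}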

169
vendor/github.com/alexcesaro/statsd/statsd.go generated vendored

@@ -1,169 +0,0 @@
package statsd
import "time"
// A Client represents a StatsD client.
type Client struct {
conn *conn
muted bool
rate float32
prefix string
tags string
}
// New returns a new Client.
func New(opts ...Option) (*Client, error) {
// The default configuration.
conf := &config{
Client: clientConfig{
Rate: 1,
},
Conn: connConfig{
Addr: ":8125",
FlushPeriod: 100 * time.Millisecond,
// Worst-case scenario:
// Ethernet MTU - IPv6 Header - TCP Header = 1500 - 40 - 20 = 1440
MaxPacketSize: 1440,
Network: "udp",
},
}
for _, o := range opts {
o(conf)
}
conn, err := newConn(conf.Conn, conf.Client.Muted)
c := &Client{
conn: conn,
muted: conf.Client.Muted,
}
if err != nil {
c.muted = true
return c, err
}
c.rate = conf.Client.Rate
c.prefix = conf.Client.Prefix
c.tags = joinTags(conf.Conn.TagFormat, conf.Client.Tags)
return c, nil
}
// Clone returns a clone of the Client. The cloned Client inherits its
// configuration from its parent.
//
// All cloned Clients share the same connection, so cloning a Client is a cheap
// operation.
func (c *Client) Clone(opts ...Option) *Client {
tf := c.conn.tagFormat
conf := &config{
Client: clientConfig{
Rate: c.rate,
Prefix: c.prefix,
Tags: splitTags(tf, c.tags),
},
}
for _, o := range opts {
o(conf)
}
clone := &Client{
conn: c.conn,
muted: c.muted || conf.Client.Muted,
rate: conf.Client.Rate,
prefix: conf.Client.Prefix,
tags: joinTags(tf, conf.Client.Tags),
}
clone.conn = c.conn
return clone
}
// Count adds n to bucket.
func (c *Client) Count(bucket string, n interface{}) {
if c.skip() {
return
}
c.conn.metric(c.prefix, bucket, n, "c", c.rate, c.tags)
}
func (c *Client) skip() bool {
return c.muted || (c.rate != 1 && randFloat() > c.rate)
}
// Increment increments the given bucket. It is equivalent to Count(bucket, 1).
func (c *Client) Increment(bucket string) {
c.Count(bucket, 1)
}
// Gauge records an absolute value for the given bucket.
func (c *Client) Gauge(bucket string, value interface{}) {
if c.skip() {
return
}
c.conn.gauge(c.prefix, bucket, value, c.tags)
}
// Timing sends a timing value to a bucket.
func (c *Client) Timing(bucket string, value interface{}) {
if c.skip() {
return
}
c.conn.metric(c.prefix, bucket, value, "ms", c.rate, c.tags)
}
// Histogram sends a histogram value to a bucket.
func (c *Client) Histogram(bucket string, value interface{}) {
if c.skip() {
return
}
c.conn.metric(c.prefix, bucket, value, "h", c.rate, c.tags)
}
// A Timing is a helper object that eases sending timing values.
type Timing struct {
start time.Time
c *Client
}
// NewTiming creates a new Timing.
func (c *Client) NewTiming() Timing {
return Timing{start: now(), c: c}
}
// Send sends the time elapsed since the creation of the Timing.
func (t Timing) Send(bucket string) {
t.c.Timing(bucket, int(t.Duration()/time.Millisecond))
}
// Duration returns the time elapsed since the creation of the Timing.
func (t Timing) Duration() time.Duration {
return now().Sub(t.start)
}
// Unique sends the given value to a set bucket.
func (c *Client) Unique(bucket string, value string) {
if c.skip() {
return
}
c.conn.unique(c.prefix, bucket, value, c.tags)
}
// Flush flushes the Client's buffer.
func (c *Client) Flush() {
if c.muted {
return
}
c.conn.mu.Lock()
c.conn.flush(0)
c.conn.mu.Unlock()
}
// Close flushes the Client's buffer and releases the associated resources. The
// Client and all the cloned Clients must not be used afterward.
func (c *Client) Close() {
if c.muted {
return
}
c.conn.mu.Lock()
c.conn.flush(0)
c.conn.handleError(c.conn.w.Close())
c.conn.closed = true
c.conn.mu.Unlock()
}
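
Finally, a sketch combining Clone and the Timing helper defined above; the sample rate and bucket names are assumptions for illustration:

package main

import (
	"time"

	"github.com/alexcesaro/statsd"
)

func main() {
	c, err := statsd.New() // defaults: ":8125" over UDP
	if err != nil {
		return
	}
	defer c.Close()

	// Clones share the parent's connection, so cloning is cheap.
	sampled := c.Clone(statsd.SampleRate(0.1)) // send roughly 10% of counts
	sampled.Increment("cache.hit")

	// Time a unit of work; Send reports the elapsed milliseconds as "ms".
	t := c.NewTiming()
	time.Sleep(25 * time.Millisecond) // stand-in for real work
	t.Send("deploy.duration")
}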