* Add Openshift e2e tests

* Make distro configurable

* avoid patch version checks

* make route test re-entrant

* infer TENANT_ID from circleci env

* enable openshift-3.9-rhel-e2e
Authored by Michalis Kargakis, 2018-05-05 01:20:42 +02:00; committed by Jack Francis
Parent 6f5c9a3395
Commit cbfabdb236
14 changed files: 416 additions and 15 deletions


@@ -353,6 +353,37 @@ jobs:
path: /go/src/github.com/Azure/acs-engine/_logs
- store_artifacts:
path: /go/src/github.com/Azure/acs-engine/_output
openshift-3.9-rhel-e2e:
working_directory: /go/src/github.com/Azure/acs-engine
docker:
- image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9
environment:
GOPATH: /go
steps:
- checkout
- run: |
echo 'export TIMEOUT=30m' >> $BASH_ENV
echo 'export DISTRO=openshift39_rhel' >> $BASH_ENV
echo 'export LOCATION=eastus' >> $BASH_ENV
echo 'export ORCHESTRATOR_RELEASE=3.9' >> $BASH_ENV
echo 'export CLUSTER_DEFINITION=examples/openshift.json' >> $BASH_ENV
echo 'export CREATE_VNET=false' >> $BASH_ENV
echo 'export CLEANUP_ON_EXIT=false' >> $BASH_ENV
echo 'export RETAIN_SSH=false' >> $BASH_ENV
echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV
echo 'export CLIENT_ID=${SERVICE_PRINCIPAL_CLIENT_ID_E2E_KUBERNETES}' >> $BASH_ENV
echo 'export CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET_E2E_KUBERNETES}' >> $BASH_ENV
- run:
name: compile
command: make build-binary
- run:
name: ginkgo openshift e2e tests
command: make test-openshift
no_output_timeout: "30m"
- store_artifacts:
path: /go/src/github.com/Azure/acs-engine/_logs
- store_artifacts:
path: /go/src/github.com/Azure/acs-engine/_output
workflows:
version: 2
build_and_test_pr:
@@ -428,6 +459,12 @@ workflows:
filters:
branches:
ignore: master
- openshift-3.9-rhel-e2e:
requires:
- pr-e2e-hold
filters:
branches:
ignore: master
- swarm-e2e:
requires:
- pr-e2e-hold
@@ -488,6 +525,10 @@ workflows:
filters:
branches:
only: master
- openshift-3.9-rhel-e2e:
filters:
branches:
only: master
- swarm-e2e:
requires:
- test

.gitignore (vendored)

@@ -27,6 +27,7 @@ test/acs-engine-test/report/TestReport.json
# I have no idea why these get generated when I run the e2e test
test/e2e/kubernetes/translations/
test/e2e/openshift/translations/
# test outputs
cmd/_test_output


@@ -101,7 +101,7 @@ ifneq ($(GIT_BASEDIR),)
endif
test: generate
ginkgo -skipPackage test/e2e/dcos,test/e2e/kubernetes -r .
ginkgo -skipPackage test/e2e/dcos,test/e2e/kubernetes,test/e2e/openshift -r .
.PHONY: test-style
test-style:


@@ -25,7 +25,7 @@ hash goveralls 2>/dev/null || go get github.com/mattn/goveralls
hash godir 2>/dev/null || go get github.com/Masterminds/godir
generate_cover_data() {
ginkgo -skipPackage test/e2e/dcos,test/e2e/kubernetes -cover -r .
ginkgo -skipPackage test/e2e/dcos,test/e2e/kubernetes,test/e2e/openshift -cover -r .
echo "" > ${coveragetxt}
find . -type f -name "*.coverprofile" | while read -r file; do cat $file >> ${coveragetxt} && mv $file ${coverdir}; done
echo "mode: $covermode" >"$profile"


@@ -28,5 +28,8 @@ test-kubernetes:
test-dcos:
@ORCHESTRATOR=dcos go run ./test/e2e/runner.go
test-openshift:
@ORCHESTRATOR=openshift go run ./test/e2e/runner.go
test-azure-constants:
./scripts/azure-const.sh


@@ -38,6 +38,7 @@ const (
dcosOrchestrator = "dcos"
swarmModeOrchestrator = "swarmmode"
swarmOrchestrator = "swarm"
openShiftOrchestrator = "openshift"
)
// ParseConfig will parse needed environment variables for running the tests
@@ -54,9 +55,24 @@ func ParseConfig() (*Config, error) {
// GetKubeConfig returns the absolute path to the kubeconfig for c.Location
func (c *Config) GetKubeConfig() string {
file := fmt.Sprintf("kubeconfig.%s.json", c.Location)
kubeconfig := filepath.Join(c.CurrentWorkingDir, "_output", c.Name, "kubeconfig", file)
return kubeconfig
var kubeconfigPath string
switch {
case c.IsKubernetes():
file := fmt.Sprintf("kubeconfig.%s.json", c.Location)
kubeconfigPath = filepath.Join(c.CurrentWorkingDir, "_output", c.Name, "kubeconfig", file)
case c.IsOpenShift():
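// Expects master.tar.gz under the artifacts dir to contain etc/origin/master/admin.kubeconfig; unpack it in place to locate the admin kubeconfig.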
artifactsDir := filepath.Join(c.CurrentWorkingDir, "_output", c.Name)
masterTarball := filepath.Join(artifactsDir, "master.tar.gz")
out, err := exec.Command("tar", "-xzf", masterTarball, "-C", artifactsDir).CombinedOutput()
if err != nil {
log.Fatalf("Cannot untar master tarball: %v: %v", out, err)
}
kubeconfigPath = filepath.Join(artifactsDir, "etc", "origin", "master", "admin.kubeconfig")
}
return kubeconfigPath
}
// SetKubeConfig will set the KUBECONFIG env var
@@ -149,6 +165,11 @@ func (c *Config) IsSwarm() bool {
return c.Orchestrator == swarmOrchestrator
}
// IsOpenShift will return true if the ORCHESTRATOR env var is set to openshift
func (c *Config) IsOpenShift() bool {
return c.Orchestrator == openShiftOrchestrator
}
// SetRandomRegion sets Location to a random region
func (c *Config) SetRandomRegion() {
var regions []string


@@ -1,6 +1,8 @@
package engine
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
@@ -30,6 +32,9 @@ type Config struct {
OutputDirectory string `envconfig:"OUTPUT_DIR" default:"_output"`
CreateVNET bool `envconfig:"CREATE_VNET" default:"false"`
EnableKMSEncryption bool `envconfig:"ENABLE_KMS_ENCRYPTION" default:"false"`
Distro string `envconfig:"DISTRO"`
SubscriptionID string `envconfig:"SUBSCRIPTION_ID"`
TenantID string `envconfig:"TENANT_ID"`
ClusterDefinitionPath string // The original template we want to use to build the cluster from.
ClusterDefinitionTemplate string // This is the template after we splice in the environment variables
@@ -85,12 +90,37 @@ func Build(cfg *config.Config, subnetID string) (*Engine, error) {
Secret: config.ClientSecret,
}
}
if cfg.IsOpenShift() {
// azProfile
cs.ContainerService.Properties.AzProfile = &vlabs.AzProfile{
TenantID: config.TenantID,
SubscriptionID: config.SubscriptionID,
ResourceGroup: cfg.Name,
Location: cfg.Location,
}
// openshiftConfig
pass, err := generateRandomString(32)
if err != nil {
return nil, err
}
cs.ContainerService.Properties.OrchestratorProfile.OpenShiftConfig = &vlabs.OpenShiftConfig{
ClusterUsername: "test-user",
ClusterPassword: pass,
}
// master and agent config
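// Apply the requested Distro and drop any pinned ImageRef so the distro's default image is used.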
cs.ContainerService.Properties.MasterProfile.Distro = vlabs.Distro(config.Distro)
cs.ContainerService.Properties.MasterProfile.ImageRef = nil
for i := range cs.ContainerService.Properties.AgentPoolProfiles {
cs.ContainerService.Properties.AgentPoolProfiles[i].Distro = vlabs.Distro(config.Distro)
cs.ContainerService.Properties.AgentPoolProfiles[i].ImageRef = nil
}
}
if config.MasterDNSPrefix != "" {
cs.ContainerService.Properties.MasterProfile.DNSPrefix = config.MasterDNSPrefix
}
if !cfg.IsKubernetes() && config.AgentDNSPrefix != "" {
if !cfg.IsKubernetes() && !cfg.IsOpenShift() && config.AgentDNSPrefix != "" {
for idx, pool := range cs.ContainerService.Properties.AgentPoolProfiles {
pool.DNSPrefix = fmt.Sprintf("%v-%v", config.AgentDNSPrefix, idx)
}
@@ -236,3 +266,17 @@ func ParseOutput(path string) (*api.ContainerService, error) {
}
return containerService, nil
}
func generateRandomBytes(n int) ([]byte, error) {
b := make([]byte, n)
_, err := rand.Read(b)
if err != nil {
return nil, err
}
return b, nil
}
func generateRandomString(s int) (string, error) {
b, err := generateRandomBytes(s)
return base64.URLEncoding.EncodeToString(b), err
}


@@ -0,0 +1,4 @@
approvers:
- jim-minter
- kargakis
- pweil-


@@ -0,0 +1,29 @@
package node
import (
"errors"
"fmt"
"log"
"os/exec"
"regexp"
"strings"
)
// Version returns the version of an OpenShift cluster.
func Version() (string, error) {
cmd := exec.Command("oc", "version")
fmt.Printf("\n$ %s\n", strings.Join(cmd.Args, " "))
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to run 'oc version':%s", string(out))
return "", err
}
exp := regexp.MustCompile(`(openshift\s)+(v\d+.\d+.\d+)+`)
for _, line := range strings.Split(string(out), "\n") {
if strings.HasPrefix(line, "openshift") {
s := exp.FindStringSubmatch(line)
return s[2], nil
}
}
return "", errors.New("cannot find openshift version")
}
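As a quick illustration of how the regexp above pulls the version out of 'oc version' output (the sample line is an assumption for demonstration; real output also carries build metadata):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical output line in the shape produced by `oc version` on a 3.x cluster.
	line := "openshift v3.9.0+ba7faec-1"
	exp := regexp.MustCompile(`(openshift\s)+(v\d+.\d+.\d+)+`)
	if s := exp.FindStringSubmatch(line); s != nil {
		fmt.Println(s[2]) // "v3.9.0"
	}
}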


@@ -0,0 +1,13 @@
package openshift_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestOpenShift(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "OpenShift Suite")
}


@@ -0,0 +1,138 @@
package openshift
import (
"os"
"path/filepath"
"strings"
"time"
"github.com/Azure/acs-engine/pkg/api/common"
"github.com/Azure/acs-engine/test/e2e/config"
"github.com/Azure/acs-engine/test/e2e/engine"
knode "github.com/Azure/acs-engine/test/e2e/kubernetes/node"
"github.com/Azure/acs-engine/test/e2e/kubernetes/pod"
"github.com/Azure/acs-engine/test/e2e/openshift/node"
"github.com/Azure/acs-engine/test/e2e/openshift/util"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
cfg config.Config
eng engine.Engine
)
var _ = BeforeSuite(func() {
cwd, _ := os.Getwd()
rootPath := filepath.Join(cwd, "../../..") // The current working dir of these tests is down a few levels from the root of the project. We should traverse up that path so we can find the _output dir
c, err := config.ParseConfig()
c.CurrentWorkingDir = rootPath
Expect(err).NotTo(HaveOccurred())
cfg = *c // We have to do this because golang anon functions and scoping and stuff
engCfg, err := engine.ParseConfig(c.CurrentWorkingDir, c.ClusterDefinition, c.Name)
Expect(err).NotTo(HaveOccurred())
csInput, err := engine.ParseInput(engCfg.ClusterDefinitionTemplate)
Expect(err).NotTo(HaveOccurred())
csGenerated, err := engine.ParseOutput(engCfg.GeneratedDefinitionPath + "/apimodel.json")
Expect(err).NotTo(HaveOccurred())
eng = engine.Engine{
Config: engCfg,
ClusterDefinition: csInput,
ExpandedDefinition: csGenerated,
}
})
var _ = Describe("Azure Container Cluster using the OpenShift Orchestrator", func() {
It("should have bootstrap autoapprover running", func() {
running, err := pod.WaitOnReady("bootstrap-autoapprover", "openshift-infra", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should have have the appropriate node count", func() {
ready := knode.WaitOnReady(eng.NodeCount(), 10*time.Second, cfg.Timeout)
Expect(ready).To(Equal(true))
})
It("should be running the expected version", func() {
version, err := node.Version()
Expect(err).NotTo(HaveOccurred())
// normalize patch version to zero so we can support testing
// across centos and rhel deployments where patch versions diverge.
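// e.g. "v3.9.33" becomes "v3.9.0".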
version = strings.Join(append(strings.Split(version, ".")[:2], "0"), ".")
var expectedVersion string
if eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorRelease != "" ||
eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorVersion != "" {
expectedVersion = common.RationalizeReleaseAndVersion(
common.OpenShift,
eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorRelease,
eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorVersion,
false)
} else {
expectedVersion = common.RationalizeReleaseAndVersion(
common.OpenShift,
eng.Config.OrchestratorRelease,
eng.Config.OrchestratorVersion,
false)
}
expectedVersionRationalized := strings.Split(expectedVersion, "-")[0] // to account for -alpha and -beta suffixes
Expect(version).To(Equal("v" + expectedVersionRationalized))
})
It("should have router running", func() {
running, err := pod.WaitOnReady("router", "default", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should have docker-registry running", func() {
running, err := pod.WaitOnReady("docker-registry", "default", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should deploy a sample app and access it via a route", func() {
err := util.CreateFromTemplate("nginx-example", "openshift", "default")
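// Treat AlreadyExists as success so the route test stays re-entrant when re-run against the same cluster.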
if err != nil && strings.Contains(err.Error(), "AlreadyExists") {
err = nil
}
Expect(err).NotTo(HaveOccurred())
Expect(util.WaitForDeploymentConfig("nginx-example", "default")).NotTo(HaveOccurred())
host, err := util.GetHost("nginx-example", "default")
Expect(err).NotTo(HaveOccurred())
Expect(util.TestHost(host, 10, 200*time.Microsecond)).NotTo(HaveOccurred())
})
It("should have the openshift webconsole running", func() {
running, err := pod.WaitOnReady("webconsole", "openshift-web-console", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should have prometheus running", func() {
running, err := pod.WaitOnReady("prometheus", "openshift-metrics", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should have service catalog apiserver running", func() {
running, err := pod.WaitOnReady("apiserver", "kube-service-catalog", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should have service catalog controller-manager running", func() {
running, err := pod.WaitOnReady("controller-manager", "kube-service-catalog", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should have template service broker running", func() {
running, err := pod.WaitOnReady("asb", "openshift-ansible-service-broker", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
})


@@ -0,0 +1,101 @@
package util
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"strings"
"time"
)
func printCmd(cmd *exec.Cmd) {
fmt.Printf("\n$ %s\n", strings.Join(cmd.Args, " "))
}
// CreateFromTemplate processes and creates the provided templateName/templateNamespace template
// in the provided namespace.
func CreateFromTemplate(templateName, templateNamespace, namespace string) error {
processCmd := exec.Command("oc", "process", templateName, "-n", templateNamespace)
printCmd(processCmd)
out, err := processCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("cannot process template %s: %v\noutput: %s", templateName, err, string(out))
}
if err := ioutil.WriteFile(templateName, out, 0644); err != nil {
return fmt.Errorf("cannot create tempfile for processed template %s: %v", templateName, err)
}
defer os.Remove(templateName)
createCmd := exec.Command("oc", "create", "-n", namespace, "-f", templateName)
printCmd(createCmd)
out, err = createCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("cannot create processed template %s: %v\noutput: %s", templateName, err, string(out))
}
return nil
}
// WaitForDeploymentConfig waits until the provided deploymentconfig namespace/name
// gets deployed.
func WaitForDeploymentConfig(name, namespace string) error {
cmd := exec.Command("oc", "rollout", "status", fmt.Sprintf("dc/%s", name), "-n", namespace)
printCmd(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to see the rollout status of dc/%s: %s", name, string(out))
return err
}
return nil
}
// GetHost expects the name and namespace of a route in order to
// return its host.
func GetHost(name, namespace string) (string, error) {
cmd := exec.Command("oc", "get", fmt.Sprintf("route/%s", name), "-n", namespace, "-o", "jsonpath={.spec.host}")
printCmd(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to get the hostname of route/%s: %s", name, string(out))
return "", err
}
return string(out), nil
}
// TestHost tries to access host and retries maxRetries times with a retryDelay
// that is doubled on every retry.
func TestHost(host string, maxRetries int, retryDelay time.Duration) error {
backoff := retryDelay
url := fmt.Sprintf("http://%s", host)
resp, err := http.Get(url)
if err == nil && resp.StatusCode == http.StatusOK {
return nil
}
if err == nil {
log.Printf("got status %q while trying to access %s", resp.Status, host)
resp.Body.Close()
} else {
log.Printf("error while trying to access %s: %v", host, err)
}
for retries := 1; retries <= maxRetries; retries++ {
log.Printf("Retry #%d to access %s", retries, host)
resp, err = http.Get(url)
if err != nil {
log.Printf("error while trying to access %s: %v", host, err)
continue
}
resp.Body.Close()
if resp.StatusCode == http.StatusOK {
return nil
}
log.Printf("got status %q while trying to access %s", resp.Status, host)
time.Sleep(backoff)
backoff *= 2
}
if err != nil {
return err
}
return fmt.Errorf("unexpected response status: %v", resp.Status)
}


@@ -19,6 +19,7 @@ import (
"github.com/Azure/acs-engine/test/e2e/kubernetes/node"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
"github.com/Azure/acs-engine/test/e2e/metrics"
onode "github.com/Azure/acs-engine/test/e2e/openshift/node"
"github.com/Azure/acs-engine/test/e2e/remote"
"github.com/kelseyhightower/envconfig"
)
@@ -180,7 +181,7 @@ func (cli *CLIProvisioner) generateName() string {
}
func (cli *CLIProvisioner) waitForNodes() error {
if cli.Config.IsKubernetes() {
if cli.Config.IsKubernetes() || cli.Config.IsOpenShift() {
if !cli.IsPrivate() {
cli.Config.SetKubeConfig()
log.Println("Waiting on nodes to go into ready state...")
@@ -188,18 +189,24 @@ func (cli *CLIProvisioner) waitForNodes() error {
if !ready {
return errors.New("Error: Not all nodes in a healthy state")
}
version, err := node.Version()
var version string
var err error
if cli.Config.IsKubernetes() {
version, err = node.Version()
} else if cli.Config.IsOpenShift() {
version, err = onode.Version()
}
if err != nil {
log.Printf("Ready nodes did not return a version: %s", err)
}
log.Printf("Testing a Kubernetes %s cluster...\n", version)
log.Printf("Testing a %s %s cluster...\n", cli.Config.Orchestrator, version)
} else {
log.Println("This cluster is private")
if cli.Engine.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile == nil {
// TODO: add "bring your own jumpbox to e2e"
return errors.New("Error: cannot test a private cluster without provisioning a jumpbox")
}
log.Printf("Testing a Kubernetes private cluster...")
log.Printf("Testing a %s private cluster...", cli.Config.Orchestrator)
// TODO: create SSH connection and get nodes and k8s version
}
}
@@ -272,8 +279,7 @@ func (cli *CLIProvisioner) FetchProvisioningMetrics(path string, cfg *config.Con
// IsPrivate will return true if the cluster has no public IPs
func (cli *CLIProvisioner) IsPrivate() bool {
if cli.Config.IsKubernetes() && cli.Engine.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster != nil && helpers.IsTrueBoolPointer(cli.Engine.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster.Enabled) {
return true
}
return false
return (cli.Config.IsKubernetes() || cli.Config.IsOpenShift()) &&
cli.Engine.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster != nil &&
helpers.IsTrueBoolPointer(cli.Engine.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster.Enabled)
}


@@ -48,7 +48,7 @@ func (g *Ginkgo) Run() error {
err = cmd.Wait()
if err != nil {
g.Point.RecordTestError()
if g.Config.IsKubernetes() {
if g.Config.IsKubernetes() || g.Config.IsOpenShift() {
kubectl := exec.Command("kubectl", "get", "all", "--all-namespaces", "-o", "wide")
util.PrintCommand(kubectl)
kubectl.CombinedOutput()