2017-08-30 21:38:09 +03:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2017-09-18 21:30:54 +03:00
|
|
|
"fmt"
|
2017-08-30 21:38:09 +03:00
|
|
|
"log"
|
|
|
|
"os"
|
|
|
|
"os/exec"
|
|
|
|
"os/signal"
|
|
|
|
"path/filepath"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/Azure/acs-engine/test/e2e/azure"
|
|
|
|
"github.com/Azure/acs-engine/test/e2e/config"
|
2017-10-09 22:29:25 +03:00
|
|
|
"github.com/Azure/acs-engine/test/e2e/dcos"
|
2017-08-30 21:38:09 +03:00
|
|
|
"github.com/Azure/acs-engine/test/e2e/engine"
|
|
|
|
"github.com/Azure/acs-engine/test/e2e/kubernetes/node"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// cfg is the parsed e2e test configuration; populated once in main.
	cfg *config.Config
	// acct wraps the Azure account used to provision and tear down resources.
	acct *azure.Account
	// eng holds the acs-engine configuration and parsed cluster definition
	// for the cluster under test.
	eng *engine.Engine
	// rgs accumulates every resource-group name created while provisioning,
	// so teardown can delete them all (including failed retry attempts).
	rgs []string
	// err is shared by the sequential top-level steps in main.
	err error
)
|
|
|
|
|
|
|
|
func main() {
|
|
|
|
start := time.Now()
|
|
|
|
cwd, _ := os.Getwd()
|
|
|
|
cfg, err = config.ParseConfig()
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("Error while trying to parse configuration: %s\n", err)
|
|
|
|
}
|
|
|
|
cfg.CurrentWorkingDir = cwd
|
|
|
|
|
|
|
|
acct, err = azure.NewAccount()
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("Error while trying to setup azure account: %s\n", err)
|
|
|
|
}
|
|
|
|
|
2017-09-28 02:26:16 +03:00
|
|
|
err := acct.Login()
|
|
|
|
if err != nil {
|
|
|
|
log.Fatal("Error while trying to login to azure account!")
|
|
|
|
}
|
|
|
|
|
|
|
|
err = acct.SetSubscription()
|
|
|
|
if err != nil {
|
|
|
|
log.Fatal("Error while trying to set azure subscription!")
|
|
|
|
}
|
2017-08-30 21:38:09 +03:00
|
|
|
|
|
|
|
// If an interrupt/kill signal is sent we will run the clean up procedure
|
|
|
|
trap()
|
|
|
|
|
|
|
|
// Only provision a cluster if there isnt a name present
|
|
|
|
if cfg.Name == "" {
|
|
|
|
for i := 1; i <= cfg.ProvisionRetries; i++ {
|
|
|
|
success := provisionCluster()
|
2017-09-18 21:30:54 +03:00
|
|
|
rgs = append(rgs, cfg.Name)
|
2017-08-30 21:38:09 +03:00
|
|
|
if success {
|
|
|
|
break
|
|
|
|
} else if i == cfg.ProvisionRetries {
|
2017-09-18 21:30:54 +03:00
|
|
|
teardown()
|
2017-08-30 21:38:09 +03:00
|
|
|
log.Fatalf("Exceeded Provision retry count!")
|
|
|
|
}
|
|
|
|
}
|
2017-09-18 21:30:54 +03:00
|
|
|
} else {
|
|
|
|
engCfg, err := engine.ParseConfig(cfg.CurrentWorkingDir, cfg.ClusterDefinition, cfg.Name)
|
|
|
|
if err != nil {
|
|
|
|
teardown()
|
|
|
|
log.Fatalf("Error trying to parse Engine config:%s\n", err)
|
|
|
|
}
|
|
|
|
cs, err := engine.Parse(engCfg.ClusterDefinitionTemplate)
|
|
|
|
if err != nil {
|
|
|
|
teardown()
|
|
|
|
log.Fatalf("Error trying to parse engine template into memory:%s\n", err)
|
|
|
|
|
|
|
|
}
|
|
|
|
eng = &engine.Engine{
|
|
|
|
Config: engCfg,
|
|
|
|
ClusterDefinition: cs,
|
|
|
|
}
|
2017-08-30 21:38:09 +03:00
|
|
|
}
|
|
|
|
|
2017-09-18 21:30:54 +03:00
|
|
|
if cfg.IsKubernetes() {
|
2017-08-30 21:38:09 +03:00
|
|
|
os.Setenv("KUBECONFIG", cfg.GetKubeConfig())
|
|
|
|
log.Printf("Kubeconfig:%s\n", cfg.GetKubeConfig())
|
|
|
|
log.Println("Waiting on nodes to go into ready state...")
|
2017-10-04 23:47:52 +03:00
|
|
|
ready := node.WaitOnReady(eng.NodeCount(), 10*time.Second, cfg.Timeout)
|
2017-08-30 21:38:09 +03:00
|
|
|
if ready == false {
|
|
|
|
teardown()
|
|
|
|
log.Fatalf("Error: Not all nodes in ready state!")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-09 22:29:25 +03:00
|
|
|
if cfg.IsDCOS() {
|
|
|
|
host := fmt.Sprintf("%s.%s.cloudapp.azure.com", cfg.Name, cfg.Location)
|
|
|
|
user := eng.ClusterDefinition.Properties.LinuxProfile.AdminUsername
|
|
|
|
log.Printf("SSH Key: %s\n", cfg.GetSSHKeyPath())
|
|
|
|
log.Printf("Master Node: %s@%s\n", user, host)
|
|
|
|
log.Printf("SSH Command: ssh -i %s -p 2200 %s@%s", cfg.GetSSHKeyPath(), user, host)
|
|
|
|
cluster := dcos.NewCluster(cfg, eng)
|
|
|
|
err = cluster.InstallDCOSClient()
|
|
|
|
if err != nil {
|
|
|
|
teardown()
|
|
|
|
log.Fatalf("Error trying to install dcos client:%s\n", err)
|
|
|
|
}
|
2017-10-17 20:17:52 +03:00
|
|
|
ready := cluster.WaitForNodes(eng.NodeCount(), 10*time.Second, cfg.Timeout)
|
|
|
|
if ready == false {
|
|
|
|
teardown()
|
|
|
|
log.Fatal("Error: Not all nodes in healthy state!")
|
|
|
|
}
|
2017-10-09 22:29:25 +03:00
|
|
|
}
|
|
|
|
|
2017-08-30 21:38:09 +03:00
|
|
|
runGinkgo(cfg.Orchestrator)
|
|
|
|
teardown()
|
|
|
|
log.Printf("Total Testing Elapsed Time:%s\n", time.Since(start))
|
|
|
|
}
|
|
|
|
|
|
|
|
func trap() {
|
|
|
|
// If an interrupt/kill signal is sent we will run the clean up procedure
|
|
|
|
c := make(chan os.Signal, 1)
|
|
|
|
signal.Notify(c, os.Interrupt)
|
|
|
|
signal.Notify(c, os.Kill)
|
|
|
|
go func() {
|
|
|
|
for sig := range c {
|
|
|
|
log.Printf("Received Signal:%s ... Clean Up On Exit?:%v\n", sig.String(), cfg.CleanUpOnExit)
|
|
|
|
teardown()
|
|
|
|
os.Exit(1)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
func teardown() {
|
|
|
|
if cfg.CleanUpOnExit {
|
2017-09-18 21:30:54 +03:00
|
|
|
for _, rg := range rgs {
|
|
|
|
log.Printf("Deleting Group:%s\n", rg)
|
|
|
|
acct.DeleteGroup(rg)
|
|
|
|
}
|
2017-08-30 21:38:09 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func runGinkgo(orchestrator string) {
|
2017-10-09 22:29:25 +03:00
|
|
|
testDir := fmt.Sprintf("test/e2e/%s", orchestrator)
|
|
|
|
cmd := exec.Command("ginkgo", "-nodes", "10", "-slowSpecThreshold", "180", "-r", testDir)
|
2017-08-30 21:38:09 +03:00
|
|
|
cmd.Stdout = os.Stdout
|
|
|
|
cmd.Stderr = os.Stderr
|
|
|
|
err = cmd.Start()
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("Error while trying to start ginkgo:%s\n", err)
|
|
|
|
teardown()
|
|
|
|
os.Exit(1)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = cmd.Wait()
|
|
|
|
if err != nil {
|
|
|
|
teardown()
|
|
|
|
os.Exit(1)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func provisionCluster() bool {
|
|
|
|
cfg.Name = cfg.GenerateName()
|
|
|
|
os.Setenv("NAME", cfg.Name)
|
|
|
|
log.Printf("Cluster name:%s\n", cfg.Name)
|
|
|
|
|
|
|
|
outputPath := filepath.Join(cfg.CurrentWorkingDir, "_output")
|
|
|
|
os.RemoveAll(outputPath)
|
|
|
|
os.Mkdir(outputPath, 0755)
|
|
|
|
|
|
|
|
out, err := exec.Command("ssh-keygen", "-f", cfg.GetSSHKeyPath(), "-q", "-N", "", "-b", "2048", "-t", "rsa").CombinedOutput()
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("Error while trying to generate ssh key:%s\n\nOutput:%s\n", err, out)
|
|
|
|
}
|
|
|
|
exec.Command("chmod", "0600", cfg.GetSSHKeyPath()+"*")
|
|
|
|
|
|
|
|
publicSSHKey, err := cfg.ReadPublicSSHKey()
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("Error while trying to read public ssh key: %s\n", err)
|
|
|
|
}
|
|
|
|
os.Setenv("PUBLIC_SSH_KEY", publicSSHKey)
|
|
|
|
os.Setenv("DNS_PREFIX", cfg.Name)
|
|
|
|
|
2017-09-18 21:30:54 +03:00
|
|
|
err = acct.CreateGroup(cfg.Name, cfg.Location)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("Error while trying to create resource group: %s\n", err)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
subnetID := ""
|
2017-10-09 22:29:25 +03:00
|
|
|
vnetName := fmt.Sprintf("%sCustomVnet", cfg.Name)
|
|
|
|
subnetName := fmt.Sprintf("%sCustomSubnet", cfg.Name)
|
2017-09-18 21:30:54 +03:00
|
|
|
if cfg.CreateVNET {
|
2017-10-09 22:29:25 +03:00
|
|
|
acct.CreateVnet(vnetName, "10.239.0.0/16", subnetName, "10.239.0.0/16")
|
|
|
|
subnetID = fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s", acct.SubscriptionID, acct.ResourceGroup.Name, vnetName, subnetName)
|
2017-09-18 21:30:54 +03:00
|
|
|
}
|
|
|
|
|
2017-08-30 21:38:09 +03:00
|
|
|
// Lets modify our template and call acs-engine generate on it
|
2017-09-18 21:30:54 +03:00
|
|
|
eng, err = engine.Build(cfg, subnetID)
|
2017-08-30 21:38:09 +03:00
|
|
|
if err != nil {
|
2017-09-18 21:30:54 +03:00
|
|
|
log.Printf("Error while trying to build cluster definition: %s\n", err)
|
|
|
|
return false
|
2017-08-30 21:38:09 +03:00
|
|
|
}
|
|
|
|
|
2017-09-18 21:30:54 +03:00
|
|
|
err = eng.Write()
|
2017-08-30 21:38:09 +03:00
|
|
|
if err != nil {
|
2017-09-18 21:30:54 +03:00
|
|
|
log.Printf("Error while trying to write Engine Template to disk:%s\n", err)
|
|
|
|
return false
|
2017-08-30 21:38:09 +03:00
|
|
|
}
|
|
|
|
|
2017-09-18 21:30:54 +03:00
|
|
|
err = eng.Generate()
|
2017-08-30 21:38:09 +03:00
|
|
|
if err != nil {
|
2017-09-18 21:30:54 +03:00
|
|
|
log.Printf("Error while trying to generate acs-engine template: %s\n", err)
|
|
|
|
return false
|
2017-08-30 21:38:09 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Lets start by just using the normal az group deployment cli for creating a cluster
|
|
|
|
log.Println("Creating deployment this make take a few minutes...")
|
2017-08-31 21:41:17 +03:00
|
|
|
err = acct.CreateDeployment(cfg.Name, eng)
|
2017-08-30 21:38:09 +03:00
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
2017-09-18 21:30:54 +03:00
|
|
|
|
|
|
|
if cfg.CreateVNET {
|
2017-10-09 22:29:25 +03:00
|
|
|
acct.UpdateRouteTables(subnetName, vnetName)
|
2017-09-18 21:30:54 +03:00
|
|
|
}
|
|
|
|
|
2017-08-30 21:38:09 +03:00
|
|
|
return true
|
|
|
|
}
|