Add acs-engine deploy e2e test (#3777)

This commit is contained in:
Stéphane Erbrech 2018-09-19 20:56:36 +02:00 committed by Cecile Robert-Michon
Parent 59a26deb5a
Commit 0b7decf7ba
4 changed files with 92 additions and 38 deletions

View file

@@ -33,6 +33,7 @@ type Config struct {
CurrentWorkingDir string
SoakClusterName string `envconfig:"SOAK_CLUSTER_NAME"`
ForceDeploy bool `envconfig:"FORCE_DEPLOY"`
UseDeployCommand bool `envconfig:"USE_DEPLOY_COMMAND"`
}
const (
@@ -85,6 +86,9 @@ func (c *Config) SetKubeConfig() {
// GetSSHKeyPath will return the absolute path to the ssh private key
func (c *Config) GetSSHKeyPath() string {
if c.UseDeployCommand {
return filepath.Join(c.CurrentWorkingDir, "_output", c.Name, "azureuser_rsa")
}
return filepath.Join(c.CurrentWorkingDir, "_output", c.Name+"-ssh")
}
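The sketch below is not part of the commit; it shows the new flag in isolation by mirroring the two snippets above in a standalone program. It assumes the envconfig struct tags are processed with github.com/kelseyhightower/envconfig (which the tag syntax suggests); the cluster name and working directory are placeholder values.

package main

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/kelseyhightower/envconfig"
)

// Config mirrors only the fields touched by this commit.
type Config struct {
    Name              string
    CurrentWorkingDir string
    UseDeployCommand  bool `envconfig:"USE_DEPLOY_COMMAND"`
}

// GetSSHKeyPath reproduces the branch added above: `acs-engine deploy` writes its
// generated key to _output/<name>/azureuser_rsa, while the generate flow keeps
// using the harness-generated _output/<name>-ssh key.
func (c *Config) GetSSHKeyPath() string {
    if c.UseDeployCommand {
        return filepath.Join(c.CurrentWorkingDir, "_output", c.Name, "azureuser_rsa")
    }
    return filepath.Join(c.CurrentWorkingDir, "_output", c.Name+"-ssh")
}

func main() {
    os.Setenv("USE_DEPLOY_COMMAND", "true") // what an e2e job would export
    var c Config
    if err := envconfig.Process("", &c); err != nil {
        fmt.Println("config error:", err)
        return
    }
    c.Name = "e2e-test"       // placeholder cluster name
    c.CurrentWorkingDir = "." // placeholder working directory
    fmt.Println(c.GetSSHKeyPath()) // _output/e2e-test/azureuser_rsa
}

With USE_DEPLOY_COMMAND unset, the same program falls back to the _output/e2e-test-ssh path used by the generate flow.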

View file

@@ -20,3 +20,22 @@ func (e *Engine) Generate() error {
}
return nil
}
// Deploy will run acs-engine deploy on a given cluster definition
func (e *Engine) Deploy(location string) error {
cmd := exec.Command("./bin/acs-engine", "deploy",
"--location", location,
"--api-model", e.Config.ClusterDefinitionPath,
"--dns-prefix", e.Config.DefinitionName,
"--output-directory", e.Config.GeneratedDefinitionPath,
"--resource-group", e.Config.DefinitionName,
)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to deploy acs-engine template with cluster definition - %s: %s\n", e.Config.ClusterDefinitionTemplate, err)
log.Printf("Output:%s\n", out)
return err
}
return nil
}
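The wrapper above only assembles and runs the acs-engine CLI. For reference, the standalone sketch below (not from the PR) prints the equivalent command line; the location, API model path, DNS prefix, and resource group are illustrative placeholders, which can help when reproducing a failed e2e deploy locally.

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

func main() {
    // Same flags as Engine.Deploy above; all values are made up for illustration.
    cmd := exec.Command("./bin/acs-engine", "deploy",
        "--location", "westus2",
        "--api-model", "examples/kubernetes.json",
        "--dns-prefix", "e2e-test",
        "--output-directory", "_output/e2e-test",
        "--resource-group", "e2e-test",
    )
    // Print instead of running, so no built binary or Azure credentials are needed.
    fmt.Println(strings.Join(cmd.Args, " "))
}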

View file

@@ -42,7 +42,7 @@ func main() {
err := acct.Login()
if err != nil {
log.Fatal("Error while trying to login to azure account!")
log.Fatalf("Error while trying to login to azure account! %s\n", err)
}
err = acct.SetSubscription()

View file

@@ -79,6 +79,25 @@ func (cli *CLIProvisioner) Run() error {
return errors.New("Unable to run provisioner")
}
func createSaveSSH(outputPath string, privateKeyName string) (string, error) {
os.Mkdir(outputPath, 0755)
keyPath := filepath.Join(outputPath, privateKeyName)
cmd := exec.Command("ssh-keygen", "-f", keyPath, "-q", "-N", "", "-b", "2048", "-t", "rsa")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
return "", errors.Wrapf(err, "Error while trying to generate ssh key\nOutput:%s", out)
}
os.Chmod(keyPath, 0600)
publicSSHKeyBytes, err := ioutil.ReadFile(keyPath + ".pub")
if err != nil {
return "", errors.Wrap(err, "Error while trying to read public ssh key")
}
return string(publicSSHKeyBytes), nil
}
func (cli *CLIProvisioner) provision() error {
cli.Config.Name = cli.generateName()
if cli.Config.SoakClusterName != "" {
@@ -87,23 +106,17 @@ func (cli *CLIProvisioner) provision() error {
os.Setenv("NAME", cli.Config.Name)
outputPath := filepath.Join(cli.Config.CurrentWorkingDir, "_output")
os.Mkdir(outputPath, 0755)
cmd := exec.Command("ssh-keygen", "-f", cli.Config.GetSSHKeyPath(), "-q", "-N", "", "-b", "2048", "-t", "rsa")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if !cli.Config.UseDeployCommand {
publicSSHKey, err := createSaveSSH(outputPath, cli.Config.Name+"-ssh")
if err != nil {
return errors.Wrapf(err, "Error while trying to generate ssh key\nOutput:%s", out)
}
publicSSHKey, err := cli.Config.ReadPublicSSHKey()
if err != nil {
return errors.Wrap(err, "Error while trying to read public ssh key")
return errors.Wrap(err, "Error while generating ssh keys")
}
os.Setenv("PUBLIC_SSH_KEY", publicSSHKey)
}
os.Setenv("DNS_PREFIX", cli.Config.Name)
err = cli.Account.CreateGroup(cli.Config.Name, cli.Config.Location)
err := cli.Account.CreateGroup(cli.Config.Name, cli.Config.Location)
if err != nil {
return errors.Wrap(err, "Error while trying to create resource group")
}
@@ -159,10 +172,45 @@ func (cli *CLIProvisioner) provision() error {
return errors.Wrap(err, "Error while trying to write Engine Template to disk:%s")
}
-err = cli.Engine.Generate()
+err = cli.generateAndDeploy()
if err != nil {
return errors.Wrap(err, "Error in generateAndDeploy:%s")
}
if cli.Config.IsKubernetes() {
// Store the hosts for future introspection
hosts, err := cli.Account.GetHosts(cli.Config.Name)
if err != nil {
return errors.Wrap(err, "GetHosts:%s")
}
var masters, agents []azure.VM
for _, host := range hosts {
if strings.Contains(host.Name, "master") {
masters = append(masters, host)
} else if strings.Contains(host.Name, "agent") {
agents = append(agents, host)
}
}
cli.Masters = masters
cli.Agents = agents
}
return nil
}
func (cli *CLIProvisioner) generateAndDeploy() error {
if cli.Config.UseDeployCommand {
fmt.Printf("Provisionning with the Deploy Command\n")
err := cli.Engine.Deploy(cli.Config.Location)
if err != nil {
return errors.Wrap(err, "Error while trying to deploy acs-engine template")
}
} else {
err := cli.Engine.Generate()
if err != nil {
return errors.Wrap(err, "Error while trying to generate acs-engine template")
}
}
c, err := config.ParseConfig()
if err != nil {
@@ -184,31 +232,14 @@ func (cli *CLIProvisioner) provision() error {
cli.Config.SetKubeConfig()
}
// Lets start by just using the normal az group deployment cli for creating a cluster
-err = cli.Account.CreateDeployment(cli.Config.Name, eng)
-if err != nil {
-return errors.Wrap(err, "Error while trying to create deployment")
-}
-if cli.Config.IsKubernetes() {
-// Store the hosts for future introspection
-hosts, err := cli.Account.GetHosts(cli.Config.Name)
-if err != nil {
-return err
-}
-var masters, agents []azure.VM
-for _, host := range hosts {
-if strings.Contains(host.Name, "master") {
-masters = append(masters, host)
-} else if strings.Contains(host.Name, "agent") {
-agents = append(agents, host)
-}
-}
-cli.Masters = masters
-cli.Agents = agents
-}
+//if we use Generate, then we need to call CreateDeployment
+if !cli.Config.UseDeployCommand {
+err = cli.Account.CreateDeployment(cli.Config.Name, cli.Engine)
+if err != nil {
+return errors.Wrap(err, "Error while trying to create deployment")
+}
+}
return nil
}
// GenerateName will generate a new name if one has not been set
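Taken together, the provisioner now creates its own SSH key pair only when acs-engine deploy is not doing it. The createSaveSSH pattern introduced above is easy to exercise on its own; the standalone sketch below (not part of the commit) reproduces it with a temporary directory and a placeholder key name, with the pkg/errors wrapping simplified to fmt.Errorf.

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
)

// createSaveSSH mirrors the helper added in this commit: generate a 2048-bit RSA
// key pair with ssh-keygen, tighten permissions on the private key, and return
// the public key so it can be exported (the provisioner sets PUBLIC_SSH_KEY).
func createSaveSSH(outputPath, privateKeyName string) (string, error) {
    if err := os.MkdirAll(outputPath, 0755); err != nil {
        return "", err
    }
    keyPath := filepath.Join(outputPath, privateKeyName)
    cmd := exec.Command("ssh-keygen", "-f", keyPath, "-q", "-N", "", "-b", "2048", "-t", "rsa")
    if out, err := cmd.CombinedOutput(); err != nil {
        return "", fmt.Errorf("ssh-keygen failed: %v\n%s", err, out)
    }
    if err := os.Chmod(keyPath, 0600); err != nil {
        return "", err
    }
    pub, err := ioutil.ReadFile(keyPath + ".pub")
    if err != nil {
        return "", err
    }
    return string(pub), nil
}

func main() {
    dir, err := ioutil.TempDir("", "e2e-ssh")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer os.RemoveAll(dir)

    pub, err := createSaveSSH(dir, "e2e-test-ssh") // placeholder key name
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("generated %d-byte public key under %s\n", len(pub), dir)
}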