Mirror of https://github.com/Azure/aks-engine.git
refactor: simplify scale operation CLI output (#1345)
This commit is contained in:
Parent: 6ebbe327a8
Commit: fec834d7fa
@@ -394,6 +394,14 @@
revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c"
version = "v0.0.4"

[[projects]]
  branch = "master"
  digest = "1:063d55b87e200bced5e2be658cc70acafb4c5bbc4afa04d4b82f66298b73d089"
  name = "github.com/mgutz/ansi"
  packages = ["."]
  pruneopts = "NUT"
  revision = "9520e82c474b0a04dd04f8a40959027271bab992"

[[projects]]
  digest = "1:a4df73029d2c42fabcb6b41e327d2f87e685284ec03edf76921c267d9cfc9c23"
  name = "github.com/mitchellh/go-homedir"

@@ -525,6 +533,14 @@
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"

[[projects]]
  digest = "1:c950ed18f9e7d881d16e226c5612fea8acceb241a0d04c3dad5c0d22c24381c5"
  name = "github.com/x-cray/logrus-prefixed-formatter"
  packages = ["."]
  pruneopts = "NUT"
  revision = "bb2702d423886830dee131692131d35648c382e2"
  version = "v0.5.2"

[[projects]]
  digest = "1:58f2854b50ff8862eb6a347f20dedaac83e1166f4040472e17bc37736841a12f"
  name = "go.opencensus.io"

@@ -1017,6 +1033,7 @@
    "github.com/sirupsen/logrus/hooks/test",
    "github.com/spf13/cobra",
    "github.com/spf13/pflag",
    "github.com/x-cray/logrus-prefixed-formatter",
    "golang.org/x/crypto/ssh",
    "golang.org/x/crypto/ssh/agent",
    "golang.org/x/sync/errgroup",
@@ -78,6 +78,10 @@ required = [
name = "github.com/sirupsen/logrus"
version = "1.0.3"

[[constraint]]
  name = "github.com/x-cray/logrus-prefixed-formatter"
  version = "0.5.2"

[[constraint]]
  name = "github.com/spf13/pflag"
  version = "1.0.0"
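The two new constraints above support the reworked CLI output: logrus entries rendered through the x-cray prefixed formatter. A minimal sketch, assuming the formatter's default configuration, of how the prefixed formatter is typically attached to a logrus logger (cmd/scale.go does the same thing in `load()` further down):

```go
package main

import (
	log "github.com/sirupsen/logrus"
	prefixed "github.com/x-cray/logrus-prefixed-formatter"
)

func main() {
	// Build a logger whose output is rendered by the prefixed text formatter.
	logger := log.New()
	logger.Formatter = new(prefixed.TextFormatter)

	// Commands hold a *log.Entry so fields such as "prefix" travel with every line.
	entry := log.NewEntry(logger)
	entry.WithField("prefix", "scale").Info("scaling node pool agentpool1 to 3 nodes")
}
```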
@@ -74,8 +74,12 @@ func newRotateCertsCmd() *cobra.Command {
f.StringVarP(&rcc.resourceGroupName, "resource-group", "g", "", "the resource group where the cluster is deployed (required)")
f.StringVarP(&rcc.apiModelPath, "api-model", "m", "", "path to the generated apimodel.json file (required)")
f.StringVarP(&rcc.sshFilepath, "ssh", "", "", "the filepath of a valid private ssh key to access the cluster's nodes (required)")
f.StringVar(&rcc.masterFQDN, "master-FQDN", "", "FQDN for the master load balancer (required)")
f.StringVar(&rcc.masterFQDN, "master-FQDN", "", "FQDN for the master load balancer")
f.StringVar(&rcc.masterFQDN, "apiserver", "", "apiserver endpoint (required)")
f.StringVarP(&rcc.outputDirectory, "output-directory", "o", "", "output directory where generated TLS artifacts will be saved (derived from DNS prefix if absent)")

f.MarkDeprecated("master-FQDN", "--apiserver is preferred")

addAuthFlags(rcc.getAuthArgs(), f)

return command
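The hunk above keeps `--master-FQDN` working while steering users to `--apiserver`. A hedged, self-contained sketch of that pflag aliasing and deprecation pattern (the flag set name and parsed value below are illustrative, not from the commit):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var endpoint string
	f := pflag.NewFlagSet("rotate-certs", pflag.ExitOnError)

	// Both flag names write to the same variable; the old name stays usable.
	f.StringVar(&endpoint, "master-FQDN", "", "FQDN for the master load balancer")
	f.StringVar(&endpoint, "apiserver", "", "apiserver endpoint (required)")

	// MarkDeprecated hides the old flag from help and prints a hint when it is used.
	f.MarkDeprecated("master-FQDN", "--apiserver is preferred")

	f.Parse([]string{"--master-FQDN=mycluster.eastus.cloudapp.azure.com"})
	fmt.Println(endpoint) // the deprecated alias still populates the value
}
```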
@@ -33,7 +33,7 @@ func TestNewRotateCertsCmd(t *testing.T) {
t.Fatalf("rotate-certs command should have use %s equal %s, short %s equal %s and long %s equal to %s", output.Use, rotateCertsName, output.Short, rotateCertsShortDescription, output.Long, rotateCertsLongDescription)
}

expectedFlags := []string{"location", "resource-group", "master-FQDN", "api-model", "ssh"}
expectedFlags := []string{"location", "resource-group", "apiserver", "api-model", "ssh"}
for _, f := range expectedFlags {
if output.Flags().Lookup(f) == nil {
t.Fatalf("rotate-certs command should have flag %s", f)
146 cmd/scale.go
@@ -26,6 +26,8 @@ import (
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
prefixed "github.com/x-cray/logrus-prefixed-formatter"
v1 "k8s.io/api/core/v1"
)

type scaleCmd struct {
@@ -49,6 +51,9 @@ type scaleCmd struct {
nameSuffix string
agentPoolIndex int
logger *log.Entry
apiserverURL string
kubeconfig string
nodes []v1.Node
}

const (
@@ -76,9 +81,11 @@ func newScaleCmd() *cobra.Command {
f.StringVar(&sc.deploymentDirectory, "deployment-dir", "", "the location of the output from `generate`")
f.IntVarP(&sc.newDesiredAgentCount, "new-node-count", "c", 0, "desired number of nodes")
f.StringVar(&sc.agentPoolToScale, "node-pool", "", "node pool to scale")
f.StringVar(&sc.masterFQDN, "master-FQDN", "", "FQDN for the master load balancer, Needed to scale down Kubernetes agent pools")
f.StringVar(&sc.masterFQDN, "master-FQDN", "", "FQDN for the master load balancer that maps to the apiserver endpoint")
f.StringVar(&sc.masterFQDN, "apiserver", "", "apiserver endpoint (required to cordon and drain nodes)")

f.MarkDeprecated("deployment-dir", "deployment-dir is no longer required for scale or upgrade. Please use --api-model.")
f.MarkDeprecated("deployment-dir", "--deployment-dir is no longer required for scale or upgrade. Please use --api-model.")
f.MarkDeprecated("master-FQDN", "--apiserver is preferred")

addAuthFlags(&sc.authArgs, f)
@@ -86,7 +93,7 @@ func newScaleCmd() *cobra.Command {
}

func (sc *scaleCmd) validate(cmd *cobra.Command) error {
log.Infoln("validating...")
log.Debugln("validating scale command line arguments...")
var err error

sc.locale, err = i18n.LoadTranslations()
@@ -125,7 +132,9 @@ func (sc *scaleCmd) validate(cmd *cobra.Command) error {
}

func (sc *scaleCmd) load() error {
sc.logger = log.New().WithField("source", "scaling command line")
logger := log.New()
logger.Formatter = new(prefixed.TextFormatter)
sc.logger = log.NewEntry(log.New())
var err error

ctx, cancel := context.WithTimeout(context.Background(), armhelpers.DefaultARMOperationTimeout)
@@ -203,7 +212,22 @@ func (sc *scaleCmd) load() error {

//allows to identify VMs in the resource group that belong to this cluster.
sc.nameSuffix = sc.containerService.Properties.GetClusterID()
log.Infof("Name suffix: %s", sc.nameSuffix)
log.Debugf("Cluster ID used in all agent pools: %s", sc.nameSuffix)

if sc.masterFQDN != "" {
if strings.HasPrefix(sc.masterFQDN, "https://") {
sc.apiserverURL = sc.masterFQDN
} else if strings.HasPrefix(sc.masterFQDN, "http://") {
return errors.New("apiserver URL cannot be insecure http://")
} else {
sc.apiserverURL = fmt.Sprintf("https://%s", sc.masterFQDN)
}
}

sc.kubeconfig, err = engine.GenerateKubeConfig(sc.containerService.Properties, sc.location)
if err != nil {
return errors.New("Unable to derive kubeconfig from api model")
}
return nil
}
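For illustration only, the normalization that `load()` performs above can be read as a small pure helper; the helper name is ours, not part of aks-engine:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"
)

// apiserverURLFromFQDN mirrors the logic above: accept either a bare FQDN or an
// https:// URL, and reject plain http://.
func apiserverURLFromFQDN(masterFQDN string) (string, error) {
	switch {
	case strings.HasPrefix(masterFQDN, "https://"):
		return masterFQDN, nil
	case strings.HasPrefix(masterFQDN, "http://"):
		return "", errors.New("apiserver URL cannot be insecure http://")
	default:
		return fmt.Sprintf("https://%s", masterFQDN), nil
	}
}

func main() {
	u, _ := apiserverURLFromFQDN("mycluster.eastus.cloudapp.azure.com")
	fmt.Println(u) // https://mycluster.eastus.cloudapp.azure.com
}
```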
@@ -222,14 +246,23 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
winPoolIndex = -1
indexes := make([]int, 0)
indexToVM := make(map[int]string)

// Get nodes list from the k8s API before scaling for the desired pool
if sc.apiserverURL != "" && orchestratorInfo.OrchestratorType == api.Kubernetes {
nodes, err := operations.GetNodes(sc.client, sc.logger, sc.apiserverURL, sc.kubeconfig, time.Duration(5)*time.Minute, sc.agentPoolToScale, -1)
if err == nil && nodes != nil {
sc.nodes = nodes
}
}

if sc.agentPool.IsAvailabilitySets() {
availabilitySetIDs := []string{}

for vmsListPage, err := sc.client.ListVirtualMachines(ctx, sc.resourceGroupName); vmsListPage.NotDone(); err = vmsListPage.Next() {
if err != nil {
return errors.Wrap(err, "failed to get vms in the resource group")
return errors.Wrap(err, "failed to get VMs in the resource group")
} else if len(vmsListPage.Values()) < 1 {
return errors.New("The provided resource group does not contain any vms")
return errors.New("The provided resource group does not contain any VMs")
}
for _, vm := range vmsListPage.Values() {
vmName := *vm.Name
@@ -261,7 +294,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
currentNodeCount = len(indexes)

if currentNodeCount == sc.newDesiredAgentCount {
log.Info("Cluster is currently at the desired agent count.")
sc.printScaleTargetEqualsExisting(currentNodeCount)
return nil
}
highestUsedIndex = indexes[len(indexes)-1]
@@ -273,11 +306,31 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
}
sc.containerService.SetPlatformFaultDomainCount(fdCount)

// Scale down Scenario
// VMAS Scale down Scenario
if currentNodeCount > sc.newDesiredAgentCount {
if sc.masterFQDN == "" {
if sc.apiserverURL == "" {
cmd.Usage()
return errors.New("master-FQDN is required to scale down a kubernetes cluster's agent pool")
return errors.New("--apiserver is required to scale down a kubernetes cluster's agent pool")
}

if sc.nodes != nil {
if len(sc.nodes) == 1 {
sc.logger.Infof("There is %d node in pool %s before scaling down to %d:\n", len(sc.nodes), sc.agentPoolToScale, sc.newDesiredAgentCount)
} else {
sc.logger.Infof("There are %d nodes in pool %s before scaling down to %d:\n", len(sc.nodes), sc.agentPoolToScale, sc.newDesiredAgentCount)
}
operations.PrintNodes(sc.nodes)
numNodesFromK8sAPI := len(sc.nodes)
if currentNodeCount != numNodesFromK8sAPI {
sc.logger.Warnf("There are %d VMs named \"*%s*\" in the resource group %s, but there are %d nodes named \"*%s*\" in the Kubernetes cluster\n", currentNodeCount, sc.agentPoolToScale, sc.resourceGroupName, numNodesFromK8sAPI, sc.agentPoolToScale)
} else {
nodesToDelete := currentNodeCount - sc.newDesiredAgentCount
if nodesToDelete > 1 {
sc.logger.Infof("%d nodes will be deleted\n", nodesToDelete)
} else {
sc.logger.Infof("%d node will be deleted\n", nodesToDelete)
}
}
}

vmsToDelete := make([]string, 0)
@@ -286,17 +339,19 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
vmsToDelete = append(vmsToDelete, indexToVM[index])
}

for _, node := range vmsToDelete {
sc.logger.Infof("Node %s will be cordoned and drained\n", node)
}
if orchestratorInfo.OrchestratorType == api.Kubernetes {
kubeConfig, err := engine.GenerateKubeConfig(sc.containerService.Properties, sc.location)
if err != nil {
return errors.Wrap(err, "failed to generate kube config")
}
err = sc.drainNodes(kubeConfig, vmsToDelete)
err := sc.drainNodes(vmsToDelete)
if err != nil {
return errors.Wrap(err, "Got error while draining the nodes to be deleted")
}
}

for _, node := range vmsToDelete {
sc.logger.Infof("Node %s's VM will be deleted\n", node)
}
errList := operations.ScaleDownVMs(sc.client, sc.logger, sc.SubscriptionID.String(), sc.resourceGroupName, vmsToDelete...)
if errList != nil {
var err error
@@ -313,6 +368,16 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
}
return err
}
if sc.nodes != nil {
nodes, err := operations.GetNodes(sc.client, sc.logger, sc.apiserverURL, sc.kubeconfig, time.Duration(5)*time.Minute, sc.agentPoolToScale, sc.newDesiredAgentCount)
if err == nil && nodes != nil {
sc.nodes = nodes
sc.logger.Infof("Nodes in pool %s after scaling:\n", sc.agentPoolToScale)
operations.PrintNodes(sc.nodes)
} else {
sc.logger.Warningf("Unable to get nodes in pool %s after scaling:\n", sc.agentPoolToScale)
}
}

return sc.saveAPIModel()
}
@@ -327,6 +392,16 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
continue
}

if vmss.Sku != nil {
currentNodeCount = int(*vmss.Sku.Capacity)
if int(*vmss.Sku.Capacity) == sc.newDesiredAgentCount {
sc.printScaleTargetEqualsExisting(currentNodeCount)
return nil
} else if int(*vmss.Sku.Capacity) > sc.newDesiredAgentCount {
log.Warnf("VMSS scale down is an alpha feature: VMSS VM nodes will not be cordoned and drained before scaling down!")
}
}

osPublisher := vmss.VirtualMachineProfile.StorageProfile.ImageReference.Publisher
if osPublisher != nil && strings.EqualFold(*osPublisher, "MicrosoftWindowsServer") {
_, _, winPoolIndex, _, err = utils.WindowsVMNameParts(vmName)
@@ -412,6 +487,10 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
random := rand.New(rand.NewSource(time.Now().UnixNano()))
deploymentSuffix := random.Int31()

if sc.nodes != nil {
sc.logger.Infof("Nodes in pool %s before scaling:\n", sc.agentPoolToScale)
operations.PrintNodes(sc.nodes)
}
_, err = sc.client.DeployTemplate(
ctx,
sc.resourceGroupName,
@@ -421,6 +500,16 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
if sc.nodes != nil {
nodes, err := operations.GetNodes(sc.client, sc.logger, sc.apiserverURL, sc.kubeconfig, time.Duration(5)*time.Minute, sc.agentPoolToScale, sc.newDesiredAgentCount)
if err == nil && nodes != nil {
sc.nodes = nodes
sc.logger.Infof("Nodes in pool %s cluster after scaling:\n", sc.agentPoolToScale)
operations.PrintNodes(sc.nodes)
} else {
sc.logger.Warningf("Unable to get nodes in pool %s after scaling:\n", sc.agentPoolToScale)
}
}

return sc.saveAPIModel()
}
@@ -480,18 +569,14 @@ func addValue(m paramsMap, k string, v interface{}) {
}
}

func (sc *scaleCmd) drainNodes(kubeConfig string, vmsToDelete []string) error {
masterURL := sc.masterFQDN
if !strings.HasPrefix(masterURL, "https://") {
masterURL = fmt.Sprintf("https://%s", masterURL)
}
func (sc *scaleCmd) drainNodes(vmsToDelete []string) error {
numVmsToDrain := len(vmsToDelete)
errChan := make(chan *operations.VMScalingErrorDetails, numVmsToDrain)
defer close(errChan)
for _, vmName := range vmsToDelete {
go func(vmName string) {
err := operations.SafelyDrainNode(sc.client, sc.logger,
masterURL, kubeConfig, vmName, time.Duration(60)*time.Minute)
sc.apiserverURL, sc.kubeconfig, vmName, time.Duration(60)*time.Minute)
if err != nil {
log.Errorf("Failed to drain node %s, got error %v", vmName, err)
errChan <- &operations.VMScalingErrorDetails{Error: err, Name: vmName}
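A self-contained sketch of the fan-out/collect pattern `drainNodes` uses above: one goroutine per VM reporting into a channel buffered to the number of workers so no send can block. `drainOne` stands in for `operations.SafelyDrainNode`, and the success-path bookkeeping here is a simplification, not the exact aks-engine implementation:

```go
package main

import (
	"fmt"
	"time"
)

type drainError struct {
	Name string
	Err  error
}

// drainAll runs drainOne concurrently for every VM and returns the failures.
func drainAll(vmNames []string, drainOne func(string) error) []drainError {
	errChan := make(chan drainError, len(vmNames))
	for _, name := range vmNames {
		go func(name string) {
			if err := drainOne(name); err != nil {
				errChan <- drainError{Name: name, Err: err}
				return
			}
			errChan <- drainError{Name: name} // success: Err stays nil
		}(name)
	}
	var failures []drainError
	for range vmNames {
		if d := <-errChan; d.Err != nil {
			failures = append(failures, d)
		}
	}
	return failures
}

func main() {
	failures := drainAll([]string{"k8s-agentpool1-0", "k8s-agentpool1-1"}, func(name string) error {
		time.Sleep(10 * time.Millisecond) // stand-in for cordon + drain
		return nil
	})
	fmt.Printf("%d drains failed\n", len(failures))
}
```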
@@ -510,3 +595,20 @@ func (sc *scaleCmd) drainNodes(kubeConfig string, vmsToDelete []string) error {

return nil
}

func (sc *scaleCmd) printScaleTargetEqualsExisting(currentNodeCount int) {
var printNodes bool
trailingChar := "."
if sc.nodes != nil {
printNodes = true
trailingChar = ":"
}
log.Infof("Node pool %s is already at the desired count %d%s", sc.agentPoolToScale, sc.newDesiredAgentCount, trailingChar)
if printNodes {
operations.PrintNodes(sc.nodes)
}
numNodesFromK8sAPI := len(sc.nodes)
if currentNodeCount != numNodesFromK8sAPI {
sc.logger.Warnf("There are %d nodes named \"*%s*\" in the Kubernetes cluster, but there are %d VMs named \"*%s*\" in the resource group %s\n", numNodesFromK8sAPI, sc.agentPoolToScale, currentNodeCount, sc.agentPoolToScale, sc.resourceGroupName)
}
}
@@ -50,7 +50,7 @@
"scale",
"--debug",
"--api-model=${workspaceRoot}/_output/${input:clusterName}/apimodel.json",
"--master-FQDN=${input:clusterName}.eastus.cloudapp.azure.com",
"--apiserver=${input:clusterName}.eastus.cloudapp.azure.com",
"--location=${input:location}",
"--resource-group=${input:clusterName}",
"--new-node-count=${input:newNodeCount}",
@@ -22,7 +22,7 @@ run `aks-engine rotate-certs`. For example:

```bash
CLUSTER="<CLUSTER_DNS_PREFIX>" && bin/aks-engine rotate-certs --api-model _output/${CLUSTER}/apimodel.json
--client-id "<YOUR_CLIENT_ID>" --client-secret "<YOUR_CLIENT_SECRET>" --location <CLUSTER_LOCATION>
--master-FQDN ${CLUSTER}.<CLUSTER_LOCATION>.cloudapp.azure.com --ssh _output/${CLUSTER}-ssh
--apiserver ${CLUSTER}.<CLUSTER_LOCATION>.cloudapp.azure.com --ssh _output/${CLUSTER}-ssh
--subscription-id "<YOUR_SUBSCRIPTION_ID>" -g ${CLUSTER} -o _output/${CLUSTER}
```
@@ -57,4 +57,4 @@ The certificate rotation tool has not been tested on and is expected to fail wit

The rotation involves rebooting the nodes. ALL VMs in the resource group will be restarted as part of running the `rotate-certs` command. If the resource group contains any VMs that are not part of the cluster, they will be restarted as well.

The tool is not currently idempotent, meaning that if the rotation fails halfway through or is interrupted, you will most likely not be able to re-run the operation without manual intervention. There is a risk that your cluster will become unrecoverable, which is why it is strongly recommended to follow the [preparation step](#preparation).
@@ -20,7 +20,7 @@ $ aks-engine scale --subscription-id <subscription_id> \
--client-id '<service principal client ID>' \
--client-secret '<service principal client secret>' \
--api-model _output/mycluster/apimodel.json --new-node-count <desired node count> \
--node-pool agentpool1 --master-FQDN mycluster.<location>.cloudapp.azure.com
--node-pool agentpool1 --apiserver mycluster.<location>.cloudapp.azure.com
```

This command will re-use the `apimodel.json` file inside the output directory as input for a new ARM template deployment that will execute the scaling operation against the desired agent pool. When the scaling operation is done, it will update the cluster definition in that same `apimodel.json` file to reflect the new node count and thus the updated, current cluster configuration.
@@ -38,6 +38,6 @@ This command will re-use the `apimodel.json` file inside the output directory as
|--certificate-path|depends|The path to the file which contains the client certificate. This is required if the auth-method is set to client_certificate|
|--node-pool|depends|Required if there is more than one node pool. Which node pool should be scaled.|
|--new-node-count|yes|Desired number of nodes in the node pool.|
|--master-FQDN|depends|When scaling down a kubernetes cluster this is required. The master FDQN so that the nodes can be cordoned and drained before removal. This should be output as part of the create template or it can be found by looking at the public ip addresses in the resource group.|
|--apiserver|when scaling down|apiserver endpoint (required to cordon and drain nodes). This should be output as part of the create template or it can be found by looking at the public ip addresses in the resource group.|
|--auth-method|no|The authentication method used. Default value is `client_secret`. Other supported values are: `cli`, `client_certificate`, and `device`.|
|--language|no|Language to return error message in. Default value is "en-us".|
@@ -40,7 +40,7 @@ Add (or copy) an entry in the `agentPoolProfiles` array.

Use the `aks-engine scale` command

    aks-engine scale --location westeurope --subscription-id "xxx" --resource-group "<my-resource-group>" \
    --api-model ./somedir/apimodel.json --node-pool <nodepool name> --new-node-count <desired number of nodes> --master-FQDN <fqdn of the master lb>
    --api-model ./somedir/apimodel.json --node-pool <nodepool name> --new-node-count <desired number of nodes> --apiserver <apiserver endpoint FQDN or IP address>

**Remember to also update your original api-model.json file (used for the 1st deployment), or else you would end up with the original number of VMs after using the `generate` command described above.**
@@ -20,7 +20,7 @@ sleep 180
--api-model ${APIMODEL} \
--location ${LOCATION} \
--resource-group ${RESOURCE_GROUP} \
--master-FQDN "${INSTANCE_NAME}.${LOCATION}.cloudapp.azure.com" \
--apiserver "${INSTANCE_NAME}.${LOCATION}.cloudapp.azure.com" \
--node-pool "agentpool1" \
--new-node-count ${NEW_AGENT_NODE_COUNT} \
--auth-method client_secret \
@@ -31,10 +31,10 @@ type KubernetesClientSetClient struct {
interval, timeout time.Duration
}

//GetKubernetesClient returns a KubernetesClient hooked up to the api server at the masterURL.
func (az *AzureClient) GetKubernetesClient(masterURL, kubeConfig string, interval, timeout time.Duration) (armhelpers.KubernetesClient, error) {
//GetKubernetesClient returns a KubernetesClient hooked up to the api server at the apiserverURL.
func (az *AzureClient) GetKubernetesClient(apiserverURL, kubeConfig string, interval, timeout time.Duration) (armhelpers.KubernetesClient, error) {
// creates the clientset
config, err := clientcmd.BuildConfigFromKubeconfigGetter(masterURL, func() (*clientcmdapi.Config, error) { return clientcmd.Load([]byte(kubeConfig)) })
config, err := clientcmd.BuildConfigFromKubeconfigGetter(apiserverURL, func() (*clientcmdapi.Config, error) { return clientcmd.Load([]byte(kubeConfig)) })
if err != nil {
return nil, err
}
@@ -22,7 +22,7 @@ func (az *AzureClient) DeployTemplate(ctx context.Context, resourceGroupName, de
},
}

log.Infof("Starting ARM Deployment (%s). This will take some time...", deploymentName)
log.Infof("Starting ARM Deployment %s in resource group %s. This will take some time...", deploymentName, resourceGroupName)
future, err := az.deploymentsClient.CreateOrUpdate(ctx, resourceGroupName, deploymentName, deployment)
if err != nil {
return de, err
@@ -173,7 +173,7 @@ type AKSEngineClient interface {
DeleteManagedDisk(ctx context.Context, resourceGroupName string, diskName string) error
ListManagedDisksByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskListPage, err error)

GetKubernetesClient(masterURL, kubeConfig string, interval, timeout time.Duration) (KubernetesClient, error)
GetKubernetesClient(apiserverURL, kubeConfig string, interval, timeout time.Duration) (KubernetesClient, error)

ListProviders(ctx context.Context) (ProviderListResultPage, error)
@@ -30,10 +30,10 @@ type KubernetesClientSetClient struct {
interval, timeout time.Duration
}

// GetKubernetesClient returns a KubernetesClient hooked up to the api server at the masterURL.
func (az *AzureClient) GetKubernetesClient(masterURL, kubeConfig string, interval, timeout time.Duration) (KubernetesClient, error) {
// GetKubernetesClient returns a KubernetesClient hooked up to the api server at the apiserverURL.
func (az *AzureClient) GetKubernetesClient(apiserverURL, kubeConfig string, interval, timeout time.Duration) (KubernetesClient, error) {
// creates the clientset
config, err := clientcmd.BuildConfigFromKubeconfigGetter(masterURL, func() (*clientcmdapi.Config, error) { return clientcmd.Load([]byte(kubeConfig)) })
config, err := clientcmd.BuildConfigFromKubeconfigGetter(apiserverURL, func() (*clientcmdapi.Config, error) { return clientcmd.Load([]byte(kubeConfig)) })
if err != nil {
return nil, err
}
@@ -875,7 +875,7 @@ func (mc *MockAKSEngineClient) ListManagedDisksByResourceGroup(ctx context.Conte
}

//GetKubernetesClient mock
func (mc *MockAKSEngineClient) GetKubernetesClient(masterURL, kubeConfig string, interval, timeout time.Duration) (KubernetesClient, error) {
func (mc *MockAKSEngineClient) GetKubernetesClient(apiserverURL, kubeConfig string, interval, timeout time.Duration) (KubernetesClient, error) {
if mc.FailGetKubernetesClient {
return nil, errors.New("GetKubernetesClient failed")
}
@@ -68,7 +68,7 @@ type Transformer struct {

// NormalizeForK8sSLBScalingOrUpgrade takes a template and removes elements that are unwanted in a K8s Standard LB cluster scale up/down case
func (t *Transformer) NormalizeForK8sSLBScalingOrUpgrade(logger *logrus.Entry, templateMap map[string]interface{}) error {
logger.Infoln("Running NormalizeForK8sSLBScalingOrUpgrade...")
logger.Debugf("Running NormalizeForK8sSLBScalingOrUpgrade...")
lbIndex := -1
resources := templateMap[resourcesFieldName].([]interface{})
@@ -194,7 +194,7 @@ func (t *Transformer) NormalizeForK8sVMASScalingUp(logger *logrus.Entry, templat
}

if rtIndex == -1 {
logger.Infof("Found no resources with type %s in the template.", rtResourceType)
logger.Debugf("Found no resources with type %s in the template.", rtResourceType)
} else {
indexesToRemove = append(indexesToRemove, rtIndex)
}
@@ -34,9 +34,9 @@ type drainOperation struct {
type podFilter func(v1.Pod) bool

// SafelyDrainNode safely drains a node so that it can be deleted from the cluster
func SafelyDrainNode(az armhelpers.AKSEngineClient, logger *log.Entry, masterURL, kubeConfig, nodeName string, timeout time.Duration) error {
func SafelyDrainNode(az armhelpers.AKSEngineClient, logger *log.Entry, apiserverURL, kubeConfig, nodeName string, timeout time.Duration) error {
//get client using kubeconfig
client, err := az.GetKubernetesClient(masterURL, kubeConfig, interval, timeout)
client, err := az.GetKubernetesClient(apiserverURL, kubeConfig, interval, timeout)
if err != nil {
return err
}
@@ -78,7 +78,14 @@ func (o *drainOperation) deleteOrEvictPodsSimple() error {
if err != nil {
return err
}
o.logger.Infof("%d pods need to be removed/deleted", len(pods))
if len(pods) > 0 {
o.logger.WithFields(log.Fields{
"prefix": "drain",
"node": o.node.Name,
}).Infof("%d pods will be deleted", len(pods))
} else {
o.logger.Infof("Node %s has no scheduled pods", o.node.Name)
}

err = o.deleteOrEvictPods(pods)
if err != nil {
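The structured log call introduced above follows the usual logrus fields pattern; a minimal standalone sketch (the node and pod names are invented for illustration):

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.NewEntry(log.New())
	pods := []string{"kube-proxy-abc12", "coredns-xyz34"}
	nodeName := "k8s-agentpool1-12345678-0"

	if len(pods) > 0 {
		// Attach "prefix" and "node" fields so the prefixed formatter groups drain output per node.
		logger.WithFields(log.Fields{
			"prefix": "drain",
			"node":   nodeName,
		}).Infof("%d pods will be deleted", len(pods))
	} else {
		logger.Infof("Node %s has no scheduled pods", nodeName)
	}
}
```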
@@ -23,19 +23,19 @@ const (
func CleanDeleteVirtualMachine(az armhelpers.AKSEngineClient, logger *log.Entry, subscriptionID, resourceGroup, name string) error {
ctx, cancel := context.WithTimeout(context.Background(), armhelpers.DefaultARMOperationTimeout)
defer cancel()
logger.Infof("fetching VM: %s/%s", resourceGroup, name)
logger.Debugf("fetching VM %s in resource group %s", name, resourceGroup)
vm, err := az.GetVirtualMachine(ctx, resourceGroup, name)
if err != nil {
logger.Errorf("failed to get VM: %s/%s: %s", resourceGroup, name, err.Error())
logger.Errorf("failed to get VM %s in resource group %s: %s", name, resourceGroup, err.Error())
return err
}

vhd := vm.VirtualMachineProperties.StorageProfile.OsDisk.Vhd
managedDisk := vm.VirtualMachineProperties.StorageProfile.OsDisk.ManagedDisk
if vhd == nil && managedDisk == nil {
logger.Errorf("failed to get a valid os disk URI for VM: %s/%s", resourceGroup, name)
logger.Errorf("failed to get a valid OS disk URI for VM %s in resource group %s", name, resourceGroup)

return errors.New("os disk does not have a VHD URI")
return errors.New("OS disk does not have a VHD URI")
}

osDiskName := vm.VirtualMachineProperties.StorageProfile.OsDisk.Name
@@ -43,23 +43,21 @@ func CleanDeleteVirtualMachine(az armhelpers.AKSEngineClient, logger *log.Entry,
var nicName string
nicID := (*vm.VirtualMachineProperties.NetworkProfile.NetworkInterfaces)[0].ID
if nicID == nil {
logger.Warnf("NIC ID is not set for VM (%s/%s)", resourceGroup, name)
logger.Warnf("NIC ID is not set for VM %s in resource group %s)", name, resourceGroup)
} else {
nicName, err = utils.ResourceName(*nicID)
if err != nil {
return err
}
logger.Infof("found nic name for VM (%s/%s): %s", resourceGroup, name, nicName)
logger.Debugf("found NIC name \"%s\" for VM %s in resource group %s", nicName, name, resourceGroup)
}
logger.Infof("deleting VM: %s/%s", resourceGroup, name)
logger.Infof("waiting for vm deletion: %s/%s", resourceGroup, name)
logger.Infof("deleting VM %s in resource group %s ...", name, resourceGroup)
if err = az.DeleteVirtualMachine(ctx, resourceGroup, name); err != nil {
return err
}

if len(nicName) > 0 {
logger.Infof("deleting nic: %s/%s", resourceGroup, nicName)
logger.Infof("waiting for nic deletion: %s/%s", resourceGroup, nicName)
logger.Infof("deleting NIC %s in resource group %s ...", nicName, resourceGroup)
if err = az.DeleteNetworkInterface(ctx, resourceGroup, nicName); err != nil {
return err
}
@@ -72,7 +70,7 @@ func CleanDeleteVirtualMachine(az armhelpers.AKSEngineClient, logger *log.Entry,
return err
}

logger.Infof("found os disk storage reference: %s %s %s", accountName, vhdContainer, vhdBlob)
logger.Debugf("found OS disk storage reference: %s:%s/%s", accountName, vhdContainer, vhdBlob)

var as armhelpers.AKSStorageClient
as, err = az.GetStorageClient(ctx, resourceGroup, accountName)
@@ -80,15 +78,15 @@ func CleanDeleteVirtualMachine(az armhelpers.AKSEngineClient, logger *log.Entry,
return err
}

logger.Infof("deleting blob: %s/%s", vhdContainer, vhdBlob)
logger.Infof("deleting blob %s/%s ...", vhdContainer, vhdBlob)
if err = as.DeleteBlob(vhdContainer, vhdBlob, &azStorage.DeleteBlobOptions{}); err != nil {
return err
}
} else if managedDisk != nil {
if osDiskName == nil {
logger.Warnf("osDisk is not set for VM %s/%s", resourceGroup, name)
logger.Warnf("managed disk Name is not set for VM %s in resource group %s", name, resourceGroup)
} else {
logger.Infof("deleting managed disk: %s/%s", resourceGroup, *osDiskName)
logger.Infof("deleting managed disk %s in resource group %s ...", *osDiskName, resourceGroup)
if err = az.DeleteManagedDisk(ctx, resourceGroup, *osDiskName); err != nil {
return err
}
@@ -100,7 +98,7 @@ func CleanDeleteVirtualMachine(az armhelpers.AKSEngineClient, logger *log.Entry,
// The role assignments should only be relevant if managed identities are used,
// but always cleaning them up is easier than adding rule based logic here and there.
scope := fmt.Sprintf(AADRoleResourceGroupScopeTemplate, subscriptionID, resourceGroup)
logger.Infof("fetching roleAssignments: %s with principal %s", scope, *vm.Identity.PrincipalID)
logger.Debugf("fetching role assignments: %s with principal %s", scope, *vm.Identity.PrincipalID)
for vmRoleAssignmentsPage, err := az.ListRoleAssignmentsForPrincipal(ctx, scope, *vm.Identity.PrincipalID); vmRoleAssignmentsPage.NotDone(); err = vmRoleAssignmentsPage.Next() {
if err != nil {
logger.Errorf("failed to list role assignments: %s/%s: %s", scope, *vm.Identity.PrincipalID, err)
@@ -108,7 +106,7 @@ func CleanDeleteVirtualMachine(az armhelpers.AKSEngineClient, logger *log.Entry,
}

for _, roleAssignment := range vmRoleAssignmentsPage.Values() {
logger.Infof("deleting role assignment: %s", *roleAssignment.ID)
logger.Infof("deleting role assignment %s ...", *roleAssignment.ID)
_, deleteRoleAssignmentErr := az.DeleteRoleAssignmentByID(ctx, *roleAssignment.ID)
if deleteRoleAssignmentErr != nil {
logger.Errorf("failed to delete role assignment: %s: %s", *roleAssignment.ID, deleteRoleAssignmentErr.Error())
@@ -0,0 +1,109 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

package operations

import (
"context"
"fmt"
"os"
"strings"
"text/tabwriter"
"time"

"github.com/Azure/aks-engine/pkg/armhelpers"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
)

type getNodesResult struct {
nodes []v1.Node
err error
}

// GetNodes is a thin wrapper around the k8s api list nodes interface
// Pass in a pool string to filter only node objects in that AKS Engine-deployed node pool
// Pass in a waitForNumNodes int to wait for an explicit target node count before returning
func GetNodes(az armhelpers.AKSEngineClient, logger *log.Entry, apiserverURL, kubeConfig string, timeout time.Duration, pool string, waitForNumNodes int) ([]v1.Node, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan getNodesResult)
var mostRecentGetNodesErr error
go func() {
for {
select {
case <-ctx.Done():
return
case ch <- listNodes(az, logger, apiserverURL, kubeConfig, timeout):
time.Sleep(3 * time.Second)
}
}
}()
for {
select {
case result := <-ch:
mostRecentGetNodesErr = result.err
if result.err == nil {
var ret []v1.Node
for _, node := range result.nodes {
if strings.Contains(node.Name, pool) {
ret = append(ret, node)
}
}
if waitForNumNodes >= 0 {
if len(ret) == waitForNumNodes {
return ret, nil
}
} else {
return ret, nil
}
}
case <-ctx.Done():
return nil, errors.Errorf("GetAllNodes timed out: %s\n", mostRecentGetNodesErr)
}
}
}

func listNodes(az armhelpers.AKSEngineClient, logger *log.Entry, apiserverURL, kubeConfig string, timeout time.Duration) getNodesResult {
logger.Debugf("Instantiating a Kubernetes client object at apiserver %s", apiserverURL)
client, err := az.GetKubernetesClient(apiserverURL, kubeConfig, interval, timeout)
if err != nil {
return getNodesResult{
err: err,
}
}
logger.Debugf("Listing Nodes at apiserver %s", apiserverURL)
nodes, err := client.ListNodes()
if err != nil {
return getNodesResult{
err: err,
}
}

return getNodesResult{
nodes: nodes.Items,
err: nil,
}
}

// PrintNodes outputs nodes to stdout
func PrintNodes(nodes []v1.Node) {
w := tabwriter.NewWriter(os.Stdout, 0, 8, 4, ' ', tabwriter.FilterHTML)
fmt.Fprintln(w, "NODE\tSTATUS\tVERSION\tOS\tKERNEL")
for _, node := range nodes {
nodeStatus := "NotReady"
for _, condition := range node.Status.Conditions {
if condition.Type == "Ready" && condition.Status == "True" {
nodeStatus = "Ready"
}
}
fmt.Fprintf(w, "%s\t", node.Name)
fmt.Fprintf(w, "%s\t", nodeStatus)
fmt.Fprintf(w, "%s\t", node.Status.NodeInfo.KubeletVersion)
fmt.Fprintf(w, "%s\t", node.Status.NodeInfo.OSImage)
fmt.Fprintf(w, "%s\n", node.Status.NodeInfo.KernelVersion)

}
w.Flush()
}
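A hedged usage sketch of the new helpers added above (the client value is assumed to be an already-authenticated `armhelpers.AKSEngineClient` obtained elsewhere; the pool name and target count are illustrative):

```go
package main

import (
	"time"

	"github.com/Azure/aks-engine/pkg/armhelpers"
	"github.com/Azure/aks-engine/pkg/operations"
	log "github.com/sirupsen/logrus"
)

// printPoolAfterScale waits up to five minutes for the pool to reach the desired
// node count, then prints the node table to stdout.
func printPoolAfterScale(client armhelpers.AKSEngineClient, apiserverURL, kubeconfig string) error {
	logger := log.NewEntry(log.New())
	nodes, err := operations.GetNodes(client, logger, apiserverURL, kubeconfig,
		time.Duration(5)*time.Minute, "agentpool1", 3)
	if err != nil {
		return err
	}
	operations.PrintNodes(nodes)
	return nil
}

func main() {}
```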
@ -0,0 +1,212 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT license.
|
||||
|
||||
package operations
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/aks-engine/pkg/armhelpers"
|
||||
. "github.com/Azure/aks-engine/pkg/test"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestGetNodes(t *testing.T) {
|
||||
RunSpecsWithReporters(t, "Kubernetes API Operations conveniences library", "Server Suite")
|
||||
}
|
||||
|
||||
var _ = Describe("GetNodes tests", func() {
|
||||
It("listNodes should return a result set with nodes", func() {
|
||||
mockClient := armhelpers.MockAKSEngineClient{MockKubernetesClient: &armhelpers.MockKubernetesClient{}}
|
||||
logger := log.NewEntry(log.New())
|
||||
apiserverURL := "https://apiserver"
|
||||
kubeconfig := "kubeconfig"
|
||||
timeout := time.Minute * 1
|
||||
|
||||
result := listNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout)
|
||||
|
||||
Expect(result.err).To(BeNil())
|
||||
Expect(result.nodes).To(HaveLen(2))
|
||||
Expect(result.nodes[0].Name).To(Equal("k8s-master-1234"))
|
||||
Expect(result.nodes[0].Status.Conditions[0].Type).To(Equal(v1.NodeReady))
|
||||
Expect(result.nodes[0].Status.Conditions[0].Status).To(Equal(v1.ConditionTrue))
|
||||
Expect(result.nodes[0].Status.NodeInfo.KubeletVersion).To(Equal("1.9.10"))
|
||||
Expect(result.nodes[1].Name).To(Equal("k8s-agentpool3-1234"))
|
||||
Expect(result.nodes[1].Status.Conditions[0].Type).To(Equal(v1.NodeOutOfDisk))
|
||||
Expect(result.nodes[1].Status.Conditions[0].Status).To(Equal(v1.ConditionTrue))
|
||||
Expect(result.nodes[1].Status.NodeInfo.KubeletVersion).To(Equal("1.9.9"))
|
||||
})
|
||||
|
||||
It("listNodes should error when the k8s client getter fails", func() {
|
||||
mockClient := armhelpers.MockAKSEngineClient{
|
||||
FailGetKubernetesClient: true,
|
||||
MockKubernetesClient: &armhelpers.MockKubernetesClient{}}
|
||||
logger := log.NewEntry(log.New())
|
||||
apiserverURL := "https://apiserver"
|
||||
kubeconfig := "kubeconfig"
|
||||
timeout := time.Minute * 1
|
||||
|
||||
result := listNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout)
|
||||
|
||||
Expect(result.err).NotTo(BeNil())
|
||||
Expect(result.err.Error()).To(Equal("GetKubernetesClient failed"))
|
||||
Expect(result.nodes).To(HaveLen(0))
|
||||
})
|
||||
|
||||
It("listNodes should error when the k8s API fails to list nodes", func() {
|
||||
mockClient := armhelpers.MockAKSEngineClient{MockKubernetesClient: &armhelpers.MockKubernetesClient{
|
||||
FailListNodes: true,
|
||||
}}
|
||||
logger := log.NewEntry(log.New())
|
||||
apiserverURL := "https://apiserver"
|
||||
kubeconfig := "kubeconfig"
|
||||
timeout := time.Minute * 1
|
||||
|
||||
result := listNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout)
|
||||
|
||||
Expect(result.err).NotTo(BeNil())
|
||||
Expect(result.err.Error()).To(Equal("ListNodes failed"))
|
||||
Expect(result.nodes).To(HaveLen(0))
|
||||
})
|
||||
|
||||
It("GetNodes should return nodes", func() {
|
||||
mockClient := armhelpers.MockAKSEngineClient{MockKubernetesClient: &armhelpers.MockKubernetesClient{}}
|
||||
logger := log.NewEntry(log.New())
|
||||
apiserverURL := "https://apiserver"
|
||||
kubeconfig := "kubeconfig"
|
||||
timeout := time.Minute * 1
|
||||
|
||||
nodes, err := GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "", -1)
|
||||
|
||||
Expect(err).To(BeNil())
|
||||
Expect(nodes).To(HaveLen(2))
|
||||
Expect(nodes[0].Name).To(Equal("k8s-master-1234"))
|
||||
Expect(nodes[0].Status.Conditions[0].Type).To(Equal(v1.NodeReady))
|
||||
Expect(nodes[0].Status.Conditions[0].Status).To(Equal(v1.ConditionTrue))
|
||||
Expect(nodes[0].Status.NodeInfo.KubeletVersion).To(Equal("1.9.10"))
|
||||
Expect(nodes[1].Name).To(Equal("k8s-agentpool3-1234"))
|
||||
Expect(nodes[1].Status.Conditions[0].Type).To(Equal(v1.NodeOutOfDisk))
|
||||
Expect(nodes[1].Status.Conditions[0].Status).To(Equal(v1.ConditionTrue))
|
||||
Expect(nodes[1].Status.NodeInfo.KubeletVersion).To(Equal("1.9.9"))
|
||||
})
|
||||
|
||||
It("GetNodes should only return nodes in a pool when a pool string is specified", func() {
|
||||
mockClient := armhelpers.MockAKSEngineClient{MockKubernetesClient: &armhelpers.MockKubernetesClient{}}
|
||||
logger := log.NewEntry(log.New())
|
||||
apiserverURL := "https://apiserver"
|
||||
kubeconfig := "kubeconfig"
|
||||
timeout := time.Minute * 1
|
||||
|
||||
nodes, err := GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "agentpool3", -1)
|
||||
|
||||
Expect(err).To(BeNil())
|
||||
Expect(nodes).To(HaveLen(1))
|
||||
Expect(nodes[0].Name).To(Equal("k8s-agentpool3-1234"))
|
||||
Expect(nodes[0].Status.Conditions[0].Type).To(Equal(v1.NodeOutOfDisk))
|
||||
Expect(nodes[0].Status.Conditions[0].Status).To(Equal(v1.ConditionTrue))
|
||||
Expect(nodes[0].Status.NodeInfo.KubeletVersion).To(Equal("1.9.9"))
|
||||
|
||||
nodes, err = GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "nonexistent", -1)
|
||||
|
||||
Expect(err).To(BeNil())
|
||||
Expect(nodes).To(HaveLen(0))
|
||||
})
|
||||
|
||||
It("GetNodes should respect the waitForNumNodes arg", func() {
|
||||
mockClient := armhelpers.MockAKSEngineClient{MockKubernetesClient: &armhelpers.MockKubernetesClient{}}
|
||||
logger := log.NewEntry(log.New())
|
||||
apiserverURL := "https://apiserver"
|
||||
kubeconfig := "kubeconfig"
|
||||
timeout := time.Second * 1
|
||||
|
||||
nodes, err := GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "", 2)
|
||||
|
||||
Expect(err).To(BeNil())
|
||||
Expect(nodes).To(HaveLen(2))
|
||||
Expect(nodes[0].Name).To(Equal("k8s-master-1234"))
|
||||
Expect(nodes[0].Status.Conditions[0].Type).To(Equal(v1.NodeReady))
|
||||
Expect(nodes[0].Status.Conditions[0].Status).To(Equal(v1.ConditionTrue))
|
||||
Expect(nodes[0].Status.NodeInfo.KubeletVersion).To(Equal("1.9.10"))
|
||||
Expect(nodes[1].Name).To(Equal("k8s-agentpool3-1234"))
|
||||
Expect(nodes[1].Status.Conditions[0].Type).To(Equal(v1.NodeOutOfDisk))
|
||||
Expect(nodes[1].Status.Conditions[0].Status).To(Equal(v1.ConditionTrue))
|
||||
Expect(nodes[1].Status.NodeInfo.KubeletVersion).To(Equal("1.9.9"))
|
||||
|
||||
// waiting for more nodes than the API returns should timeout
|
||||
nodes, err = GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "", 3)
|
||||
var mostRecentGetNodesErr error
|
||||
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err.Error()).To(Equal(fmt.Sprintf("GetAllNodes timed out: %s\n", mostRecentGetNodesErr)))
|
||||
Expect(nodes).To(BeNil())
|
||||
|
||||
// waiting for fewer nodes than the API returns should timeout
|
||||
nodes, err = GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "", 1)
|
||||
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err.Error()).To(Equal(fmt.Sprintf("GetAllNodes timed out: %s\n", mostRecentGetNodesErr)))
|
||||
Expect(nodes).To(BeNil())
|
||||
|
||||
// filtering by pool name and and the waiting for the expected node count
|
||||
nodes, err = GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "agentpool3", 1)
|
||||
|
||||
Expect(err).To(BeNil())
|
||||
Expect(nodes).To(HaveLen(1))
|
||||
Expect(nodes[0].Name).To(Equal("k8s-agentpool3-1234"))
|
||||
Expect(nodes[0].Status.Conditions[0].Type).To(Equal(v1.NodeOutOfDisk))
|
||||
Expect(nodes[0].Status.Conditions[0].Status).To(Equal(v1.ConditionTrue))
|
||||
Expect(nodes[0].Status.NodeInfo.KubeletVersion).To(Equal("1.9.9"))
|
||||
})
|
||||
|
||||
It("GetNodes should return a meaningful timeout error when the k8s API fails to list nodes", func() {
|
||||
mockClient := armhelpers.MockAKSEngineClient{MockKubernetesClient: &armhelpers.MockKubernetesClient{
|
||||
FailListNodes: true,
|
||||
}}
|
||||
logger := log.NewEntry(log.New())
|
||||
apiserverURL := "https://apiserver"
|
||||
kubeconfig := "kubeconfig"
|
||||
timeout := time.Second * 1 // set the timeout value high enough to allow for a single attempt
|
||||
|
||||
nodes, err := GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "", -1)
|
||||
mostRecentGetNodesErr := errors.New("ListNodes failed")
|
||||
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err.Error()).To(Equal(fmt.Sprintf("GetAllNodes timed out: %s\n", mostRecentGetNodesErr)))
|
||||
Expect(nodes).To(BeNil())
|
||||
})
|
||||
|
||||
It("GetNodes should return a vanilla timeout error if timeout occurs before a single request", func() {
|
||||
mockClient := armhelpers.MockAKSEngineClient{MockKubernetesClient: &armhelpers.MockKubernetesClient{}}
|
||||
logger := log.NewEntry(log.New())
|
||||
apiserverURL := "https://apiserver"
|
||||
kubeconfig := "kubeconfig"
|
||||
timeout := time.Second * 0 // by setting the timeout to 0 we time out immediately
|
||||
|
||||
nodes, err := GetNodes(&mockClient, logger, apiserverURL, kubeconfig, timeout, "", -1)
|
||||
var mostRecentGetNodesErr error
|
||||
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err.Error()).To(Equal(fmt.Sprintf("GetAllNodes timed out: %s\n", mostRecentGetNodesErr)))
|
||||
Expect(nodes).To(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
func ExamplePrintNodes() {
|
||||
var nodes []v1.Node
|
||||
node := v1.Node{}
|
||||
node.Name = "k8s-master-1234"
|
||||
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{Type: v1.NodeReady, Status: v1.ConditionTrue})
|
||||
node.Status.NodeInfo.KubeletVersion = "1.10.0"
|
||||
node.Status.NodeInfo.OSImage = "my-os"
|
||||
node.Status.NodeInfo.KernelVersion = "3.1.4"
|
||||
nodes = append(nodes, node)
|
||||
PrintNodes(nodes)
|
||||
// Output: NODE STATUS VERSION OS KERNEL
|
||||
// k8s-master-1234 Ready 1.10.0 my-os 3.1.4
|
||||
}
|
|
@@ -121,15 +121,15 @@ func (kan *UpgradeAgentNode) Validate(vmName *string) error {
nodeName := strings.ToLower(*vmName)

kan.logger.Infof("Validating %s", nodeName)
var masterURL string
var apiserverURL string
if kan.UpgradeContainerService.Properties.HostedMasterProfile != nil {
apiServerListeningPort := 443
masterURL = fmt.Sprintf("https://%s:%d", kan.UpgradeContainerService.Properties.HostedMasterProfile.FQDN, apiServerListeningPort)
apiserverURL = fmt.Sprintf("https://%s:%d", kan.UpgradeContainerService.Properties.HostedMasterProfile.FQDN, apiServerListeningPort)
} else {
masterURL = kan.UpgradeContainerService.Properties.MasterProfile.FQDN
apiserverURL = kan.UpgradeContainerService.Properties.MasterProfile.FQDN
}

client, err := kan.Client.GetKubernetesClient(masterURL, kan.kubeConfig, interval, kan.timeout)
client, err := kan.Client.GetKubernetesClient(apiserverURL, kan.kubeConfig, interval, kan.timeout)
if err != nil {
return &armhelpers.DeploymentValidationError{Err: err}
}
@@ -85,9 +85,9 @@ func (kmn *UpgradeMasterNode) Validate(vmName *string) error {
return nil
}

masterURL := kmn.UpgradeContainerService.Properties.MasterProfile.FQDN
apiserverURL := kmn.UpgradeContainerService.Properties.MasterProfile.FQDN

client, err := kmn.Client.GetKubernetesClient(masterURL, kmn.kubeConfig, interval, kmn.timeout)
client, err := kmn.Client.GetKubernetesClient(apiserverURL, kmn.kubeConfig, interval, kmn.timeout)
if err != nil {
return err
}
@@ -0,0 +1,9 @@
The MIT License (MIT)
Copyright (c) 2013 Mario L. Gutierrez

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -0,0 +1,285 @@
|
|||
package ansi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
black = iota
|
||||
red
|
||||
green
|
||||
yellow
|
||||
blue
|
||||
magenta
|
||||
cyan
|
||||
white
|
||||
defaultt = 9
|
||||
|
||||
normalIntensityFG = 30
|
||||
highIntensityFG = 90
|
||||
normalIntensityBG = 40
|
||||
highIntensityBG = 100
|
||||
|
||||
start = "\033["
|
||||
bold = "1;"
|
||||
blink = "5;"
|
||||
underline = "4;"
|
||||
inverse = "7;"
|
||||
strikethrough = "9;"
|
||||
|
||||
// Reset is the ANSI reset escape sequence
|
||||
Reset = "\033[0m"
|
||||
// DefaultBG is the default background
|
||||
DefaultBG = "\033[49m"
|
||||
// DefaultFG is the default foreground
|
||||
DefaultFG = "\033[39m"
|
||||
)
|
||||
|
||||
// Black FG
|
||||
var Black string
|
||||
|
||||
// Red FG
|
||||
var Red string
|
||||
|
||||
// Green FG
|
||||
var Green string
|
||||
|
||||
// Yellow FG
|
||||
var Yellow string
|
||||
|
||||
// Blue FG
|
||||
var Blue string
|
||||
|
||||
// Magenta FG
|
||||
var Magenta string
|
||||
|
||||
// Cyan FG
|
||||
var Cyan string
|
||||
|
||||
// White FG
|
||||
var White string
|
||||
|
||||
// LightBlack FG
|
||||
var LightBlack string
|
||||
|
||||
// LightRed FG
|
||||
var LightRed string
|
||||
|
||||
// LightGreen FG
|
||||
var LightGreen string
|
||||
|
||||
// LightYellow FG
|
||||
var LightYellow string
|
||||
|
||||
// LightBlue FG
|
||||
var LightBlue string
|
||||
|
||||
// LightMagenta FG
|
||||
var LightMagenta string
|
||||
|
||||
// LightCyan FG
|
||||
var LightCyan string
|
||||
|
||||
// LightWhite FG
|
||||
var LightWhite string
|
||||
|
||||
var (
|
||||
plain = false
|
||||
// Colors maps common color names to their ANSI color code.
|
||||
Colors = map[string]int{
|
||||
"black": black,
|
||||
"red": red,
|
||||
"green": green,
|
||||
"yellow": yellow,
|
||||
"blue": blue,
|
||||
"magenta": magenta,
|
||||
"cyan": cyan,
|
||||
"white": white,
|
||||
"default": defaultt,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
for i := 0; i < 256; i++ {
|
||||
Colors[strconv.Itoa(i)] = i
|
||||
}
|
||||
|
||||
Black = ColorCode("black")
|
||||
Red = ColorCode("red")
|
||||
Green = ColorCode("green")
|
||||
Yellow = ColorCode("yellow")
|
||||
Blue = ColorCode("blue")
|
||||
Magenta = ColorCode("magenta")
|
||||
Cyan = ColorCode("cyan")
|
||||
White = ColorCode("white")
|
||||
LightBlack = ColorCode("black+h")
|
||||
LightRed = ColorCode("red+h")
|
||||
LightGreen = ColorCode("green+h")
|
||||
LightYellow = ColorCode("yellow+h")
|
||||
LightBlue = ColorCode("blue+h")
|
||||
LightMagenta = ColorCode("magenta+h")
|
||||
LightCyan = ColorCode("cyan+h")
|
||||
LightWhite = ColorCode("white+h")
|
||||
}
|
||||
|
||||
// ColorCode returns the ANSI color color code for style.
|
||||
func ColorCode(style string) string {
|
||||
return colorCode(style).String()
|
||||
}
|
||||
|
||||
// Gets the ANSI color code for a style.
|
||||
func colorCode(style string) *bytes.Buffer {
|
||||
buf := bytes.NewBufferString("")
|
||||
if plain || style == "" {
|
||||
return buf
|
||||
}
|
||||
if style == "reset" {
|
||||
buf.WriteString(Reset)
|
||||
return buf
|
||||
} else if style == "off" {
|
||||
return buf
|
||||
}
|
||||
|
||||
foregroundBackground := strings.Split(style, ":")
|
||||
foreground := strings.Split(foregroundBackground[0], "+")
|
||||
fgKey := foreground[0]
|
||||
fg := Colors[fgKey]
|
||||
fgStyle := ""
|
||||
if len(foreground) > 1 {
|
||||
fgStyle = foreground[1]
|
||||
}
|
||||
|
||||
bg, bgStyle := "", ""
|
||||
|
||||
if len(foregroundBackground) > 1 {
|
||||
background := strings.Split(foregroundBackground[1], "+")
|
||||
bg = background[0]
|
||||
if len(background) > 1 {
|
||||
bgStyle = background[1]
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(start)
|
||||
base := normalIntensityFG
|
||||
if len(fgStyle) > 0 {
|
||||
if strings.Contains(fgStyle, "b") {
|
||||
buf.WriteString(bold)
|
||||
}
|
||||
if strings.Contains(fgStyle, "B") {
|
||||
buf.WriteString(blink)
|
||||
}
|
||||
if strings.Contains(fgStyle, "u") {
|
||||
buf.WriteString(underline)
|
||||
}
|
||||
if strings.Contains(fgStyle, "i") {
|
||||
buf.WriteString(inverse)
|
||||
}
|
||||
if strings.Contains(fgStyle, "s") {
|
||||
buf.WriteString(strikethrough)
|
||||
}
|
||||
if strings.Contains(fgStyle, "h") {
|
||||
base = highIntensityFG
|
||||
}
|
||||
}
|
||||
|
||||
// if 256-color
|
||||
n, err := strconv.Atoi(fgKey)
|
||||
if err == nil {
|
||||
fmt.Fprintf(buf, "38;5;%d;", n)
|
||||
} else {
|
||||
fmt.Fprintf(buf, "%d;", base+fg)
|
||||
}
|
||||
|
||||
base = normalIntensityBG
|
||||
if len(bg) > 0 {
|
||||
if strings.Contains(bgStyle, "h") {
|
||||
base = highIntensityBG
|
||||
}
|
||||
// if 256-color
|
||||
n, err := strconv.Atoi(bg)
|
||||
if err == nil {
|
||||
fmt.Fprintf(buf, "48;5;%d;", n)
|
||||
} else {
|
||||
fmt.Fprintf(buf, "%d;", base+Colors[bg])
|
||||
}
|
||||
}
|
||||
|
||||
// remove last ";"
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
buf.WriteRune('m')
|
||||
return buf
|
||||
}
|
||||
|
||||
// Color colors a string based on the ANSI color code for style.
|
||||
func Color(s, style string) string {
|
||||
if plain || len(style) < 1 {
|
||||
return s
|
||||
}
|
||||
buf := colorCode(style)
|
||||
buf.WriteString(s)
|
||||
buf.WriteString(Reset)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// ColorFunc creates a closure to avoid computation ANSI color code.
|
||||
func ColorFunc(style string) func(string) string {
|
||||
if style == "" {
|
||||
return func(s string) string {
|
||||
return s
|
||||
}
|
||||
}
|
||||
color := ColorCode(style)
|
||||
return func(s string) string {
|
||||
if plain || s == "" {
|
||||
return s
|
||||
}
|
||||
buf := bytes.NewBufferString(color)
|
||||
buf.WriteString(s)
|
||||
buf.WriteString(Reset)
|
||||
result := buf.String()
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// DisableColors disables ANSI color codes. The default is false (colors are on).
|
||||
func DisableColors(disable bool) {
|
||||
plain = disable
|
||||
if plain {
|
||||
Black = ""
|
||||
Red = ""
|
||||
Green = ""
|
||||
Yellow = ""
|
||||
Blue = ""
|
||||
Magenta = ""
|
||||
Cyan = ""
|
||||
White = ""
|
||||
LightBlack = ""
|
||||
LightRed = ""
|
||||
LightGreen = ""
|
||||
LightYellow = ""
|
||||
LightBlue = ""
|
||||
LightMagenta = ""
|
||||
LightCyan = ""
|
||||
LightWhite = ""
|
||||
} else {
|
||||
Black = ColorCode("black")
|
||||
Red = ColorCode("red")
|
||||
Green = ColorCode("green")
|
||||
Yellow = ColorCode("yellow")
|
||||
Blue = ColorCode("blue")
|
||||
Magenta = ColorCode("magenta")
|
||||
Cyan = ColorCode("cyan")
|
||||
White = ColorCode("white")
|
||||
LightBlack = ColorCode("black+h")
|
||||
LightRed = ColorCode("red+h")
|
||||
LightGreen = ColorCode("green+h")
|
||||
LightYellow = ColorCode("yellow+h")
|
||||
LightBlue = ColorCode("blue+h")
|
||||
LightMagenta = ColorCode("magenta+h")
|
||||
LightCyan = ColorCode("cyan+h")
|
||||
LightWhite = ColorCode("white+h")
|
||||
}
|
||||
}
@ -0,0 +1,65 @@
/*
Package ansi is a small, fast library to create ANSI colored strings and codes.

Installation

    # this installs the color viewer and the package
    go get -u github.com/mgutz/ansi/cmd/ansi-mgutz

Example

    // colorize a string, SLOW
    msg := ansi.Color("foo", "red+b:white")

    // create a closure to avoid recalculating the ANSI code
    phosphorize := ansi.ColorFunc("green+h:black")
    msg = phosphorize("Bring back the 80s!")
    msg2 := phosphorize("Look, I'm a CRT!")

    // cache escape codes and build strings manually
    lime := ansi.ColorCode("green+h:black")
    reset := ansi.ColorCode("reset")

    fmt.Println(lime, "Bring back the 80s!", reset)

Other examples

    Color(s, "red")           // red
    Color(s, "red+b")         // red bold
    Color(s, "red+B")         // red blinking
    Color(s, "red+u")         // red underline
    Color(s, "red+bh")        // red bold bright
    Color(s, "red:white")     // red on white
    Color(s, "red+b:white+h") // red bold on white bright
    Color(s, "red+B:white+h") // red blink on white bright

To view color combinations, from terminal

    ansi-mgutz

Style format

    "foregroundColor+attributes:backgroundColor+attributes"

Colors

    black
    red
    green
    yellow
    blue
    magenta
    cyan
    white

Attributes

    b = bold foreground
    B = Blink foreground
    u = underline foreground
    h = high intensity (bright) foreground, background
    i = inverse

Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors)
*/
package ansi
@ -0,0 +1,57 @@
package ansi

import (
	"fmt"
	"sort"

	colorable "github.com/mattn/go-colorable"
)

// PrintStyles prints all style combinations to the terminal.
func PrintStyles() {
	// for compatibility with Windows, not needed for *nix
	stdout := colorable.NewColorableStdout()

	bgColors := []string{
		"",
		":black",
		":red",
		":green",
		":yellow",
		":blue",
		":magenta",
		":cyan",
		":white",
	}

	keys := make([]string, 0, len(Colors))
	for k := range Colors {
		keys = append(keys, k)
	}

	sort.Sort(sort.StringSlice(keys))

	for _, fg := range keys {
		for _, bg := range bgColors {
			fmt.Fprintln(stdout, padColor(fg, []string{"" + bg, "+b" + bg, "+bh" + bg, "+u" + bg}))
			fmt.Fprintln(stdout, padColor(fg, []string{"+s" + bg, "+i" + bg}))
			fmt.Fprintln(stdout, padColor(fg, []string{"+uh" + bg, "+B" + bg, "+Bb" + bg /* backgrounds */, "" + bg + "+h"}))
			fmt.Fprintln(stdout, padColor(fg, []string{"+b" + bg + "+h", "+bh" + bg + "+h", "+u" + bg + "+h", "+uh" + bg + "+h"}))
		}
	}
}

func pad(s string, length int) string {
	for len(s) < length {
		s += " "
	}
	return s
}

func padColor(color string, styles []string) string {
	buffer := ""
	for _, style := range styles {
		buffer += Color(pad(color+style, 20), color+style)
	}
	return buffer
}
21 vendor/github.com/x-cray/logrus-prefixed-formatter/LICENSE (generated, vendored) Normal file
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2017 Denis Parchenko

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
366 vendor/github.com/x-cray/logrus-prefixed-formatter/formatter.go (generated, vendored) Normal file
@ -0,0 +1,366 @@
package prefixed

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"regexp"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/mgutz/ansi"
	"golang.org/x/crypto/ssh/terminal"
)

const defaultTimestampFormat = time.RFC3339

var (
	baseTimestamp time.Time = time.Now()
	defaultColorScheme *ColorScheme = &ColorScheme{
		InfoLevelStyle: "green",
		WarnLevelStyle: "yellow",
		ErrorLevelStyle: "red",
		FatalLevelStyle: "red",
		PanicLevelStyle: "red",
		DebugLevelStyle: "blue",
		PrefixStyle: "cyan",
		TimestampStyle: "black+h",
	}
	noColorsColorScheme *compiledColorScheme = &compiledColorScheme{
		InfoLevelColor: ansi.ColorFunc(""),
		WarnLevelColor: ansi.ColorFunc(""),
		ErrorLevelColor: ansi.ColorFunc(""),
		FatalLevelColor: ansi.ColorFunc(""),
		PanicLevelColor: ansi.ColorFunc(""),
		DebugLevelColor: ansi.ColorFunc(""),
		PrefixColor: ansi.ColorFunc(""),
		TimestampColor: ansi.ColorFunc(""),
	}
	defaultCompiledColorScheme *compiledColorScheme = compileColorScheme(defaultColorScheme)
)

func miniTS() int {
	return int(time.Since(baseTimestamp) / time.Second)
}

type ColorScheme struct {
	InfoLevelStyle string
	WarnLevelStyle string
	ErrorLevelStyle string
	FatalLevelStyle string
	PanicLevelStyle string
	DebugLevelStyle string
	PrefixStyle string
	TimestampStyle string
}

type compiledColorScheme struct {
	InfoLevelColor func(string) string
	WarnLevelColor func(string) string
	ErrorLevelColor func(string) string
	FatalLevelColor func(string) string
	PanicLevelColor func(string) string
	DebugLevelColor func(string) string
	PrefixColor func(string) string
	TimestampColor func(string) string
}

type TextFormatter struct {
	// Set to true to bypass checking for a TTY before outputting colors.
	ForceColors bool

	// Force disabling colors. For a TTY colors are enabled by default.
	DisableColors bool

	// Force formatted layout, even for non-TTY output.
	ForceFormatting bool

	// Disable timestamp logging. Useful when output is redirected to a logging
	// system that already adds timestamps.
	DisableTimestamp bool

	// Disable the conversion of the log levels to uppercase.
	DisableUppercase bool

	// Enable logging the full timestamp when a TTY is attached instead of just
	// the time passed since beginning of execution.
	FullTimestamp bool

	// Timestamp format to use for display when a full timestamp is printed.
	TimestampFormat string

	// The fields are sorted by default for a consistent output. For applications
	// that log extremely frequently and don't use the JSON formatter this may not
	// be desired.
	DisableSorting bool

	// Wrap empty fields in quotes if true.
	QuoteEmptyFields bool

	// Can be set to override the default quoting character "
	// with something else. For example: ', or `.
	QuoteCharacter string

	// Pad msg field with spaces on the right for display.
	// The value for this parameter will be the size of padding.
	// Its default value is zero, which means no padding will be applied for msg.
	SpacePadding int

	// Color scheme to use.
	colorScheme *compiledColorScheme

	// Whether the logger's out is to a terminal.
	isTerminal bool

	sync.Once
}

func getCompiledColor(main string, fallback string) func(string) string {
	var style string
	if main != "" {
		style = main
	} else {
		style = fallback
	}
	return ansi.ColorFunc(style)
}

func compileColorScheme(s *ColorScheme) *compiledColorScheme {
	return &compiledColorScheme{
		InfoLevelColor: getCompiledColor(s.InfoLevelStyle, defaultColorScheme.InfoLevelStyle),
		WarnLevelColor: getCompiledColor(s.WarnLevelStyle, defaultColorScheme.WarnLevelStyle),
		ErrorLevelColor: getCompiledColor(s.ErrorLevelStyle, defaultColorScheme.ErrorLevelStyle),
		FatalLevelColor: getCompiledColor(s.FatalLevelStyle, defaultColorScheme.FatalLevelStyle),
		PanicLevelColor: getCompiledColor(s.PanicLevelStyle, defaultColorScheme.PanicLevelStyle),
		DebugLevelColor: getCompiledColor(s.DebugLevelStyle, defaultColorScheme.DebugLevelStyle),
		PrefixColor: getCompiledColor(s.PrefixStyle, defaultColorScheme.PrefixStyle),
		TimestampColor: getCompiledColor(s.TimestampStyle, defaultColorScheme.TimestampStyle),
	}
}

func (f *TextFormatter) init(entry *logrus.Entry) {
	if len(f.QuoteCharacter) == 0 {
		f.QuoteCharacter = "\""
	}
	if entry.Logger != nil {
		f.isTerminal = f.checkIfTerminal(entry.Logger.Out)
	}
}

func (f *TextFormatter) checkIfTerminal(w io.Writer) bool {
	switch v := w.(type) {
	case *os.File:
		return terminal.IsTerminal(int(v.Fd()))
	default:
		return false
	}
}

func (f *TextFormatter) SetColorScheme(colorScheme *ColorScheme) {
	f.colorScheme = compileColorScheme(colorScheme)
}

func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	var b *bytes.Buffer
	var keys []string = make([]string, 0, len(entry.Data))
	for k := range entry.Data {
		keys = append(keys, k)
	}
	lastKeyIdx := len(keys) - 1

	if !f.DisableSorting {
		sort.Strings(keys)
	}
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	prefixFieldClashes(entry.Data)

	f.Do(func() { f.init(entry) })

	isFormatted := f.ForceFormatting || f.isTerminal

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = defaultTimestampFormat
	}
	if isFormatted {
		isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
		var colorScheme *compiledColorScheme
		if isColored {
			if f.colorScheme == nil {
				colorScheme = defaultCompiledColorScheme
			} else {
				colorScheme = f.colorScheme
			}
		} else {
			colorScheme = noColorsColorScheme
		}
		f.printColored(b, entry, keys, timestampFormat, colorScheme)
	} else {
		if !f.DisableTimestamp {
			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat), true)
		}
		f.appendKeyValue(b, "level", entry.Level.String(), true)
		if entry.Message != "" {
			f.appendKeyValue(b, "msg", entry.Message, lastKeyIdx >= 0)
		}
		for i, key := range keys {
			f.appendKeyValue(b, key, entry.Data[key], lastKeyIdx != i)
		}
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys []string, timestampFormat string, colorScheme *compiledColorScheme) {
	var levelColor func(string) string
	var levelText string
	switch entry.Level {
	case logrus.InfoLevel:
		levelColor = colorScheme.InfoLevelColor
	case logrus.WarnLevel:
		levelColor = colorScheme.WarnLevelColor
	case logrus.ErrorLevel:
		levelColor = colorScheme.ErrorLevelColor
	case logrus.FatalLevel:
		levelColor = colorScheme.FatalLevelColor
	case logrus.PanicLevel:
		levelColor = colorScheme.PanicLevelColor
	default:
		levelColor = colorScheme.DebugLevelColor
	}

	if entry.Level != logrus.WarnLevel {
		levelText = entry.Level.String()
	} else {
		levelText = "warn"
	}

	if !f.DisableUppercase {
		levelText = strings.ToUpper(levelText)
	}

	level := levelColor(fmt.Sprintf("%5s", levelText))
	prefix := ""
	message := entry.Message

	if prefixValue, ok := entry.Data["prefix"]; ok {
		prefix = colorScheme.PrefixColor(" " + prefixValue.(string) + ":")
	} else {
		prefixValue, trimmedMsg := extractPrefix(entry.Message)
		if len(prefixValue) > 0 {
			prefix = colorScheme.PrefixColor(" " + prefixValue + ":")
			message = trimmedMsg
		}
	}

	messageFormat := "%s"
	if f.SpacePadding != 0 {
		messageFormat = fmt.Sprintf("%%-%ds", f.SpacePadding)
	}

	if f.DisableTimestamp {
		fmt.Fprintf(b, "%s%s "+messageFormat, level, prefix, message)
	} else {
		var timestamp string
		if !f.FullTimestamp {
			timestamp = fmt.Sprintf("[%04d]", miniTS())
		} else {
			timestamp = fmt.Sprintf("[%s]", entry.Time.Format(timestampFormat))
		}
		fmt.Fprintf(b, "%s %s%s "+messageFormat, colorScheme.TimestampColor(timestamp), level, prefix, message)
	}
	for _, k := range keys {
		if k != "prefix" {
			v := entry.Data[k]
			fmt.Fprintf(b, " %s=%+v", levelColor(k), v)
		}
	}
}

func (f *TextFormatter) needsQuoting(text string) bool {
	if f.QuoteEmptyFields && len(text) == 0 {
		return true
	}
	for _, ch := range text {
		if !((ch >= 'a' && ch <= 'z') ||
			(ch >= 'A' && ch <= 'Z') ||
			(ch >= '0' && ch <= '9') ||
			ch == '-' || ch == '.') {
			return true
		}
	}
	return false
}

func extractPrefix(msg string) (string, string) {
	prefix := ""
	regex := regexp.MustCompile("^\\[(.*?)\\]")
	if regex.MatchString(msg) {
		match := regex.FindString(msg)
		prefix, msg = match[1:len(match)-1], strings.TrimSpace(msg[len(match):])
	}
	return prefix, msg
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}, appendSpace bool) {
	b.WriteString(key)
	b.WriteByte('=')
	f.appendValue(b, value)

	if appendSpace {
		b.WriteByte(' ')
	}
}

func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
	switch value := value.(type) {
	case string:
		if !f.needsQuoting(value) {
			b.WriteString(value)
		} else {
			fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
		}
	case error:
		errmsg := value.Error()
		if !f.needsQuoting(errmsg) {
			b.WriteString(errmsg)
		} else {
			fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
		}
	default:
		fmt.Fprint(b, value)
	}
}

// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// would just silently drop the user provided level. Instead with this code
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
func prefixFieldClashes(data logrus.Fields) {
	if t, ok := data["time"]; ok {
		data["fields.time"] = t
	}

	if m, ok := data["msg"]; ok {
		data["fields.msg"] = m
	}

	if l, ok := data["level"]; ok {
		data["fields.level"] = l
	}
}
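
A minimal sketch of how an application can plug this vendored formatter into logrus (illustrative only; the field values and the "scale" prefix are assumptions, not code from this repository). Only prefixed.TextFormatter and its FullTimestamp/TimestampFormat fields come from the vendored file above; logrus.SetFormatter and WithField are standard logrus API.

    package main

    import (
        "github.com/sirupsen/logrus"
        prefixed "github.com/x-cray/logrus-prefixed-formatter"
    )

    func main() {
        // Use the prefixed text formatter with full timestamps (values are illustrative).
        logrus.SetFormatter(&prefixed.TextFormatter{
            FullTimestamp:   true,
            TimestampFormat: "2006-01-02 15:04:05",
        })
        // The "prefix" field is rendered by printColored as a colored " scale:" prefix.
        logrus.WithField("prefix", "scale").Info("agent pool resized")
    }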