Mirror of https://github.com/Azure/aks-engine.git
chore: check all error returns in main code (#3184)
* chore: check all error returns in main code
* refactor: return named error from defer in DeepCopy()
* refactor: log sp.Refresh() errors instead of returning them
* ci: enforce errcheck linter
Parent: c3cb4b2074
Commit: fce292453c
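For context, the DeepCopy() refactor in this commit leans on Go's named return values: a `return` inside a deferred closure cannot set the enclosing function's result, so the deferred recover now assigns to a named `err` instead. A minimal, self-contained sketch of that pattern (the `copyObject` helper is illustrative, not code from this repo):

```go
package main

import "fmt"

// copyObject sketches the named-return/deferred-recover pattern: the deferred
// closure assigns to the named result err, turning a panic into a returned error.
func copyObject(dst, src interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("fail to copy object %v", r)
		}
	}()
	if dst == nil || src == nil {
		panic("nil argument") // stands in for a panic deep inside a reflection-based copy
	}
	return err
}

func main() {
	fmt.Println(copyObject(nil, nil)) // fail to copy object nil argument
}
```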
@@ -10,6 +10,7 @@ run:
 linters:
   disable-all: true
   enable:
+  - errcheck
   - goimports
   - gosimple
   - golint
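With errcheck enabled, golangci-lint flags every call whose error result is dropped; the rest of this diff either handles the error or discards it explicitly with blank identifiers. A small standalone illustration of the idiom (example code, not from the repo):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

func render(payload string) string {
	buf := new(bytes.Buffer)
	// Writing `buf.ReadFrom(strings.NewReader(payload))` alone would be reported by
	// errcheck, since the (int64, error) results are ignored; assigning them to blanks
	// records that ignoring the error is intentional.
	_, _ = buf.ReadFrom(strings.NewReader(payload))
	return buf.String()
}

func main() {
	fmt.Println(render("hello"))
}
```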
@@ -88,24 +88,24 @@ func (apc *addPoolCmd) validate(cmd *cobra.Command) error {
     }
 
     if apc.resourceGroupName == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--resource-group must be specified")
     }
 
     if apc.location == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--location must be specified")
     }
 
     apc.location = helpers.NormalizeAzureRegion(apc.location)
 
     if apc.apiModelPath == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--api-model must be specified")
     }
 
     if apc.nodePoolPath == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--nodepool must be specified")
     }
     return nil
@@ -144,7 +144,9 @@ func (apc *addPoolCmd) load() error {
     }
 
     if apc.containerService.Properties.IsCustomCloudProfile() {
-        writeCustomCloudProfile(apc.containerService)
+        if err = writeCustomCloudProfile(apc.containerService); err != nil {
+            return errors.Wrap(err, "error writing custom cloud profile")
+        }
         if err = apc.containerService.Properties.SetCustomCloudSpec(api.AzureCustomCloudSpecParams{IsUpgrade: false, IsScale: true}); err != nil {
             return errors.Wrap(err, "error parsing the api model")
         }
@@ -121,7 +121,7 @@ func (dc *deployCmd) validateArgs(cmd *cobra.Command, args []string) error {
         if len(args) == 1 {
             dc.apimodelPath = args[0]
         } else if len(args) > 1 {
-            cmd.Usage()
+            _ = cmd.Usage()
             return errors.New("too many arguments were provided to 'deploy'")
         }
     }
@@ -225,7 +225,9 @@ func (dc *deployCmd) loadAPIModel() error {
     if err != nil {
         return errors.Wrap(err, "error parsing the api model")
     }
-    writeCustomCloudProfile(dc.containerService)
+    if err = writeCustomCloudProfile(dc.containerService); err != nil {
+        return errors.Wrap(err, "error writing custom cloud profile")
+    }
 
     if dc.containerService.Properties.CustomCloudProfile.IdentitySystem == "" || dc.containerService.Properties.CustomCloudProfile.IdentitySystem != dc.authProvider.getAuthArgs().IdentitySystem {
         if dc.authProvider != nil {
@@ -102,10 +102,10 @@ func (gc *generateCmd) validate(cmd *cobra.Command, args []string) error {
         if len(args) == 1 {
             gc.apimodelPath = args[0]
         } else if len(args) > 1 {
-            cmd.Usage()
+            _ = cmd.Usage()
             return errors.New("too many arguments were provided to 'generate'")
         } else {
-            cmd.Usage()
+            _ = cmd.Usage()
             return errors.New("--api-model was not supplied, nor was one specified as a positional argument")
         }
     }
@@ -61,7 +61,7 @@ func newGetLogsCmd() *cobra.Command {
         Long: getLogsLongDescription,
         RunE: func(cmd *cobra.Command, args []string) error {
             if err := glc.validateArgs(); err != nil {
-                cmd.Usage()
+                _ = cmd.Usage()
                 return errors.Wrap(err, "validating get-logs args")
             }
             if err := glc.loadAPIModel(); err != nil {
@@ -77,11 +77,11 @@ func newGetLogsCmd() *cobra.Command {
     command.Flags().StringVar(&glc.linuxScriptPath, "linux-script", "", "path to the log collection script to execute on the cluster's Linux nodes (required)")
     command.Flags().StringVarP(&glc.outputDirectory, "output-directory", "o", "", "collected logs destination directory, derived from --api-model if missing")
     command.Flags().BoolVarP(&glc.controlPlaneOnly, "control-plane-only", "", false, "get logs from control plane VMs only")
-    command.MarkFlagRequired("location")
-    command.MarkFlagRequired("api-model")
-    command.MarkFlagRequired("ssh-host")
-    command.MarkFlagRequired("linux-ssh-private-key")
-    command.MarkFlagRequired("linux-script") // optional once in VHD
+    _ = command.MarkFlagRequired("location")
+    _ = command.MarkFlagRequired("api-model")
+    _ = command.MarkFlagRequired("ssh-host")
+    _ = command.MarkFlagRequired("linux-ssh-private-key")
+    _ = command.MarkFlagRequired("linux-script") // optional once in VHD
     return command
 }
 
@@ -81,7 +81,7 @@ func newRotateCertsCmd() *cobra.Command {
     f.StringVar(&rcc.masterFQDN, "apiserver", "", "apiserver endpoint (required)")
     f.StringVarP(&rcc.outputDirectory, "output-directory", "o", "", "output directory where generated TLS artifacts will be saved (derived from DNS prefix if absent)")
 
-    f.MarkDeprecated("master-FQDN", "--apiserver is preferred")
+    _ = f.MarkDeprecated("master-FQDN", "--apiserver is preferred")
 
     addAuthFlags(rcc.getAuthArgs(), f)
 
cmd/scale.go
@@ -85,8 +85,8 @@ func newScaleCmd() *cobra.Command {
     f.StringVar(&sc.masterFQDN, "master-FQDN", "", "FQDN for the master load balancer that maps to the apiserver endpoint")
     f.StringVar(&sc.masterFQDN, "apiserver", "", "apiserver endpoint (required to cordon and drain nodes)")
 
-    f.MarkDeprecated("deployment-dir", "--deployment-dir is no longer required for scale or upgrade. Please use --api-model.")
-    f.MarkDeprecated("master-FQDN", "--apiserver is preferred")
+    _ = f.MarkDeprecated("deployment-dir", "--deployment-dir is no longer required for scale or upgrade. Please use --api-model.")
+    _ = f.MarkDeprecated("master-FQDN", "--apiserver is preferred")
 
     addAuthFlags(&sc.authArgs, f)
 
@@ -103,29 +103,29 @@ func (sc *scaleCmd) validate(cmd *cobra.Command) error {
     }
 
     if sc.resourceGroupName == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--resource-group must be specified")
     }
 
     if sc.location == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--location must be specified")
     }
 
     sc.location = helpers.NormalizeAzureRegion(sc.location)
 
     if sc.newDesiredAgentCount == 0 {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--new-node-count must be specified")
     }
 
     if sc.apiModelPath == "" && sc.deploymentDirectory == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--api-model must be specified")
     }
 
     if sc.apiModelPath != "" && sc.deploymentDirectory != "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("ambiguous, please specify only one of --api-model and --deployment-dir")
     }
 
@@ -160,7 +160,9 @@ func (sc *scaleCmd) load() error {
     }
 
     if sc.containerService.Properties.IsCustomCloudProfile() {
-        writeCustomCloudProfile(sc.containerService)
+        if err = writeCustomCloudProfile(sc.containerService); err != nil {
+            return errors.Wrap(err, "error writing custom cloud profile")
+        }
 
         if err = sc.containerService.Properties.SetCustomCloudSpec(api.AzureCustomCloudSpecParams{IsUpgrade: false, IsScale: true}); err != nil {
             return errors.Wrap(err, "error parsing the api model")
@@ -296,7 +298,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
     // VMAS Scale down Scenario
     if currentNodeCount > sc.newDesiredAgentCount {
         if sc.apiserverURL == "" {
-            cmd.Usage()
+            _ = cmd.Usage()
             return errors.New("--apiserver is required to scale down a kubernetes cluster's agent pool")
         }
 
@@ -87,7 +87,7 @@ func newUpgradeCmd() *cobra.Command {
     f.BoolVarP(&uc.controlPlaneOnly, "control-plane-only", "", false, "upgrade control plane VMs only, do not upgrade node pools")
     addAuthFlags(uc.getAuthArgs(), f)
 
-    f.MarkDeprecated("deployment-dir", "deployment-dir is no longer required for scale or upgrade. Please use --api-model.")
+    _ = f.MarkDeprecated("deployment-dir", "deployment-dir is no longer required for scale or upgrade. Please use --api-model.")
 
     return upgradeCmd
 }
@@ -101,12 +101,12 @@ func (uc *upgradeCmd) validate(cmd *cobra.Command) error {
     }
 
     if uc.resourceGroupName == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--resource-group must be specified")
     }
 
     if uc.location == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--location must be specified")
     }
     uc.location = helpers.NormalizeAzureRegion(uc.location)
@@ -122,17 +122,17 @@ func (uc *upgradeCmd) validate(cmd *cobra.Command) error {
     }
 
     if uc.upgradeVersion == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--upgrade-version must be specified")
     }
 
     if uc.apiModelPath == "" && uc.deploymentDirectory == "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("--api-model must be specified")
     }
 
     if uc.apiModelPath != "" && uc.deploymentDirectory != "" {
-        cmd.Usage()
+        _ = cmd.Usage()
         return errors.New("ambiguous, please specify only one of --api-model and --deployment-dir")
     }
 
@@ -175,7 +175,9 @@ func (uc *upgradeCmd) loadCluster() error {
     }
 
     if uc.containerService.Properties.IsCustomCloudProfile() {
-        writeCustomCloudProfile(uc.containerService)
+        if err = writeCustomCloudProfile(uc.containerService); err != nil {
+            return errors.Wrap(err, "error writing custom cloud profile")
+        }
         if err = uc.containerService.Properties.SetCustomCloudSpec(api.AzureCustomCloudSpecParams{
             IsUpgrade: true,
             IsScale: false,
@@ -297,7 +297,9 @@ func isAgentPoolOnlyClusterJSON(contents []byte) bool {
 
 func propertiesAsMap(contents []byte) (map[string]interface{}, bool) {
     var raw interface{}
-    json.Unmarshal(contents, &raw)
+    if err := json.Unmarshal(contents, &raw); err != nil {
+        return nil, false
+    }
     jsonMap := raw.(map[string]interface{})
     properties, propertiesPresent := jsonMap["properties"]
     if !propertiesPresent {
@@ -1120,7 +1120,7 @@ func mapToString(valueMap map[string]string) string {
 
 func generateEtcdEncryptionKey() string {
     b := make([]byte, 32)
-    rand.Read(b)
+    _, _ = rand.Read(b)
     return base64.StdEncoding.EncodeToString(b)
 }
 
@@ -1148,11 +1148,11 @@ func (p *Properties) GetClusterID() string {
     // from the master dns name
     h := fnv.New64a()
     if p.MasterProfile != nil {
-        h.Write([]byte(p.MasterProfile.DNSPrefix))
+        _, _ = h.Write([]byte(p.MasterProfile.DNSPrefix))
     } else if p.HostedMasterProfile != nil {
-        h.Write([]byte(p.HostedMasterProfile.DNSPrefix))
+        _, _ = h.Write([]byte(p.HostedMasterProfile.DNSPrefix))
     } else if len(p.AgentPoolProfiles) > 0 {
-        h.Write([]byte(p.AgentPoolProfiles[0].Name))
+        _, _ = h.Write([]byte(p.AgentPoolProfiles[0].Name))
     }
     r := rand.New(rand.NewSource(int64(h.Sum64())))
     mutex.Lock()
@@ -163,7 +163,9 @@ func NewAzureClientWithDeviceAuth(env azure.Environment, subscriptionID string)
     if err != nil {
         return nil, err
     }
-    armSpt.Refresh()
+    if err = armSpt.Refresh(); err != nil {
+        log.Error(err)
+    }
 
     adRawToken := armSpt.Token()
     adRawToken.Resource = env.GraphEndpoint
@@ -171,7 +173,9 @@ func NewAzureClientWithDeviceAuth(env azure.Environment, subscriptionID string)
     if err != nil {
         return nil, err
     }
-    graphSpt.Refresh()
+    if err = graphSpt.Refresh(); err != nil {
+        log.Error(err)
+    }
 
     return getClient(env, subscriptionID, tenantID, autorest.NewBearerAuthorizer(armSpt), autorest.NewBearerAuthorizer(graphSpt)), nil
 }
@@ -191,7 +195,9 @@ func NewAzureClientWithClientSecret(env azure.Environment, subscriptionID, clien
     if err != nil {
         return nil, err
     }
-    graphSpt.Refresh()
+    if err = graphSpt.Refresh(); err != nil {
+        log.Error(err)
+    }
 
     return getClient(env, subscriptionID, tenantID, autorest.NewBearerAuthorizer(armSpt), autorest.NewBearerAuthorizer(graphSpt)), nil
 }
@@ -211,7 +217,9 @@ func NewAzureClientWithClientSecretExternalTenant(env azure.Environment, subscri
     if err != nil {
         return nil, err
     }
-    graphSpt.Refresh()
+    if err = graphSpt.Refresh(); err != nil {
+        log.Error(err)
+    }
 
     return getClient(env, subscriptionID, tenantID, autorest.NewBearerAuthorizer(armSpt), autorest.NewBearerAuthorizer(graphSpt)), nil
 }
@@ -278,7 +286,9 @@ func newAzureClientWithCertificate(env azure.Environment, oauthConfig *adal.OAut
     if err != nil {
         return nil, err
     }
-    graphSpt.Refresh()
+    if err = graphSpt.Refresh(); err != nil {
+        log.Error(err)
+    }
 
     return getClient(env, subscriptionID, tenantID, autorest.NewBearerAuthorizer(armSpt), autorest.NewBearerAuthorizer(graphSpt)), nil
 }
@@ -89,7 +89,9 @@ func NewAzureClientWithClientSecret(env azure.Environment, subscriptionID, clien
     if err != nil {
         return nil, err
     }
-    graphSpt.Refresh()
+    if err = graphSpt.Refresh(); err != nil {
+        log.Error(err)
+    }
 
     return getClient(env, subscriptionID, tenantID, autorest.NewBearerAuthorizer(armSpt), autorest.NewBearerAuthorizer(graphSpt)), nil
 }
@@ -109,7 +111,9 @@ func NewAzureClientWithClientSecretExternalTenant(env azure.Environment, subscri
     if err != nil {
         return nil, err
     }
-    graphSpt.Refresh()
+    if err = graphSpt.Refresh(); err != nil {
+        log.Error(err)
+    }
 
     return getClient(env, subscriptionID, tenantID, autorest.NewBearerAuthorizer(armSpt), autorest.NewBearerAuthorizer(graphSpt)), nil
 }
@@ -181,7 +185,9 @@ func newAzureClientWithCertificate(env azure.Environment, oauthConfig *adal.OAut
     if err != nil {
         return nil, err
     }
-    graphSpt.Refresh()
+    if err = graphSpt.Refresh(); err != nil {
+        log.Error(err)
+    }
 
     return getClient(env, subscriptionID, tenantID, autorest.NewBearerAuthorizer(armSpt), autorest.NewBearerAuthorizer(graphSpt)), nil
 }
@@ -9,12 +9,11 @@ import (
 
 // DeepCopy dst and src should be the same type in different API version
 // dst should be pointer type
-func DeepCopy(dst, src interface{}) error {
-    defer func() error {
+func DeepCopy(dst, src interface{}) (err error) {
+    defer func() {
         if r := recover(); r != nil {
-            return fmt.Errorf("fail to copy object %v", r)
+            err = fmt.Errorf("fail to copy object %v", r)
         }
-        return nil
     }()
     dstValue := reflect.ValueOf(dst)
     srcValue := reflect.ValueOf(src)
@@ -26,7 +25,7 @@ func DeepCopy(dst, src interface{}) error {
         return fmt.Errorf("the dst type (%q) and src type (%q) are not the same", dstValue.Type().String(), srcValue.Type().String())
     }
     deepCopyInternal(dstValue, srcValue, 0)
-    return nil
+    return err
 }
 
 func deepCopyInternal(dstValue, srcValue reflect.Value, depth int) {
@@ -79,7 +79,7 @@ func DeployTemplateSync(az armhelpers.AKSEngineClient, logger *logrus.Entry, res
     // try to extract error from ARM Response
     if deploymentExtended.Response.Response != nil && deploymentExtended.Body != nil {
         buf := new(bytes.Buffer)
-        buf.ReadFrom(deploymentExtended.Body)
+        _, _ = buf.ReadFrom(deploymentExtended.Body)
         logger.Infof("StatusCode: %d, Error: %s", deploymentExtended.Response.StatusCode, buf.String())
         deploymentErr.Response = buf.Bytes()
         deploymentErr.StatusCode = deploymentExtended.Response.StatusCode
@@ -9,12 +9,11 @@ import (
 
 // DeepCopy dst and src should be the same type in different API version
 // dst should be pointer type
-func DeepCopy(dst, src interface{}) error {
-    defer func() error {
+func DeepCopy(dst, src interface{}) (err error) {
+    defer func() {
         if r := recover(); r != nil {
-            return fmt.Errorf("fail to copy object %v", r)
+            err = fmt.Errorf("fail to copy object %v", r)
         }
-        return nil
     }()
     dstValue := reflect.ValueOf(dst)
     srcValue := reflect.ValueOf(src)
@@ -26,7 +25,7 @@ func DeepCopy(dst, src interface{}) error {
         return fmt.Errorf("the dst type (%q) and src type (%q) are not the same", dstValue.Type().String(), srcValue.Type().String())
     }
     deepCopyInternal(dstValue, srcValue, 0)
-    return nil
+    return err
 }
 
 func deepCopyInternal(dstValue, srcValue reflect.Value, depth int) {
@@ -78,7 +78,7 @@ func DeployTemplateSync(az AKSEngineClient, logger *logrus.Entry, resourceGroupN
     // try to extract error from ARM Response
     if deploymentExtended.Response.Response != nil && deploymentExtended.Body != nil {
         buf := new(bytes.Buffer)
-        buf.ReadFrom(deploymentExtended.Body)
+        _, _ = buf.ReadFrom(deploymentExtended.Body)
         logger.Infof("StatusCode: %d, Error: %s", deploymentExtended.Response.StatusCode, buf.String())
         deploymentErr.Response = buf.Bytes()
         deploymentErr.StatusCode = deploymentExtended.Response.StatusCode
@@ -673,6 +673,6 @@ func getWindowsProfileVars(wp *api.WindowsProfile) map[string]interface{} {
 func getSizeMap() map[string]interface{} {
     var sizeMap map[string]interface{}
     sizeMapStr := fmt.Sprintf("{%s}", helpers.GetSizeMap())
-    json.Unmarshal([]byte(sizeMapStr), &sizeMap)
+    _ = json.Unmarshal([]byte(sizeMapStr), &sizeMap)
     return sizeMap
 }
@@ -69,7 +69,7 @@ func buildConfigStringCustomFiles(source io.Reader, destinationFile string) stri
 
 func getBase64CustomFile(source io.Reader) string {
     buf := new(bytes.Buffer)
-    buf.ReadFrom(source)
+    _, _ = buf.ReadFrom(source)
     cfStr := buf.String()
     cfStr = strings.Replace(cfStr, "\r\n", "\n", -1)
     return getBase64EncodedGzippedCustomScriptFromStr(cfStr)
@@ -630,7 +630,7 @@ func getBase64EncodedGzippedCustomScript(csFilename string, cs *api.ContainerSer
         panic(fmt.Sprintf("BUG: %s", err.Error()))
     }
     var buffer bytes.Buffer
-    templ.Execute(&buffer, cs)
+    _ = templ.Execute(&buffer, cs)
     csStr := buffer.String()
     csStr = strings.Replace(csStr, "\r\n", "\n", -1)
     return getBase64EncodedGzippedCustomScriptFromStr(csStr)
@@ -645,7 +645,7 @@ func getStringFromBase64(str string) (string, error) {
 func getBase64EncodedGzippedCustomScriptFromStr(str string) string {
     var gzipB bytes.Buffer
     w := gzip.NewWriter(&gzipB)
-    w.Write([]byte(str))
+    _, _ = w.Write([]byte(str))
     w.Close()
     return base64.StdEncoding.EncodeToString(gzipB.Bytes())
 }
@@ -917,7 +917,7 @@ func getComponentsString(cs *api.ContainerService, sourcePath string) string {
             return ""
         }
         var buffer bytes.Buffer
-        templ.Execute(&buffer, component)
+        _ = templ.Execute(&buffer, component)
         input = buffer.String()
     }
     if componentName == common.ClusterInitComponentName {
@@ -974,7 +974,7 @@ func getAddonsString(cs *api.ContainerService, sourcePath string) string {
             return ""
         }
         var buffer bytes.Buffer
-        templ.Execute(&buffer, addon)
+        _ = templ.Execute(&buffer, addon)
         input = buffer.String()
     }
     result += getComponentString(input, "/etc/kubernetes/addons", setting.destinationFile)
@@ -90,17 +90,26 @@ func MergeValuesWithAPIModel(apiModelPath string, m map[string]APIModelValue) (s
             arrayPath := fmt.Sprint("properties.", flagValue.arrayName)
             arrayValue := jsonObj.Path(arrayPath)
             if flagValue.arrayProperty != "" {
-                arrayValue.Index(flagValue.arrayIndex).SetP(flagValue.value, flagValue.arrayProperty)
+                c := arrayValue.Index(flagValue.arrayIndex)
+                if _, err = c.SetP(flagValue.value, flagValue.arrayProperty); err != nil {
+                    return "", err
+                }
             } else {
                 count, _ := arrayValue.ArrayCount()
                 for i := count; i <= flagValue.arrayIndex; i++ {
-                    jsonObj.ArrayAppendP(nil, arrayPath)
+                    if err = jsonObj.ArrayAppendP(nil, arrayPath); err != nil {
+                        return "", err
+                    }
                 }
                 arrayValue = jsonObj.Path(arrayPath)
-                arrayValue.SetIndex(flagValue.value, flagValue.arrayIndex)
+                if _, err = arrayValue.SetIndex(flagValue.value, flagValue.arrayIndex); err != nil {
+                    return "", err
+                }
             }
         } else {
-            jsonObj.SetP(flagValue.value, fmt.Sprint("properties.", key))
+            if _, err = jsonObj.SetP(flagValue.value, fmt.Sprint("properties.", key)); err != nil {
+                return "", err
+            }
         }
     }
 
@@ -322,7 +322,7 @@ func certificateToPem(derBytes []byte) []byte {
         Bytes: derBytes,
     }
     pemBuffer := bytes.Buffer{}
-    pem.Encode(&pemBuffer, pemBlock)
+    _ = pem.Encode(&pemBuffer, pemBlock)
 
     return pemBuffer.Bytes()
 }
@@ -333,7 +333,7 @@ func privateKeyToPem(privateKey *rsa.PrivateKey) []byte {
         Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
     }
     pemBuffer := bytes.Buffer{}
-    pem.Encode(&pemBuffer, pemBlock)
+    _ = pem.Encode(&pemBuffer, pemBlock)
 
     return pemBuffer.Bytes()
 }
@@ -437,7 +437,9 @@ func (uc *UpgradeCluster) addVMToUpgradeSets(vm compute.VirtualMachine, currentV
         uc.Logger.Infof("Master VM name: %s, orchestrator: %s (MasterVMs)", *vm.Name, currentVersion)
         *uc.MasterVMs = append(*uc.MasterVMs, vm)
     } else {
-        uc.addVMToAgentPool(vm, true)
+        if err := uc.addVMToAgentPool(vm, true); err != nil {
+            uc.Logger.Errorf("Failed to add VM %s to agent pool: %s", *vm.Name, err)
+        }
     }
 }
 
@@ -446,6 +448,8 @@ func (uc *UpgradeCluster) addVMToFinishedSets(vm compute.VirtualMachine, current
         uc.Logger.Infof("Master VM name: %s, orchestrator: %s (UpgradedMasterVMs)", *vm.Name, currentVersion)
         *uc.UpgradedMasterVMs = append(*uc.UpgradedMasterVMs, vm)
     } else {
-        uc.addVMToAgentPool(vm, false)
+        if err := uc.addVMToAgentPool(vm, false); err != nil {
+            uc.Logger.Errorf("Failed to add VM %s to agent pool: %s", *vm.Name, err)
+        }
     }
 }