style: refactorings suggested by the gocritic linter (#748)

Matt Boersma 2019-03-20 09:44:24 -06:00 committed by GitHub
Parent 59a7f5644e
Commit 31f17bbe27
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
21 changed files with 67 additions and 91 deletions

View file

@@ -35,9 +35,7 @@ func newGetVersionsCmd() *cobra.Command {
         Use:   getVersionsName,
         Short: getVersionsShortDescription,
         Long:  getVersionsLongDescription,
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return gvc.run(cmd, args)
-        },
+        RunE:  gvc.run,
     }
 
     f := command.Flags()

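The RunE change above is the pattern gocritic's unlambda check flags: a function literal that only forwards its arguments can be replaced by the method value itself. A minimal, self-contained sketch of the same rewrite (names are illustrative, not from this repo):

package main

import "fmt"

type greeter struct{ prefix string }

func (g *greeter) run(args []string) error {
    fmt.Println(g.prefix, args)
    return nil
}

func main() {
    g := &greeter{prefix: "hello"}

    // Before: a lambda that merely forwards its arguments.
    var runE func([]string) error = func(args []string) error {
        return g.run(args)
    }

    // After: the method value g.run is equivalent; it captures g
    // the same way the closure did, so behavior is unchanged.
    runE = g.run

    _ = runE([]string{"world"})
}

One subtlety: a method value captures its receiver when it is evaluated, which matches the closure here because gvc is bound before the command ever runs.
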
View file

@@ -17,12 +17,10 @@ func newOrchestratorsCmd() *cobra.Command {
     gvc := getVersionsCmd{}
     command := &cobra.Command{
-        Use:   orchestratorsName,
-        Short: orchestratorsShortDescription,
-        Long:  orchestratorsLongDescription,
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return gvc.run(cmd, args)
-        },
+        Use:    orchestratorsName,
+        Short:  orchestratorsShortDescription,
+        Long:   orchestratorsLongDescription,
+        RunE:   gvc.run,
         Hidden: true,
     }

View file

@@ -67,9 +67,7 @@ func newScaleCmd() *cobra.Command {
         Use:   scaleName,
         Short: scaleShortDescription,
         Long:  scaleLongDescription,
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return sc.run(cmd, args)
-        },
+        RunE:  sc.run,
     }
 
     f := scaleCmd.Flags()
@@ -276,8 +274,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
         vmsToDelete = append(vmsToDelete, indexToVM[index])
     }
 
-    switch orchestratorInfo.OrchestratorType {
-    case api.Kubernetes:
+    if orchestratorInfo.OrchestratorType == api.Kubernetes {
         kubeConfig, err := engine.GenerateKubeConfig(sc.containerService.Properties, sc.location)
         if err != nil {
             return errors.Wrap(err, "failed to generate kube config")
@@ -382,8 +379,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
     if winPoolIndex != -1 {
         templateJSON["variables"].(map[string]interface{})[sc.agentPool.Name+"Index"] = winPoolIndex
     }
-    switch orchestratorInfo.OrchestratorType {
-    case api.Kubernetes:
+    if orchestratorInfo.OrchestratorType == api.Kubernetes {
         err = transformer.NormalizeForK8sVMASScalingUp(sc.logger, templateJSON)
         if err != nil {
             return errors.Wrapf(err, "error transforming the template for scaling template %s", sc.apiModelPath)

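Both scale.go hunks apply gocritic's singleCaseSwitch check: a switch with exactly one case is an if statement in disguise. A tiny runnable sketch of the rewrite (values are hypothetical):

package main

import "fmt"

func main() {
    orchestrator := "Kubernetes"

    // Before: a one-case switch.
    switch orchestrator {
    case "Kubernetes":
        fmt.Println("generating kubeconfig")
    }

    // After: a plain if states the single condition directly.
    if orchestrator == "Kubernetes" {
        fmt.Println("generating kubeconfig")
    }
}
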
View file

@@ -61,9 +61,7 @@ func newUpgradeCmd() *cobra.Command {
         Use:   upgradeName,
         Short: upgradeShortDescription,
         Long:  upgradeLongDescription,
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return uc.run(cmd, args)
-        },
+        RunE:  uc.run,
     }
 
     f := upgradeCmd.Flags()

View file

@@ -144,9 +144,9 @@ func validateVNET(a *Properties) error {
             return err
         }
-        subIDMap[agentSubID] = subIDMap[agentSubID] + 1
-        resourceGroupMap[agentRG] = resourceGroupMap[agentRG] + 1
-        agentVNETMap[agentVNET] = agentVNETMap[agentVNET] + 1
+        subIDMap[agentSubID]++
+        resourceGroupMap[agentRG]++
+        agentVNETMap[agentVNET]++
     }
 
     // TODO: Add more validation to ensure all agent pools belong to the same VNET, subscription, and resource group

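The counter updates above are what gocritic's assignOp check targets: x = x + 1 becomes x++, and x = x + y becomes x += y. A short sketch with hypothetical data:

package main

import "fmt"

func main() {
    subIDMap := map[string]int{}
    for _, sub := range []string{"sub-a", "sub-a", "sub-b"} {
        // Before: subIDMap[sub] = subIDMap[sub] + 1
        // After: the key expression is written only once and the intent is clearer.
        subIDMap[sub]++
    }

    total := 0
    for _, n := range []int{1, 2, 3} {
        total += n // rather than total = total + n
    }
    fmt.Println(subIDMap, total) // map[sub-a:2 sub-b:1] 6
}
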
View file

@@ -262,26 +262,20 @@ func validateAgentPoolVNET(a []*AgentPoolProfile) error {
             if subscription == "" {
                 subscription = subnetSubscription
-            } else {
-                if subscription != subnetSubscription {
-                    return ErrorSubscriptionNotMatch
-                }
+            } else if subscription != subnetSubscription {
+                return ErrorSubscriptionNotMatch
             }
             if resourceGroup == "" {
                 resourceGroup = subnetResourceGroup
-            } else {
-                if resourceGroup != subnetResourceGroup {
-                    return ErrorResourceGroupNotMatch
-                }
+            } else if resourceGroup != subnetResourceGroup {
+                return ErrorResourceGroupNotMatch
             }
             if vnet == "" {
                 vnet = subnetVnet
-            } else {
-                if vnet != subnetVnet {
-                    return ErrorVnetNotMatch
-                }
+            } else if vnet != subnetVnet {
+                return ErrorVnetNotMatch
             }
         }
     }

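This hunk is gocritic's elseif check: an else block whose only statement is an if collapses into else if, removing one nesting level with no behavior change. A self-contained sketch of the same validation shape (check and errMismatch are hypothetical names, not from this repo):

package main

import (
    "errors"
    "fmt"
)

var errMismatch = errors.New("values do not match")

// check records the first value it sees and rejects any later mismatch.
func check(seen, current string) (string, error) {
    if seen == "" {
        seen = current
    } else if current != seen { // before: else { if current != seen { ... } }
        return "", errMismatch
    }
    return seen, nil
}

func main() {
    seen := ""
    for _, s := range []string{"vnet-1", "vnet-1", "vnet-2"} {
        var err error
        if seen, err = check(seen, s); err != nil {
            fmt.Println(err) // fires on "vnet-2"
            return
        }
    }
}
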
View file

@@ -134,9 +134,9 @@ func validateVNET(a *Properties) error {
             return err
         }
-        subIDMap[agentSubID] = subIDMap[agentSubID] + 1
-        resourceGroupMap[agentRG] = resourceGroupMap[agentRG] + 1
-        agentVNETMap[agentVNET] = agentVNETMap[agentVNET] + 1
+        subIDMap[agentSubID]++
+        resourceGroupMap[agentRG]++
+        agentVNETMap[agentVNET]++
     }
 
     // TODO: Add more validation to ensure all agent pools belong to the same VNET, subscription, and resource group

View file

@@ -265,8 +265,7 @@ func dcosInfo(csOrch *OrchestratorProfile, hasWindows bool) ([]*OrchestratorVers
 func dcosUpgrades(csOrch *OrchestratorProfile) ([]*OrchestratorProfile, error) {
     ret := []*OrchestratorProfile{}
 
-    switch csOrch.OrchestratorVersion {
-    case common.DCOSVersion1Dot11Dot0:
+    if csOrch.OrchestratorVersion == common.DCOSVersion1Dot11Dot0 {
         ret = append(ret, &OrchestratorProfile{
             OrchestratorType:    DCOS,
             OrchestratorVersion: common.DCOSVersion1Dot11Dot2,

View file

@@ -256,10 +256,8 @@ func TestStrictJSONValidationIsAppliedToVersionsAbove20170701(t *testing.T) {
         _, e := a.LoadContainerService([]byte(jsonWithTypo), version, true, false, nil)
         if e == nil {
             t.Error("Expected mistyped 'ventSubnetID' key to be detected but it wasn't")
-        } else {
-            if !strings.Contains(e.Error(), "ventSubnetID") {
-                t.Errorf("Expected error on 'ventSubnetID' but error was %v", e)
-            }
+        } else if !strings.Contains(e.Error(), "ventSubnetID") {
+            t.Errorf("Expected error on 'ventSubnetID' but error was %v", e)
         }
     }
 }

View file

@@ -765,7 +765,7 @@ func (p *Properties) TotalNodes() int {
         totalNodes = p.MasterProfile.Count
     }
     for _, pool := range p.AgentPoolProfiles {
-        totalNodes = totalNodes + pool.Count
+        totalNodes += pool.Count
     }
     return totalNodes
 }

View file

@@ -101,10 +101,8 @@ func (a *AgentPoolProfile) Validate(orchestratorType string) error {
         } else {
             a.Ports = []int{80, 443, 8080}
         }
-    } else {
-        if e := validate.Var(a.Ports, "len=0"); e != nil {
-            return errors.Errorf("AgentPoolProfile.Ports must be empty when AgentPoolProfile.DNSPrefix is empty for Orchestrator: %s", orchestratorType)
-        }
+    } else if e := validate.Var(a.Ports, "len=0"); e != nil {
+        return errors.Errorf("AgentPoolProfile.Ports must be empty when AgentPoolProfile.DNSPrefix is empty for Orchestrator: %s", orchestratorType)
     }
     return nil
 }

View file

@@ -780,6 +780,7 @@ func (a *AgentPoolProfile) validateKubernetesDistro() error {
             return errors.Errorf("The %s VM SKU must use the %s or %s Distro as they require the docker-engine container runtime with Ubuntu 16.04-LTS", a.VMSize, AKSDockerEngine, Ubuntu)
         }
+    }
     return nil
 }
@@ -877,10 +878,8 @@ func (a *AgentPoolProfile) validateOrchestratorSpecificProperties(orchestratorTy
         } else {
             a.Ports = []int{80, 443, 8080}
         }
-    } else {
-        if e := validate.Var(a.Ports, "len=0"); e != nil {
-            return errors.Errorf("AgentPoolProfile.Ports must be empty when AgentPoolProfile.DNSPrefix is empty for Orchestrator: %s", orchestratorType)
-        }
+    } else if e := validate.Var(a.Ports, "len=0"); e != nil {
+        return errors.Errorf("AgentPoolProfile.Ports must be empty when AgentPoolProfile.DNSPrefix is empty for Orchestrator: %s", orchestratorType)
     }
 
     if len(a.DiskSizesGB) > 0 {
@@ -1270,9 +1269,9 @@ func validatePoolOSType(os OSType) error {
     return nil
 }
 
-func validatePoolAcceleratedNetworking(VMSize string) error {
-    if !helpers.AcceleratedNetworkingSupported(VMSize) {
-        return fmt.Errorf("AgentPoolProfile.vmsize %s does not support AgentPoolProfile.acceleratedNetworking", VMSize)
+func validatePoolAcceleratedNetworking(vmSize string) error {
+    if !helpers.AcceleratedNetworkingSupported(vmSize) {
+        return fmt.Errorf("AgentPoolProfile.vmsize %s does not support AgentPoolProfile.acceleratedNetworking", vmSize)
     }
     return nil
 }

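The renames in this file (ID to id, URI to uri, VMSize to vmSize) follow gocritic's captLocal check: parameters and locals should not start with an uppercase letter, which in Go conventionally signals an exported identifier. Lowercasing the parameter also lets the parsed URL take its own name (parsed) instead of reusing uri. A runnable sketch of the resulting shape (splitBlobURI here is an illustrative stand-in, not the repo's exported function):

package main

import (
    "fmt"
    "net/url"
    "strings"
)

// splitBlobURI decomposes a blob URI into account, container, and blob path.
func splitBlobURI(uri string) (string, string, string, error) {
    parsed, err := url.Parse(uri) // the lowercase param frees "parsed" as a distinct name
    if err != nil {
        return "", "", "", err
    }
    account := strings.Split(parsed.Host, ".")[0]
    parts := strings.Split(parsed.Path, "/")
    if len(parts) < 3 {
        return "", "", "", fmt.Errorf("unexpected blob URI path %q", parsed.Path)
    }
    return account, parts[1], strings.Join(parts[2:], "/"), nil
}

func main() {
    fmt.Println(splitBlobURI("https://acct.blob.core.windows.net/vhds/os.vhd"))
    // acct vhds os.vhd <nil>
}
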
View file

@@ -48,8 +48,8 @@ func init() {
 }
 
 // ResourceName returns the last segment (the resource name) for the specified resource identifier.
-func ResourceName(ID string) (string, error) {
-    parts := strings.Split(ID, "/")
+func ResourceName(id string) (string, error) {
+    parts := strings.Split(id, "/")
     name := parts[len(parts)-1]
     if len(name) == 0 {
         return "", errors.Errorf("resource name was missing from identifier")
@@ -59,14 +59,14 @@ func ResourceName(ID string) (string, error) {
 }
 
 // SplitBlobURI returns a decomposed blob URI parts: accountName, containerName, blobName.
-func SplitBlobURI(URI string) (string, string, string, error) {
-    uri, err := url.Parse(URI)
+func SplitBlobURI(uri string) (string, string, string, error) {
+    parsed, err := url.Parse(uri)
     if err != nil {
         return "", "", "", err
     }
 
-    accountName := strings.Split(uri.Host, ".")[0]
-    urlParts := strings.Split(uri.Path, "/")
+    accountName := strings.Split(parsed.Host, ".")[0]
+    urlParts := strings.Split(parsed.Path, "/")
     containerName := urlParts[1]
     blobPath := strings.Join(urlParts[2:], "/")

View file

@@ -474,11 +474,9 @@ func getK8sAgentVars(cs *api.ContainerService, profile *api.AgentPoolProfile) ma
     if profile.IsAvailabilitySets() {
         agentVars[agentOffset] = fmt.Sprintf("[parameters('%s')]", agentOffset)
         agentVars[agentAvailabilitySet] = fmt.Sprintf("[concat('%s-availabilitySet-', parameters('nameSuffix'))]", agentName)
-    } else {
-        if profile.IsLowPriorityScaleSet() {
-            agentVars[agentScaleSetPriority] = fmt.Sprintf("[parameters('%s')]", agentScaleSetPriority)
-            agentVars[agentScaleSetEvictionPolicy] = fmt.Sprintf("[parameters('%s')]", agentScaleSetEvictionPolicy)
-        }
+    } else if profile.IsLowPriorityScaleSet() {
+        agentVars[agentScaleSetPriority] = fmt.Sprintf("[parameters('%s')]", agentScaleSetPriority)
+        agentVars[agentScaleSetEvictionPolicy] = fmt.Sprintf("[parameters('%s')]", agentScaleSetEvictionPolicy)
     }
 
     agentVars[agentVMSize] = fmt.Sprintf("[parameters('%s')]", agentVMSize)

View file

@@ -906,7 +906,7 @@ write_files:
     for _, file := range files {
         b64GzipString := getBase64CustomScript(file)
         fileNoPath := strings.TrimPrefix(file, "swarm/")
-        filelines = filelines + fmt.Sprintf(writeFileBlock, b64GzipString, fileNoPath)
+        filelines += fmt.Sprintf(writeFileBlock, b64GzipString, fileNoPath)
     }
     return fmt.Sprintf(clusterYamlFile, filelines)
 }

View file

@@ -186,7 +186,7 @@ func (a *APIModelTestFile) WriteArmTemplateParamsErrFilename(contents []byte) (s
 }
 
 // IterateTestFilesDirectory iterates the test data directory adding api model files to the test file slice.
-func IterateTestFilesDirectory(directory string, APIModelTestFiles *[]APIModelTestFile) error {
+func IterateTestFilesDirectory(directory string, apiModelTestFiles *[]APIModelTestFile) error {
     files, err := ioutil.ReadDir(directory)
     if err != nil {
         return err
@@ -194,14 +194,14 @@ func IterateTestFilesDirectory(directory string, APIModelTestFiles *[]APIModelTe
     for _, file := range files {
         if file.IsDir() {
-            if e := IterateTestFilesDirectory(filepath.Join(directory, file.Name()), APIModelTestFiles); e != nil {
+            if e := IterateTestFilesDirectory(filepath.Join(directory, file.Name()), apiModelTestFiles); e != nil {
                 return e
             }
         } else {
             if !strings.Contains(file.Name(), "_expected") && strings.HasSuffix(file.Name(), ".json") {
                 tuple := &APIModelTestFile{}
                 tuple.APIModelFilename = filepath.Join(directory, file.Name())
-                *APIModelTestFiles = append(*APIModelTestFiles, *tuple)
+                *apiModelTestFiles = append(*apiModelTestFiles, *tuple)
             }
         }
     }

View file

@@ -115,8 +115,7 @@ func getParameters(cs *api.ContainerService, generatorCode string, aksEngineVers
     dcosClusterPackageListID := cloudSpecConfig.DCOSSpecConfig.DcosClusterPackageListID
     dcosProviderPackageID := cloudSpecConfig.DCOSSpecConfig.DcosProviderPackageID
 
-    switch properties.OrchestratorProfile.OrchestratorType {
-    case api.DCOS:
+    if properties.OrchestratorProfile.OrchestratorType == api.DCOS {
         switch properties.OrchestratorProfile.OrchestratorVersion {
         case common.DCOSVersion1Dot8Dot8:
             dcosBootstrapURL = cloudSpecConfig.DCOSSpecConfig.DCOS188BootstrapDownloadURL

View file

@@ -61,8 +61,7 @@ func ParseConfig() (*Config, error) {
 func (c *Config) GetKubeConfig() string {
     var kubeconfigPath string
 
-    switch {
-    case c.IsKubernetes():
+    if c.IsKubernetes() {
         file := fmt.Sprintf("kubeconfig.%s.json", c.Location)
         kubeconfigPath = filepath.Join(c.CurrentWorkingDir, "_output", c.Name, "kubeconfig", file)
     }

View file

@@ -169,7 +169,7 @@ func Build(cfg *config.Config, masterSubnetID string, agentSubnetIDs []string, i
 func (e *Engine) NodeCount() int {
     expectedCount := e.ExpandedDefinition.Properties.MasterProfile.Count
     for _, pool := range e.ExpandedDefinition.Properties.AgentPoolProfiles {
-        expectedCount = expectedCount + pool.Count
+        expectedCount += pool.Count
     }
     return expectedCount
 }
@@ -206,17 +206,18 @@ func (e *Engine) GetWindowsTestImages() (*WindowsTestImages, error) {
         return nil, errors.New("Can't guess a Windows version without Windows nodes in the cluster")
     }
 
-    if strings.Contains(e.ExpandedDefinition.Properties.WindowsProfile.GetWindowsSku(), "1809") || strings.Contains(e.ExpandedDefinition.Properties.WindowsProfile.GetWindowsSku(), "2019") {
+    windowsSku := e.ExpandedDefinition.Properties.WindowsProfile.GetWindowsSku()
+    switch {
+    case strings.Contains(windowsSku, "1809"), strings.Contains(windowsSku, "2019"):
         return &WindowsTestImages{IIS: "mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019",
             ServerCore: "mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019"}, nil
-    } else if strings.Contains(e.ExpandedDefinition.Properties.WindowsProfile.GetWindowsSku(), "1803") {
+    case strings.Contains(windowsSku, "1803"):
         return &WindowsTestImages{IIS: "microsoft/iis:windowsservercore-1803",
             ServerCore: "microsoft/iis:windowsservercore-1803"}, nil
-    } else if strings.Contains(e.ExpandedDefinition.Properties.WindowsProfile.GetWindowsSku(), "1709") {
+    case strings.Contains(windowsSku, "1709"):
         return nil, errors.New("Windows Server version 1709 hasn't been tested in a long time and is deprecated")
     }
-    return nil, errors.New("Unknown Windows version. GetWindowsSku() = " + e.ExpandedDefinition.Properties.WindowsProfile.GetWindowsSku())
+    return nil, errors.New("Unknown Windows version. GetWindowsSku() = " + windowsSku)
 }
 
 // HasAddon will return true if an addon is enabled

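This hunk goes the other direction from singleCaseSwitch: gocritic's ifElseChain check prefers a tagless switch once an if/else-if ladder grows several branches, and hoisting the repeated GetWindowsSku() call into windowsSku removes the duplicated expression. A compact sketch of the shape (image tags abbreviated for illustration):

package main

import (
    "fmt"
    "strings"
)

func pickImage(sku string) (string, error) {
    // A tagless switch reads better than an if/else-if ladder,
    // and sku is computed once rather than once per branch.
    switch {
    case strings.Contains(sku, "1809"), strings.Contains(sku, "2019"):
        return "servercore/iis:windowsservercore-ltsc2019", nil
    case strings.Contains(sku, "1803"):
        return "iis:windowsservercore-1803", nil
    default:
        return "", fmt.Errorf("unknown Windows SKU %q", sku)
    }
}

func main() {
    fmt.Println(pickImage("Datacenter-Core-1809-with-Containers-smalldisk"))
}
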
View file

@@ -251,7 +251,7 @@ func RunCommandMultipleTimes(podRunnerCmd podRunnerCmd, image, name, command str
         if err != nil {
             log.Printf("Unable to get logs from pod %s\n", podName)
         } else {
-            log.Printf("%s\n", string(out[:]))
+            log.Printf("%s\n", string(out))
         }
 
         err = p.Delete(3)
@@ -472,13 +472,13 @@ func WaitOnReady(podPrefix, namespace string, successesNeeded int, sleep, durati
                 return
             }
             if ready {
-                successCount = successCount + 1
+                successCount++
                 if successCount >= successesNeeded {
                     readyCh <- true
                 }
             } else {
                 if successCount > 1 {
-                    failureCount = failureCount + 1
+                    failureCount++
                     if failureCount >= successesNeeded {
                         errCh <- errors.Errorf("Pods from deployment (%s) in namespace (%s) have been checked out as all Ready %d times, but NotReady %d times. This behavior may mean it is in a crashloop", podPrefix, namespace, successCount, failureCount)
                     }
@@ -970,15 +970,16 @@ func (c *Container) ValidateResources(a api.KubernetesContainerSpec) error {
     actualCPULimits := c.getCPULimits()
     actualMemoryRequests := c.getMemoryRequests()
     actualLimits := c.getMemoryLimits()
-    if expectedCPURequests != "" && expectedCPURequests != actualCPURequests {
+    switch {
+    case expectedCPURequests != "" && expectedCPURequests != actualCPURequests:
         return errors.Errorf("expected CPU requests %s does not match %s", expectedCPURequests, actualCPURequests)
-    } else if expectedCPULimits != "" && expectedCPULimits != actualCPULimits {
+    case expectedCPULimits != "" && expectedCPULimits != actualCPULimits:
        return errors.Errorf("expected CPU limits %s does not match %s", expectedCPULimits, actualCPULimits)
-    } else if expectedMemoryRequests != "" && expectedMemoryRequests != actualMemoryRequests {
+    case expectedMemoryRequests != "" && expectedMemoryRequests != actualMemoryRequests:
        return errors.Errorf("expected Memory requests %s does not match %s", expectedMemoryRequests, actualMemoryRequests)
-    } else if expectedMemoryLimits != "" && expectedMemoryLimits != actualLimits {
+    case expectedMemoryLimits != "" && expectedMemoryLimits != actualLimits:
        return errors.Errorf("expected Memory limits %s does not match %s", expectedMemoryLimits, actualLimits)
-    } else {
+    default:
        return nil
     }
 }

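Two checks meet in this file: gocritic's unslice flags out[:], a full-range slice of a slice that yields the same slice (the expression only matters for arrays), and ifElseChain again turns the resource-comparison ladder into a tagless switch with a default. A minimal unslice sketch:

package main

import "fmt"

func main() {
    out := []byte("pod logs")

    // Before: string(out[:]) — slicing a slice over its full range
    // is a no-op, so the [:] adds nothing and can be dropped.
    fmt.Printf("%s\n", string(out))
}
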
View file

@@ -114,7 +114,7 @@ func (p *Point) SetProvisionStart() {
 // RecordProvisionError sets appropriate values for when a test error occurs
 func (p *Point) RecordProvisionError() {
     p.ProvisionDuration = time.Since(p.ProvisionStart)
-    p.ProvisionErrorCount = p.ProvisionErrorCount + 1
+    p.ProvisionErrorCount++
 }
 
 // RecordProvisionSuccess sets TestErrorCount to 0 to mark tests succeeded
@@ -130,7 +130,7 @@ func (p *Point) SetNodeWaitStart() {
 // RecordNodeWait will set NodeWaitDuration to time.Since(p.NodeWaitStart)
 func (p *Point) RecordNodeWait(err error) {
     if err != nil {
-        p.NodeWaitErrorCount = p.NodeWaitErrorCount + 1
+        p.NodeWaitErrorCount++
     }
     p.NodeWaitDuration = time.Since(p.NodeWaitStart)
 }
@@ -138,7 +138,7 @@ func (p *Point) RecordNodeWait(err error) {
 // RecordTestError sets appropriate values for when a test error occurs
 func (p *Point) RecordTestError() {
     p.TestDuration = time.Since(p.TestStart)
-    p.TestErrorCount = p.TestErrorCount + 1
+    p.TestErrorCount++
 }
 
 // RecordTestSuccess sets TestErrorCount to 0 to mark tests succeeded