Mirror of https://github.com/Azure/k8s-deploy.git
Removing js files from main branch (#122)
Parent: 282a81e1fc
Commit: 2c09684db9
@@ -1 +1,2 @@
 node_modules
+lib
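Note on the .gitignore hunk above: a minimal sketch of the intended end state, assuming the compiled output shown in the rest of this diff lives under lib/ (only the two entries below appear in the hunk). With the entry in place, already-tracked build output is typically untracked with a standard command such as `git rm -r --cached lib` (whether this PR used that exact command is not shown here); the resulting .gitignore reads:

node_modules
lib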
@@ -1,107 +0,0 @@
'use strict';
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.promote = void 0;
const core = require("@actions/core");
const deploymentHelper = require("../utilities/strategy-helpers/deployment-helper");
const canaryDeploymentHelper = require("../utilities/strategy-helpers/canary-deployment-helper");
const SMICanaryDeploymentHelper = require("../utilities/strategy-helpers/smi-canary-deployment-helper");
const utils = require("../utilities/manifest-utilities");
const TaskInputParameters = require("../input-parameters");
const manifest_utilities_1 = require("../utilities/manifest-utilities");
const KubernetesObjectUtility = require("../utilities/resource-object-utility");
const models = require("../constants");
const KubernetesManifestUtility = require("../utilities/manifest-stability-utility");
const blue_green_helper_1 = require("../utilities/strategy-helpers/blue-green-helper");
const blue_green_helper_2 = require("../utilities/strategy-helpers/blue-green-helper");
const service_blue_green_helper_1 = require("../utilities/strategy-helpers/service-blue-green-helper");
const ingress_blue_green_helper_1 = require("../utilities/strategy-helpers/ingress-blue-green-helper");
const smi_blue_green_helper_1 = require("../utilities/strategy-helpers/smi-blue-green-helper");
const kubectl_object_model_1 = require("../kubectl-object-model");
function promote() {
    return __awaiter(this, void 0, void 0, function* () {
        const kubectl = new kubectl_object_model_1.Kubectl(yield utils.getKubectl(), TaskInputParameters.namespace, true);
        if (canaryDeploymentHelper.isCanaryDeploymentStrategy()) {
            yield promoteCanary(kubectl);
        }
        else if (blue_green_helper_2.isBlueGreenDeploymentStrategy()) {
            yield promoteBlueGreen(kubectl);
        }
        else {
            core.debug('Strategy is not canary or blue-green deployment. Invalid request.');
            throw ('InvalidPromotetActionDeploymentStrategy');
        }
    });
}
exports.promote = promote;
function promoteCanary(kubectl) {
    return __awaiter(this, void 0, void 0, function* () {
        let includeServices = false;
        if (canaryDeploymentHelper.isSMICanaryStrategy()) {
            includeServices = true;
            // In case of SMI traffic split strategy when deployment is promoted, first we will redirect traffic to
            // Canary deployment, then update stable deployment and then redirect traffic to stable deployment
            core.debug('Redirecting traffic to canary deployment');
            SMICanaryDeploymentHelper.redirectTrafficToCanaryDeployment(kubectl, TaskInputParameters.manifests);
            core.debug('Deploying input manifests with SMI canary strategy');
            yield deploymentHelper.deploy(kubectl, TaskInputParameters.manifests, 'None');
            core.debug('Redirecting traffic to stable deployment');
            SMICanaryDeploymentHelper.redirectTrafficToStableDeployment(kubectl, TaskInputParameters.manifests);
        }
        else {
            core.debug('Deploying input manifests');
            yield deploymentHelper.deploy(kubectl, TaskInputParameters.manifests, 'None');
        }
        core.debug('Deployment strategy selected is Canary. Deleting canary and baseline workloads.');
        try {
            canaryDeploymentHelper.deleteCanaryDeployment(kubectl, TaskInputParameters.manifests, includeServices);
        }
        catch (ex) {
            core.warning('Exception occurred while deleting canary and baseline workloads. Exception: ' + ex);
        }
    });
}
function promoteBlueGreen(kubectl) {
    return __awaiter(this, void 0, void 0, function* () {
        // updated container images and pull secrets
        let inputManifestFiles = manifest_utilities_1.getUpdatedManifestFiles(TaskInputParameters.manifests);
        const manifestObjects = blue_green_helper_1.getManifestObjects(inputManifestFiles);
        core.debug('deleting old deployment and making new ones');
        let result;
        if (blue_green_helper_2.isIngressRoute()) {
            result = yield ingress_blue_green_helper_1.promoteBlueGreenIngress(kubectl, manifestObjects);
        }
        else if (blue_green_helper_2.isSMIRoute()) {
            result = yield smi_blue_green_helper_1.promoteBlueGreenSMI(kubectl, manifestObjects);
        }
        else {
            result = yield service_blue_green_helper_1.promoteBlueGreenService(kubectl, manifestObjects);
        }
        // checking stability of newly created deployments
        const deployedManifestFiles = result.newFilePaths;
        const resources = KubernetesObjectUtility.getResources(deployedManifestFiles, models.deploymentTypes.concat([models.DiscoveryAndLoadBalancerResource.service]));
        yield KubernetesManifestUtility.checkManifestStability(kubectl, resources);
        core.debug('routing to new deployments');
        if (blue_green_helper_2.isIngressRoute()) {
            ingress_blue_green_helper_1.routeBlueGreenIngress(kubectl, null, manifestObjects.serviceNameMap, manifestObjects.ingressEntityList);
            blue_green_helper_1.deleteWorkloadsAndServicesWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList, manifestObjects.serviceEntityList);
        }
        else if (blue_green_helper_2.isSMIRoute()) {
            smi_blue_green_helper_1.routeBlueGreenSMI(kubectl, blue_green_helper_2.NONE_LABEL_VALUE, manifestObjects.serviceEntityList);
            blue_green_helper_1.deleteWorkloadsWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList);
            smi_blue_green_helper_1.cleanupSMI(kubectl, manifestObjects.serviceEntityList);
        }
        else {
            service_blue_green_helper_1.routeBlueGreenService(kubectl, blue_green_helper_2.NONE_LABEL_VALUE, manifestObjects.serviceEntityList);
            blue_green_helper_1.deleteWorkloadsWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList);
        }
    });
}
@@ -1,65 +0,0 @@
'use strict';
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.reject = void 0;
const core = require("@actions/core");
const canaryDeploymentHelper = require("../utilities/strategy-helpers/canary-deployment-helper");
const SMICanaryDeploymentHelper = require("../utilities/strategy-helpers/smi-canary-deployment-helper");
const kubectl_object_model_1 = require("../kubectl-object-model");
const utils = require("../utilities/manifest-utilities");
const TaskInputParameters = require("../input-parameters");
const service_blue_green_helper_1 = require("../utilities/strategy-helpers/service-blue-green-helper");
const ingress_blue_green_helper_1 = require("../utilities/strategy-helpers/ingress-blue-green-helper");
const smi_blue_green_helper_1 = require("../utilities/strategy-helpers/smi-blue-green-helper");
const blue_green_helper_1 = require("../utilities/strategy-helpers/blue-green-helper");
const deployment_helper_1 = require("../utilities/strategy-helpers/deployment-helper");
function reject() {
    return __awaiter(this, void 0, void 0, function* () {
        const kubectl = new kubectl_object_model_1.Kubectl(yield utils.getKubectl(), TaskInputParameters.namespace, true);
        if (canaryDeploymentHelper.isCanaryDeploymentStrategy()) {
            yield rejectCanary(kubectl);
        }
        else if (blue_green_helper_1.isBlueGreenDeploymentStrategy()) {
            yield rejectBlueGreen(kubectl);
        }
        else {
            core.debug('Strategy is not canary or blue-green deployment. Invalid request.');
            throw ('InvalidDeletetActionDeploymentStrategy');
        }
    });
}
exports.reject = reject;
function rejectCanary(kubectl) {
    return __awaiter(this, void 0, void 0, function* () {
        let includeServices = false;
        if (canaryDeploymentHelper.isSMICanaryStrategy()) {
            core.debug('Reject deployment with SMI canary strategy');
            includeServices = true;
            SMICanaryDeploymentHelper.redirectTrafficToStableDeployment(kubectl, TaskInputParameters.manifests);
        }
        core.debug('Deployment strategy selected is Canary. Deleting baseline and canary workloads.');
        canaryDeploymentHelper.deleteCanaryDeployment(kubectl, TaskInputParameters.manifests, includeServices);
    });
}
function rejectBlueGreen(kubectl) {
    return __awaiter(this, void 0, void 0, function* () {
        let inputManifestFiles = deployment_helper_1.getManifestFiles(TaskInputParameters.manifests);
        if (blue_green_helper_1.isIngressRoute()) {
            yield ingress_blue_green_helper_1.rejectBlueGreenIngress(kubectl, inputManifestFiles);
        }
        else if (blue_green_helper_1.isSMIRoute()) {
            yield smi_blue_green_helper_1.rejectBlueGreenSMI(kubectl, inputManifestFiles);
        }
        else {
            yield service_blue_green_helper_1.rejectBlueGreenService(kubectl, inputManifestFiles);
        }
    });
}
@@ -1,54 +0,0 @@
'use strict';
Object.defineProperty(exports, "__esModule", { value: true });
exports.getWorkflowAnnotationKeyLabel = exports.getWorkflowAnnotationsJson = exports.workloadTypesWithRolloutStatus = exports.workloadTypes = exports.deploymentTypes = exports.ServiceTypes = exports.DiscoveryAndLoadBalancerResource = exports.KubernetesWorkload = void 0;
class KubernetesWorkload {
}
exports.KubernetesWorkload = KubernetesWorkload;
KubernetesWorkload.pod = 'Pod';
KubernetesWorkload.replicaset = 'Replicaset';
KubernetesWorkload.deployment = 'Deployment';
KubernetesWorkload.statefulSet = 'StatefulSet';
KubernetesWorkload.daemonSet = 'DaemonSet';
KubernetesWorkload.job = 'job';
KubernetesWorkload.cronjob = 'cronjob';
class DiscoveryAndLoadBalancerResource {
}
exports.DiscoveryAndLoadBalancerResource = DiscoveryAndLoadBalancerResource;
DiscoveryAndLoadBalancerResource.service = 'service';
DiscoveryAndLoadBalancerResource.ingress = 'ingress';
class ServiceTypes {
}
exports.ServiceTypes = ServiceTypes;
ServiceTypes.loadBalancer = 'LoadBalancer';
ServiceTypes.nodePort = 'NodePort';
ServiceTypes.clusterIP = 'ClusterIP';
exports.deploymentTypes = ['deployment', 'replicaset', 'daemonset', 'pod', 'statefulset'];
exports.workloadTypes = ['deployment', 'replicaset', 'daemonset', 'pod', 'statefulset', 'job', 'cronjob'];
exports.workloadTypesWithRolloutStatus = ['deployment', 'daemonset', 'statefulset'];
function getWorkflowAnnotationsJson(lastSuccessRunSha, workflowFilePath, deploymentConfig) {
    let annotationObject = {};
    annotationObject["run"] = process.env.GITHUB_RUN_ID;
    annotationObject["repository"] = process.env.GITHUB_REPOSITORY;
    annotationObject["workflow"] = process.env.GITHUB_WORKFLOW;
    annotationObject["workflowFileName"] = workflowFilePath.replace(".github/workflows/", "");
    annotationObject["jobName"] = process.env.GITHUB_JOB;
    annotationObject["createdBy"] = process.env.GITHUB_ACTOR;
    annotationObject["runUri"] = `https://github.com/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID}`;
    annotationObject["commit"] = process.env.GITHUB_SHA;
    annotationObject["lastSuccessRunCommit"] = lastSuccessRunSha;
    annotationObject["branch"] = process.env.GITHUB_REF;
    annotationObject["deployTimestamp"] = Date.now();
    annotationObject["dockerfilePaths"] = deploymentConfig.dockerfilePaths;
    annotationObject["manifestsPaths"] = deploymentConfig.manifestFilePaths;
    annotationObject["helmChartPaths"] = deploymentConfig.helmChartFilePaths;
    annotationObject["provider"] = "GitHub";
    return JSON.stringify(annotationObject);
}
exports.getWorkflowAnnotationsJson = getWorkflowAnnotationsJson;
function getWorkflowAnnotationKeyLabel(workflowFilePath) {
    const hashKey = require("crypto").createHash("MD5")
        .update(`${process.env.GITHUB_REPOSITORY}/${workflowFilePath}`)
        .digest("hex");
    return `githubWorkflow_${hashKey}`;
}
exports.getWorkflowAnnotationKeyLabel = getWorkflowAnnotationKeyLabel;
@@ -1,31 +0,0 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.DockerExec = void 0;
const tool_runner_1 = require("./utilities/tool-runner");
class DockerExec {
    constructor(dockerPath) {
        this.dockerPath = dockerPath;
    }
    ;
    pull(image, args, silent) {
        args = ['pull', image, ...args];
        let result = this.execute(args, silent);
        if (result.stderr != '' && result.code != 0) {
            throw new Error(`docker images pull failed with: ${result.error}`);
        }
    }
    inspect(image, args, silent) {
        args = ['inspect', image, ...args];
        let result = this.execute(args, silent);
        if (result.stderr != '' && result.code != 0) {
            throw new Error(`docker inspect call failed with: ${result.error}`);
        }
        return result.stdout;
    }
    execute(args, silent) {
        const command = new tool_runner_1.ToolRunner(this.dockerPath);
        command.arg(args);
        return command.execSync({ silent: !!silent });
    }
}
exports.DockerExec = DockerExec;
@@ -1,35 +0,0 @@
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.GitHubClient = void 0;
const core = require("@actions/core");
const httpClient_1 = require("./utilities/httpClient");
class GitHubClient {
    constructor(repository, token) {
        this._repository = repository;
        this._token = token;
    }
    getWorkflows() {
        return __awaiter(this, void 0, void 0, function* () {
            const getWorkflowFileNameUrl = `https://api.github.com/repos/${this._repository}/actions/workflows`;
            const webRequest = new httpClient_1.WebRequest();
            webRequest.method = "GET";
            webRequest.uri = getWorkflowFileNameUrl;
            webRequest.headers = {
                Authorization: `Bearer ${this._token}`
            };
            core.debug(`Getting workflows for repo: ${this._repository}`);
            const response = yield httpClient_1.sendRequest(webRequest);
            return Promise.resolve(response);
        });
    }
}
exports.GitHubClient = GitHubClient;
@@ -1,57 +0,0 @@
'use strict';
Object.defineProperty(exports, "__esModule", { value: true });
exports.githubToken = exports.forceDeployment = exports.args = exports.baselineAndCanaryReplicas = exports.versionSwitchBuffer = exports.routeMethod = exports.trafficSplitMethod = exports.deploymentStrategy = exports.canaryPercentage = exports.manifests = exports.imagePullSecrets = exports.containers = exports.namespace = void 0;
const core = require("@actions/core");
exports.namespace = core.getInput('namespace');
exports.containers = core.getInput('images').split('\n');
exports.imagePullSecrets = core.getInput('imagepullsecrets').split('\n').filter(secret => secret.trim().length > 0);
exports.manifests = core.getInput('manifests').split('\n');
exports.canaryPercentage = core.getInput('percentage');
exports.deploymentStrategy = core.getInput('strategy');
exports.trafficSplitMethod = core.getInput('traffic-split-method');
exports.routeMethod = core.getInput('route-method');
exports.versionSwitchBuffer = core.getInput('version-switch-buffer');
exports.baselineAndCanaryReplicas = core.getInput('baseline-and-canary-replicas');
exports.args = core.getInput('arguments');
exports.forceDeployment = core.getInput('force').toLowerCase() == 'true';
exports.githubToken = core.getInput("token");
if (!exports.namespace) {
    core.debug('Namespace was not supplied; using "default" namespace instead.');
    exports.namespace = 'default';
}
if (!exports.githubToken) {
    core.error("'token' input is not supplied. Set it to a PAT/GITHUB_TOKEN");
}
try {
    const pe = parseInt(exports.canaryPercentage);
    if (pe < 0 || pe > 100) {
        core.setFailed('A valid percentage value is between 0 and 100');
        process.exit(1);
    }
}
catch (ex) {
    core.setFailed("Enter a valid 'percentage' integer value ");
    process.exit(1);
}
try {
    const pe = parseInt(exports.baselineAndCanaryReplicas);
    if (pe < 0 || pe > 100) {
        core.setFailed('A valid baseline-and-canary-replicas value is between 0 and 100');
        process.exit(1);
    }
}
catch (ex) {
    core.setFailed("Enter a valid 'baseline-and-canary-replicas' integer value");
    process.exit(1);
}
try {
    const pe = parseInt(exports.versionSwitchBuffer);
    if (pe < 0 || pe > 300) {
        core.setFailed('Invalid buffer time, valid version-switch-buffer is a value more than or equal to 0 and lesser than or equal 300');
        process.exit(1);
    }
}
catch (ex) {
    core.setFailed("Enter a valid 'version-switch-buffer' integer value");
    process.exit(1);
}
@@ -1,117 +0,0 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Kubectl = void 0;
const tool_runner_1 = require("./utilities/tool-runner");
class Kubectl {
    constructor(kubectlPath, namespace, ignoreSSLErrors) {
        this.kubectlPath = kubectlPath;
        this.ignoreSSLErrors = !!ignoreSSLErrors;
        if (!!namespace) {
            this.namespace = namespace;
        }
        else {
            this.namespace = 'default';
        }
    }
    apply(configurationPaths, force) {
        let applyArgs = ['apply', '-f', this.createInlineArray(configurationPaths)];
        if (!!force) {
            console.log("force flag is on, deployment will continue even if previous deployment already exists");
            applyArgs.push('--force');
        }
        return this.execute(applyArgs);
    }
    describe(resourceType, resourceName, silent) {
        return this.execute(['describe', resourceType, resourceName], silent);
    }
    getNewReplicaSet(deployment) {
        let newReplicaSet = '';
        const result = this.describe('deployment', deployment, true);
        if (result && result.stdout) {
            const stdout = result.stdout.split('\n');
            stdout.forEach((line) => {
                if (!!line && line.toLowerCase().indexOf('newreplicaset') > -1) {
                    newReplicaSet = line.substr(14).trim().split(' ')[0];
                }
            });
        }
        return newReplicaSet;
    }
    annotate(resourceType, resourceName, annotation) {
        let args = ['annotate', resourceType, resourceName];
        args.push(annotation);
        args.push(`--overwrite`);
        return this.execute(args);
    }
    annotateFiles(files, annotation) {
        let args = ['annotate'];
        args = args.concat(['-f', this.createInlineArray(files)]);
        args.push(annotation);
        args.push(`--overwrite`);
        return this.execute(args);
    }
    labelFiles(files, labels) {
        let args = ['label'];
        args = args.concat(['-f', this.createInlineArray(files)]);
        args = args.concat(labels);
        args.push(`--overwrite`);
        return this.execute(args);
    }
    getAllPods() {
        return this.execute(['get', 'pods', '-o', 'json'], true);
    }
    getClusterInfo() {
        return this.execute(['cluster-info'], true);
    }
    checkRolloutStatus(resourceType, name) {
        return this.execute(['rollout', 'status', resourceType + '/' + name]);
    }
    getResource(resourceType, name) {
        return this.execute(['get', resourceType + '/' + name, '-o', 'json']);
    }
    getResources(applyOutput, filterResourceTypes) {
        const outputLines = applyOutput.split('\n');
        const results = [];
        outputLines.forEach(line => {
            const words = line.split(' ');
            if (words.length > 2) {
                const resourceType = words[0].trim();
                const resourceName = JSON.parse(words[1].trim());
                if (filterResourceTypes.filter(type => !!type && resourceType.toLowerCase().startsWith(type.toLowerCase())).length > 0) {
                    results.push({
                        type: resourceType,
                        name: resourceName
                    });
                }
            }
        });
        return results;
    }
    executeCommand(customCommand, args) {
        if (!customCommand)
            throw new Error('NullCommandForKubectl');
        return args ? this.execute([customCommand, args]) : this.execute([customCommand]);
    }
    delete(args) {
        if (typeof args === 'string')
            return this.execute(['delete', args]);
        else
            return this.execute(['delete'].concat(args));
    }
    execute(args, silent) {
        if (this.ignoreSSLErrors) {
            args.push('--insecure-skip-tls-verify');
        }
        args = args.concat(['--namespace', this.namespace]);
        const command = new tool_runner_1.ToolRunner(this.kubectlPath);
        command.arg(args);
        return command.execSync({ silent: !!silent });
    }
    createInlineArray(str) {
        if (typeof str === 'string') {
            return str;
        }
        return str.join(',');
    }
}
exports.Kubectl = Kubectl;
@@ -1,80 +0,0 @@
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
const os = require("os");
const path = require("path");
const util = require("util");
const fs = require("fs");
const toolCache = require("@actions/tool-cache");
const core = require("@actions/core");
const kubectlToolName = 'kubectl';
const stableKubectlVersion = 'v1.15.0';
const stableVersionUrl = 'https://storage.googleapis.com/kubernetes-release/release/stable.txt';
function getExecutableExtension() {
    if (os.type().match(/^Win/)) {
        return '.exe';
    }
    return '';
}
function getkubectlDownloadURL(version) {
    switch (os.type()) {
        case 'Linux':
            return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/linux/amd64/kubectl', version);
        case 'Darwin':
            return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/darwin/amd64/kubectl', version);
        case 'Windows_NT':
        default:
            return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/windows/amd64/kubectl.exe', version);
    }
}
function getStableKubectlVersion() {
    return __awaiter(this, void 0, void 0, function* () {
        return toolCache.downloadTool(stableVersionUrl).then((downloadPath) => {
            let version = fs.readFileSync(downloadPath, 'utf8').toString().trim();
            if (!version) {
                version = stableKubectlVersion;
            }
            return version;
        }, (error) => {
            core.debug(error);
            core.warning('GetStableVersionFailed');
            return stableKubectlVersion;
        });
    });
}
exports.getStableKubectlVersion = getStableKubectlVersion;
function downloadKubectl(version) {
    return __awaiter(this, void 0, void 0, function* () {
        let cachedToolpath = toolCache.find(kubectlToolName, version);
        let kubectlDownloadPath = '';
        if (!cachedToolpath) {
            try {
                kubectlDownloadPath = yield toolCache.downloadTool(getkubectlDownloadURL(version));
            }
            catch (exception) {
                throw new Error('DownloadKubectlFailed');
            }
            cachedToolpath = yield toolCache.cacheFile(kubectlDownloadPath, kubectlToolName + getExecutableExtension(), kubectlToolName, version);
        }
        const kubectlPath = path.join(cachedToolpath, kubectlToolName + getExecutableExtension());
        fs.chmodSync(kubectlPath, '777');
        return kubectlPath;
    });
}
exports.downloadKubectl = downloadKubectl;
function getTrafficSplitAPIVersion(kubectl) {
    const result = kubectl.executeCommand('api-versions');
    const trafficSplitAPIVersion = result.stdout.split('\n').find(version => version.startsWith('split.smi-spec.io'));
    if (trafficSplitAPIVersion == null || typeof trafficSplitAPIVersion == 'undefined') {
        throw new Error('UnableToCreateTrafficSplitManifestFile');
    }
    return trafficSplitAPIVersion;
}
exports.getTrafficSplitAPIVersion = getTrafficSplitAPIVersion;
lib/run.js (91 lines removed)
@@ -1,91 +0,0 @@
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.run = void 0;
const core = require("@actions/core");
const io = require("@actions/io");
const path = require("path");
const toolCache = require("@actions/tool-cache");
const kubectl_util_1 = require("./utilities/kubectl-util");
const utility_1 = require("./utilities/utility");
const kubectl_object_model_1 = require("./kubectl-object-model");
const deployment_helper_1 = require("./utilities/strategy-helpers/deployment-helper");
const promote_1 = require("./actions/promote");
const reject_1 = require("./actions/reject");
let kubectlPath = "";
function setKubectlPath() {
    return __awaiter(this, void 0, void 0, function* () {
        if (core.getInput('kubectl-version')) {
            const version = core.getInput('kubectl-version');
            kubectlPath = toolCache.find('kubectl', version);
            if (!kubectlPath) {
                kubectlPath = yield installKubectl(version);
            }
        }
        else {
            kubectlPath = yield io.which('kubectl', false);
            if (!kubectlPath) {
                const allVersions = toolCache.findAllVersions('kubectl');
                kubectlPath = allVersions.length > 0 ? toolCache.find('kubectl', allVersions[0]) : '';
                if (!kubectlPath) {
                    throw new Error('Kubectl is not installed, either add install-kubectl action or provide "kubectl-version" input to download kubectl');
                }
                kubectlPath = path.join(kubectlPath, `kubectl${utility_1.getExecutableExtension()}`);
            }
        }
    });
}
function installKubectl(version) {
    return __awaiter(this, void 0, void 0, function* () {
        if (utility_1.isEqual(version, 'latest')) {
            version = yield kubectl_util_1.getStableKubectlVersion();
        }
        return yield kubectl_util_1.downloadKubectl(version);
    });
}
function checkClusterContext() {
    if (!process.env["KUBECONFIG"]) {
        throw new Error('Cluster context not set. Use k8ssetcontext action to set cluster context');
    }
}
function run() {
    return __awaiter(this, void 0, void 0, function* () {
        checkClusterContext();
        yield setKubectlPath();
        let manifestsInput = core.getInput('manifests');
        if (!manifestsInput) {
            core.setFailed('No manifests supplied to deploy');
            return;
        }
        let namespace = core.getInput('namespace');
        if (!namespace) {
            namespace = 'default';
        }
        let action = core.getInput('action');
        let manifests = manifestsInput.split('\n');
        if (action === 'deploy') {
            let strategy = core.getInput('strategy');
            console.log("strategy: ", strategy);
            yield deployment_helper_1.deploy(new kubectl_object_model_1.Kubectl(kubectlPath, namespace), manifests, strategy);
        }
        else if (action === 'promote') {
            yield promote_1.promote();
        }
        else if (action === 'reject') {
            yield reject_1.reject();
        }
        else {
            core.setFailed('Not a valid action. The allowed actions are deploy, promote, reject');
        }
    });
}
exports.run = run;
run().catch(core.setFailed);
@@ -1,78 +0,0 @@
'use strict';
Object.defineProperty(exports, "__esModule", { value: true });
exports.writeManifestToFile = exports.writeObjectsToFile = exports.assertFileExists = exports.ensureDirExists = exports.getNewUserDirPath = exports.getTempDirectory = void 0;
const fs = require("fs");
const path = require("path");
const core = require("@actions/core");
const os = require("os");
function getTempDirectory() {
    return process.env['runner.tempDirectory'] || os.tmpdir();
}
exports.getTempDirectory = getTempDirectory;
function getNewUserDirPath() {
    let userDir = path.join(getTempDirectory(), 'kubectlTask');
    ensureDirExists(userDir);
    userDir = path.join(userDir, getCurrentTime().toString());
    ensureDirExists(userDir);
    return userDir;
}
exports.getNewUserDirPath = getNewUserDirPath;
function ensureDirExists(dirPath) {
    if (!fs.existsSync(dirPath)) {
        fs.mkdirSync(dirPath);
    }
}
exports.ensureDirExists = ensureDirExists;
function assertFileExists(path) {
    if (!fs.existsSync(path)) {
        core.error(`FileNotFoundException : ${path}`);
        throw new Error(`FileNotFoundException: ${path}`);
    }
}
exports.assertFileExists = assertFileExists;
function writeObjectsToFile(inputObjects) {
    const newFilePaths = [];
    if (!!inputObjects) {
        inputObjects.forEach((inputObject) => {
            try {
                const inputObjectString = JSON.stringify(inputObject);
                if (!!inputObject.kind && !!inputObject.metadata && !!inputObject.metadata.name) {
                    const fileName = getManifestFileName(inputObject.kind, inputObject.metadata.name);
                    fs.writeFileSync(path.join(fileName), inputObjectString);
                    newFilePaths.push(fileName);
                }
                else {
                    core.debug('Input object is not proper K8s resource object. Object: ' + inputObjectString);
                }
            }
            catch (ex) {
                core.debug('Exception occurred while writing object to file : ' + inputObject + ' . Exception: ' + ex);
            }
        });
    }
    return newFilePaths;
}
exports.writeObjectsToFile = writeObjectsToFile;
function writeManifestToFile(inputObjectString, kind, name) {
    if (inputObjectString) {
        try {
            const fileName = getManifestFileName(kind, name);
            fs.writeFileSync(path.join(fileName), inputObjectString);
            return fileName;
        }
        catch (ex) {
            core.debug('Exception occurred while writing object to file : ' + inputObjectString + ' . Exception: ' + ex);
        }
    }
    return '';
}
exports.writeManifestToFile = writeManifestToFile;
function getManifestFileName(kind, name) {
    const filePath = kind + '_' + name + '_' + getCurrentTime().toString();
    const tempDirectory = getTempDirectory();
    const fileName = path.join(tempDirectory, path.basename(filePath));
    return fileName;
}
function getCurrentTime() {
    return new Date().getTime();
}
@@ -1,111 +0,0 @@
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.sleepFor = exports.sendRequest = exports.WebRequestOptions = exports.WebResponse = exports.WebRequest = exports.StatusCodes = void 0;
// Taken from https://github.com/Azure/aks-set-context/blob/master/src/client.ts
const util = require("util");
const fs = require("fs");
const httpClient = require("typed-rest-client/HttpClient");
const core = require("@actions/core");
var httpCallbackClient = new httpClient.HttpClient('GITHUB_RUNNER', null, {});
var StatusCodes;
(function (StatusCodes) {
    StatusCodes[StatusCodes["OK"] = 200] = "OK";
    StatusCodes[StatusCodes["CREATED"] = 201] = "CREATED";
    StatusCodes[StatusCodes["ACCEPTED"] = 202] = "ACCEPTED";
    StatusCodes[StatusCodes["UNAUTHORIZED"] = 401] = "UNAUTHORIZED";
    StatusCodes[StatusCodes["NOT_FOUND"] = 404] = "NOT_FOUND";
    StatusCodes[StatusCodes["INTERNAL_SERVER_ERROR"] = 500] = "INTERNAL_SERVER_ERROR";
    StatusCodes[StatusCodes["SERVICE_UNAVAILABLE"] = 503] = "SERVICE_UNAVAILABLE";
})(StatusCodes = exports.StatusCodes || (exports.StatusCodes = {}));
class WebRequest {
}
exports.WebRequest = WebRequest;
class WebResponse {
}
exports.WebResponse = WebResponse;
class WebRequestOptions {
}
exports.WebRequestOptions = WebRequestOptions;
function sendRequest(request, options) {
    return __awaiter(this, void 0, void 0, function* () {
        let i = 0;
        let retryCount = options && options.retryCount ? options.retryCount : 5;
        let retryIntervalInSeconds = options && options.retryIntervalInSeconds ? options.retryIntervalInSeconds : 2;
        let retriableErrorCodes = options && options.retriableErrorCodes ? options.retriableErrorCodes : ["ETIMEDOUT", "ECONNRESET", "ENOTFOUND", "ESOCKETTIMEDOUT", "ECONNREFUSED", "EHOSTUNREACH", "EPIPE", "EA_AGAIN"];
        let retriableStatusCodes = options && options.retriableStatusCodes ? options.retriableStatusCodes : [408, 409, 500, 502, 503, 504];
        let timeToWait = retryIntervalInSeconds;
        while (true) {
            try {
                if (request.body && typeof (request.body) !== 'string' && !request.body["readable"]) {
                    request.body = fs.createReadStream(request.body["path"]);
                }
                let response = yield sendRequestInternal(request);
                if (retriableStatusCodes.indexOf(response.statusCode) != -1 && ++i < retryCount) {
                    core.debug(util.format("Encountered a retriable status code: %s. Message: '%s'.", response.statusCode, response.statusMessage));
                    yield sleepFor(timeToWait);
                    timeToWait = timeToWait * retryIntervalInSeconds + retryIntervalInSeconds;
                    continue;
                }
                return response;
            }
            catch (error) {
                if (retriableErrorCodes.indexOf(error.code) != -1 && ++i < retryCount) {
                    core.debug(util.format("Encountered a retriable error:%s. Message: %s.", error.code, error.message));
                    yield sleepFor(timeToWait);
                    timeToWait = timeToWait * retryIntervalInSeconds + retryIntervalInSeconds;
                }
                else {
                    if (error.code) {
                        core.debug("error code =" + error.code);
                    }
                    throw error;
                }
            }
        }
    });
}
exports.sendRequest = sendRequest;
function sleepFor(sleepDurationInSeconds) {
    return new Promise((resolve, reject) => {
        setTimeout(resolve, sleepDurationInSeconds * 1000);
    });
}
exports.sleepFor = sleepFor;
function sendRequestInternal(request) {
    return __awaiter(this, void 0, void 0, function* () {
        core.debug(util.format("[%s]%s", request.method, request.uri));
        var response = yield httpCallbackClient.request(request.method, request.uri, request.body, request.headers);
        return yield toWebResponse(response);
    });
}
function toWebResponse(response) {
    return __awaiter(this, void 0, void 0, function* () {
        var res = new WebResponse();
        if (response) {
            res.statusCode = response.message.statusCode;
            res.statusMessage = response.message.statusMessage;
            res.headers = response.message.headers;
            var body = yield response.readBody();
            if (body) {
                try {
                    res.body = JSON.parse(body);
                }
                catch (error) {
                    core.debug("Could not parse response: " + JSON.stringify(error));
                    core.debug("Response: " + JSON.stringify(res.body));
                    res.body = body;
                }
            }
        }
        return res;
    });
}
@@ -1,98 +0,0 @@
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.getTrafficSplitAPIVersion = exports.downloadKubectl = exports.getStableKubectlVersion = exports.getkubectlDownloadURL = void 0;
const core = require("@actions/core");
const fs = require("fs");
const os = require("os");
const path = require("path");
const toolCache = require("@actions/tool-cache");
const util = require("util");
const httpClient_1 = require("./httpClient");
const kubectlToolName = 'kubectl';
const stableKubectlVersion = 'v1.15.0';
const stableVersionUrl = 'https://storage.googleapis.com/kubernetes-release/release/stable.txt';
const trafficSplitAPIVersionPrefix = 'split.smi-spec.io';
function getExecutableExtension() {
    if (os.type().match(/^Win/)) {
        return '.exe';
    }
    return '';
}
function getKubectlArch() {
    let arch = os.arch();
    if (arch === 'x64') {
        return 'amd64';
    }
    return arch;
}
function getkubectlDownloadURL(version, arch) {
    switch (os.type()) {
        case 'Linux':
            return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/linux/%s/kubectl', version, arch);
        case 'Darwin':
            return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/darwin/%s/kubectl', version, arch);
        case 'Windows_NT':
        default:
            return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/windows/%s/kubectl.exe', version, arch);
    }
}
exports.getkubectlDownloadURL = getkubectlDownloadURL;
function getStableKubectlVersion() {
    return __awaiter(this, void 0, void 0, function* () {
        return toolCache.downloadTool(stableVersionUrl).then((downloadPath) => {
            let version = fs.readFileSync(downloadPath, 'utf8').toString().trim();
            if (!version) {
                version = stableKubectlVersion;
            }
            return version;
        }, (error) => {
            core.debug(error);
            core.warning('GetStableVersionFailed');
            return stableKubectlVersion;
        });
    });
}
exports.getStableKubectlVersion = getStableKubectlVersion;
function downloadKubectl(version) {
    return __awaiter(this, void 0, void 0, function* () {
        let cachedToolpath = toolCache.find(kubectlToolName, version);
        let kubectlDownloadPath = '';
        let arch = getKubectlArch();
        if (!cachedToolpath) {
            try {
                kubectlDownloadPath = yield toolCache.downloadTool(getkubectlDownloadURL(version, arch));
            }
            catch (exception) {
                if (exception instanceof toolCache.HTTPError && exception.httpStatusCode === httpClient_1.StatusCodes.NOT_FOUND) {
                    throw new Error(util.format("Kubectl '%s' for '%s' arch not found.", version, arch));
                }
                else {
                    throw new Error('DownloadKubectlFailed');
                }
            }
            cachedToolpath = yield toolCache.cacheFile(kubectlDownloadPath, kubectlToolName + getExecutableExtension(), kubectlToolName, version);
        }
        const kubectlPath = path.join(cachedToolpath, kubectlToolName + getExecutableExtension());
        fs.chmodSync(kubectlPath, '777');
        return kubectlPath;
    });
}
exports.downloadKubectl = downloadKubectl;
function getTrafficSplitAPIVersion(kubectl) {
    const result = kubectl.executeCommand('api-versions');
    const trafficSplitAPIVersion = result.stdout.split('\n').find(version => version.startsWith(trafficSplitAPIVersionPrefix));
    if (!trafficSplitAPIVersion) {
        throw new Error('UnableToCreateTrafficSplitManifestFile');
    }
    return trafficSplitAPIVersion;
}
exports.getTrafficSplitAPIVersion = getTrafficSplitAPIVersion;
@@ -1,158 +0,0 @@
'use strict';
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.checkPodStatus = exports.checkManifestStability = void 0;
const core = require("@actions/core");
const utils = require("./utility");
const KubernetesConstants = require("../constants");
function checkManifestStability(kubectl, resources) {
    return __awaiter(this, void 0, void 0, function* () {
        let rolloutStatusHasErrors = false;
        const numberOfResources = resources.length;
        for (let i = 0; i < numberOfResources; i++) {
            const resource = resources[i];
            if (KubernetesConstants.workloadTypesWithRolloutStatus.indexOf(resource.type.toLowerCase()) >= 0) {
                try {
                    var result = kubectl.checkRolloutStatus(resource.type, resource.name);
                    utils.checkForErrors([result]);
                }
                catch (ex) {
                    core.error(ex);
                    kubectl.describe(resource.type, resource.name);
                    rolloutStatusHasErrors = true;
                }
            }
            if (utils.isEqual(resource.type, KubernetesConstants.KubernetesWorkload.pod, true)) {
                try {
                    yield checkPodStatus(kubectl, resource.name);
                }
                catch (ex) {
                    core.warning(`CouldNotDeterminePodStatus ${JSON.stringify(ex)}`);
                    kubectl.describe(resource.type, resource.name);
                }
            }
            if (utils.isEqual(resource.type, KubernetesConstants.DiscoveryAndLoadBalancerResource.service, true)) {
                try {
                    const service = getService(kubectl, resource.name);
                    const spec = service.spec;
                    const status = service.status;
                    if (utils.isEqual(spec.type, KubernetesConstants.ServiceTypes.loadBalancer, true)) {
                        if (!isLoadBalancerIPAssigned(status)) {
                            yield waitForServiceExternalIPAssignment(kubectl, resource.name);
                        }
                        else {
                            console.log('ServiceExternalIP', resource.name, status.loadBalancer.ingress[0].ip);
                        }
                    }
                }
                catch (ex) {
                    core.warning(`CouldNotDetermineServiceStatus of: ${resource.name} Error: ${JSON.stringify(ex)}`);
                    kubectl.describe(resource.type, resource.name);
                }
            }
        }
        if (rolloutStatusHasErrors) {
            throw new Error('RolloutStatusTimedout');
        }
    });
}
exports.checkManifestStability = checkManifestStability;
function checkPodStatus(kubectl, podName) {
    return __awaiter(this, void 0, void 0, function* () {
        const sleepTimeout = 10 * 1000; // 10 seconds
        const iterations = 60; // 60 * 10 seconds timeout = 10 minutes max timeout
        let podStatus;
        let kubectlDescribeNeeded = false;
        for (let i = 0; i < iterations; i++) {
            yield utils.sleep(sleepTimeout);
            core.debug(`Polling for pod status: ${podName}`);
            podStatus = getPodStatus(kubectl, podName);
            if (podStatus.phase && podStatus.phase !== 'Pending' && podStatus.phase !== 'Unknown') {
                break;
            }
        }
        podStatus = getPodStatus(kubectl, podName);
        switch (podStatus.phase) {
            case 'Succeeded':
            case 'Running':
                if (isPodReady(podStatus)) {
                    console.log(`pod/${podName} is successfully rolled out`);
                }
                else {
                    kubectlDescribeNeeded = true;
                }
                break;
            case 'Pending':
                if (!isPodReady(podStatus)) {
                    core.warning(`pod/${podName} rollout status check timedout`);
                    kubectlDescribeNeeded = true;
                }
                break;
            case 'Failed':
                core.error(`pod/${podName} rollout failed`);
                kubectlDescribeNeeded = true;
                break;
            default:
                core.warning(`pod/${podName} rollout status: ${podStatus.phase}`);
        }
        if (kubectlDescribeNeeded) {
            kubectl.describe('pod', podName);
        }
    });
}
exports.checkPodStatus = checkPodStatus;
function getPodStatus(kubectl, podName) {
    const podResult = kubectl.getResource('pod', podName);
    utils.checkForErrors([podResult]);
    const podStatus = JSON.parse(podResult.stdout).status;
    core.debug(`Pod Status: ${JSON.stringify(podStatus)}`);
    return podStatus;
}
function isPodReady(podStatus) {
    let allContainersAreReady = true;
    podStatus.containerStatuses.forEach(container => {
        if (container.ready === false) {
            console.log(`'${container.name}' status: ${JSON.stringify(container.state)}`);
            allContainersAreReady = false;
        }
    });
    if (!allContainersAreReady) {
        core.warning('AllContainersNotInReadyState');
    }
    return allContainersAreReady;
}
function getService(kubectl, serviceName) {
    const serviceResult = kubectl.getResource(KubernetesConstants.DiscoveryAndLoadBalancerResource.service, serviceName);
    utils.checkForErrors([serviceResult]);
    return JSON.parse(serviceResult.stdout);
}
function waitForServiceExternalIPAssignment(kubectl, serviceName) {
    return __awaiter(this, void 0, void 0, function* () {
        const sleepTimeout = 10 * 1000; // 10 seconds
        const iterations = 18; // 18 * 10 seconds timeout = 3 minutes max timeout
        for (let i = 0; i < iterations; i++) {
            console.log(`waitForServiceIpAssignment : ${serviceName}`);
            yield utils.sleep(sleepTimeout);
            let status = (getService(kubectl, serviceName)).status;
            if (isLoadBalancerIPAssigned(status)) {
                console.log('ServiceExternalIP', serviceName, status.loadBalancer.ingress[0].ip);
                return;
            }
        }
        core.warning(`waitForServiceIpAssignmentTimedOut ${serviceName}`);
    });
}
function isLoadBalancerIPAssigned(status) {
    if (status && status.loadBalancer && status.loadBalancer.ingress && status.loadBalancer.ingress.length > 0) {
        return true;
    }
    return false;
}
@ -1,283 +0,0 @@
|
|||
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isWorkloadEntity = exports.getUpdatedManifestFiles = exports.updateImagePullSecrets = exports.substituteImageNameInSpecFile = exports.getDeleteCmdArgs = exports.createKubectlArgs = exports.getKubectl = exports.getManifestFiles = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const path = require("path");
|
||||
const kubectlutility = require("./kubectl-util");
|
||||
const io = require("@actions/io");
|
||||
const utility_1 = require("./utility");
|
||||
const fileHelper = require("./files-helper");
|
||||
const KubernetesObjectUtility = require("./resource-object-utility");
|
||||
const TaskInputParameters = require("../input-parameters");
|
||||
function getManifestFiles(manifestFilePaths) {
|
||||
if (!manifestFilePaths) {
|
||||
core.debug('file input is not present');
|
||||
return null;
|
||||
}
|
||||
return manifestFilePaths;
|
||||
}
|
||||
exports.getManifestFiles = getManifestFiles;
|
||||
function getKubectl() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
try {
|
||||
return Promise.resolve(io.which('kubectl', true));
|
||||
}
|
||||
catch (ex) {
|
||||
return kubectlutility.downloadKubectl(yield kubectlutility.getStableKubectlVersion());
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.getKubectl = getKubectl;
|
||||
function createKubectlArgs(kinds, names) {
|
||||
let args = '';
|
||||
if (!!kinds && kinds.size > 0) {
|
||||
args = args + createInlineArray(Array.from(kinds.values()));
|
||||
}
|
||||
if (!!names && names.size > 0) {
|
||||
args = args + ' ' + Array.from(names.values()).join(' ');
|
||||
}
|
||||
return args;
|
||||
}
|
||||
exports.createKubectlArgs = createKubectlArgs;
|
||||
function getDeleteCmdArgs(argsPrefix, inputArgs) {
|
||||
let args = '';
|
||||
if (!!argsPrefix && argsPrefix.length > 0) {
|
||||
args = argsPrefix;
|
||||
}
|
||||
if (!!inputArgs && inputArgs.length > 0) {
|
||||
if (args.length > 0) {
|
||||
args = args + ' ';
|
||||
}
|
||||
args = args + inputArgs;
|
||||
}
|
||||
return args;
|
||||
}
|
||||
exports.getDeleteCmdArgs = getDeleteCmdArgs;
|
||||
/*
|
||||
For example,
|
||||
currentString: `image: "example/example-image"`
|
||||
imageName: `example/example-image`
|
||||
imageNameWithNewTag: `example/example-image:identifiertag`
|
||||
|
||||
This substituteImageNameInSpecFile function would return
|
||||
return Value: `image: "example/example-image:identifiertag"`
|
||||
*/
|
||||
function substituteImageNameInSpecFile(currentString, imageName, imageNameWithNewTag) {
|
||||
if (currentString.indexOf(imageName) < 0) {
|
||||
core.debug(`No occurence of replacement token: ${imageName} found`);
|
||||
return currentString;
|
||||
}
|
||||
return currentString.split('\n').reduce((acc, line) => {
|
||||
const imageKeyword = line.match(/^ *image:/);
|
||||
if (imageKeyword) {
|
||||
let [currentImageName, currentImageTag] = line
|
||||
.substring(imageKeyword[0].length) // consume the line from keyword onwards
|
||||
.trim()
|
||||
.replace(/[',"]/g, '') // replace allowed quotes with nothing
|
||||
.split(':');
|
||||
if (!currentImageTag && currentImageName.indexOf(' ') > 0) {
|
||||
currentImageName = currentImageName.split(' ')[0]; // Stripping off comments
|
||||
}
|
||||
if (currentImageName === imageName) {
|
||||
return acc + `${imageKeyword[0]} ${imageNameWithNewTag}\n`;
|
||||
}
|
||||
}
|
||||
return acc + line + '\n';
|
||||
}, '');
|
||||
}
|
||||
exports.substituteImageNameInSpecFile = substituteImageNameInSpecFile;
|
||||
function createInlineArray(str) {
|
||||
if (typeof str === 'string') {
|
||||
return str;
|
||||
}
|
||||
return str.join(',');
|
||||
}
|
||||
function getImagePullSecrets(inputObject) {
|
||||
if (!inputObject || !inputObject.spec) {
|
||||
return;
|
||||
}
|
||||
if (utility_1.isEqual(inputObject.kind, 'pod')
|
||||
&& inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.imagePullSecrets) {
|
||||
return inputObject.spec.imagePullSecrets;
|
||||
}
|
||||
else if (utility_1.isEqual(inputObject.kind, 'cronjob')
|
||||
&& inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.jobTemplate
|
||||
&& inputObject.spec.jobTemplate.spec
|
||||
&& inputObject.spec.jobTemplate.spec.template
|
||||
&& inputObject.spec.jobTemplate.spec.template.spec
|
||||
&& inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets) {
|
||||
return inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
else if (inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.template
|
||||
&& inputObject.spec.template.spec
|
||||
&& inputObject.spec.template.spec.imagePullSecrets) {
|
||||
return inputObject.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
}
|
||||
function setImagePullSecrets(inputObject, newImagePullSecrets) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
if (utility_1.isEqual(inputObject.kind, 'pod')) {
|
||||
if (inputObject
|
||||
&& inputObject.spec) {
|
||||
if (newImagePullSecrets.length > 0) {
|
||||
inputObject.spec.imagePullSecrets = newImagePullSecrets;
|
||||
}
|
||||
else {
|
||||
delete inputObject.spec.imagePullSecrets;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (utility_1.isEqual(inputObject.kind, 'cronjob')) {
|
||||
if (inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.jobTemplate
|
||||
&& inputObject.spec.jobTemplate.spec
|
||||
&& inputObject.spec.jobTemplate.spec.template
|
||||
&& inputObject.spec.jobTemplate.spec.template.spec) {
|
||||
if (newImagePullSecrets.length > 0) {
|
||||
inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets = newImagePullSecrets;
|
||||
}
|
||||
else {
|
||||
delete inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (!!inputObject.spec.template && !!inputObject.spec.template.spec) {
|
||||
if (inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.template
|
||||
&& inputObject.spec.template.spec) {
|
||||
if (newImagePullSecrets.length > 0) {
|
||||
inputObject.spec.template.spec.imagePullSecrets = newImagePullSecrets;
|
||||
}
|
||||
else {
|
||||
delete inputObject.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
function substituteImageNameInSpecContent(currentString, imageName, imageNameWithNewTag) {
|
||||
if (currentString.indexOf(imageName) < 0) {
|
||||
core.debug(`No occurrence of replacement token: ${imageName} found`);
|
||||
return currentString;
|
||||
}
|
||||
return currentString.split('\n').reduce((acc, line) => {
|
||||
const imageKeyword = line.match(/^ *image:/);
|
||||
if (imageKeyword) {
|
||||
const [currentImageName, currentImageTag] = line
|
||||
.substring(imageKeyword[0].length) // consume the line from keyword onwards
|
||||
.trim()
|
||||
.replace(/[',"]/g, '') // replace allowed quotes with nothing
|
||||
.split(':');
|
||||
if (currentImageName === imageName) {
|
||||
return acc + `${imageKeyword[0]} ${imageNameWithNewTag}\n`;
|
||||
}
|
||||
}
|
||||
return acc + line + '\n';
|
||||
}, '');
|
||||
}
|
||||
function updateContainerImagesInManifestFiles(filePaths, containers) {
|
||||
if (!!containers && containers.length > 0) {
|
||||
const newFilePaths = [];
|
||||
const tempDirectory = fileHelper.getTempDirectory();
|
||||
filePaths.forEach((filePath) => {
|
||||
let contents = fs.readFileSync(filePath).toString();
|
||||
containers.forEach((container) => {
|
||||
let imageName = container.split(':')[0];
|
||||
if (imageName.indexOf('@') > 0) {
|
||||
imageName = imageName.split('@')[0];
|
||||
}
|
||||
if (contents.indexOf(imageName) > 0) {
|
||||
contents = substituteImageNameInSpecFile(contents, imageName, container);
|
||||
}
|
||||
});
|
||||
const fileName = path.join(tempDirectory, path.basename(filePath));
|
||||
fs.writeFileSync(fileName, contents);
|
||||
newFilePaths.push(fileName);
|
||||
});
|
||||
return newFilePaths;
|
||||
}
|
||||
return filePaths;
|
||||
}
|
||||
function updateImagePullSecrets(inputObject, newImagePullSecrets) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
let newImagePullSecretsObjects;
|
||||
if (newImagePullSecrets.length > 0) {
|
||||
newImagePullSecretsObjects = Array.from(newImagePullSecrets, x => { return !!x ? { 'name': x } : null; });
|
||||
}
|
||||
else {
|
||||
newImagePullSecretsObjects = [];
|
||||
}
|
||||
let existingImagePullSecretObjects = getImagePullSecrets(inputObject);
|
||||
if (!existingImagePullSecretObjects) {
|
||||
existingImagePullSecretObjects = new Array();
|
||||
}
|
||||
existingImagePullSecretObjects = existingImagePullSecretObjects.concat(newImagePullSecretsObjects);
|
||||
setImagePullSecrets(inputObject, existingImagePullSecretObjects);
|
||||
}
|
||||
exports.updateImagePullSecrets = updateImagePullSecrets;
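// A minimal sketch (hypothetical pod manifest object): if inputObject.kind is 'Pod' and
// inputObject.spec.imagePullSecrets is [{ name: 'existing' }], then
//   updateImagePullSecrets(inputObject, ['acr-secret']);
// appends the new secret as an object reference, leaving spec.imagePullSecrets as
// [{ name: 'existing' }, { name: 'acr-secret' }].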
|
||||
function updateImagePullSecretsInManifestFiles(filePaths, imagePullSecrets) {
|
||||
if (!!imagePullSecrets && imagePullSecrets.length > 0) {
|
||||
const newObjectsList = [];
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath).toString();
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
if (!!inputObject && !!inputObject.kind) {
|
||||
const kind = inputObject.kind;
|
||||
if (KubernetesObjectUtility.isWorkloadEntity(kind)) {
|
||||
KubernetesObjectUtility.updateImagePullSecrets(inputObject, imagePullSecrets, false);
|
||||
}
|
||||
newObjectsList.push(inputObject);
|
||||
}
|
||||
});
|
||||
});
|
||||
core.debug('New K8s objects after adding imagePullSecrets are: ' + JSON.stringify(newObjectsList));
|
||||
const newFilePaths = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
return newFilePaths;
|
||||
}
|
||||
return filePaths;
|
||||
}
|
||||
function getUpdatedManifestFiles(manifestFilePaths) {
|
||||
let inputManifestFiles = getManifestFiles(manifestFilePaths);
|
||||
if (!inputManifestFiles || inputManifestFiles.length === 0) {
|
||||
throw new Error(`ManifestFileNotFound : ${manifestFilePaths}`);
|
||||
}
|
||||
// artifact substitution
|
||||
inputManifestFiles = updateContainerImagesInManifestFiles(inputManifestFiles, TaskInputParameters.containers);
|
||||
// imagePullSecrets addition
|
||||
inputManifestFiles = updateImagePullSecretsInManifestFiles(inputManifestFiles, TaskInputParameters.imagePullSecrets);
|
||||
return inputManifestFiles;
|
||||
}
|
||||
exports.getUpdatedManifestFiles = getUpdatedManifestFiles;
|
||||
const workloadTypes = ['deployment', 'replicaset', 'daemonset', 'pod', 'statefulset', 'job', 'cronjob'];
|
||||
function isWorkloadEntity(kind) {
|
||||
if (!kind) {
|
||||
core.debug('ResourceKindNotDefined');
|
||||
return false;
|
||||
}
|
||||
return workloadTypes.some((type) => {
|
||||
return utility_1.isEqual(type, kind);
|
||||
});
|
||||
}
|
||||
exports.isWorkloadEntity = isWorkloadEntity;
|
|
@@ -1,277 +0,0 @@
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getResources = exports.updateSelectorLabels = exports.updateSpecLabels = exports.updateImagePullSecrets = exports.updateObjectAnnotations = exports.updateObjectLabels = exports.getReplicaCount = exports.isIngressEntity = exports.isServiceEntity = exports.isWorkloadEntity = exports.isDeploymentEntity = void 0;
|
||||
const fs = require("fs");
|
||||
const core = require("@actions/core");
|
||||
const yaml = require("js-yaml");
|
||||
const constants_1 = require("../constants");
|
||||
const string_comparison_1 = require("./string-comparison");
|
||||
const INGRESS = "Ingress";
|
||||
function isDeploymentEntity(kind) {
|
||||
if (!kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
return constants_1.deploymentTypes.some((type) => {
|
||||
return string_comparison_1.isEqual(type, kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
});
|
||||
}
|
||||
exports.isDeploymentEntity = isDeploymentEntity;
|
||||
function isWorkloadEntity(kind) {
|
||||
if (!kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
return constants_1.workloadTypes.some((type) => {
|
||||
return string_comparison_1.isEqual(type, kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
});
|
||||
}
|
||||
exports.isWorkloadEntity = isWorkloadEntity;
|
||||
function isServiceEntity(kind) {
|
||||
if (!kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
return string_comparison_1.isEqual("Service", kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
}
|
||||
exports.isServiceEntity = isServiceEntity;
|
||||
function isIngressEntity(kind) {
|
||||
if (!kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
return string_comparison_1.isEqual(INGRESS, kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
}
|
||||
exports.isIngressEntity = isIngressEntity;
|
||||
function getReplicaCount(inputObject) {
|
||||
if (!inputObject) {
|
||||
throw ('NullInputObject');
|
||||
}
|
||||
if (!inputObject.kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
const kind = inputObject.kind;
|
||||
if (!string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase) && !string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.daemonSet, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return inputObject.spec.replicas;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
exports.getReplicaCount = getReplicaCount;
|
||||
function updateObjectLabels(inputObject, newLabels, override) {
|
||||
if (!inputObject) {
|
||||
throw ('NullInputObject');
|
||||
}
|
||||
if (!inputObject.metadata) {
|
||||
throw ('NullInputObjectMetadata');
|
||||
}
|
||||
if (!newLabels) {
|
||||
return;
|
||||
}
|
||||
if (override) {
|
||||
inputObject.metadata.labels = newLabels;
|
||||
}
|
||||
else {
|
||||
let existingLabels = inputObject.metadata.labels;
|
||||
if (!existingLabels) {
|
||||
existingLabels = new Map();
|
||||
}
|
||||
Object.keys(newLabels).forEach(function (key) {
|
||||
existingLabels[key] = newLabels[key];
|
||||
});
|
||||
inputObject.metadata.labels = existingLabels;
|
||||
}
|
||||
}
|
||||
exports.updateObjectLabels = updateObjectLabels;
|
||||
function updateObjectAnnotations(inputObject, newAnnotations, override) {
|
||||
if (!inputObject) {
|
||||
throw ('NullInputObject');
|
||||
}
|
||||
if (!inputObject.metadata) {
|
||||
throw ('NullInputObjectMetadata');
|
||||
}
|
||||
if (!newAnnotations) {
|
||||
return;
|
||||
}
|
||||
if (override) {
|
||||
inputObject.metadata.annotations = newAnnotations;
|
||||
}
|
||||
else {
|
||||
let existingAnnotations = inputObject.metadata.annotations;
|
||||
if (!existingAnnotations) {
|
||||
existingAnnotations = new Map();
|
||||
}
|
||||
Object.keys(newAnnotations).forEach(function (key) {
|
||||
existingAnnotations[key] = newAnnotations[key];
|
||||
});
|
||||
inputObject.metadata.annotations = existingAnnotations;
|
||||
}
|
||||
}
|
||||
exports.updateObjectAnnotations = updateObjectAnnotations;
|
||||
function updateImagePullSecrets(inputObject, newImagePullSecrets, override) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
const newImagePullSecretsObjects = Array.from(newImagePullSecrets, x => { return { 'name': x }; });
|
||||
let existingImagePullSecretObjects = getImagePullSecrets(inputObject);
|
||||
if (override) {
|
||||
existingImagePullSecretObjects = newImagePullSecretsObjects;
|
||||
}
|
||||
else {
|
||||
if (!existingImagePullSecretObjects) {
|
||||
existingImagePullSecretObjects = new Array();
|
||||
}
|
||||
existingImagePullSecretObjects = existingImagePullSecretObjects.concat(newImagePullSecretsObjects);
|
||||
}
|
||||
setImagePullSecrets(inputObject, existingImagePullSecretObjects);
|
||||
}
|
||||
exports.updateImagePullSecrets = updateImagePullSecrets;
|
||||
function updateSpecLabels(inputObject, newLabels, override) {
|
||||
if (!inputObject) {
|
||||
throw ('NullInputObject');
|
||||
}
|
||||
if (!inputObject.kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
if (!newLabels) {
|
||||
return;
|
||||
}
|
||||
let existingLabels = getSpecLabels(inputObject);
|
||||
if (override) {
|
||||
existingLabels = newLabels;
|
||||
}
|
||||
else {
|
||||
if (!existingLabels) {
|
||||
existingLabels = new Map();
|
||||
}
|
||||
Object.keys(newLabels).forEach(function (key) {
|
||||
existingLabels[key] = newLabels[key];
|
||||
});
|
||||
}
|
||||
setSpecLabels(inputObject, existingLabels);
|
||||
}
|
||||
exports.updateSpecLabels = updateSpecLabels;
|
||||
function updateSelectorLabels(inputObject, newLabels, override) {
|
||||
if (!inputObject) {
|
||||
throw ('NullInputObject');
|
||||
}
|
||||
if (!inputObject.kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
if (!newLabels) {
|
||||
return;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return;
|
||||
}
|
||||
let existingLabels = getSpecSelectorLabels(inputObject);
|
||||
if (override) {
|
||||
existingLabels = newLabels;
|
||||
}
|
||||
else {
|
||||
if (!existingLabels) {
|
||||
existingLabels = new Map();
|
||||
}
|
||||
Object.keys(newLabels).forEach(function (key) {
|
||||
existingLabels[key] = newLabels[key];
|
||||
});
|
||||
}
|
||||
setSpecSelectorLabels(inputObject, existingLabels);
|
||||
}
|
||||
exports.updateSelectorLabels = updateSelectorLabels;
|
||||
function getResources(filePaths, filterResourceTypes) {
|
||||
if (!filePaths) {
|
||||
return [];
|
||||
}
|
||||
const resources = [];
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
const inputObjectKind = inputObject ? inputObject.kind : '';
|
||||
if (filterResourceTypes.filter(type => string_comparison_1.isEqual(inputObjectKind, type, string_comparison_1.StringComparer.OrdinalIgnoreCase)).length > 0) {
|
||||
const resource = {
|
||||
type: inputObject.kind,
|
||||
name: inputObject.metadata.name
|
||||
};
|
||||
resources.push(resource);
|
||||
}
|
||||
});
|
||||
});
|
||||
return resources;
|
||||
}
|
||||
exports.getResources = getResources;
|
||||
function getSpecLabels(inputObject) {
|
||||
if (!inputObject) {
|
||||
return null;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return inputObject.metadata.labels;
|
||||
}
|
||||
if (!!inputObject.spec && !!inputObject.spec.template && !!inputObject.spec.template.metadata) {
|
||||
return inputObject.spec.template.metadata.labels;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
function getImagePullSecrets(inputObject) {
|
||||
if (!inputObject || !inputObject.spec) {
|
||||
return null;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.cronjob, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
try {
|
||||
return inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug(`Fetching imagePullSecrets failed due to this error: ${JSON.stringify(ex)}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return inputObject.spec.imagePullSecrets;
|
||||
}
|
||||
if (!!inputObject.spec.template && !!inputObject.spec.template.spec) {
|
||||
return inputObject.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
function setImagePullSecrets(inputObject, newImagePullSecrets) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
inputObject.spec.imagePullSecrets = newImagePullSecrets;
|
||||
return;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.cronjob, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
try {
|
||||
inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets = newImagePullSecrets;
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug(`Overriding imagePullSecrets failed due to this error: ${JSON.stringify(ex)}`);
|
||||
//Do nothing
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (!!inputObject.spec.template && !!inputObject.spec.template.spec) {
|
||||
inputObject.spec.template.spec.imagePullSecrets = newImagePullSecrets;
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
function setSpecLabels(inputObject, newLabels) {
    if (!inputObject || !newLabels) {
        return;
    }
    // write the labels back onto the object, mirroring the locations that getSpecLabels reads from
    if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        inputObject.metadata.labels = newLabels;
    }
    else if (!!inputObject.spec && !!inputObject.spec.template && !!inputObject.spec.template.metadata) {
        inputObject.spec.template.metadata.labels = newLabels;
    }
}
|
||||
function getSpecSelectorLabels(inputObject) {
|
||||
if (!!inputObject && !!inputObject.spec && !!inputObject.spec.selector) {
|
||||
if (isServiceEntity(inputObject.kind)) {
|
||||
return inputObject.spec.selector;
|
||||
}
|
||||
else {
|
||||
return inputObject.spec.selector.matchLabels;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
function setSpecSelectorLabels(inputObject, newLabels) {
    if (!inputObject || !inputObject.spec || !inputObject.spec.selector || !newLabels) {
        return;
    }
    // write the selector labels back onto the object, mirroring getSpecSelectorLabels
    if (isServiceEntity(inputObject.kind)) {
        inputObject.spec.selector = newLabels;
    }
    else {
        inputObject.spec.selector.matchLabels = newLabels;
    }
}
|
|
@@ -1,305 +0,0 @@
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.fetchResource = exports.isServiceSelectorSubsetOfMatchLabel = exports.getServiceSelector = exports.getDeploymentMatchLabels = exports.getBlueGreenResourceName = exports.addBlueGreenLabelsAndAnnotations = exports.getNewBlueGreenObject = exports.createWorkloadsWithLabel = exports.isServiceRouted = exports.getManifestObjects = exports.getSuffix = exports.deleteObjects = exports.deleteWorkloadsAndServicesWithLabel = exports.deleteWorkloadsWithLabel = exports.routeBlueGreen = exports.isSMIRoute = exports.isIngressRoute = exports.isBlueGreenDeploymentStrategy = exports.STABLE_SUFFIX = exports.GREEN_SUFFIX = exports.BLUE_GREEN_VERSION_LABEL = exports.NONE_LABEL_VALUE = exports.GREEN_LABEL_VALUE = exports.BLUE_GREEN_DEPLOYMENT_STRATEGY = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const utility_1 = require("../utility");
|
||||
const constants_1 = require("../../constants");
|
||||
const fileHelper = require("../files-helper");
|
||||
const helper = require("../resource-object-utility");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const service_blue_green_helper_1 = require("./service-blue-green-helper");
|
||||
const ingress_blue_green_helper_1 = require("./ingress-blue-green-helper");
|
||||
const smi_blue_green_helper_1 = require("./smi-blue-green-helper");
|
||||
exports.BLUE_GREEN_DEPLOYMENT_STRATEGY = 'BLUE-GREEN';
|
||||
exports.GREEN_LABEL_VALUE = 'green';
|
||||
exports.NONE_LABEL_VALUE = 'None';
|
||||
exports.BLUE_GREEN_VERSION_LABEL = 'k8s.deploy.color';
|
||||
exports.GREEN_SUFFIX = '-green';
|
||||
exports.STABLE_SUFFIX = '-stable';
|
||||
const INGRESS_ROUTE = 'INGRESS';
|
||||
const SMI_ROUTE = 'SMI';
|
||||
function isBlueGreenDeploymentStrategy() {
|
||||
const deploymentStrategy = TaskInputParameters.deploymentStrategy;
|
||||
return deploymentStrategy && deploymentStrategy.toUpperCase() === exports.BLUE_GREEN_DEPLOYMENT_STRATEGY;
|
||||
}
|
||||
exports.isBlueGreenDeploymentStrategy = isBlueGreenDeploymentStrategy;
|
||||
function isIngressRoute() {
|
||||
const routeMethod = TaskInputParameters.routeMethod;
|
||||
return routeMethod && routeMethod.toUpperCase() === INGRESS_ROUTE;
|
||||
}
|
||||
exports.isIngressRoute = isIngressRoute;
|
||||
function isSMIRoute() {
|
||||
const routeMethod = TaskInputParameters.routeMethod;
|
||||
return routeMethod && routeMethod.toUpperCase() === SMI_ROUTE;
|
||||
}
|
||||
exports.isSMIRoute = isSMIRoute;
|
||||
function routeBlueGreen(kubectl, inputManifestFiles) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// get buffer time
|
||||
let bufferTime = parseInt(TaskInputParameters.versionSwitchBuffer);
|
||||
//logging start of buffer time
|
||||
let dateNow = new Date();
|
||||
console.log(`Starting buffer time of ${bufferTime} minute(s) at ${dateNow.toISOString()}`);
|
||||
// waiting
|
||||
yield utility_1.sleep(bufferTime * 1000 * 60);
|
||||
// logging end of buffer time
|
||||
dateNow = new Date();
|
||||
console.log(`Stopping buffer time of ${bufferTime} minute(s) at ${dateNow.toISOString()}`);
|
||||
const manifestObjects = getManifestObjects(inputManifestFiles);
|
||||
// routing to new deployments
|
||||
if (isIngressRoute()) {
|
||||
ingress_blue_green_helper_1.routeBlueGreenIngress(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceNameMap, manifestObjects.ingressEntityList);
|
||||
}
|
||||
else if (isSMIRoute()) {
|
||||
smi_blue_green_helper_1.routeBlueGreenSMI(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceEntityList);
|
||||
}
|
||||
else {
|
||||
service_blue_green_helper_1.routeBlueGreenService(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceEntityList);
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.routeBlueGreen = routeBlueGreen;
|
||||
function deleteWorkloadsWithLabel(kubectl, deleteLabel, deploymentEntityList) {
|
||||
let resourcesToDelete = [];
|
||||
deploymentEntityList.forEach((inputObject) => {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (deleteLabel === exports.NONE_LABEL_VALUE) {
|
||||
// if deleteLabel is none, delete the stable deployments
|
||||
const resourceToDelete = { name: name, kind: kind };
|
||||
resourcesToDelete.push(resourceToDelete);
|
||||
}
|
||||
else {
|
||||
// otherwise, delete the new green deployments
|
||||
const resourceToDelete = { name: getBlueGreenResourceName(name, exports.GREEN_SUFFIX), kind: kind };
|
||||
resourcesToDelete.push(resourceToDelete);
|
||||
}
|
||||
});
|
||||
// deletes the deployments
|
||||
deleteObjects(kubectl, resourcesToDelete);
|
||||
}
|
||||
exports.deleteWorkloadsWithLabel = deleteWorkloadsWithLabel;
|
||||
function deleteWorkloadsAndServicesWithLabel(kubectl, deleteLabel, deploymentEntityList, serviceEntityList) {
|
||||
// need to delete services and deployments
|
||||
const deletionEntitiesList = deploymentEntityList.concat(serviceEntityList);
|
||||
let resourcesToDelete = [];
|
||||
deletionEntitiesList.forEach((inputObject) => {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (deleteLabel === exports.NONE_LABEL_VALUE) {
|
||||
// if deleteLabel is none, delete the stable objects
|
||||
const resourceToDelete = { name: name, kind: kind };
|
||||
resourcesToDelete.push(resourceToDelete);
|
||||
}
|
||||
else {
|
||||
// otherwise, delete the green-labelled objects
|
||||
const resourceToDelete = { name: getBlueGreenResourceName(name, exports.GREEN_SUFFIX), kind: kind };
|
||||
resourcesToDelete.push(resourceToDelete);
|
||||
}
|
||||
});
|
||||
deleteObjects(kubectl, resourcesToDelete);
|
||||
}
|
||||
exports.deleteWorkloadsAndServicesWithLabel = deleteWorkloadsAndServicesWithLabel;
|
||||
function deleteObjects(kubectl, deleteList) {
|
||||
// delete services and deployments
|
||||
deleteList.forEach((delObject) => {
|
||||
try {
|
||||
const result = kubectl.delete([delObject.kind, delObject.name]);
|
||||
utility_1.checkForErrors([result]);
|
||||
}
|
||||
catch (ex) {
|
||||
// Ignore delete failures if the resource doesn't exist
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.deleteObjects = deleteObjects;
|
||||
function getSuffix(label) {
|
||||
if (label === exports.GREEN_LABEL_VALUE) {
|
||||
return exports.GREEN_SUFFIX;
|
||||
}
|
||||
else {
|
||||
return '';
|
||||
}
|
||||
}
|
||||
exports.getSuffix = getSuffix;
|
||||
// other common functions
|
||||
function getManifestObjects(filePaths) {
|
||||
const deploymentEntityList = [];
|
||||
const routedServiceEntityList = [];
|
||||
const unroutedServiceEntityList = [];
|
||||
const ingressEntityList = [];
|
||||
const otherEntitiesList = [];
|
||||
let serviceNameMap = new Map();
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
if (!!inputObject) {
|
||||
const kind = inputObject.kind;
|
||||
const name = inputObject.metadata.name;
|
||||
if (helper.isDeploymentEntity(kind)) {
|
||||
deploymentEntityList.push(inputObject);
|
||||
}
|
||||
else if (helper.isServiceEntity(kind)) {
|
||||
if (isServiceRouted(inputObject, deploymentEntityList)) {
|
||||
routedServiceEntityList.push(inputObject);
|
||||
serviceNameMap.set(name, getBlueGreenResourceName(name, exports.GREEN_SUFFIX));
|
||||
}
|
||||
else {
|
||||
unroutedServiceEntityList.push(inputObject);
|
||||
}
|
||||
}
|
||||
else if (helper.isIngressEntity(kind)) {
|
||||
ingressEntityList.push(inputObject);
|
||||
}
|
||||
else {
|
||||
otherEntitiesList.push(inputObject);
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
return { serviceEntityList: routedServiceEntityList, serviceNameMap: serviceNameMap, unroutedServiceEntityList: unroutedServiceEntityList, deploymentEntityList: deploymentEntityList, ingressEntityList: ingressEntityList, otherObjects: otherEntitiesList };
|
||||
}
|
||||
exports.getManifestObjects = getManifestObjects;
|
||||
function isServiceRouted(serviceObject, deploymentEntityList) {
|
||||
let shouldBeRouted = false;
|
||||
const serviceSelector = getServiceSelector(serviceObject);
|
||||
if (!!serviceSelector) {
|
||||
if (deploymentEntityList.some((depObject) => {
|
||||
// checking whether the given manifests contain a deployment that this service targets
|
||||
const matchLabels = getDeploymentMatchLabels(depObject);
|
||||
return (!!matchLabels && isServiceSelectorSubsetOfMatchLabel(serviceSelector, matchLabels));
|
||||
})) {
|
||||
shouldBeRouted = true;
|
||||
}
|
||||
}
|
||||
return shouldBeRouted;
|
||||
}
|
||||
exports.isServiceRouted = isServiceRouted;
|
||||
function createWorkloadsWithLabel(kubectl, deploymentObjectList, nextLabel) {
|
||||
const newObjectsList = [];
|
||||
deploymentObjectList.forEach((inputObject) => {
|
||||
// creating deployment with label
|
||||
const newBlueGreenObject = getNewBlueGreenObject(inputObject, nextLabel);
|
||||
core.debug('New blue-green object is: ' + JSON.stringify(newBlueGreenObject));
|
||||
newObjectsList.push(newBlueGreenObject);
|
||||
});
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
const result = kubectl.apply(manifestFiles);
|
||||
return { 'result': result, 'newFilePaths': manifestFiles };
|
||||
}
|
||||
exports.createWorkloadsWithLabel = createWorkloadsWithLabel;
|
||||
function getNewBlueGreenObject(inputObject, labelValue) {
|
||||
const newObject = JSON.parse(JSON.stringify(inputObject));
|
||||
// Updating the name only if the green label is given
|
||||
if (labelValue === exports.GREEN_LABEL_VALUE) {
|
||||
newObject.metadata.name = getBlueGreenResourceName(inputObject.metadata.name, exports.GREEN_SUFFIX);
|
||||
}
|
||||
// Adding labels and annotations
|
||||
addBlueGreenLabelsAndAnnotations(newObject, labelValue);
|
||||
return newObject;
|
||||
}
|
||||
exports.getNewBlueGreenObject = getNewBlueGreenObject;
|
||||
function addBlueGreenLabelsAndAnnotations(inputObject, labelValue) {
|
||||
//creating the k8s.deploy.color label
|
||||
const newLabels = new Map();
|
||||
newLabels[exports.BLUE_GREEN_VERSION_LABEL] = labelValue;
|
||||
// updating object labels and selector labels
|
||||
helper.updateObjectLabels(inputObject, newLabels, false);
|
||||
helper.updateSelectorLabels(inputObject, newLabels, false);
|
||||
// updating spec labels unless the object is a service
|
||||
if (!helper.isServiceEntity(inputObject.kind)) {
|
||||
helper.updateSpecLabels(inputObject, newLabels, false);
|
||||
}
|
||||
}
|
||||
exports.addBlueGreenLabelsAndAnnotations = addBlueGreenLabelsAndAnnotations;
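// A minimal sketch (hypothetical deployment object): addBlueGreenLabelsAndAnnotations(newObject, 'green')
// stamps { 'k8s.deploy.color': 'green' } onto the object's metadata labels and selector labels
// (and onto the pod template labels for non-service workloads), which is the label the routing
// and promotion steps later use to find the green copies.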
|
||||
function getBlueGreenResourceName(name, suffix) {
|
||||
return `${name}${suffix}`;
|
||||
}
|
||||
exports.getBlueGreenResourceName = getBlueGreenResourceName;
|
||||
function getDeploymentMatchLabels(deploymentObject) {
|
||||
if (!!deploymentObject && deploymentObject.kind.toUpperCase() == constants_1.KubernetesWorkload.pod.toUpperCase() && !!deploymentObject.metadata && !!deploymentObject.metadata.labels) {
|
||||
return deploymentObject.metadata.labels;
|
||||
}
|
||||
else if (!!deploymentObject && deploymentObject.spec && deploymentObject.spec.selector && deploymentObject.spec.selector.matchLabels) {
|
||||
return deploymentObject.spec.selector.matchLabels;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
exports.getDeploymentMatchLabels = getDeploymentMatchLabels;
|
||||
function getServiceSelector(serviceObject) {
|
||||
if (!!serviceObject && serviceObject.spec && serviceObject.spec.selector) {
|
||||
return serviceObject.spec.selector;
|
||||
}
|
||||
else
|
||||
return null;
|
||||
}
|
||||
exports.getServiceSelector = getServiceSelector;
|
||||
function isServiceSelectorSubsetOfMatchLabel(serviceSelector, matchLabels) {
|
||||
let serviceSelectorMap = new Map();
|
||||
let matchLabelsMap = new Map();
|
||||
JSON.parse(JSON.stringify(serviceSelector), (key, value) => {
|
||||
serviceSelectorMap.set(key, value);
|
||||
});
|
||||
JSON.parse(JSON.stringify(matchLabels), (key, value) => {
|
||||
matchLabelsMap.set(key, value);
|
||||
});
|
||||
let isMatch = true;
|
||||
serviceSelectorMap.forEach((value, key) => {
|
||||
if (!!key && (!matchLabelsMap.has(key) || matchLabelsMap.get(key) != value)) {
|
||||
isMatch = false;
|
||||
}
|
||||
});
|
||||
return isMatch;
|
||||
}
|
||||
exports.isServiceSelectorSubsetOfMatchLabel = isServiceSelectorSubsetOfMatchLabel;
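// A minimal sketch (hypothetical label sets): a service selector of { app: 'my-app' } is a subset of
// matchLabels { app: 'my-app', tier: 'frontend' }, so isServiceSelectorSubsetOfMatchLabel returns true;
// it returns false when any selector key is missing from matchLabels or maps to a different value.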
|
||||
function fetchResource(kubectl, kind, name) {
|
||||
const result = kubectl.getResource(kind, name);
|
||||
if (result == null || !!result.stderr) {
|
||||
return null;
|
||||
}
|
||||
if (!!result.stdout) {
|
||||
const resource = JSON.parse(result.stdout);
|
||||
try {
|
||||
UnsetsClusterSpecficDetails(resource);
|
||||
return resource;
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug('Exception occurred while parsing ' + resource + ' into a JSON object');
|
||||
core.debug(`Exception:${ex}`);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
exports.fetchResource = fetchResource;
|
||||
function UnsetsClusterSpecficDetails(resource) {
|
||||
if (resource == null) {
|
||||
return;
|
||||
}
|
||||
// Unsets the cluster specific details in the object
|
||||
if (!!resource) {
|
||||
const metadata = resource.metadata;
|
||||
const status = resource.status;
|
||||
if (!!metadata) {
|
||||
const newMetadata = {
|
||||
'annotations': metadata.annotations,
|
||||
'labels': metadata.labels,
|
||||
'name': metadata.name
|
||||
};
|
||||
resource.metadata = newMetadata;
|
||||
}
|
||||
if (!!status) {
|
||||
resource.status = {};
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,187 +0,0 @@
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getStableResourceName = exports.getBaselineResourceName = exports.getCanaryResourceName = exports.isSMICanaryStrategy = exports.isCanaryDeploymentStrategy = exports.fetchResource = exports.fetchCanaryResource = exports.getNewCanaryResource = exports.getNewBaselineResource = exports.getStableResource = exports.isResourceMarkedAsStable = exports.markResourceAsStable = exports.deleteCanaryDeployment = exports.STABLE_LABEL_VALUE = exports.STABLE_SUFFIX = exports.CANARY_LABEL_VALUE = exports.BASELINE_LABEL_VALUE = exports.CANARY_VERSION_LABEL = exports.TRAFFIC_SPLIT_STRATEGY = exports.CANARY_DEPLOYMENT_STRATEGY = void 0;
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const core = require("@actions/core");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const helper = require("../resource-object-utility");
|
||||
const constants_1 = require("../../constants");
|
||||
const string_comparison_1 = require("../string-comparison");
|
||||
const utility_1 = require("../utility");
|
||||
const utils = require("../manifest-utilities");
|
||||
exports.CANARY_DEPLOYMENT_STRATEGY = 'CANARY';
|
||||
exports.TRAFFIC_SPLIT_STRATEGY = 'SMI';
|
||||
exports.CANARY_VERSION_LABEL = 'workflow/version';
|
||||
const BASELINE_SUFFIX = '-baseline';
|
||||
exports.BASELINE_LABEL_VALUE = 'baseline';
|
||||
const CANARY_SUFFIX = '-canary';
|
||||
exports.CANARY_LABEL_VALUE = 'canary';
|
||||
exports.STABLE_SUFFIX = '-stable';
|
||||
exports.STABLE_LABEL_VALUE = 'stable';
|
||||
function deleteCanaryDeployment(kubectl, manifestFilePaths, includeServices) {
|
||||
// get manifest files
|
||||
const inputManifestFiles = utils.getManifestFiles(manifestFilePaths);
|
||||
if (inputManifestFiles == null || inputManifestFiles.length == 0) {
|
||||
throw new Error('ManifestFileNotFound');
|
||||
}
|
||||
// delete the canary and baseline resources created by previous deployments
|
||||
cleanUpCanary(kubectl, inputManifestFiles, includeServices);
|
||||
}
|
||||
exports.deleteCanaryDeployment = deleteCanaryDeployment;
|
||||
function markResourceAsStable(inputObject) {
|
||||
if (isResourceMarkedAsStable(inputObject)) {
|
||||
return inputObject;
|
||||
}
|
||||
const newObject = JSON.parse(JSON.stringify(inputObject));
|
||||
// Adding labels and annotations.
|
||||
addCanaryLabelsAndAnnotations(newObject, exports.STABLE_LABEL_VALUE);
|
||||
core.debug("Added stable label: " + JSON.stringify(newObject));
|
||||
return newObject;
|
||||
}
|
||||
exports.markResourceAsStable = markResourceAsStable;
|
||||
function isResourceMarkedAsStable(inputObject) {
|
||||
return inputObject &&
|
||||
inputObject.metadata &&
|
||||
inputObject.metadata.labels &&
|
||||
inputObject.metadata.labels[exports.CANARY_VERSION_LABEL] == exports.STABLE_LABEL_VALUE;
|
||||
}
|
||||
exports.isResourceMarkedAsStable = isResourceMarkedAsStable;
|
||||
function getStableResource(inputObject) {
|
||||
var replicaCount = isSpecContainsReplicas(inputObject.kind) ? inputObject.spec.replicas : 0;
|
||||
return getNewCanaryObject(inputObject, replicaCount, exports.STABLE_LABEL_VALUE);
|
||||
}
|
||||
exports.getStableResource = getStableResource;
|
||||
function getNewBaselineResource(stableObject, replicas) {
|
||||
return getNewCanaryObject(stableObject, replicas, exports.BASELINE_LABEL_VALUE);
|
||||
}
|
||||
exports.getNewBaselineResource = getNewBaselineResource;
|
||||
function getNewCanaryResource(inputObject, replicas) {
|
||||
return getNewCanaryObject(inputObject, replicas, exports.CANARY_LABEL_VALUE);
|
||||
}
|
||||
exports.getNewCanaryResource = getNewCanaryResource;
|
||||
function fetchCanaryResource(kubectl, kind, name) {
|
||||
return fetchResource(kubectl, kind, getCanaryResourceName(name));
|
||||
}
|
||||
exports.fetchCanaryResource = fetchCanaryResource;
|
||||
function fetchResource(kubectl, kind, name) {
|
||||
const result = kubectl.getResource(kind, name);
|
||||
if (result == null || !!result.stderr) {
|
||||
return null;
|
||||
}
|
||||
if (!!result.stdout) {
|
||||
const resource = JSON.parse(result.stdout);
|
||||
try {
|
||||
UnsetsClusterSpecficDetails(resource);
|
||||
return resource;
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug('Exception occurred while parsing ' + resource + ' into a JSON object');
|
||||
core.debug(`Exception:${ex}`);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
exports.fetchResource = fetchResource;
|
||||
function isCanaryDeploymentStrategy() {
|
||||
const deploymentStrategy = TaskInputParameters.deploymentStrategy;
|
||||
return deploymentStrategy && deploymentStrategy.toUpperCase() === exports.CANARY_DEPLOYMENT_STRATEGY;
|
||||
}
|
||||
exports.isCanaryDeploymentStrategy = isCanaryDeploymentStrategy;
|
||||
function isSMICanaryStrategy() {
|
||||
const deploymentStrategy = TaskInputParameters.trafficSplitMethod;
|
||||
return isCanaryDeploymentStrategy() && deploymentStrategy && deploymentStrategy.toUpperCase() === exports.TRAFFIC_SPLIT_STRATEGY;
|
||||
}
|
||||
exports.isSMICanaryStrategy = isSMICanaryStrategy;
|
||||
function getCanaryResourceName(name) {
|
||||
return name + CANARY_SUFFIX;
|
||||
}
|
||||
exports.getCanaryResourceName = getCanaryResourceName;
|
||||
function getBaselineResourceName(name) {
|
||||
return name + BASELINE_SUFFIX;
|
||||
}
|
||||
exports.getBaselineResourceName = getBaselineResourceName;
|
||||
function getStableResourceName(name) {
|
||||
return name + exports.STABLE_SUFFIX;
|
||||
}
|
||||
exports.getStableResourceName = getStableResourceName;
|
||||
function UnsetsClusterSpecficDetails(resource) {
|
||||
if (resource == null) {
|
||||
return;
|
||||
}
|
||||
// Unsets the cluster specific details in the object
|
||||
if (!!resource) {
|
||||
const metadata = resource.metadata;
|
||||
const status = resource.status;
|
||||
if (!!metadata) {
|
||||
const newMetadata = {
|
||||
'annotations': metadata.annotations,
|
||||
'labels': metadata.labels,
|
||||
'name': metadata.name
|
||||
};
|
||||
resource.metadata = newMetadata;
|
||||
}
|
||||
if (!!status) {
|
||||
resource.status = {};
|
||||
}
|
||||
}
|
||||
}
|
||||
function getNewCanaryObject(inputObject, replicas, type) {
|
||||
const newObject = JSON.parse(JSON.stringify(inputObject));
|
||||
// Updating name
|
||||
if (type === exports.CANARY_LABEL_VALUE) {
|
||||
newObject.metadata.name = getCanaryResourceName(inputObject.metadata.name);
|
||||
}
|
||||
else if (type === exports.STABLE_LABEL_VALUE) {
|
||||
newObject.metadata.name = getStableResourceName(inputObject.metadata.name);
|
||||
}
|
||||
else {
|
||||
newObject.metadata.name = getBaselineResourceName(inputObject.metadata.name);
|
||||
}
|
||||
// Adding labels and annotations.
|
||||
addCanaryLabelsAndAnnotations(newObject, type);
|
||||
// Updating no. of replicas
|
||||
if (isSpecContainsReplicas(newObject.kind)) {
|
||||
newObject.spec.replicas = replicas;
|
||||
}
|
||||
return newObject;
|
||||
}
|
||||
function isSpecContainsReplicas(kind) {
|
||||
return !string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase) &&
|
||||
!string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.daemonSet, string_comparison_1.StringComparer.OrdinalIgnoreCase) &&
|
||||
!helper.isServiceEntity(kind);
|
||||
}
|
||||
function addCanaryLabelsAndAnnotations(inputObject, type) {
|
||||
const newLabels = new Map();
|
||||
newLabels[exports.CANARY_VERSION_LABEL] = type;
|
||||
helper.updateObjectLabels(inputObject, newLabels, false);
|
||||
helper.updateObjectAnnotations(inputObject, newLabels, false);
|
||||
helper.updateSelectorLabels(inputObject, newLabels, false);
|
||||
if (!helper.isServiceEntity(inputObject.kind)) {
|
||||
helper.updateSpecLabels(inputObject, newLabels, false);
|
||||
}
|
||||
}
|
||||
function cleanUpCanary(kubectl, files, includeServices) {
|
||||
var deleteObject = function (kind, name) {
|
||||
try {
|
||||
const result = kubectl.delete([kind, name]);
|
||||
utility_1.checkForErrors([result]);
|
||||
}
|
||||
catch (ex) {
|
||||
// Ignore delete failures if the resource doesn't exist
|
||||
}
|
||||
};
|
||||
files.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (helper.isDeploymentEntity(kind) || (includeServices && helper.isServiceEntity(kind))) {
|
||||
const canaryObjectName = getCanaryResourceName(name);
|
||||
const baselineObjectName = getBaselineResourceName(name);
|
||||
deleteObject(kind, canaryObjectName);
|
||||
deleteObject(kind, baselineObjectName);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
|
@@ -1,166 +0,0 @@
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getManifestFiles = exports.deploy = void 0;
|
||||
const fs = require("fs");
|
||||
const core = require("@actions/core");
|
||||
const yaml = require("js-yaml");
|
||||
const canaryDeploymentHelper = require("./canary-deployment-helper");
|
||||
const KubernetesObjectUtility = require("../resource-object-utility");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const models = require("../../constants");
|
||||
const fileHelper = require("../files-helper");
|
||||
const utils = require("../manifest-utilities");
|
||||
const KubernetesManifestUtility = require("../manifest-stability-utility");
|
||||
const KubernetesConstants = require("../../constants");
|
||||
const manifest_utilities_1 = require("../manifest-utilities");
|
||||
const pod_canary_deployment_helper_1 = require("./pod-canary-deployment-helper");
|
||||
const smi_canary_deployment_helper_1 = require("./smi-canary-deployment-helper");
|
||||
const utility_1 = require("../utility");
|
||||
const blue_green_helper_1 = require("./blue-green-helper");
|
||||
const service_blue_green_helper_1 = require("./service-blue-green-helper");
|
||||
const ingress_blue_green_helper_1 = require("./ingress-blue-green-helper");
|
||||
const smi_blue_green_helper_1 = require("./smi-blue-green-helper");
|
||||
function deploy(kubectl, manifestFilePaths, deploymentStrategy) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// get manifest files
|
||||
let inputManifestFiles = manifest_utilities_1.getUpdatedManifestFiles(manifestFilePaths);
|
||||
// deployment
|
||||
const deployedManifestFiles = deployManifests(inputManifestFiles, kubectl, isCanaryDeploymentStrategy(deploymentStrategy), blue_green_helper_1.isBlueGreenDeploymentStrategy());
|
||||
// check manifest stability
|
||||
const resourceTypes = KubernetesObjectUtility.getResources(deployedManifestFiles, models.deploymentTypes.concat([KubernetesConstants.DiscoveryAndLoadBalancerResource.service]));
|
||||
yield checkManifestStability(kubectl, resourceTypes);
|
||||
// route blue-green deployments
|
||||
if (blue_green_helper_1.isBlueGreenDeploymentStrategy()) {
|
||||
yield blue_green_helper_1.routeBlueGreen(kubectl, inputManifestFiles);
|
||||
}
|
||||
// print ingress resources
|
||||
const ingressResources = KubernetesObjectUtility.getResources(deployedManifestFiles, [KubernetesConstants.DiscoveryAndLoadBalancerResource.ingress]);
|
||||
ingressResources.forEach(ingressResource => {
|
||||
kubectl.getResource(KubernetesConstants.DiscoveryAndLoadBalancerResource.ingress, ingressResource.name);
|
||||
});
|
||||
// annotate resources
|
||||
let allPods;
|
||||
try {
|
||||
allPods = JSON.parse((kubectl.getAllPods()).stdout);
|
||||
}
|
||||
catch (e) {
|
||||
core.debug("Unable to parse pods; Error: " + e);
|
||||
}
|
||||
annotateAndLabelResources(deployedManifestFiles, kubectl, resourceTypes, allPods);
|
||||
});
|
||||
}
|
||||
exports.deploy = deploy;
|
||||
function getManifestFiles(manifestFilePaths) {
|
||||
const files = utils.getManifestFiles(manifestFilePaths);
|
||||
if (files == null || files.length === 0) {
|
||||
throw new Error(`ManifestFileNotFound : ${manifestFilePaths}`);
|
||||
}
|
||||
return files;
|
||||
}
|
||||
exports.getManifestFiles = getManifestFiles;
|
||||
function deployManifests(files, kubectl, isCanaryDeploymentStrategy, isBlueGreenDeploymentStrategy) {
|
||||
let result;
|
||||
if (isCanaryDeploymentStrategy) {
|
||||
let canaryDeploymentOutput;
|
||||
if (canaryDeploymentHelper.isSMICanaryStrategy()) {
|
||||
canaryDeploymentOutput = smi_canary_deployment_helper_1.deploySMICanary(kubectl, files);
|
||||
}
|
||||
else {
|
||||
canaryDeploymentOutput = pod_canary_deployment_helper_1.deployPodCanary(kubectl, files);
|
||||
}
|
||||
result = canaryDeploymentOutput.result;
|
||||
files = canaryDeploymentOutput.newFilePaths;
|
||||
}
|
||||
else if (isBlueGreenDeploymentStrategy) {
|
||||
let blueGreenDeploymentOutput;
|
||||
if (blue_green_helper_1.isIngressRoute()) {
|
||||
blueGreenDeploymentOutput = ingress_blue_green_helper_1.deployBlueGreenIngress(kubectl, files);
|
||||
}
|
||||
else if (blue_green_helper_1.isSMIRoute()) {
|
||||
blueGreenDeploymentOutput = smi_blue_green_helper_1.deployBlueGreenSMI(kubectl, files);
|
||||
}
|
||||
else {
|
||||
blueGreenDeploymentOutput = service_blue_green_helper_1.deployBlueGreenService(kubectl, files);
|
||||
}
|
||||
result = blueGreenDeploymentOutput.result;
|
||||
files = blueGreenDeploymentOutput.newFilePaths;
|
||||
}
|
||||
else {
|
||||
if (canaryDeploymentHelper.isSMICanaryStrategy()) {
|
||||
const updatedManifests = appendStableVersionLabelToResource(files, kubectl);
|
||||
result = kubectl.apply(updatedManifests, TaskInputParameters.forceDeployment);
|
||||
}
|
||||
else {
|
||||
result = kubectl.apply(files, TaskInputParameters.forceDeployment);
|
||||
}
|
||||
}
|
||||
utility_1.checkForErrors([result]);
|
||||
return files;
|
||||
}
|
||||
function appendStableVersionLabelToResource(files, kubectl) {
|
||||
const manifestFiles = [];
|
||||
const newObjectsList = [];
|
||||
files.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
const kind = inputObject.kind;
|
||||
if (KubernetesObjectUtility.isDeploymentEntity(kind)) {
|
||||
const updatedObject = canaryDeploymentHelper.markResourceAsStable(inputObject);
|
||||
newObjectsList.push(updatedObject);
|
||||
}
|
||||
else {
|
||||
manifestFiles.push(filePath);
|
||||
}
|
||||
});
|
||||
});
|
||||
const updatedManifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
manifestFiles.push(...updatedManifestFiles);
|
||||
return manifestFiles;
|
||||
}
|
||||
function checkManifestStability(kubectl, resources) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
yield KubernetesManifestUtility.checkManifestStability(kubectl, resources);
|
||||
});
|
||||
}
|
||||
function annotateAndLabelResources(files, kubectl, resourceTypes, allPods) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const workflowFilePath = yield utility_1.getWorkflowFilePath(TaskInputParameters.githubToken);
|
||||
const deploymentConfig = yield utility_1.getDeploymentConfig();
|
||||
const annotationKeyLabel = models.getWorkflowAnnotationKeyLabel(workflowFilePath);
|
||||
annotateResources(files, kubectl, resourceTypes, allPods, annotationKeyLabel, workflowFilePath, deploymentConfig);
|
||||
labelResources(files, kubectl, annotationKeyLabel);
|
||||
});
|
||||
}
|
||||
function annotateResources(files, kubectl, resourceTypes, allPods, annotationKey, workflowFilePath, deploymentConfig) {
|
||||
const annotateResults = [];
|
||||
const lastSuccessSha = utility_1.getLastSuccessfulRunSha(kubectl, TaskInputParameters.namespace, annotationKey);
|
||||
let annotationKeyValStr = annotationKey + '=' + models.getWorkflowAnnotationsJson(lastSuccessSha, workflowFilePath, deploymentConfig);
|
||||
annotateResults.push(kubectl.annotate('namespace', TaskInputParameters.namespace, annotationKeyValStr));
|
||||
annotateResults.push(kubectl.annotateFiles(files, annotationKeyValStr));
|
||||
resourceTypes.forEach(resource => {
|
||||
if (resource.type.toUpperCase() !== models.KubernetesWorkload.pod.toUpperCase()) {
|
||||
utility_1.annotateChildPods(kubectl, resource.type, resource.name, annotationKeyValStr, allPods)
|
||||
.forEach(execResult => annotateResults.push(execResult));
|
||||
}
|
||||
});
|
||||
utility_1.checkForErrors(annotateResults, true);
|
||||
}
|
||||
function labelResources(files, kubectl, label) {
|
||||
let workflowName = process.env.GITHUB_WORKFLOW;
|
||||
workflowName = workflowName.startsWith('.github/workflows/') ?
|
||||
workflowName.replace(".github/workflows/", "") : workflowName;
|
||||
const labels = [`workflowFriendlyName=${workflowName}`, `workflow=${label}`];
|
||||
utility_1.checkForErrors([kubectl.labelFiles(files, labels)], true);
|
||||
}
|
||||
function isCanaryDeploymentStrategy(deploymentStrategy) {
|
||||
return deploymentStrategy != null && deploymentStrategy.toUpperCase() === canaryDeploymentHelper.CANARY_DEPLOYMENT_STRATEGY.toUpperCase();
|
||||
}
|
|
@@ -1,158 +0,0 @@
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.updateIngressBackend = exports.getUpdatedBlueGreenIngress = exports.validateIngressesState = exports.routeBlueGreenIngress = exports.rejectBlueGreenIngress = exports.promoteBlueGreenIngress = exports.deployBlueGreenIngress = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fileHelper = require("../files-helper");
|
||||
const blue_green_helper_1 = require("./blue-green-helper");
|
||||
const blue_green_helper_2 = require("./blue-green-helper");
|
||||
const BACKEND = 'BACKEND';
|
||||
function deployBlueGreenIngress(kubectl, filePaths) {
|
||||
// get all kubernetes objects defined in manifest files
|
||||
const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
|
||||
// create deployments with green label value
|
||||
const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.GREEN_LABEL_VALUE);
|
||||
// create new services and other objects
|
||||
let newObjectsList = [];
|
||||
manifestObjects.serviceEntityList.forEach(inputObject => {
|
||||
const newBlueGreenObject = blue_green_helper_1.getNewBlueGreenObject(inputObject, blue_green_helper_2.GREEN_LABEL_VALUE);
|
||||
core.debug('New blue-green object is: ' + JSON.stringify(newBlueGreenObject));
|
||||
newObjectsList.push(newBlueGreenObject);
|
||||
});
|
||||
newObjectsList = newObjectsList.concat(manifestObjects.otherObjects).concat(manifestObjects.unroutedServiceEntityList);
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
kubectl.apply(manifestFiles);
|
||||
// return results to check for manifest stability
|
||||
return result;
|
||||
}
|
||||
exports.deployBlueGreenIngress = deployBlueGreenIngress;
|
||||
function promoteBlueGreenIngress(kubectl, manifestObjects) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// checking whether there is anything to promote
|
||||
if (!validateIngressesState(kubectl, manifestObjects.ingressEntityList, manifestObjects.serviceNameMap)) {
|
||||
throw ('NotInPromoteStateIngress');
|
||||
}
|
||||
// create stable deployments with new configuration
|
||||
const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.NONE_LABEL_VALUE);
|
||||
// create stable services with new configuration
|
||||
const newObjectsList = [];
|
||||
manifestObjects.serviceEntityList.forEach((inputObject) => {
|
||||
const newBlueGreenObject = blue_green_helper_1.getNewBlueGreenObject(inputObject, blue_green_helper_2.NONE_LABEL_VALUE);
|
||||
core.debug('New blue-green object is: ' + JSON.stringify(newBlueGreenObject));
|
||||
newObjectsList.push(newBlueGreenObject);
|
||||
});
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
kubectl.apply(manifestFiles);
|
||||
// returning deployments to check for rollout stability
|
||||
return result;
|
||||
});
|
||||
}
|
||||
exports.promoteBlueGreenIngress = promoteBlueGreenIngress;
|
||||
function rejectBlueGreenIngress(kubectl, filePaths) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// get all kubernetes objects defined in manifest files
|
||||
const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
|
||||
// routing ingresses back to the stable services
|
||||
routeBlueGreenIngress(kubectl, null, manifestObjects.serviceNameMap, manifestObjects.ingressEntityList);
|
||||
// deleting green services and deployments
|
||||
blue_green_helper_1.deleteWorkloadsAndServicesWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList, manifestObjects.serviceEntityList);
|
||||
});
|
||||
}
|
||||
exports.rejectBlueGreenIngress = rejectBlueGreenIngress;
|
||||
function routeBlueGreenIngress(kubectl, nextLabel, serviceNameMap, ingressEntityList) {
|
||||
let newObjectsList = [];
|
||||
if (!nextLabel) {
|
||||
newObjectsList = ingressEntityList.filter(ingress => isIngressRouted(ingress, serviceNameMap));
|
||||
}
|
||||
else {
|
||||
ingressEntityList.forEach((inputObject) => {
|
||||
if (isIngressRouted(inputObject, serviceNameMap)) {
|
||||
const newBlueGreenIngressObject = getUpdatedBlueGreenIngress(inputObject, serviceNameMap, blue_green_helper_2.GREEN_LABEL_VALUE);
|
||||
newObjectsList.push(newBlueGreenIngressObject);
|
||||
}
|
||||
else {
|
||||
newObjectsList.push(inputObject);
|
||||
}
|
||||
});
|
||||
}
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
kubectl.apply(manifestFiles);
|
||||
}
|
||||
exports.routeBlueGreenIngress = routeBlueGreenIngress;
|
||||
function validateIngressesState(kubectl, ingressEntityList, serviceNameMap) {
|
||||
let areIngressesTargetingNewServices = true;
|
||||
ingressEntityList.forEach((inputObject) => {
|
||||
if (isIngressRouted(inputObject, serviceNameMap)) {
|
||||
//querying existing ingress
|
||||
let existingIngress = blue_green_helper_1.fetchResource(kubectl, inputObject.kind, inputObject.metadata.name);
|
||||
if (!!existingIngress) {
|
||||
let currentLabel;
|
||||
// checking its label
|
||||
try {
|
||||
currentLabel = existingIngress.metadata.labels[blue_green_helper_2.BLUE_GREEN_VERSION_LABEL];
|
||||
}
|
||||
catch (_a) {
|
||||
// if no label exists, then not an ingress targeting green deployments
|
||||
areIngressesTargetingNewServices = false;
|
||||
}
|
||||
if (currentLabel != blue_green_helper_2.GREEN_LABEL_VALUE) {
|
||||
// if not green label, then wrong configuration
|
||||
areIngressesTargetingNewServices = false;
|
||||
}
|
||||
}
|
||||
else {
|
||||
// no ingress at all, so nothing to promote
|
||||
areIngressesTargetingNewServices = false;
|
||||
}
|
||||
}
|
||||
});
|
||||
return areIngressesTargetingNewServices;
|
||||
}
|
||||
exports.validateIngressesState = validateIngressesState;
|
||||
function isIngressRouted(ingressObject, serviceNameMap) {
|
||||
let isIngressRouted = false;
|
||||
// checks whether the ingress targets a service defined in the given manifests
|
||||
JSON.parse(JSON.stringify(ingressObject), (key, value) => {
|
||||
if (key === 'serviceName' && serviceNameMap.has(value)) {
|
||||
isIngressRouted = true;
|
||||
}
|
||||
return value;
|
||||
});
|
||||
return isIngressRouted;
|
||||
}
|
||||
function getUpdatedBlueGreenIngress(inputObject, serviceNameMap, type) {
|
||||
if (!type) {
|
||||
// returning original with no modifications
|
||||
return inputObject;
|
||||
}
|
||||
const newObject = JSON.parse(JSON.stringify(inputObject));
|
||||
// adding green labels and values
|
||||
blue_green_helper_1.addBlueGreenLabelsAndAnnotations(newObject, type);
|
||||
// Updating ingress labels
|
||||
let finalObject = updateIngressBackend(newObject, serviceNameMap);
|
||||
return finalObject;
|
||||
}
|
||||
exports.getUpdatedBlueGreenIngress = getUpdatedBlueGreenIngress;
|
||||
function updateIngressBackend(inputObject, serviceNameMap) {
|
||||
inputObject = JSON.parse(JSON.stringify(inputObject), (key, value) => {
|
||||
if (key.toUpperCase() === BACKEND) {
|
||||
let serviceName = value.serviceName;
|
||||
if (serviceNameMap.has(serviceName)) {
|
||||
// updating service name with corresponding bluegreen name only if service is provied in given manifests
|
||||
value.serviceName = serviceNameMap.get(serviceName);
|
||||
}
|
||||
}
|
||||
return value;
|
||||
});
|
||||
return inputObject;
|
||||
}
|
||||
exports.updateIngressBackend = updateIngressBackend;
|
|
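The JSON.parse reviver used by isIngressRouted and updateIngressBackend is what lets these helpers walk an arbitrarily nested ingress manifest without hard-coding where the backend sits: every key/value pair is visited exactly once, and returning the value unchanged keeps the object intact. A minimal standalone sketch of the same pattern in TypeScript (the ingress fragment below is hypothetical):

// Sketch only: walks any parsed Kubernetes object and collects every value
// stored under a 'serviceName' key, mirroring the reviver trick above.
function collectServiceNames(obj: object): string[] {
    const names: string[] = [];
    JSON.parse(JSON.stringify(obj), (key, value) => {
        if (key === 'serviceName') {
            names.push(value);
        }
        return value; // returning the value unchanged leaves the object as-is
    });
    return names;
}

// hypothetical ingress fragment for illustration
const ingress = { spec: { rules: [{ http: { paths: [{ backend: { serviceName: 'web', servicePort: 80 } }] } }] } };
console.log(collectServiceNames(ingress)); // [ 'web' ]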
@@ -1,58 +0,0 @@
'use strict';
Object.defineProperty(exports, "__esModule", { value: true });
exports.deployPodCanary = void 0;
const core = require("@actions/core");
const fs = require("fs");
const yaml = require("js-yaml");
const TaskInputParameters = require("../../input-parameters");
const fileHelper = require("../files-helper");
const helper = require("../resource-object-utility");
const canaryDeploymentHelper = require("./canary-deployment-helper");
function deployPodCanary(kubectl, filePaths) {
    const newObjectsList = [];
    const percentage = parseInt(TaskInputParameters.canaryPercentage);
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, function (inputObject) {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (helper.isDeploymentEntity(kind)) {
                core.debug('Calculating replica count for canary');
                const canaryReplicaCount = calculateReplicaCountForCanary(inputObject, percentage);
                core.debug('Replica count is ' + canaryReplicaCount);
                // Get stable object
                core.debug('Querying stable object');
                const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, name);
                if (!stableObject) {
                    core.debug('Stable object not found. Creating only canary object');
                    // If stable object not found, create canary deployment.
                    const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                    core.debug('New canary object is: ' + JSON.stringify(newCanaryObject));
                    newObjectsList.push(newCanaryObject);
                }
                else {
                    core.debug('Stable object found. Creating canary and baseline objects');
                    // If stable object is found, create canary and baseline objects.
                    const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                    const newBaselineObject = canaryDeploymentHelper.getNewBaselineResource(stableObject, canaryReplicaCount);
                    core.debug('New canary object is: ' + JSON.stringify(newCanaryObject));
                    core.debug('New baseline object is: ' + JSON.stringify(newBaselineObject));
                    newObjectsList.push(newCanaryObject);
                    newObjectsList.push(newBaselineObject);
                }
            }
            else {
                // Updating non-deployment entity as is.
                newObjectsList.push(inputObject);
            }
        });
    });
    const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
    const result = kubectl.apply(manifestFiles, TaskInputParameters.forceDeployment);
    return { 'result': result, 'newFilePaths': manifestFiles };
}
exports.deployPodCanary = deployPodCanary;
function calculateReplicaCountForCanary(inputObject, percentage) {
    const inputReplicaCount = helper.getReplicaCount(inputObject);
    return Math.round((inputReplicaCount * percentage) / 100);
}
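calculateReplicaCountForCanary sizes the canary as a rounded percentage of the replica count requested in the manifest, so a low percentage on a small deployment can round down to zero canary pods. A standalone sketch of the same arithmetic:

// Sketch: canary replicas = round(stable replicas * percentage / 100)
function canaryReplicas(stableReplicas: number, percentage: number): number {
    return Math.round((stableReplicas * percentage) / 100);
}

console.log(canaryReplicas(10, 25)); // 3  (2.5 rounds up)
console.log(canaryReplicas(4, 10));  // 0  (0.4 rounds down, i.e. no canary pods)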
@@ -1,97 +0,0 @@
'use strict';
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.getServiceSpecLabel = exports.validateServicesState = exports.routeBlueGreenService = exports.rejectBlueGreenService = exports.promoteBlueGreenService = exports.deployBlueGreenService = void 0;
const fileHelper = require("../files-helper");
const blue_green_helper_1 = require("./blue-green-helper");
const blue_green_helper_2 = require("./blue-green-helper");
function deployBlueGreenService(kubectl, filePaths) {
    // get all kubernetes objects defined in manifest files
    const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
    // create deployments with green label value
    const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.GREEN_LABEL_VALUE);
    // create other non-deployment and non-service entities
    const newObjectsList = manifestObjects.otherObjects.concat(manifestObjects.ingressEntityList).concat(manifestObjects.unroutedServiceEntityList);
    const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
    kubectl.apply(manifestFiles);
    // returning deployment details to check for rollout stability
    return result;
}
exports.deployBlueGreenService = deployBlueGreenService;
function promoteBlueGreenService(kubectl, manifestObjects) {
    return __awaiter(this, void 0, void 0, function* () {
        // checking whether services are in the right state, i.e. targeting green deployments
        if (!validateServicesState(kubectl, manifestObjects.serviceEntityList)) {
            throw ('NotInPromoteState');
        }
        // creating stable deployments with the new configuration
        const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.NONE_LABEL_VALUE);
        // returning deployment details to check for rollout stability
        return result;
    });
}
exports.promoteBlueGreenService = promoteBlueGreenService;
function rejectBlueGreenService(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        // get all kubernetes objects defined in manifest files
        const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
        // routing to stable objects
        routeBlueGreenService(kubectl, blue_green_helper_2.NONE_LABEL_VALUE, manifestObjects.serviceEntityList);
        // deleting the new deployments with green suffix
        blue_green_helper_1.deleteWorkloadsWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList);
    });
}
exports.rejectBlueGreenService = rejectBlueGreenService;
function routeBlueGreenService(kubectl, nextLabel, serviceEntityList) {
    const newObjectsList = [];
    serviceEntityList.forEach((serviceObject) => {
        const newBlueGreenServiceObject = getUpdatedBlueGreenService(serviceObject, nextLabel);
        newObjectsList.push(newBlueGreenServiceObject);
    });
    // configures the services
    const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
    kubectl.apply(manifestFiles);
}
exports.routeBlueGreenService = routeBlueGreenService;
// adding green labels to configure existing service
function getUpdatedBlueGreenService(inputObject, labelValue) {
    const newObject = JSON.parse(JSON.stringify(inputObject));
    // Adding labels and annotations.
    blue_green_helper_1.addBlueGreenLabelsAndAnnotations(newObject, labelValue);
    return newObject;
}
function validateServicesState(kubectl, serviceEntityList) {
    let areServicesGreen = true;
    serviceEntityList.forEach((serviceObject) => {
        // finding the existing routed service
        const existingService = blue_green_helper_1.fetchResource(kubectl, serviceObject.kind, serviceObject.metadata.name);
        if (!!existingService) {
            let currentLabel = getServiceSpecLabel(existingService);
            if (currentLabel != blue_green_helper_2.GREEN_LABEL_VALUE) {
                // service should be targeting deployments with green label
                areServicesGreen = false;
            }
        }
        else {
            // service targeting the deployment doesn't exist
            areServicesGreen = false;
        }
    });
    return areServicesGreen;
}
exports.validateServicesState = validateServicesState;
function getServiceSpecLabel(inputObject) {
    if (!!inputObject && inputObject.spec && inputObject.spec.selector && inputObject.spec.selector[blue_green_helper_2.BLUE_GREEN_VERSION_LABEL]) {
        return inputObject.spec.selector[blue_green_helper_2.BLUE_GREEN_VERSION_LABEL];
    }
    return '';
}
exports.getServiceSpecLabel = getServiceSpecLabel;
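In the service-based strategy, routing is done entirely through the Service selector: addBlueGreenLabelsAndAnnotations stamps the blue-green version label onto spec.selector, so the same Service alternately matches the green pods or the stable pods. A sketch of that selector change, assuming a placeholder label key ('k8s.deploy.color' below is illustrative, not the real constant from blue-green-helper):

// Sketch: how routing flips a Service between stable and green pods.
// BLUE_GREEN_VERSION_LABEL is a placeholder; the real key lives in blue-green-helper.
const BLUE_GREEN_VERSION_LABEL = 'k8s.deploy.color'; // hypothetical value
const GREEN_LABEL_VALUE = 'green';

function routeSelector(service: any, labelValue: string) {
    const updated = JSON.parse(JSON.stringify(service));
    updated.spec.selector[BLUE_GREEN_VERSION_LABEL] = labelValue;
    return updated;
}

const svc = { kind: 'Service', metadata: { name: 'web' }, spec: { selector: { app: 'web' } } };
console.log(routeSelector(svc, GREEN_LABEL_VALUE).spec.selector);
// { app: 'web', 'k8s.deploy.color': 'green' }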
@@ -1,184 +0,0 @@
|
|||
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.cleanupSMI = exports.validateTrafficSplitsState = exports.routeBlueGreenSMI = exports.getSMIServiceResource = exports.setupSMI = exports.rejectBlueGreenSMI = exports.promoteBlueGreenSMI = exports.deployBlueGreenSMI = void 0;
|
||||
const kubectlUtils = require("../kubectl-util");
|
||||
const fileHelper = require("../files-helper");
|
||||
const blue_green_helper_1 = require("./blue-green-helper");
|
||||
const blue_green_helper_2 = require("./blue-green-helper");
|
||||
let trafficSplitAPIVersion = "";
|
||||
const TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX = '-trafficsplit';
|
||||
const TRAFFIC_SPLIT_OBJECT = 'TrafficSplit';
|
||||
const MIN_VAL = '0';
|
||||
const MAX_VAL = '100';
|
||||
function deployBlueGreenSMI(kubectl, filePaths) {
|
||||
// get all kubernetes objects defined in manifest files
|
||||
const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
|
||||
// creating services and other objects
|
||||
const newObjectsList = manifestObjects.otherObjects.concat(manifestObjects.serviceEntityList).concat(manifestObjects.ingressEntityList).concat(manifestObjects.unroutedServiceEntityList);
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
kubectl.apply(manifestFiles);
|
||||
// create the extra services and trafficsplit
|
||||
setupSMI(kubectl, manifestObjects.serviceEntityList);
|
||||
// create new deployments
|
||||
const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.GREEN_LABEL_VALUE);
|
||||
// return results to check for manifest stability
|
||||
return result;
|
||||
}
|
||||
exports.deployBlueGreenSMI = deployBlueGreenSMI;
|
||||
function promoteBlueGreenSMI(kubectl, manifestObjects) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// checking if there is something to promote
|
||||
if (!validateTrafficSplitsState(kubectl, manifestObjects.serviceEntityList)) {
|
||||
throw ('NotInPromoteStateSMI');
|
||||
}
|
||||
// create stable deployments with new configuration
|
||||
const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.NONE_LABEL_VALUE);
|
||||
// return result to check for stability
|
||||
return result;
|
||||
});
|
||||
}
|
||||
exports.promoteBlueGreenSMI = promoteBlueGreenSMI;
|
||||
function rejectBlueGreenSMI(kubectl, filePaths) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// get all kubernetes objects defined in manifest files
|
||||
const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
|
||||
// routing trafficsplit to stable deployments
|
||||
routeBlueGreenSMI(kubectl, blue_green_helper_2.NONE_LABEL_VALUE, manifestObjects.serviceEntityList);
|
||||
// deleting rejected new blue-green deployments
|
||||
blue_green_helper_1.deleteWorkloadsWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList);
|
||||
// deleting trafficsplit and extra services
|
||||
cleanupSMI(kubectl, manifestObjects.serviceEntityList);
|
||||
});
|
||||
}
|
||||
exports.rejectBlueGreenSMI = rejectBlueGreenSMI;
|
||||
function setupSMI(kubectl, serviceEntityList) {
|
||||
const newObjectsList = [];
|
||||
const trafficObjectList = [];
|
||||
serviceEntityList.forEach((serviceObject) => {
|
||||
// create a trafficsplit for service
|
||||
trafficObjectList.push(serviceObject);
|
||||
// setting up the services for trafficsplit
|
||||
const newStableService = getSMIServiceResource(serviceObject, blue_green_helper_2.STABLE_SUFFIX);
|
||||
const newGreenService = getSMIServiceResource(serviceObject, blue_green_helper_2.GREEN_SUFFIX);
|
||||
newObjectsList.push(newStableService);
|
||||
newObjectsList.push(newGreenService);
|
||||
});
|
||||
// creating services
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
kubectl.apply(manifestFiles);
|
||||
// route to stable service
|
||||
trafficObjectList.forEach((inputObject) => {
|
||||
createTrafficSplitObject(kubectl, inputObject.metadata.name, blue_green_helper_2.NONE_LABEL_VALUE);
|
||||
});
|
||||
}
|
||||
exports.setupSMI = setupSMI;
|
||||
function createTrafficSplitObject(kubectl, name, nextLabel) {
|
||||
// getting smi spec api version
|
||||
if (!trafficSplitAPIVersion) {
|
||||
trafficSplitAPIVersion = kubectlUtils.getTrafficSplitAPIVersion(kubectl);
|
||||
}
|
||||
// deciding weights based on nextlabel
|
||||
let stableWeight;
|
||||
let greenWeight;
|
||||
if (nextLabel === blue_green_helper_2.GREEN_LABEL_VALUE) {
|
||||
stableWeight = parseInt(MIN_VAL);
|
||||
greenWeight = parseInt(MAX_VAL);
|
||||
}
|
||||
else {
|
||||
stableWeight = parseInt(MAX_VAL);
|
||||
greenWeight = parseInt(MIN_VAL);
|
||||
}
|
||||
//traffic split json
|
||||
const trafficSplitObject = `{
|
||||
"apiVersion": "${trafficSplitAPIVersion}",
|
||||
"kind": "TrafficSplit",
|
||||
"metadata": {
|
||||
"name": "${blue_green_helper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX)}"
|
||||
},
|
||||
"spec": {
|
||||
"service": "${name}",
|
||||
"backends": [
|
||||
{
|
||||
"service": "${blue_green_helper_1.getBlueGreenResourceName(name, blue_green_helper_2.STABLE_SUFFIX)}",
|
||||
"weight": ${stableWeight}
|
||||
},
|
||||
{
|
||||
"service": "${blue_green_helper_1.getBlueGreenResourceName(name, blue_green_helper_2.GREEN_SUFFIX)}",
|
||||
"weight": ${greenWeight}
|
||||
}
|
||||
]
|
||||
}
|
||||
}`;
|
||||
// creating the trafficsplit object
|
||||
const trafficSplitManifestFile = fileHelper.writeManifestToFile(trafficSplitObject, TRAFFIC_SPLIT_OBJECT, blue_green_helper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX));
|
||||
kubectl.apply(trafficSplitManifestFile);
|
||||
}
|
||||
function getSMIServiceResource(inputObject, suffix) {
|
||||
const newObject = JSON.parse(JSON.stringify(inputObject));
|
||||
if (suffix === blue_green_helper_2.STABLE_SUFFIX) {
|
||||
// adding stable suffix to service name
|
||||
newObject.metadata.name = blue_green_helper_1.getBlueGreenResourceName(inputObject.metadata.name, blue_green_helper_2.STABLE_SUFFIX);
|
||||
return blue_green_helper_1.getNewBlueGreenObject(newObject, blue_green_helper_2.NONE_LABEL_VALUE);
|
||||
}
|
||||
else {
|
||||
// green label will be added for these
|
||||
return blue_green_helper_1.getNewBlueGreenObject(newObject, blue_green_helper_2.GREEN_LABEL_VALUE);
|
||||
}
|
||||
}
|
||||
exports.getSMIServiceResource = getSMIServiceResource;
|
||||
function routeBlueGreenSMI(kubectl, nextLabel, serviceEntityList) {
|
||||
serviceEntityList.forEach((serviceObject) => {
|
||||
// routing trafficsplit to given label
|
||||
createTrafficSplitObject(kubectl, serviceObject.metadata.name, nextLabel);
|
||||
});
|
||||
}
|
||||
exports.routeBlueGreenSMI = routeBlueGreenSMI;
|
||||
function validateTrafficSplitsState(kubectl, serviceEntityList) {
|
||||
let areTrafficSplitsInRightState = true;
|
||||
serviceEntityList.forEach((serviceObject) => {
|
||||
const name = serviceObject.metadata.name;
|
||||
let trafficSplitObject = blue_green_helper_1.fetchResource(kubectl, TRAFFIC_SPLIT_OBJECT, blue_green_helper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX));
|
||||
if (!trafficSplitObject) {
|
||||
// no trafficsplit exists
|
||||
areTrafficSplitsInRightState = false;
|
||||
}
|
||||
trafficSplitObject = JSON.parse(JSON.stringify(trafficSplitObject));
|
||||
trafficSplitObject.spec.backends.forEach(element => {
|
||||
// checking if trafficsplit in right state to deploy
|
||||
if (element.service === blue_green_helper_1.getBlueGreenResourceName(name, blue_green_helper_2.GREEN_SUFFIX)) {
|
||||
if (element.weight != MAX_VAL) {
|
||||
// green service should have max weight
|
||||
areTrafficSplitsInRightState = false;
|
||||
}
|
||||
}
|
||||
if (element.service === blue_green_helper_1.getBlueGreenResourceName(name, blue_green_helper_2.STABLE_SUFFIX)) {
|
||||
if (element.weight != MIN_VAL) {
|
||||
// stable service should have 0 weight
|
||||
areTrafficSplitsInRightState = false;
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
return areTrafficSplitsInRightState;
|
||||
}
|
||||
exports.validateTrafficSplitsState = validateTrafficSplitsState;
|
||||
function cleanupSMI(kubectl, serviceEntityList) {
|
||||
const deleteList = [];
|
||||
serviceEntityList.forEach((serviceObject) => {
|
||||
deleteList.push({ name: blue_green_helper_1.getBlueGreenResourceName(serviceObject.metadata.name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX), kind: TRAFFIC_SPLIT_OBJECT });
|
||||
deleteList.push({ name: blue_green_helper_1.getBlueGreenResourceName(serviceObject.metadata.name, blue_green_helper_2.GREEN_SUFFIX), kind: serviceObject.kind });
|
||||
deleteList.push({ name: blue_green_helper_1.getBlueGreenResourceName(serviceObject.metadata.name, blue_green_helper_2.STABLE_SUFFIX), kind: serviceObject.kind });
|
||||
});
|
||||
// deleting all objects
|
||||
blue_green_helper_1.deleteObjects(kubectl, deleteList);
|
||||
}
|
||||
exports.cleanupSMI = cleanupSMI;
|
|
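createTrafficSplitObject always emits a two-backend TrafficSplit whose weights are either 100/0 or 0/100, so traffic is swung wholesale between the stable and green copies of a Service rather than shifted gradually. For a hypothetical Service named 'web' routed to stable, the generated object would look roughly like the sketch below; the apiVersion is discovered from the cluster at run time, and the '-stable'/'-green' suffixes are assumptions about the helper constants:

// Sketch: the TrafficSplit produced for a hypothetical Service 'web' when routing
// to stable (stable weight 100, green weight 0). apiVersion and the service-name
// suffixes are assumptions, not values confirmed by this file.
const trafficSplitForStable = {
    apiVersion: 'split.smi-spec.io/v1alpha2',
    kind: 'TrafficSplit',
    metadata: { name: 'web-trafficsplit' },
    spec: {
        service: 'web',
        backends: [
            { service: 'web-stable', weight: 100 },
            { service: 'web-green', weight: 0 },
        ],
    },
};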
@@ -1,199 +0,0 @@
|
|||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.redirectTrafficToStableDeployment = exports.redirectTrafficToCanaryDeployment = exports.deploySMICanary = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const util = require("util");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const fileHelper = require("../files-helper");
|
||||
const helper = require("../resource-object-utility");
|
||||
const utils = require("../manifest-utilities");
|
||||
const kubectlUtils = require("../kubectl-util");
|
||||
const canaryDeploymentHelper = require("./canary-deployment-helper");
|
||||
const utility_1 = require("../utility");
|
||||
const TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX = '-workflow-rollout';
|
||||
const TRAFFIC_SPLIT_OBJECT = 'TrafficSplit';
|
||||
let trafficSplitAPIVersion = "";
|
||||
function deploySMICanary(kubectl, filePaths) {
|
||||
const newObjectsList = [];
|
||||
const canaryReplicaCount = parseInt(TaskInputParameters.baselineAndCanaryReplicas);
|
||||
core.debug('Replica count is ' + canaryReplicaCount);
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (helper.isDeploymentEntity(kind)) {
|
||||
// Get stable object
|
||||
core.debug('Querying stable object');
|
||||
const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, name);
|
||||
if (!stableObject) {
|
||||
core.debug('Stable object not found. Creating only canary object');
|
||||
// If stable object not found, create canary deployment.
|
||||
const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
|
||||
core.debug('New canary object is: ' + JSON.stringify(newCanaryObject));
|
||||
newObjectsList.push(newCanaryObject);
|
||||
}
|
||||
else {
|
||||
if (!canaryDeploymentHelper.isResourceMarkedAsStable(stableObject)) {
|
||||
throw (`StableSpecSelectorNotExist : ${name}`);
|
||||
}
|
||||
core.debug('Stable object found. Creating canary and baseline objects');
|
||||
// If stable object is found, create canary and baseline objects.
|
||||
const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
|
||||
const newBaselineObject = canaryDeploymentHelper.getNewBaselineResource(stableObject, canaryReplicaCount);
|
||||
core.debug('New canary object is: ' + JSON.stringify(newCanaryObject));
|
||||
core.debug('New baseline object is: ' + JSON.stringify(newBaselineObject));
|
||||
newObjectsList.push(newCanaryObject);
|
||||
newObjectsList.push(newBaselineObject);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Updating non deployment entity as it is.
|
||||
newObjectsList.push(inputObject);
|
||||
}
|
||||
});
|
||||
});
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
const result = kubectl.apply(manifestFiles, TaskInputParameters.forceDeployment);
|
||||
createCanaryService(kubectl, filePaths);
|
||||
return { 'result': result, 'newFilePaths': manifestFiles };
|
||||
}
|
||||
exports.deploySMICanary = deploySMICanary;
|
||||
function createCanaryService(kubectl, filePaths) {
|
||||
const newObjectsList = [];
|
||||
const trafficObjectsList = [];
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (helper.isServiceEntity(kind)) {
|
||||
const newCanaryServiceObject = canaryDeploymentHelper.getNewCanaryResource(inputObject);
|
||||
core.debug('New canary service object is: ' + JSON.stringify(newCanaryServiceObject));
|
||||
newObjectsList.push(newCanaryServiceObject);
|
||||
const newBaselineServiceObject = canaryDeploymentHelper.getNewBaselineResource(inputObject);
|
||||
core.debug('New baseline object is: ' + JSON.stringify(newBaselineServiceObject));
|
||||
newObjectsList.push(newBaselineServiceObject);
|
||||
core.debug('Querying for stable service object');
|
||||
const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, canaryDeploymentHelper.getStableResourceName(name));
|
||||
if (!stableObject) {
|
||||
const newStableServiceObject = canaryDeploymentHelper.getStableResource(inputObject);
|
||||
core.debug('New stable service object is: ' + JSON.stringify(newStableServiceObject));
|
||||
newObjectsList.push(newStableServiceObject);
|
||||
core.debug('Creating the traffic object for service: ' + name);
|
||||
const trafficObject = createTrafficSplitManifestFile(kubectl, name, 0, 0, 1000);
|
||||
core.debug('Creating the traffic object for service: ' + trafficObject);
|
||||
trafficObjectsList.push(trafficObject);
|
||||
}
|
||||
else {
|
||||
let updateTrafficObject = true;
|
||||
const trafficObject = canaryDeploymentHelper.fetchResource(kubectl, TRAFFIC_SPLIT_OBJECT, getTrafficSplitResourceName(name));
|
||||
if (trafficObject) {
|
||||
const trafficJObject = JSON.parse(JSON.stringify(trafficObject));
|
||||
if (trafficJObject && trafficJObject.spec && trafficJObject.spec.backends) {
|
||||
trafficJObject.spec.backends.forEach((s) => {
|
||||
if (s.service === canaryDeploymentHelper.getCanaryResourceName(name) && s.weight === "1000m") {
|
||||
core.debug('Update traffic object not required');
|
||||
updateTrafficObject = false;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
if (updateTrafficObject) {
|
||||
core.debug('Stable service object present so updating the traffic object for service: ' + name);
|
||||
trafficObjectsList.push(updateTrafficSplitObject(kubectl, name));
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
manifestFiles.push(...trafficObjectsList);
|
||||
const result = kubectl.apply(manifestFiles, TaskInputParameters.forceDeployment);
|
||||
utility_1.checkForErrors([result]);
|
||||
}
|
||||
function redirectTrafficToCanaryDeployment(kubectl, manifestFilePaths) {
|
||||
adjustTraffic(kubectl, manifestFilePaths, 0, 1000);
|
||||
}
|
||||
exports.redirectTrafficToCanaryDeployment = redirectTrafficToCanaryDeployment;
|
||||
function redirectTrafficToStableDeployment(kubectl, manifestFilePaths) {
|
||||
adjustTraffic(kubectl, manifestFilePaths, 1000, 0);
|
||||
}
|
||||
exports.redirectTrafficToStableDeployment = redirectTrafficToStableDeployment;
|
||||
function adjustTraffic(kubectl, manifestFilePaths, stableWeight, canaryWeight) {
|
||||
// get manifest files
|
||||
const inputManifestFiles = utils.getManifestFiles(manifestFilePaths);
|
||||
if (inputManifestFiles == null || inputManifestFiles.length == 0) {
|
||||
return;
|
||||
}
|
||||
const trafficSplitManifests = [];
|
||||
const serviceObjects = [];
|
||||
inputManifestFiles.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (helper.isServiceEntity(kind)) {
|
||||
trafficSplitManifests.push(createTrafficSplitManifestFile(kubectl, name, stableWeight, 0, canaryWeight));
|
||||
serviceObjects.push(name);
|
||||
}
|
||||
});
|
||||
});
|
||||
if (trafficSplitManifests.length <= 0) {
|
||||
return;
|
||||
}
|
||||
const result = kubectl.apply(trafficSplitManifests, TaskInputParameters.forceDeployment);
|
||||
core.debug('serviceObjects:' + serviceObjects.join(',') + ' result:' + result);
|
||||
utility_1.checkForErrors([result]);
|
||||
}
|
||||
function updateTrafficSplitObject(kubectl, serviceName) {
|
||||
const percentage = parseInt(TaskInputParameters.canaryPercentage) * 10;
|
||||
const baselineAndCanaryWeight = percentage / 2;
|
||||
const stableDeploymentWeight = 1000 - percentage;
|
||||
core.debug('Creating the traffic object with canary weight: ' + baselineAndCanaryWeight + ', baseline weight: ' + baselineAndCanaryWeight + ', stable weight: ' + stableDeploymentWeight);
|
||||
return createTrafficSplitManifestFile(kubectl, serviceName, stableDeploymentWeight, baselineAndCanaryWeight, baselineAndCanaryWeight);
|
||||
}
|
||||
function createTrafficSplitManifestFile(kubectl, serviceName, stableWeight, baselineWeight, canaryWeight) {
|
||||
const smiObjectString = getTrafficSplitObject(kubectl, serviceName, stableWeight, baselineWeight, canaryWeight);
|
||||
const manifestFile = fileHelper.writeManifestToFile(smiObjectString, TRAFFIC_SPLIT_OBJECT, serviceName);
|
||||
if (!manifestFile) {
|
||||
throw new Error('UnableToCreateTrafficSplitManifestFile');
|
||||
}
|
||||
return manifestFile;
|
||||
}
|
||||
function getTrafficSplitObject(kubectl, name, stableWeight, baselineWeight, canaryWeight) {
|
||||
if (!trafficSplitAPIVersion) {
|
||||
trafficSplitAPIVersion = kubectlUtils.getTrafficSplitAPIVersion(kubectl);
|
||||
}
|
||||
const trafficSplitObjectJson = `{
|
||||
"apiVersion": "${trafficSplitAPIVersion}",
|
||||
"kind": "TrafficSplit",
|
||||
"metadata": {
|
||||
"name": "%s"
|
||||
},
|
||||
"spec": {
|
||||
"backends": [
|
||||
{
|
||||
"service": "%s",
|
||||
"weight": "%sm"
|
||||
},
|
||||
{
|
||||
"service": "%s",
|
||||
"weight": "%sm"
|
||||
},
|
||||
{
|
||||
"service": "%s",
|
||||
"weight": "%sm"
|
||||
}
|
||||
],
|
||||
"service": "%s"
|
||||
}
|
||||
}`;
|
||||
const trafficSplitObject = util.format(trafficSplitObjectJson, getTrafficSplitResourceName(name), canaryDeploymentHelper.getStableResourceName(name), stableWeight, canaryDeploymentHelper.getBaselineResourceName(name), baselineWeight, canaryDeploymentHelper.getCanaryResourceName(name), canaryWeight, name);
|
||||
return trafficSplitObject;
|
||||
}
|
||||
function getTrafficSplitResourceName(name) {
|
||||
return name + TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX;
|
||||
}
|
|
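updateTrafficSplitObject works in per-mille rather than percent: the canary-percentage input is multiplied by 10, half of that goes to the baseline backend and half to the canary backend, and the remainder stays on stable, so the three weights always sum to 1000. A worked sketch of that arithmetic:

// Sketch: SMI canary weights in per-mille (always summing to 1000).
function smiCanaryWeights(canaryPercentage: number) {
    const total = canaryPercentage * 10;    // e.g. 20% -> 200m
    const baselineAndCanary = total / 2;    // split between baseline and canary
    const stable = 1000 - total;            // the rest stays on stable
    return { stable, baseline: baselineAndCanary, canary: baselineAndCanary };
}

console.log(smiCanaryWeights(20)); // { stable: 800, baseline: 100, canary: 100 }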
@@ -1,26 +0,0 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.isEqual = exports.StringComparer = void 0;
var StringComparer;
(function (StringComparer) {
    StringComparer[StringComparer["Ordinal"] = 0] = "Ordinal";
    StringComparer[StringComparer["OrdinalIgnoreCase"] = 1] = "OrdinalIgnoreCase";
})(StringComparer = exports.StringComparer || (exports.StringComparer = {}));
function isEqual(str1, str2, stringComparer) {
    if (str1 == null && str2 == null) {
        return true;
    }
    if (str1 == null) {
        return false;
    }
    if (str2 == null) {
        return false;
    }
    if (stringComparer == StringComparer.OrdinalIgnoreCase) {
        return str1.toUpperCase() === str2.toUpperCase();
    }
    else {
        return str1 === str2;
    }
}
exports.isEqual = isEqual;
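A short usage sketch of the comparer above: OrdinalIgnoreCase folds both sides to upper case before comparing, while Ordinal is a strict equality check.

// Sketch: expected behaviour of isEqual with the two comparers.
console.log(isEqual('Deployment', 'deployment', StringComparer.OrdinalIgnoreCase)); // true
console.log(isEqual('Deployment', 'deployment', StringComparer.Ordinal));           // false
console.log(isEqual(null, null, StringComparer.Ordinal));                           // true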
@@ -1,527 +0,0 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ToolRunner = void 0;
|
||||
const os = require("os");
|
||||
const events = require("events");
|
||||
const child = require("child_process");
|
||||
const core = require("@actions/core");
|
||||
class ToolRunner extends events.EventEmitter {
|
||||
constructor(toolPath) {
|
||||
super();
|
||||
if (!toolPath) {
|
||||
throw new Error('Parameter \'toolPath\' cannot be null or empty.');
|
||||
}
|
||||
this.toolPath = toolPath;
|
||||
this.args = [];
|
||||
core.debug('toolRunner toolPath: ' + toolPath);
|
||||
}
|
||||
_debug(message) {
|
||||
this.emit('debug', message);
|
||||
}
|
||||
_argStringToArray(argString) {
|
||||
var args = [];
|
||||
var inQuotes = false;
|
||||
var escaped = false;
|
||||
var lastCharWasSpace = true;
|
||||
var arg = '';
|
||||
var append = function (c) {
|
||||
// we only escape double quotes.
|
||||
if (escaped && c !== '"') {
|
||||
arg += '\\';
|
||||
}
|
||||
arg += c;
|
||||
escaped = false;
|
||||
};
|
||||
for (var i = 0; i < argString.length; i++) {
|
||||
var c = argString.charAt(i);
|
||||
if (c === ' ' && !inQuotes) {
|
||||
if (!lastCharWasSpace) {
|
||||
args.push(arg);
|
||||
arg = '';
|
||||
}
|
||||
lastCharWasSpace = true;
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
lastCharWasSpace = false;
|
||||
}
|
||||
if (c === '"') {
|
||||
if (!escaped) {
|
||||
inQuotes = !inQuotes;
|
||||
}
|
||||
else {
|
||||
append(c);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (c === "\\" && escaped) {
|
||||
append(c);
|
||||
continue;
|
||||
}
|
||||
if (c === "\\" && inQuotes) {
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
append(c);
|
||||
lastCharWasSpace = false;
|
||||
}
|
||||
if (!lastCharWasSpace) {
|
||||
args.push(arg.trim());
|
||||
}
|
||||
return args;
|
||||
}
|
||||
_getCommandString(options, noPrefix) {
|
||||
let toolPath = this._getSpawnFileName();
|
||||
let args = this._getSpawnArgs(options);
|
||||
let cmd = noPrefix ? '' : '[command]'; // omit prefix when piped to a second tool
|
||||
if (process.platform == 'win32') {
|
||||
// Windows + cmd file
|
||||
if (this._isCmdFile()) {
|
||||
cmd += toolPath;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// Windows + verbatim
|
||||
else if (options.windowsVerbatimArguments) {
|
||||
cmd += `"${toolPath}"`;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// Windows (regular)
|
||||
else {
|
||||
cmd += this._windowsQuoteCmdArg(toolPath);
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${this._windowsQuoteCmdArg(a)}`;
|
||||
});
|
||||
}
|
||||
}
|
||||
else {
|
||||
// OSX/Linux - this can likely be improved with some form of quoting.
|
||||
// creating processes on Unix is fundamentally different than Windows.
|
||||
// on Unix, execvp() takes an arg array.
|
||||
cmd += toolPath;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// append second tool
|
||||
if (this.pipeOutputToTool) {
|
||||
cmd += ' | ' + this.pipeOutputToTool._getCommandString(options, /*noPrefix:*/ true);
|
||||
}
|
||||
return cmd;
|
||||
}
|
||||
_getSpawnFileName() {
|
||||
if (process.platform == 'win32') {
|
||||
if (this._isCmdFile()) {
|
||||
return process.env['COMSPEC'] || 'cmd.exe';
|
||||
}
|
||||
}
|
||||
return this.toolPath;
|
||||
}
|
||||
_getSpawnArgs(options) {
|
||||
if (process.platform == 'win32') {
|
||||
if (this._isCmdFile()) {
|
||||
let argline = `/D /S /C "${this._windowsQuoteCmdArg(this.toolPath)}`;
|
||||
for (let i = 0; i < this.args.length; i++) {
|
||||
argline += ' ';
|
||||
argline += options.windowsVerbatimArguments ? this.args[i] : this._windowsQuoteCmdArg(this.args[i]);
|
||||
}
|
||||
argline += '"';
|
||||
return [argline];
|
||||
}
|
||||
if (options.windowsVerbatimArguments) {
|
||||
// note, in Node 6.x options.argv0 can be used instead of overriding args.slice and args.unshift.
|
||||
// for more details, refer to https://github.com/nodejs/node/blob/v6.x/lib/child_process.js
|
||||
let args = this.args.slice(0); // copy the array
|
||||
// override slice to prevent Node from creating a copy of the arg array.
|
||||
// we need Node to use the "unshift" override below.
|
||||
args.slice = function () {
|
||||
if (arguments.length != 1 || arguments[0] != 0) {
|
||||
throw new Error('Unexpected arguments passed to args.slice when windowsVerbatimArguments flag is set.');
|
||||
}
|
||||
return args;
|
||||
};
|
||||
// override unshift
|
||||
//
|
||||
// when using the windowsVerbatimArguments option, Node does not quote the tool path when building
|
||||
// the cmdline parameter for the win32 function CreateProcess(). an unquoted space in the tool path
|
||||
// causes problems for tools when attempting to parse their own command line args. tools typically
|
||||
// assume their arguments begin after arg 0.
|
||||
//
|
||||
// by hijacking unshift, we can quote the tool path when it is pushed onto the args array. Node builds
|
||||
// the cmdline parameter from the args array.
|
||||
//
|
||||
// note, we can't simply pass a quoted tool path to Node for multiple reasons:
|
||||
// 1) Node verifies the file exists (calls win32 function GetFileAttributesW) and the check returns
|
||||
// false if the path is quoted.
|
||||
// 2) Node passes the tool path as the application parameter to CreateProcess, which expects the
|
||||
// path to be unquoted.
|
||||
//
|
||||
// also note, in addition to the tool path being embedded within the cmdline parameter, Node also
|
||||
// passes the tool path to CreateProcess via the application parameter (optional parameter). when
|
||||
// present, Windows uses the application parameter to determine which file to run, instead of
|
||||
// interpreting the file from the cmdline parameter.
|
||||
args.unshift = function () {
|
||||
if (arguments.length != 1) {
|
||||
throw new Error('Unexpected arguments passed to args.unshift when windowsVerbatimArguments flag is set.');
|
||||
}
|
||||
return Array.prototype.unshift.call(args, `"${arguments[0]}"`); // quote the file name
|
||||
};
|
||||
return args;
|
||||
}
|
||||
}
|
||||
return this.args;
|
||||
}
|
||||
_isCmdFile() {
|
||||
let upperToolPath = this.toolPath.toUpperCase();
|
||||
return this._endsWith(upperToolPath, '.CMD') || this._endsWith(upperToolPath, '.BAT');
|
||||
}
|
||||
_endsWith(str, end) {
|
||||
return str.slice(-end.length) == end;
|
||||
}
|
||||
_windowsQuoteCmdArg(arg) {
|
||||
// for .exe, apply the normal quoting rules that libuv applies
|
||||
if (!this._isCmdFile()) {
|
||||
return this._uv_quote_cmd_arg(arg);
|
||||
}
|
||||
// otherwise apply quoting rules specific to the cmd.exe command line parser.
|
||||
// the libuv rules are generic and are not designed specifically for cmd.exe
|
||||
// command line parser.
|
||||
//
|
||||
// for a detailed description of the cmd.exe command line parser, refer to
|
||||
// http://stackoverflow.com/questions/4094699/how-does-the-windows-command-interpreter-cmd-exe-parse-scripts/7970912#7970912
|
||||
// need quotes for empty arg
|
||||
if (!arg) {
|
||||
return '""';
|
||||
}
|
||||
// determine whether the arg needs to be quoted
|
||||
const cmdSpecialChars = [' ', '\t', '&', '(', ')', '[', ']', '{', '}', '^', '=', ';', '!', '\'', '+', ',', '`', '~', '|', '<', '>', '"'];
|
||||
let needsQuotes = false;
|
||||
for (let char of arg) {
|
||||
if (cmdSpecialChars.some(x => x == char)) {
|
||||
needsQuotes = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// short-circuit if quotes not needed
|
||||
if (!needsQuotes) {
|
||||
return arg;
|
||||
}
|
||||
// the following quoting rules are very similar to the rules that libuv applies.
|
||||
//
|
||||
// 1) wrap the string in quotes
|
||||
//
|
||||
// 2) double-up quotes - i.e. " => ""
|
||||
//
|
||||
// this is different from the libuv quoting rules. libuv replaces " with \", which unfortunately
|
||||
// doesn't work well with a cmd.exe command line.
|
||||
//
|
||||
// note, replacing " with "" also works well if the arg is passed to a downstream .NET console app.
|
||||
// for example, the command line:
|
||||
// foo.exe "myarg:""my val"""
|
||||
// is parsed by a .NET console app into an arg array:
|
||||
// [ "myarg:\"my val\"" ]
|
||||
// which is the same end result when applying libuv quoting rules. although the actual
|
||||
// command line from libuv quoting rules would look like:
|
||||
// foo.exe "myarg:\"my val\""
|
||||
//
|
||||
// 3) double-up slashes that precede a quote,
|
||||
// e.g. hello \world => "hello \world"
|
||||
// hello\"world => "hello\\""world"
|
||||
// hello\\"world => "hello\\\\""world"
|
||||
// hello world\ => "hello world\\"
|
||||
//
|
||||
// technically this is not required for a cmd.exe command line, or the batch argument parser.
|
||||
// the reasons for including this as a .cmd quoting rule are:
|
||||
//
|
||||
// a) this is optimized for the scenario where the argument is passed from the .cmd file to an
|
||||
// external program. many programs (e.g. .NET console apps) rely on the slash-doubling rule.
|
||||
//
|
||||
// b) it's what we've been doing previously (by deferring to node default behavior) and we
|
||||
// haven't heard any complaints about that aspect.
|
||||
//
|
||||
// note, a weakness of the quoting rules chosen here, is that % is not escaped. in fact, % cannot be
|
||||
// escaped when used on the command line directly - even though within a .cmd file % can be escaped
|
||||
// by using %%.
|
||||
//
|
||||
// the saving grace is, on the command line, %var% is left as-is if var is not defined. this contrasts
|
||||
// the line parsing rules within a .cmd file, where if var is not defined it is replaced with nothing.
|
||||
//
|
||||
// one option that was explored was replacing % with ^% - i.e. %var% => ^%var^%. this hack would
|
||||
// often work, since it is unlikely that var^ would exist, and the ^ character is removed when the
|
||||
// variable is used. the problem, however, is that ^ is not removed when %* is used to pass the args
|
||||
// to an external program.
|
||||
//
|
||||
// an unexplored potential solution for the % escaping problem, is to create a wrapper .cmd file.
|
||||
// % can be escaped within a .cmd file.
|
||||
let reverse = '"';
|
||||
let quote_hit = true;
|
||||
for (let i = arg.length; i > 0; i--) { // walk the string in reverse
|
||||
reverse += arg[i - 1];
|
||||
if (quote_hit && arg[i - 1] == '\\') {
|
||||
reverse += '\\'; // double the slash
|
||||
}
|
||||
else if (arg[i - 1] == '"') {
|
||||
quote_hit = true;
|
||||
reverse += '"'; // double the quote
|
||||
}
|
||||
else {
|
||||
quote_hit = false;
|
||||
}
|
||||
}
|
||||
reverse += '"';
|
||||
return reverse.split('').reverse().join('');
|
||||
}
|
||||
_uv_quote_cmd_arg(arg) {
|
||||
// Tool runner wraps child_process.spawn() and needs to apply the same quoting as
|
||||
// Node in certain cases where the undocumented spawn option windowsVerbatimArguments
|
||||
// is used.
|
||||
//
|
||||
// Since this function is a port of quote_cmd_arg from Node 4.x (technically, lib UV,
|
||||
// see https://github.com/nodejs/node/blob/v4.x/deps/uv/src/win/process.c for details),
|
||||
// pasting copyright notice from Node within this function:
|
||||
//
|
||||
// Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to
|
||||
// deal in the Software without restriction, including without limitation the
|
||||
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
// sell copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
if (!arg) {
|
||||
// Need double quotation for empty argument
|
||||
return '""';
|
||||
}
|
||||
if (arg.indexOf(' ') < 0 && arg.indexOf('\t') < 0 && arg.indexOf('"') < 0) {
|
||||
// No quotation needed
|
||||
return arg;
|
||||
}
|
||||
if (arg.indexOf('"') < 0 && arg.indexOf('\\') < 0) {
|
||||
// No embedded double quotes or backslashes, so I can just wrap
|
||||
// quote marks around the whole thing.
|
||||
return `"${arg}"`;
|
||||
}
|
||||
// Expected input/output:
|
||||
// input : hello"world
|
||||
// output: "hello\"world"
|
||||
// input : hello""world
|
||||
// output: "hello\"\"world"
|
||||
// input : hello\world
|
||||
// output: hello\world
|
||||
// input : hello\\world
|
||||
// output: hello\\world
|
||||
// input : hello\"world
|
||||
// output: "hello\\\"world"
|
||||
// input : hello\\"world
|
||||
// output: "hello\\\\\"world"
|
||||
// input : hello world\
|
||||
// output: "hello world\\" - note the comment in libuv actually reads "hello world\"
|
||||
// but it appears the comment is wrong, it should be "hello world\\"
|
||||
let reverse = '"';
|
||||
let quote_hit = true;
|
||||
for (let i = arg.length; i > 0; i--) { // walk the string in reverse
|
||||
reverse += arg[i - 1];
|
||||
if (quote_hit && arg[i - 1] == '\\') {
|
||||
reverse += '\\';
|
||||
}
|
||||
else if (arg[i - 1] == '"') {
|
||||
quote_hit = true;
|
||||
reverse += '\\';
|
||||
}
|
||||
else {
|
||||
quote_hit = false;
|
||||
}
|
||||
}
|
||||
reverse += '"';
|
||||
return reverse.split('').reverse().join('');
|
||||
}
|
||||
_cloneExecOptions(options) {
|
||||
options = options || {};
|
||||
let result = {
|
||||
cwd: options.cwd || process.cwd(),
|
||||
env: options.env || process.env,
|
||||
silent: options.silent || false,
|
||||
failOnStdErr: options.failOnStdErr || false,
|
||||
ignoreReturnCode: options.ignoreReturnCode || false,
|
||||
windowsVerbatimArguments: options.windowsVerbatimArguments || false
|
||||
};
|
||||
result.outStream = options.outStream || process.stdout;
|
||||
result.errStream = options.errStream || process.stderr;
|
||||
return result;
|
||||
}
|
||||
_getSpawnSyncOptions(options) {
|
||||
let result = {};
|
||||
result.cwd = options.cwd;
|
||||
result.env = options.env;
|
||||
result['windowsVerbatimArguments'] = options.windowsVerbatimArguments || this._isCmdFile();
|
||||
return result;
|
||||
}
|
||||
/**
|
||||
* Add argument
|
||||
* Append an argument or an array of arguments
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param val string cmdline or array of strings
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
arg(val) {
|
||||
if (!val) {
|
||||
return this;
|
||||
}
|
||||
if (val instanceof Array) {
|
||||
core.debug(this.toolPath + ' arg: ' + JSON.stringify(val));
|
||||
this.args = this.args.concat(val);
|
||||
}
|
||||
else if (typeof (val) === 'string') {
|
||||
core.debug(this.toolPath + ' arg: ' + val);
|
||||
this.args = this.args.concat(val.trim());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Parses an argument line into one or more arguments
|
||||
* e.g. .line('"arg one" two -z') is equivalent to .arg(['arg one', 'two', '-z'])
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param val string argument line
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
line(val) {
|
||||
if (!val) {
|
||||
return this;
|
||||
}
|
||||
core.debug(this.toolPath + ' arg: ' + val);
|
||||
this.args = this.args.concat(this._argStringToArray(val));
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Add argument(s) if a condition is met
|
||||
* Wraps arg(). See arg for details
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param condition boolean condition
|
||||
* @param val string cmdline or array of strings
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
argIf(condition, val) {
|
||||
if (condition) {
|
||||
this.arg(val);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Pipe output of exec() to another tool
|
||||
* @param tool
|
||||
* @param file optional filename to additionally stream the output to.
|
||||
* @returns {ToolRunner}
|
||||
*/
|
||||
pipeExecOutputToTool(tool, file) {
|
||||
this.pipeOutputToTool = tool;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Exec a tool synchronously.
|
||||
* Output will be *not* be streamed to the live console. It will be returned after execution is complete.
|
||||
* Appropriate for short running tools
|
||||
* Returns IExecSyncResult with output and return code
|
||||
*
|
||||
* @param tool path to tool to exec
|
||||
* @param options optional exec options. See IExecSyncOptions
|
||||
* @returns IExecSyncResult
|
||||
*/
|
||||
execSync(options) {
|
||||
core.debug('exec tool: ' + this.toolPath);
|
||||
core.debug('arguments:');
|
||||
this.args.forEach((arg) => {
|
||||
core.debug(' ' + arg);
|
||||
});
|
||||
options = this._cloneExecOptions(options);
|
||||
if (!options.silent) {
|
||||
options.outStream.write(this._getCommandString(options) + os.EOL);
|
||||
}
|
||||
var r = child.spawnSync(this._getSpawnFileName(), this._getSpawnArgs(options), this._getSpawnSyncOptions(options));
|
||||
var res = { code: r.status, error: r.error };
|
||||
if (!options.silent && r.stdout && r.stdout.length > 0) {
|
||||
options.outStream.write(r.stdout);
|
||||
}
|
||||
if (!options.silent && r.stderr && r.stderr.length > 0) {
|
||||
options.errStream.write(r.stderr);
|
||||
}
|
||||
res.stdout = (r.stdout) ? r.stdout.toString() : '';
|
||||
res.stderr = (r.stderr) ? r.stderr.toString() : '';
|
||||
return res;
|
||||
}
|
||||
}
|
||||
exports.ToolRunner = ToolRunner;
|
||||
class ExecState extends events.EventEmitter {
|
||||
constructor(options, toolPath) {
|
||||
super();
|
||||
this.delay = 10000; // 10 seconds
|
||||
this.timeout = null;
|
||||
if (!toolPath) {
|
||||
throw new Error('toolPath must not be empty');
|
||||
}
|
||||
this.options = options;
|
||||
this.toolPath = toolPath;
|
||||
let delay = process.env['TASKLIB_TEST_TOOLRUNNER_EXITDELAY'];
|
||||
if (delay) {
|
||||
this.delay = parseInt(delay);
|
||||
}
|
||||
}
|
||||
CheckComplete() {
|
||||
if (this.done) {
|
||||
return;
|
||||
}
|
||||
if (this.processClosed) {
|
||||
this._setResult();
|
||||
}
|
||||
else if (this.processExited) {
|
||||
this.timeout = setTimeout(ExecState.HandleTimeout, this.delay, this);
|
||||
}
|
||||
}
|
||||
_setResult() {
|
||||
// determine whether there is an error
|
||||
let error;
|
||||
if (this.processExited) {
|
||||
if (this.processError) {
|
||||
error = new Error(`LIB_ProcessError: \n tool: ${this.toolPath} \n error: ${this.processError}`);
|
||||
}
|
||||
else if (this.processExitCode != 0 && !this.options.ignoreReturnCode) {
|
||||
error = new Error(`LIB_ProcessExitCode\n tool: ${this.toolPath} \n Exit Code: ${this.processExitCode}`);
|
||||
}
|
||||
else if (this.processStderr && this.options.failOnStdErr) {
|
||||
error = new Error(`LIB_ProcessStderr', ${this.toolPath}`);
|
||||
}
|
||||
}
|
||||
// clear the timeout
|
||||
if (this.timeout) {
|
||||
clearTimeout(this.timeout);
|
||||
this.timeout = null;
|
||||
}
|
||||
this.done = true;
|
||||
this.emit('done', error, this.processExitCode);
|
||||
}
|
||||
static HandleTimeout(state) {
|
||||
if (state.done) {
|
||||
return;
|
||||
}
|
||||
if (!state.processClosed && state.processExited) {
|
||||
core.debug(`LIB_StdioNotClosed`);
|
||||
}
|
||||
state._setResult();
|
||||
}
|
||||
}
|
|
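_argStringToArray is the parser behind line(): whitespace separates arguments unless it appears inside double quotes, the quotes themselves are stripped, and a backslash only escapes a double quote. A sketch of the splitting behaviour that implies (the kubectl command line below is only an example):

// Sketch: expected splitting behaviour of ToolRunner.line(), per the parser above.
const runner = new ToolRunner('kubectl');
runner.line('apply -f "my manifests/deployment.yaml" --namespace prod');
// runner.args is now:
//   ['apply', '-f', 'my manifests/deployment.yaml', '--namespace', 'prod']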
@@ -1,222 +0,0 @@
|
|||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getNormalizedPath = exports.isHttpUrl = exports.getCurrentTime = exports.getRandomInt = exports.sleep = exports.getDeploymentConfig = exports.annotateChildPods = exports.getWorkflowFilePath = exports.getLastSuccessfulRunSha = exports.checkForErrors = exports.isEqual = exports.getExecutableExtension = void 0;
|
||||
const os = require("os");
|
||||
const core = require("@actions/core");
|
||||
const githubClient_1 = require("../githubClient");
|
||||
const httpClient_1 = require("./httpClient");
|
||||
const inputParams = require("../input-parameters");
|
||||
const docker_object_model_1 = require("../docker-object-model");
|
||||
const io = require("@actions/io");
|
||||
function getExecutableExtension() {
|
||||
if (os.type().match(/^Win/)) {
|
||||
return '.exe';
|
||||
}
|
||||
return '';
|
||||
}
|
||||
exports.getExecutableExtension = getExecutableExtension;
|
||||
function isEqual(str1, str2, ignoreCase) {
|
||||
if (str1 == null && str2 == null) {
|
||||
return true;
|
||||
}
|
||||
if (str1 == null || str2 == null) {
|
||||
return false;
|
||||
}
|
||||
if (ignoreCase) {
|
||||
return str1.toUpperCase() === str2.toUpperCase();
|
||||
}
|
||||
else {
|
||||
return str1 === str2;
|
||||
}
|
||||
}
|
||||
exports.isEqual = isEqual;
|
||||
function checkForErrors(execResults, warnIfError) {
|
||||
if (execResults.length !== 0) {
|
||||
let stderr = '';
|
||||
execResults.forEach(result => {
|
||||
if (result && result.stderr) {
|
||||
if (result.code !== 0) {
|
||||
stderr += result.stderr + '\n';
|
||||
}
|
||||
else {
|
||||
core.warning(result.stderr);
|
||||
}
|
||||
}
|
||||
});
|
||||
if (stderr.length > 0) {
|
||||
if (warnIfError) {
|
||||
core.warning(stderr.trim());
|
||||
}
|
||||
else {
|
||||
throw new Error(stderr.trim());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.checkForErrors = checkForErrors;
|
||||
function getLastSuccessfulRunSha(kubectl, namespaceName, annotationKey) {
|
||||
try {
|
||||
const result = kubectl.getResource('namespace', namespaceName);
|
||||
if (result) {
|
||||
if (result.stderr) {
|
||||
core.warning(`${result.stderr}`);
|
||||
return process.env.GITHUB_SHA;
|
||||
}
|
||||
else if (result.stdout) {
|
||||
const annotationsSet = JSON.parse(result.stdout).metadata.annotations;
|
||||
if (annotationsSet && annotationsSet[annotationKey]) {
|
||||
return JSON.parse(annotationsSet[annotationKey].replace(/'/g, '"')).commit;
|
||||
}
|
||||
else {
|
||||
return 'NA';
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (ex) {
|
||||
core.warning(`Failed to get commits from cluster. ${JSON.stringify(ex)}`);
|
||||
return '';
|
||||
}
|
||||
}
|
||||
exports.getLastSuccessfulRunSha = getLastSuccessfulRunSha;
|
||||
function getWorkflowFilePath(githubToken) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let workflowFilePath = process.env.GITHUB_WORKFLOW;
|
||||
if (!workflowFilePath.startsWith('.github/workflows/')) {
|
||||
const githubClient = new githubClient_1.GitHubClient(process.env.GITHUB_REPOSITORY, githubToken);
|
||||
const response = yield githubClient.getWorkflows();
|
||||
if (response) {
|
||||
if (response.statusCode == httpClient_1.StatusCodes.OK
|
||||
&& response.body
|
||||
&& response.body.total_count) {
|
||||
if (response.body.total_count > 0) {
|
||||
for (let workflow of response.body.workflows) {
|
||||
if (process.env.GITHUB_WORKFLOW === workflow.name) {
|
||||
workflowFilePath = workflow.path;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (response.statusCode != httpClient_1.StatusCodes.OK) {
|
||||
core.debug(`An error occurred while getting the list of workflows on the repo. Status code: ${response.statusCode}, status message: ${response.statusMessage}`);
|
||||
}
|
||||
}
|
||||
else {
|
||||
core.warning(`Failed to get response from workflow list API`);
|
||||
}
|
||||
}
|
||||
return Promise.resolve(workflowFilePath);
|
||||
});
|
||||
}
|
||||
exports.getWorkflowFilePath = getWorkflowFilePath;
|
||||
function annotateChildPods(kubectl, resourceType, resourceName, annotationKeyValStr, allPods) {
|
||||
const commandExecutionResults = [];
|
||||
let owner = resourceName;
|
||||
if (resourceType.toLowerCase().indexOf('deployment') > -1) {
|
||||
owner = kubectl.getNewReplicaSet(resourceName);
|
||||
}
|
||||
if (allPods && allPods.items && allPods.items.length > 0) {
|
||||
allPods.items.forEach((pod) => {
|
||||
const owners = pod.metadata.ownerReferences;
|
||||
if (owners) {
|
||||
for (let ownerRef of owners) {
|
||||
if (ownerRef.name === owner) {
|
||||
commandExecutionResults.push(kubectl.annotate('pod', pod.metadata.name, annotationKeyValStr));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
return commandExecutionResults;
|
||||
}
|
||||
exports.annotateChildPods = annotateChildPods;
|
||||
function getDeploymentConfig() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let helmChartPaths = (process.env.HELM_CHART_PATHS && process.env.HELM_CHART_PATHS.split(';').filter(path => path != "")) || [];
|
||||
helmChartPaths = helmChartPaths.map(helmchart => getNormalizedPath(helmchart.trim()));
|
||||
let inputManifestFiles = inputParams.manifests || [];
|
||||
if (!helmChartPaths || helmChartPaths.length == 0) {
|
||||
inputManifestFiles = inputManifestFiles.map(manifestFile => getNormalizedPath(manifestFile));
|
||||
}
|
||||
const imageNames = inputParams.containers || [];
|
||||
let imageDockerfilePathMap = {};
|
||||
// Fetching from image label if available
|
||||
for (const image of imageNames) {
|
||||
try {
|
||||
imageDockerfilePathMap[image] = yield getDockerfilePath(image);
|
||||
}
|
||||
catch (ex) {
|
||||
core.warning(`Failed to get dockerfile path for image ${image.toString()} | ` + ex);
|
||||
}
|
||||
}
|
||||
const deploymentConfig = {
|
||||
manifestFilePaths: inputManifestFiles,
|
||||
helmChartFilePaths: helmChartPaths,
|
||||
dockerfilePaths: imageDockerfilePathMap
|
||||
};
|
||||
return Promise.resolve(deploymentConfig);
|
||||
});
|
||||
}
|
||||
exports.getDeploymentConfig = getDeploymentConfig;
|
||||
function sleep(timeout) {
|
||||
return new Promise(resolve => setTimeout(resolve, timeout));
|
||||
}
|
||||
exports.sleep = sleep;
|
||||
function getRandomInt(max) {
|
||||
return Math.floor(Math.random() * Math.floor(max));
|
||||
}
|
||||
exports.getRandomInt = getRandomInt;
|
||||
function getCurrentTime() {
|
||||
return new Date().getTime();
|
||||
}
|
||||
exports.getCurrentTime = getCurrentTime;
|
||||
function checkDockerPath() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let dockerPath = yield io.which('docker', false);
|
||||
if (!dockerPath) {
|
||||
throw new Error('Docker is not installed.');
|
||||
}
|
||||
});
|
||||
}
|
||||
function getDockerfilePath(image) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let imageConfig, imageInspectResult;
|
||||
var dockerExec = new docker_object_model_1.DockerExec('docker');
|
||||
yield checkDockerPath();
|
||||
dockerExec.pull(image, [], true);
|
||||
imageInspectResult = dockerExec.inspect(image, [], true);
|
||||
imageConfig = JSON.parse(imageInspectResult)[0];
|
||||
const DOCKERFILE_PATH_LABEL_KEY = 'dockerfile-path';
|
||||
let pathValue = '';
|
||||
if (imageConfig) {
|
||||
if ((imageConfig.Config) && (imageConfig.Config.Labels) && (imageConfig.Config.Labels[DOCKERFILE_PATH_LABEL_KEY])) {
|
||||
const pathLabel = imageConfig.Config.Labels[DOCKERFILE_PATH_LABEL_KEY];
|
||||
pathValue = getNormalizedPath(pathLabel);
|
||||
}
|
||||
}
|
||||
return Promise.resolve(pathValue);
|
||||
});
|
||||
}
|
||||
function isHttpUrl(url) {
|
||||
const HTTP_REGEX = /^https?:\/\/.*$/;
|
||||
return HTTP_REGEX.test(url);
|
||||
}
|
||||
exports.isHttpUrl = isHttpUrl;
|
||||
function getNormalizedPath(pathValue) {
|
||||
if (!isHttpUrl(pathValue)) { // if it is not an http url, convert to a link from the current repo and commit
|
||||
return `https://github.com/${process.env.GITHUB_REPOSITORY}/blob/${process.env.GITHUB_SHA}/${pathValue}`;
|
||||
}
|
||||
return pathValue;
|
||||
}
|
||||
exports.getNormalizedPath = getNormalizedPath;
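getNormalizedPath turns a repository-relative Dockerfile or manifest path into a permalink pinned to the commit being deployed, and passes full URLs through untouched. A sketch of the expected results, assuming hypothetical values GITHUB_REPOSITORY=contoso/shop and GITHUB_SHA=abc123:

// Sketch: expected output of getNormalizedPath for a relative path vs. a URL
// (repository and commit values above are hypothetical).
console.log(getNormalizedPath('src/Dockerfile'));
// https://github.com/contoso/shop/blob/abc123/src/Dockerfile
console.log(getNormalizedPath('https://example.com/Dockerfile'));
// https://example.com/Dockerfile  (already a URL, returned as-is)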