зеркало из https://github.com/Azure/azure-cli.git
[Containerapp] `az containerapp`: Move `containerapp` from CLI extension to core CLI (#27078)
This commit is contained in:
Родитель
a0b940589e
Коммит
f73c7e9108
|
@ -65,3 +65,4 @@
|
|||
/src/azure-cli/azure/cli/command_modules/synapse/ @jsntcy @idear1203 @zesluo @evelyn-ys
|
||||
/src/azure-cli/azure/cli/command_modules/util/ @jiasli @zhoxing-ms @evelyn-ys
|
||||
/src/azure-cli/azure/cli/command_modules/vm/ @zhoxing-ms @jsntcy @wangzelin007 @yanzhudd @Drewm3 @TravisCragg-MSFT @nikhilpatel909 @sandeepraichura @hilaryw29 @GabstaMSFT @ramankumarlive @ushnaarshadkhan
|
||||
/src/azure-cli/azure/cli/command_modules/containerapp/ @ruslany @sanchitmehta @ebencarek @JennyLawrance @howang-ms @vinisoto @chinadragon0515 @vturecek @torosent @pagariyaalok @Juliehzl @jijohn14 @Greedygre
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
"configure": "src/azure-cli/azure/cli/command_modules/configure/_help.py",
|
||||
"consumption": "src/azure-cli/azure/cli/command_modules/consumption/_help.py",
|
||||
"container": "src/azure-cli/azure/cli/command_modules/container/_help.py",
|
||||
"containerapp": "src/azure-cli/azure/cli/command_modules/containerapp/_help.py",
|
||||
"cosmosdb": "src/azure-cli/azure/cli/command_modules/cosmosdb/_help.py",
|
||||
"databoxedge": "src/azure-cli/azure/cli/command_modules/databoxedge/_help.py",
|
||||
"deployment": "src/azure-cli/azure/cli/command_modules/resource/_help.py",
|
||||
|
|
|
@ -576,6 +576,30 @@
|
|||
"src\\azure-cli\\azure\\cli\\command_modules\\acs\\tests\\latest\\data\\setup_proxy.sh"
|
||||
],
|
||||
"_justification": "Dummy self-signed certificate + private key used for testing only."
|
||||
},
|
||||
{
|
||||
"placeholder": "\"sharedKey\":\"abc123\"",
|
||||
"_justification": "[containerapp] request body for create containerapp environment contains property sharedKey recognized as secret"
|
||||
},
|
||||
{
|
||||
"placeholder": "\"primarySharedKey\":\"abc123\"",
|
||||
"_justification": "[containerapp] Get log workspace primary sharedkey, response body contains sharedKey recognized as secret"
|
||||
},
|
||||
{
|
||||
"placeholder": "abc123",
|
||||
"_justification": "[containerapp] request body contains sharedKey recognized as secret"
|
||||
},
|
||||
{
|
||||
"placeholder": "test12",
|
||||
"_justification": "[containerapp] certificate password for load certificate file"
|
||||
},
|
||||
{
|
||||
"file": [
|
||||
"src\\azure-cli\\azure\\cli\\command_modules\\containerapp\\tests\\latest\\data\\cert.pfx",
|
||||
"src\\azure-cli\\azure\\cli\\command_modules\\containerapp\\tests\\latest\\data\\cert.pem",
|
||||
"src\\azure-cli\\azure\\cli\\command_modules\\containerapp\\tests\\latest\\data\\cert.txt"
|
||||
],
|
||||
"_justification": "[containerapp] Test certs"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=super-with-arguments
|
||||
|
||||
from azure.cli.core import AzCommandsLoader
|
||||
|
||||
from azure.cli.command_modules.containerapp._help import helps # pylint: disable=unused-import
|
||||
|
||||
|
||||
class ContainerappCommandsLoader(AzCommandsLoader):
    """Command loader for the `az containerapp` command group."""

    def __init__(self, cli_ctx=None):
        from azure.cli.core.commands import CliCommandType

        # Custom (hand-written) commands resolve against this module's custom.py.
        custom_type = CliCommandType(
            operations_tmpl='azure.cli.command_modules.containerapp.custom#{}',
            client_factory=None)
        super(ContainerappCommandsLoader, self).__init__(
            cli_ctx=cli_ctx, custom_command_type=custom_type)

    def load_command_table(self, args):
        from azure.cli.command_modules.containerapp.commands import load_command_table
        from azure.cli.core.aaz import load_aaz_command_table

        # The generated (aaz) command package is optional; when absent only
        # the hand-written command table is loaded.
        try:
            from . import aaz
        except ImportError:
            aaz = None
        if aaz:
            load_aaz_command_table(loader=self, aaz_pkg_name=aaz.__name__, args=args)
        load_command_table(self, args)
        return self.command_table

    def load_arguments(self, command):
        from azure.cli.command_modules.containerapp._params import load_arguments
        load_arguments(self, command)


COMMAND_LOADER_CLS = ContainerappCommandsLoader
|
|
@ -0,0 +1,112 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, consider-using-f-string
|
||||
|
||||
import time
|
||||
|
||||
from msrest import Deserializer
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.cli.core.profiles import ResourceType
|
||||
from azure.cli.command_modules.acr._constants import get_acr_task_models
|
||||
from azure.core.polling import PollingMethod, LROPoller
|
||||
|
||||
|
||||
def get_run_with_polling(cmd,
                         client,
                         run_id,
                         registry_name,
                         resource_group_name):
    """Return an LROPoller that tracks the given ACR task run to completion."""
    # Build a deserializer over every model class exposed by the ACR task SDK.
    model_types = {name: model
                   for name, model in get_acr_task_models(cmd).__dict__.items()
                   if isinstance(model, type)}
    deserializer = Deserializer(model_types)

    def deserialize_run(response):
        return deserializer('Run', response)

    # cls=lambda x, y, z: x hands back the raw pipeline response so the
    # poller can re-deserialize it itself.
    initial = client.get(resource_group_name, registry_name, run_id,
                         cls=lambda x, y, z: x)
    return LROPoller(client=client,
                     initial_response=initial,
                     deserialization_callback=deserialize_run,
                     polling_method=RunPolling(cmd=cmd,
                                               registry_name=registry_name,
                                               run_id=run_id))
|
||||
|
||||
|
||||
class RunPolling(PollingMethod):  # pylint: disable=too-many-instance-attributes
    """Polling method that tracks an ACR task run until it reaches a finished state.

    Repeatedly re-issues the original GET for the run and surfaces the final
    Run model via ``resource()``. Raises ``CLIError`` when the run finishes in
    any non-succeeded state.
    """

    def __init__(self, cmd, registry_name, run_id, timeout=30):
        # NOTE: ``timeout`` is the sleep interval (seconds) between polls,
        # not an overall deadline.
        self._cmd = cmd
        self._registry_name = registry_name
        self._run_id = run_id
        self._timeout = timeout
        self._client = None
        self._response = None  # Will hold latest received response
        self._url = None  # The URL used to get the run
        self._deserialize = None  # The deserializer for Run
        self.operation_status = ""
        self.operation_result = None

    def initialize(self, client, initial_response, deserialization_callback):
        """Capture the pipeline client, initial GET response and Run deserializer."""
        self._client = client._client  # pylint: disable=protected-access
        self._response = initial_response
        self._url = initial_response.http_request.url
        self._deserialize = deserialization_callback

        self._set_operation_status(initial_response)

    def run(self):
        """Poll until the run finishes; raise CLIError on an unsuccessful end state."""
        while not self.finished():
            time.sleep(self._timeout)
            self._update_status()

        if self.operation_status not in get_succeeded_run_status(self._cmd):
            from knack.util import CLIError
            raise CLIError("The run with ID '{}' finished with unsuccessful status '{}'. "
                           "Show run details by 'az acr task show-run -r {} --run-id {}'. "
                           "Show run logs by 'az acr task logs -r {} --run-id {}'.".format(
                               self._run_id,
                               self.operation_status,
                               self._registry_name,
                               self._run_id,
                               self._registry_name,
                               self._run_id
                           ))

    def status(self):
        """Return the most recently observed run status string."""
        return self.operation_status

    def finished(self):
        """Return True once the status is one of the finished run states."""
        return self.operation_status in get_finished_run_status(self._cmd)

    def resource(self):
        """Return the deserialized Run model from the last successful poll."""
        return self.operation_result

    def _set_operation_status(self, response):
        # Only HTTP 200 carries a Run payload; anything else becomes a CloudError.
        if response.http_response.status_code == 200:
            self.operation_result = self._deserialize(response)
            self.operation_status = self.operation_result.status
            return
        raise CloudError(response)

    def _update_status(self):
        # Re-issue the original GET through the raw pipeline and refresh state.
        self._response = self._client._pipeline.run(  # pylint: disable=protected-access
            self._client.get(self._url), stream=False)
        self._set_operation_status(self._response)
|
||||
|
||||
|
||||
def get_succeeded_run_status(cmd):
    """Return the list of run status values that count as success."""
    run_status = cmd.get_models('RunStatus',
                                resource_type=ResourceType.MGMT_CONTAINERREGISTRY,
                                operation_group='task_runs')
    return [run_status.succeeded.value]
|
||||
|
||||
|
||||
def get_finished_run_status(cmd):
    """Return the list of run status values that mean the run has finished."""
    run_status = cmd.get_models('RunStatus',
                                resource_type=ResourceType.MGMT_CONTAINERREGISTRY,
                                operation_group='task_runs')
    return [run_status.succeeded.value,
            run_status.failed.value,
            run_status.canceled.value,
            run_status.error.value,
            run_status.timeout.value]
|
|
@ -0,0 +1,236 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=consider-using-f-string, consider-using-with, no-member
|
||||
|
||||
import tarfile
|
||||
import os
|
||||
import re
|
||||
import codecs
|
||||
from io import open
|
||||
import requests
|
||||
from knack.log import get_logger
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.cli.core.azclierror import (CLIInternalError)
|
||||
from azure.cli.core.profiles import ResourceType, get_sdk
|
||||
from azure.cli.command_modules.acr._constants import TASK_VALID_VSTS_URLS
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
def upload_source_code(cmd, client,
                       registry_name,
                       resource_group_name,
                       source_location,
                       tar_file_path,
                       docker_file_path,
                       docker_file_in_tar):
    """Tar the local build context and upload it to the registry's SAS blob.

    Returns the relative path the service assigned to the uploaded context.
    """
    _pack_source_code(source_location,
                      tar_file_path,
                      docker_file_path,
                      docker_file_in_tar)

    # Pick a human-readable unit for the archive size (falls back to GiB).
    size = os.path.getsize(tar_file_path)
    unit = 'GiB'
    for candidate in ['Bytes', 'KiB', 'MiB', 'GiB']:
        if size < 1024:
            unit = candidate
            break
        size = size / 1024.0

    logger.info("Uploading archived source code from '%s'...", tar_file_path)
    upload_url = None
    relative_path = None
    try:
        source_upload_location = client.get_build_source_upload_url(
            resource_group_name, registry_name)
        upload_url = source_upload_location.upload_url
        relative_path = source_upload_location.relative_path
    except (AttributeError, CloudError) as e:
        raise CLIInternalError("Failed to get a SAS URL to upload context. Error: {}".format(e.message)) from e

    if not upload_url:
        raise CLIInternalError("Failed to get a SAS URL to upload context.")

    # Resolve the storage SDK through the CLI profile so the right API
    # version is used, then stream the tar straight to the SAS URL.
    blob_client_cls = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE_BLOB, '_blob_client#BlobClient')
    blob_client = blob_client_cls.from_blob_url(upload_url, connection_timeout=300)
    with open(tar_file_path, "rb") as data:
        blob_client.upload_blob(data=data, blob_type="BlockBlob", overwrite=True)
    logger.info("Sending context ({0:.3f} {1}) to registry: {2}...".format(
        size, unit, registry_name))
    return relative_path
|
||||
|
||||
|
||||
def _pack_source_code(source_location, tar_file_path, docker_file_path, docker_file_in_tar):
    """Create a gzipped tar of *source_location* at *tar_file_path*.

    Honours .dockerignore rules (and common VCS directories), then always
    appends the Dockerfile as *docker_file_in_tar* when one is given, even if
    an ignore rule would have excluded it.
    """
    logger.info("Packing source code into tar to upload...")

    original_docker_file_name = os.path.basename(docker_file_path.replace("\\", os.sep))
    ignore_list, ignore_list_size = _load_dockerignore_file(source_location, original_docker_file_name)
    common_vcs_ignore_list = {'.git', '.gitignore', '.bzr', 'bzrignore', '.hg', '.hgignore', '.svn'}

    def _ignore_check(tarinfo, parent_ignored, parent_matching_rule_index):
        # Decide (ignored, matching_rule_index) for one tar entry.
        # ignore common vcs dir or file
        if tarinfo.name in common_vcs_ignore_list:
            logger.info("Excluding '%s' based on default ignore rules", tarinfo.name)
            return True, parent_matching_rule_index

        if ignore_list is None:
            # if .dockerignore doesn't exists, inherit from parent
            # eg, it will ignore the files under .git folder.
            return parent_ignored, parent_matching_rule_index

        for index, item in enumerate(ignore_list):
            # stop checking the remaining rules whose priorities are lower than the parent matching rule
            # at this point, current item should just inherit from parent
            if index >= parent_matching_rule_index:
                break
            if re.match(item.pattern, tarinfo.name):
                logger.debug(".dockerignore: rule '%s' matches '%s'.",
                             item.rule, tarinfo.name)
                return item.ignore, index

        logger.debug(".dockerignore: no rule for '%s'. parent ignore '%s'",
                     tarinfo.name, parent_ignored)
        # inherit from parent
        return parent_ignored, parent_matching_rule_index

    with tarfile.open(tar_file_path, "w:gz") as tar:
        # need to set arcname to empty string as the archive root path
        _archive_file_recursively(tar,
                                  source_location,
                                  arcname="",
                                  parent_ignored=False,
                                  parent_matching_rule_index=ignore_list_size,
                                  ignore_check=_ignore_check)

        # Add the Dockerfile if it's specified.
        # In the case of run, there will be no Dockerfile.
        if docker_file_path:
            docker_file_tarinfo = tar.gettarinfo(
                docker_file_path, docker_file_in_tar)
            with open(docker_file_path, "rb") as f:
                tar.addfile(docker_file_tarinfo, f)
|
||||
|
||||
|
||||
class IgnoreRule:  # pylint: disable=too-few-public-methods
    """One parsed .dockerignore rule, compiled to an anchored regex in ``pattern``."""

    def __init__(self, rule):
        self.rule = rule
        self.ignore = True
        # A leading '!' re-includes paths excluded by earlier rules.
        if rule.startswith('!'):
            self.ignore = False
            rule = rule[1:]
        # Paths in the tar are relative, so a leading '/' carries no meaning.
        if rule.startswith('/'):
            rule = rule[1:]

        segments = rule.split('/')
        last = len(segments)
        parts = ["^"]
        for pos, segment in enumerate(segments, 1):
            if segment == "**":
                # '**' spans any number of directories (treat **/ as **).
                parts.append(".*")
            else:
                # '*' -> any run of non-separators, '?' -> one non-separator,
                # '.' must match literally.
                parts.append(segment.replace("*", "[^/]*")
                                    .replace("?", "[^/]")
                                    .replace(".", "\\."))
            if pos < last:
                parts.append("/")  # restore the separator between segments
        parts.append("$")
        self.pattern = "".join(parts)
|
||||
|
||||
|
||||
def _load_dockerignore_file(source_location, original_docker_file_name):
|
||||
# reference: https://docs.docker.com/engine/reference/builder/#dockerignore-file
|
||||
docker_ignore_file = os.path.join(source_location, ".dockerignore")
|
||||
docker_ignore_file_override = None
|
||||
if original_docker_file_name != "Dockerfile":
|
||||
docker_ignore_file_override = os.path.join(
|
||||
source_location, "{}.dockerignore".format(original_docker_file_name))
|
||||
if os.path.exists(docker_ignore_file_override):
|
||||
logger.info("Overriding .dockerignore with %s", docker_ignore_file_override)
|
||||
docker_ignore_file = docker_ignore_file_override
|
||||
|
||||
if not os.path.exists(docker_ignore_file):
|
||||
return None, 0
|
||||
|
||||
encoding = "utf-8"
|
||||
header = open(docker_ignore_file, "rb").read(len(codecs.BOM_UTF8))
|
||||
if header.startswith(codecs.BOM_UTF8):
|
||||
encoding = "utf-8-sig"
|
||||
|
||||
ignore_list = []
|
||||
if docker_ignore_file == docker_ignore_file_override:
|
||||
ignore_list.append(IgnoreRule(".dockerignore"))
|
||||
|
||||
for line in open(docker_ignore_file, 'r', encoding=encoding).readlines():
|
||||
rule = line.rstrip()
|
||||
|
||||
# skip empty line and comment
|
||||
if not rule or rule.startswith('#'):
|
||||
continue
|
||||
|
||||
# the ignore rule at the end has higher priority
|
||||
ignore_list = [IgnoreRule(rule)] + ignore_list
|
||||
|
||||
return ignore_list, len(ignore_list)
|
||||
|
||||
|
||||
def _archive_file_recursively(tar, name, arcname, parent_ignored, parent_matching_rule_index, ignore_check):
|
||||
# create a TarInfo object from the file
|
||||
tarinfo = tar.gettarinfo(name, arcname)
|
||||
|
||||
if tarinfo is None:
|
||||
raise CLIInternalError("tarfile: unsupported type {}".format(name))
|
||||
|
||||
# check if the file/dir is ignored
|
||||
ignored, matching_rule_index = ignore_check(
|
||||
tarinfo, parent_ignored, parent_matching_rule_index)
|
||||
|
||||
if not ignored:
|
||||
# append the tar header and data to the archive
|
||||
if tarinfo.isreg():
|
||||
with open(name, "rb") as f:
|
||||
tar.addfile(tarinfo, f)
|
||||
else:
|
||||
tar.addfile(tarinfo)
|
||||
|
||||
# even the dir is ignored, its child items can still be included, so continue to scan
|
||||
if tarinfo.isdir():
|
||||
for f in os.listdir(name):
|
||||
_archive_file_recursively(tar, os.path.join(name, f), os.path.join(arcname, f),
|
||||
parent_ignored=ignored, parent_matching_rule_index=matching_rule_index,
|
||||
ignore_check=ignore_check)
|
||||
|
||||
|
||||
def check_remote_source_code(source_location):
    """Validate that *source_location* is a usable remote build context and
    return it; raise CLIInternalError when it cannot be reached/recognized."""
    lower_source_location = source_location.lower()

    # Explicit git protocols need no further validation.
    if lower_source_location.startswith(("git@", "git://")):
        return source_location

    # HTTP(S) URLs: either a git repo or a downloadable tarball.
    if lower_source_location.startswith(("https://", "http://", "github.com/")):
        is_vsts = any(url in lower_source_location for url in TASK_VALID_VSTS_URLS)
        if is_vsts or re.search(r"\.git(?:#.+)?$", lower_source_location):
            # git url must contain ".git" or be from VSTS/Azure DevOps.
            # This is because Azure DevOps doesn't follow the standard git server convention of putting
            # .git at the end of their URLs, so we have to special case them.
            return source_location
        if not lower_source_location.startswith("github.com/"):
            # Anything else over HTTP is treated as a tarball; probe it.
            if requests.head(source_location).status_code < 400:
                return source_location
            raise CLIInternalError("'{}' doesn't exist.".format(source_location))

    # OCI artifact reference.
    if lower_source_location.startswith("oci://"):
        return source_location
    raise CLIInternalError("'{}' doesn't exist.".format(source_location))
|
|
@ -0,0 +1,131 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, consider-using-f-string
|
||||
|
||||
from azure.cli.core.commands.client_factory import get_mgmt_service_client
|
||||
from azure.cli.core.profiles import ResourceType
|
||||
from azure.cli.core.azclierror import CLIInternalError
|
||||
|
||||
|
||||
# pylint: disable=inconsistent-return-statements
def ex_handler_factory(no_throw=False):
    """Build an exception handler that replaces a raw service error with a
    CLIInternalError carrying the response's message, when one is present.

    With ``no_throw=True`` the (possibly replaced) exception is returned
    instead of raised.
    """
    def _polish_bad_errors(ex):
        import json
        try:
            content = json.loads(ex.response.content)
            # Prefer 'message', then 'Message'; leave ex untouched otherwise.
            for key in ('message', 'Message'):
                if key in content:
                    ex = CLIInternalError(content[key])
                    break
        except Exception:  # pylint: disable=broad-except
            # Unparseable body: keep the original exception.
            pass
        if no_throw:
            return ex
        raise ex
    return _polish_bad_errors
|
||||
|
||||
|
||||
def handle_raw_exception(e):
    """Translate a raw service exception into a CLIInternalError with a
    readable message, or re-raise *e* unchanged when none can be extracted."""
    import json

    err_text = str(e)

    # Well-known service errors get curated messages.
    if "WorkloadProfileNameRequired" in err_text:
        raise CLIInternalError("Workload profile name is required. Please provide --workload-profile-name.")
    if "Error starting job" in err_text:
        raise CLIInternalError("There was an error starting the job execution. Please check input parameters and try again.")

    # Otherwise try to pull the ARM error payload out of the exception text.
    if "{" in err_text and "}" in err_text:
        payload = json.loads(err_text[err_text.index("{"):err_text.rindex("}") + 1])
        if 'error' in payload:
            payload = payload['error']

        if 'code' in payload and 'message' in payload:
            raise CLIInternalError('({}) {}'.format(payload['code'], payload['message']))
        if "Message" in payload:
            raise CLIInternalError(payload["Message"])
        if "message" in payload:
            raise CLIInternalError(payload["message"])
    raise e
|
||||
|
||||
|
||||
def handle_non_resource_not_found_exception(e):
    """Like handle_raw_exception, except a ResourceNotFound error payload is
    returned to the caller instead of being raised."""
    import json

    err_text = str(e)

    if "{" in err_text and "}" in err_text:
        payload = json.loads(err_text[err_text.index("{"):err_text.rindex("}") + 1])
        if 'error' in payload:
            payload = payload['error']

        if 'code' in payload and 'message' in payload:
            # ResourceNotFound is an expected outcome here; hand it back.
            if payload['code'] != "ResourceNotFound":
                raise CLIInternalError('({}) {}'.format(payload['code'], payload['message']))
            return payload
        if "Message" in payload:
            raise CLIInternalError(payload["Message"])
        if "message" in payload:
            raise CLIInternalError(payload["message"])
    raise e
|
||||
|
||||
|
||||
def handle_non_404_status_code_exception(e):
    """Return *e* for HTTP 404s and the parsed error payload for code/message
    errors; raise CLIInternalError (or re-raise *e*) for everything else."""
    import json

    # 404 is an expected outcome for existence checks; hand it back.
    if hasattr(e, 'status_code') and e.status_code == 404:
        return e

    err_text = str(e)
    if "{" in err_text and "}" in err_text:
        payload = json.loads(err_text[err_text.index("{"):err_text.rindex("}") + 1])
        if 'error' in payload:
            payload = payload['error']
        if 'code' in payload and 'message' in payload:
            return payload
        if "Message" in payload:
            raise CLIInternalError(payload["Message"])
        if "message" in payload:
            raise CLIInternalError(payload["message"])
    raise e
|
||||
|
||||
|
||||
def providers_client_factory(cli_ctx, subscription_id=None):
    """ARM resource-provider operations for the given (or current) subscription."""
    arm_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                         subscription_id=subscription_id)
    return arm_client.providers
|
||||
|
||||
|
||||
def cf_resource_groups(cli_ctx, subscription_id=None):
    """Resource-group operations for the given (or current) subscription."""
    arm_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                         subscription_id=subscription_id)
    return arm_client.resource_groups
|
||||
|
||||
|
||||
def log_analytics_client_factory(cli_ctx):
    """Log Analytics workspace operations client."""
    from azure.mgmt.loganalytics import LogAnalyticsManagementClient

    client = get_mgmt_service_client(cli_ctx, LogAnalyticsManagementClient)
    return client.workspaces
|
||||
|
||||
|
||||
def log_analytics_shared_key_client_factory(cli_ctx):
    """Log Analytics workspace shared-key operations client."""
    from azure.mgmt.loganalytics import LogAnalyticsManagementClient

    client = get_mgmt_service_client(cli_ctx, LogAnalyticsManagementClient)
    return client.shared_keys
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,378 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument, expression-not-assigned, too-many-locals, logging-fstring-interpolation, arguments-differ, abstract-method, logging-format-interpolation, broad-except
|
||||
|
||||
from knack.log import get_logger
|
||||
from knack.prompting import prompt, prompt_choice_list
|
||||
|
||||
from .custom import create_managed_environment
|
||||
from ._up_utils import (ContainerApp,
|
||||
ContainerAppEnvironment,
|
||||
ResourceGroup,
|
||||
_get_registry_from_app,
|
||||
_get_registry_details,
|
||||
) # pylint: disable=unused-import
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
# Monkey patch for log analytics workspace name
|
||||
# this allows the test framework to pass down a specific
|
||||
# name to support playback of recorded tests.
|
||||
|
||||
|
||||
def create_containerapps_compose_environment(cmd, name, resource_group_name, tags=None):
    """Create the managed environment that hosts compose-derived container apps.

    Thin wrapper over ``create_managed_environment``; kept so compose code has
    a single, stable entry point.
    """
    return create_managed_environment(cmd, name, resource_group_name, tags=tags)
|
||||
|
||||
|
||||
def build_containerapp_from_compose_service(cmd,
                                            name,
                                            source,
                                            dockerfile,
                                            resource_group_name,
                                            managed_env,
                                            location,
                                            image,
                                            target_port,
                                            ingress,
                                            registry_server,
                                            registry_user,
                                            registry_pass,
                                            env_vars,
                                            logs_key=None,
                                            logs_customer_id=None):
    """Build a compose service's image via ACR and return the resulting image
    reference together with the registry credentials that were used."""
    group = ResourceGroup(cmd, name=resource_group_name, location=location)
    environment = ContainerAppEnvironment(cmd,
                                          managed_env,
                                          group,
                                          location=location,
                                          logs_key=logs_key,
                                          logs_customer_id=logs_customer_id)
    app = ContainerApp(cmd,
                       name,
                       group,
                       None,
                       image,
                       environment,
                       target_port,
                       registry_server,
                       registry_user,
                       registry_pass,
                       env_vars,
                       ingress)

    if not registry_server:
        _get_registry_from_app(app, True)  # if the app exists, get the registry
        _get_registry_details(cmd, app, True)  # fetch ACR creds from arguments registry arguments

    app.create_acr_if_needed()
    app.run_acr_build(dockerfile, source, False)
    return app.image, app.registry_server, app.registry_user, app.registry_pass
|
||||
|
||||
|
||||
def resolve_configuration_element_list(compose_service, unsupported_configuration, area=None):
    """Return compose-file paths ("<compose_path>/<element>") for every listed
    element that is actually set on *compose_service*.

    When *area* is given, the check runs against that sub-section of the
    service (e.g. its 'build' section).
    """
    if area is not None:
        compose_service = getattr(compose_service, area)
    config_list = []
    for configuration_element in unsupported_configuration:
        try:
            attribute = getattr(compose_service, configuration_element)
        except AttributeError:
            logger.critical("Failed to resolve %s", configuration_element)
            # Fix: previously fell through and tested a stale (or unbound)
            # 'attribute' from the prior iteration; skip this element instead.
            continue
        if attribute is not None:
            config_list.append(f"{compose_service.compose_path}/{configuration_element}")
    return config_list
|
||||
|
||||
|
||||
def warn_about_unsupported_build_configuration(compose_service):
    """Warn when the service's build section uses options the conversion ignores.

    Only a build context and an optional target Dockerfile are honoured.
    """
    unsupported_configuration = ["args", "ssh", "cache_from", "cache_to", "extra_hosts",
                                 "isolation", "labels", "no_cache", "pull", "shm_size",
                                 "target", "secrets", "tags"]
    if compose_service.build is not None:
        config_list = resolve_configuration_element_list(compose_service, unsupported_configuration, 'build')
        # Fix: message previously read "are yet supported" — missing "not".
        message = "These build configuration settings from the docker-compose file are not yet supported."
        message += " Currently, we support supplying a build context and optionally target Dockerfile for a service."
        message += " See https://aka.ms/containerapp/compose/build_support for more information or to add feedback."
        if len(config_list) >= 1:
            logger.warning(message)
            for item in config_list:
                logger.warning(" %s", item)
|
||||
|
||||
|
||||
def warn_about_unsupported_runtime_host_configuration(compose_service):
    """Warn when the service sets container/host options Container Apps ignores."""
    unsupported_configuration = ["blkio_config", "cpu_count", "cpu_percent", "cpu_shares", "cpu_period",
                                 "cpu_quota", "cpu_rt_runtime", "cpu_rt_period", "cpuset", "cap_add",
                                 "cap_drop", "cgroup_parent", "configs", "credential_spec",
                                 "device_cgroup_rules", "devices", "dns", "dns_opt", "dns_search",
                                 "domainname", "external_links", "extra_hosts", "group_add", "healthcheck",
                                 "hostname", "init", "ipc", "isolation", "links", "logging", "mem_limit",
                                 "mem_swappiness", "memswap_limit", "oom_kill_disable", "oom_score_adj",
                                 "pid", "pids_limit", "privileged", "profiles", "pull_policy", "read_only",
                                 "restart", "runtime", "security_opt", "shm_size", "stdin_open",
                                 "stop_grace_period", "stop_signal", "storage_opt", "sysctls", "tmpfs",
                                 "tty", "ulimits", "user", "working_dir"]
    config_list = resolve_configuration_element_list(compose_service, unsupported_configuration)
    message = ("These container and host configuration elements from the docker-compose file are not supported"
               " in Azure Container Apps. For more information about supported configuration,"
               " please see https://aka.ms/containerapp/compose/configuration")
    if config_list:
        logger.warning(message)
        for item in config_list:
            logger.warning(" %s", item)
|
||||
|
||||
|
||||
def warn_about_unsupported_volumes(compose_service):
    """Warn when the service declares volume mounts, which are not translated."""
    unsupported_configuration = ["volumes", "volumes_from"]
    config_list = resolve_configuration_element_list(compose_service, unsupported_configuration)
    message = ("These volume mount elements from the docker-compose file are not supported"
               " in Azure Container Apps. For more information about supported storage configuration,"
               " please see https://aka.ms/containerapp/compose/volumes")
    if config_list:
        logger.warning(message)
        for item in config_list:
            logger.warning(" %s", item)
|
||||
|
||||
|
||||
def warn_about_unsupported_network(compose_service):
    """Warn when the service declares network settings that are not translated."""
    unsupported_configuration = ["networks", "network_mode", "mac_address"]
    config_list = resolve_configuration_element_list(compose_service, unsupported_configuration)
    message = ("These network configuration settings from the docker-compose file are not supported"
               " in Azure Container Apps. For more information about supported networking configuration,"
               " please see https://aka.ms/containerapp/compose/networking")
    if config_list:
        logger.warning(message)
        for item in config_list:
            logger.warning(" %s", item)
|
||||
|
||||
|
||||
def warn_about_unsupported_elements(compose_service):
    """Run every category of unsupported-configuration warning for a service."""
    for check in (warn_about_unsupported_build_configuration,
                  warn_about_unsupported_runtime_host_configuration,
                  warn_about_unsupported_volumes,
                  warn_about_unsupported_network):
        check(compose_service)
|
||||
|
||||
|
||||
def check_supported_platform(platform):
    """Return True when *platform* is unset, 'linux', or 'linux/amd64[/...]'."""
    if platform is None:
        return True
    parts = platform.split('/')
    if len(parts) == 1:
        return parts[0] == 'linux'
    # Two or more segments: only OS and architecture are checked.
    return parts[0] == 'linux' and parts[1] == 'amd64'
|
||||
|
||||
|
||||
def service_deploy_exists(service):
    """Return True when the compose service carries a deploy section."""
    return service.deploy is not None


def service_deploy_resources_exists(service):
    """Return True when the deploy section declares resource settings."""
    if not service_deploy_exists(service):
        return False
    return service.deploy.resources is not None
|
||||
|
||||
|
||||
def flatten_list(source_value):
    """Collapse a list of lists into a single flat list."""
    return [element for sub_list in source_value for element in sub_list]
|
||||
|
||||
|
||||
def resolve_transport_from_cli_args(service_name, transport):
    """Resolve the ingress transport for *service_name* from ``--transport`` CLI values.

    *transport* is a list of lists of ``"service=transport"`` strings (nested
    because of argparse append/nargs), or None. Service-name matching is
    case-insensitive. Returns the matching transport, or ``'auto'`` when the
    service has no explicit setting.

    Fix: split on the first '=' only (``partition``) — the old
    ``setting.split('=')`` raised ValueError for values containing '='
    or for malformed settings with no '=' at all.
    """
    if transport is not None:
        # flatten the nested CLI lists into a single stream of settings
        for setting in (item for group in transport for item in group):
            key, _, value = setting.partition('=')
            if key.lower() == service_name.lower():
                return value
    return 'auto'
|
||||
|
||||
|
||||
def resolve_registry_from_cli_args(registry_server, registry_user, registry_pass):
    """Prompt interactively for any registry credential missing from the CLI args.

    Only prompts when a registry server was given: both username and password
    when neither was supplied, or just the password when only the username was.
    Returns the (server, user, password) triple.
    """
    if registry_server is not None:
        if registry_user is None:
            if registry_pass is None:
                registry_user = prompt("Please enter the registry's username: ")
                registry_pass = prompt("Please enter the registry's password: ")
        elif registry_pass is None:
            registry_pass = prompt("Please enter the registry's password: ")
    return (registry_server, registry_user, registry_pass)
|
||||
|
||||
|
||||
def resolve_environment_from_service(service):
    """Build a ``["KEY=value", ...]`` list from the compose service's environment.

    Variables declared without a value are filled in interactively.
    Returns None when the service defines no environment at all.
    """
    env_vars = service.resolve_environment_hierarchy()
    if env_vars is None:
        return None

    resolved = []
    for name, value in env_vars.items():
        if value is None:
            value = prompt(f"{name} is empty. What would you like the value to be? ")
        resolved.append(f"{name}={value}")
    return resolved
|
||||
|
||||
|
||||
def resolve_secret_from_service(service, secrets_map):
    """Map docker-compose secrets onto Container Apps secrets.

    Returns a pair ``(secret_array, secret_env_ref)``:
      * ``secret_array``   -- ``["name=value", ...]`` secrets to create on the app
      * ``secret_env_ref`` -- ``["name=secretref:name", ...]`` env-var references
    or ``(None, None)`` when the service declares no usable secrets.

    Secret names have '_' replaced with '-' (Container Apps secret names
    cannot contain underscores). File-backed secrets are read eagerly.
    """
    if service.secrets is None:
        return (None, None)

    secret_array = []
    secret_env_ref = []
    for secret in service.secrets:
        # Fix: use .get() — a secret referenced by the service but absent from
        # the top-level secrets map previously raised KeyError, making the
        # following `is not None` check unreachable for that case.
        secret_config = secrets_map.get(secret.source)
        if secret_config is None or secret_config.file is None:
            continue
        value = secret_config.file.readFile()
        # target overrides the secret's name inside the container when present
        target = secret.target if secret.target is not None else secret.source
        secret_name = target.replace('_', '-')
        secret_array.append(f"{secret_name}={value}")
        secret_env_ref.append(f"{secret_name}=secretref:{secret_name}")

    if not secret_array:
        return (None, None)

    logger.warning("Note: Secrets will be mapped as secure environment variables in Azure Container Apps.")
    return (secret_array, secret_env_ref)
|
||||
|
||||
|
||||
def resolve_replicas_from_service(service):
    """Determine the replica count for a compose service.

    Precedence (later wins): ``service.scale``, then ``deploy.replicas``,
    then a forced single replica when ``deploy.mode`` is "global".
    Returns None when nothing specifies a count.
    """
    count = service.scale if service.scale else None

    deploy = service.deploy
    if deploy is not None:
        if deploy.replicas is not None:
            count = deploy.replicas
        # global mode means exactly one task per node; map it to one replica
        if deploy.mode == "global":
            count = 1

    return count
|
||||
|
||||
|
||||
def valid_resource_settings():
    """Return the supported vCPU -> memory(Gi) combinations.

    Keys are vCPU counts, values the matching memory reservation in Gi
    (always exactly twice the vCPU value in this table). Both sides are
    strings because they are compared against user-supplied CLI values.
    """
    # vCPU and Memory reservations
    # https://docs.microsoft.com/azure/container-apps/containers#configuration
    return {
        "0.25": "0.5",
        "0.5": "1.0",
        "0.75": "1.5",
        "1.0": "2.0",
        "1.25": "2.5",
        "1.5": "3.0",
        "1.75": "3.5",
        "2.0": "4.0",
    }
|
||||
|
||||
|
||||
def validate_memory_and_cpu_setting(cpu, memory, managed_environment):
    """Validate a cpu/memory pair against the fixed vCPU/memory combinations.

    Environments with workload profiles allow arbitrary combinations, so
    cpu/memory pass through unchanged. Otherwise: an unsupported memory value
    is replaced by the default for the requested cpu (with a warning), and an
    unsupported cpu resets both to None so service defaults apply.
    Returns ``(cpu, memory)`` where memory carries a "Gi" suffix when set.
    """
    # only v1 cluster do the validation
    from ._utils import safe_get
    if safe_get(managed_environment, "properties", "workloadProfiles"):
        return cpu, memory

    settings = valid_resource_settings()

    if cpu in settings:  # membership test on the dict directly (was .keys())
        if memory != settings[cpu]:
            if memory is not None:
                # fixed: the two sentences previously ran together without a space
                logger.warning(
                    "Unsupported memory reservation request of %s. "
                    "The default value of %sGi will be used.", memory, settings[cpu])
            memory = settings[cpu]
        return cpu, f"{memory}Gi"

    if cpu is not None:
        # lazy %-formatting instead of f-string (drops the W1203 suppression)
        logger.warning(
            "Invalid CPU reservation request of %s. The default resource values will be used.", cpu)
    return None, None
|
||||
|
||||
|
||||
def resolve_cpu_configuration_from_service(service):
    """Return the requested vCPU count as a string, or None if unspecified.

    ``deploy.resources.reservations.cpus`` takes precedence; the legacy
    top-level ``cpus`` field is consulted only when the service has no
    ``deploy.resources`` section at all (even an empty reservations block
    suppresses the fallback, matching the original elif structure).
    """
    deploy = service.deploy
    if deploy is not None and deploy.resources is not None:
        reservations = deploy.resources.reservations
        if reservations is not None and reservations.cpus is not None:
            return str(reservations.cpus)
        return None
    if service.cpus is not None:
        return str(service.cpus)
    return None
|
||||
|
||||
|
||||
def resolve_memory_configuration_from_service(service):
    """Return the requested memory reservation in gigabytes as a string, or None.

    ``deploy.resources.reservations.memory`` takes precedence; the legacy
    top-level ``mem_reservation`` field is consulted only when the service
    has no ``deploy.resources`` section at all (matching the original
    elif structure).
    """
    deploy = service.deploy
    if deploy is not None and deploy.resources is not None:
        reservations = deploy.resources.reservations
        if reservations is not None and reservations.memory is not None:
            return str(reservations.memory.as_gigabytes())
        return None
    if service.mem_reservation is not None:
        return str(service.mem_reservation.as_gigabytes())
    return None
|
||||
|
||||
|
||||
def resolve_port_or_expose_list(ports, name):
    """Pick a single port from a docker-compose port/expose list.

    With several entries the user is prompted to choose; with exactly one it
    is returned directly; an empty list yields None. (The original fell off
    the end and implicitly returned None for lists of length <= 1 — callers
    only invoke this with multi-entry lists, so returning the lone entry is a
    backward-compatible generalization.)
    """
    if not ports:
        return None
    if len(ports) == 1:
        return ports[0]
    message = f"You have more than one {name} mapping defined in your docker-compose file."
    message += " Which port would you like to use? "
    choice_index = prompt_choice_list(message, ports)
    return ports[choice_index]
|
||||
|
||||
|
||||
def resolve_ingress_and_target_port(service):
    """Derive the ingress type and target port for a compose service.

    Published ``ports`` map to external ingress and ``expose`` entries to
    internal ingress; when several candidates exist the user is prompted to
    pick one. Returns ``(ingress_type, target_port)`` — both None when the
    service publishes nothing.
    """
    # External Ingress Check
    if service.ports is not None:
        ingress_type = "external"
        candidates = [p.target for p in service.ports]
        if len(candidates) == 1:
            target_port = candidates[0]
        else:
            target_port = resolve_port_or_expose_list(candidates, "port")
    # Internal Ingress Check
    elif service.expose is not None:
        ingress_type = "internal"
        if len(service.expose) == 1:
            target_port = service.expose[0]
        else:
            target_port = resolve_port_or_expose_list(service.expose, "expose")
    else:
        ingress_type = None
        target_port = None
    return (ingress_type, target_port)
|
||||
|
||||
|
||||
def resolve_service_startup_command(service):
    """Translate compose entrypoint/command into Container Apps command/args.

    Returns ``(startup_command_array, startup_args_array)``:
      * entrypoint set  -> command = [entrypoint], args = [command] or []
      * only command    -> command = [command],   args = None
      * neither set     -> (None, None)
    """
    if service.entrypoint is not None:
        command_list = [service.entrypoint.command_string()]
        args_list = []
        if service.command is not None:
            args_list.append(service.command.command_string())
        return (command_list, args_list)
    if service.command is not None:
        # no entrypoint: the compose command becomes the container command
        return ([service.command.command_string()], None)
    return (None, None)
|
|
@ -0,0 +1,79 @@
|
|||
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

# Environment kinds and the matching ARM resource-type segments.
MANAGED_ENVIRONMENT_TYPE = "managed"
CONNECTED_ENVIRONMENT_TYPE = "connected"
MANAGED_ENVIRONMENT_RESOURCE_TYPE = "managedEnvironments"
CONNECTED_ENVIRONMENT_RESOURCE_TYPE = "connectedEnvironments"

# Name-length limits enforced by the service.
MAXIMUM_SECRET_LENGTH = 20
MAXIMUM_CONTAINER_APP_NAME_LENGTH = 32

# Polling cadence (seconds) for provisioning-state loops.
SHORT_POLLING_INTERVAL_SECS = 3
LONG_POLLING_INTERVAL_SECS = 10

# Hostname suffix used to recognize Azure Container Registry servers.
ACR_IMAGE_SUFFIX = ".azurecr.io"

# Dotted module path of the vendored SDK models (loaded dynamically).
CONTAINER_APPS_SDK_MODELS = "azure.cli.command_modules.containerapp._sdk_models"

# Resource providers this module registers/talks to.
LOG_ANALYTICS_RP = "Microsoft.OperationalInsights"
CONTAINER_APPS_RP = "Microsoft.App"
SERVICE_LINKER_RP = "Microsoft.ServiceLinker"

# Certificate resource-type segments under a managed environment.
MANAGED_CERTIFICATE_RT = "managedCertificates"
PRIVATE_CERTIFICATE_RT = "certificates"

# Built-in dev (add-on) services and their image/type/container names.
DEV_SERVICE_LIST = ["kafka", "postgres", "redis", "mariadb"]

DEV_KAFKA_IMAGE = 'kafka'
DEV_KAFKA_SERVICE_TYPE = 'kafka'
DEV_KAFKA_CONTAINER_NAME = 'kafka'

DEV_POSTGRES_IMAGE = 'postgres'
DEV_POSTGRES_SERVICE_TYPE = 'postgres'
DEV_POSTGRES_CONTAINER_NAME = 'postgres'

DEV_REDIS_IMAGE = 'redis'
DEV_REDIS_SERVICE_TYPE = 'redis'
DEV_REDIS_CONTAINER_NAME = 'redis'

DEV_MARIADB_IMAGE = 'mariadb'
DEV_MARIADB_SERVICE_TYPE = 'mariadb'
DEV_MARIADB_CONTAINER_NAME = 'mariadb'

# Provisioning states reported by the service.
PENDING_STATUS = "Pending"
SUCCEEDED_STATUS = "Succeeded"
UPDATING_STATUS = "Updating"

# Well-known secret names for the auth (Easy Auth) identity providers.
MICROSOFT_SECRET_SETTING_NAME = "microsoft-provider-authentication-secret"
FACEBOOK_SECRET_SETTING_NAME = "facebook-provider-authentication-secret"
GITHUB_SECRET_SETTING_NAME = "github-provider-authentication-secret"
GOOGLE_SECRET_SETTING_NAME = "google-provider-authentication-secret"
MSA_SECRET_SETTING_NAME = "msa-provider-authentication-secret"
TWITTER_SECRET_SETTING_NAME = "twitter-provider-authentication-secret"
APPLE_SECRET_SETTING_NAME = "apple-provider-authentication-secret"
# Allowed values for auth configuration enums.
UNAUTHENTICATED_CLIENT_ACTION = ['RedirectToLoginPage', 'AllowAnonymous', 'Return401', 'Return403']
FORWARD_PROXY_CONVENTION = ['NoProxy', 'Standard', 'Custom']
CHECK_CERTIFICATE_NAME_AVAILABILITY_TYPE = "Microsoft.App/managedEnvironments/certificates"

# Name-availability check results.
NAME_INVALID = "Invalid"
NAME_ALREADY_EXISTS = "AlreadyExists"

# Log stream types for `az containerapp logs show`.
LOG_TYPE_CONSOLE = "console"
LOG_TYPE_SYSTEM = "system"
|
||||
ACR_TASK_TEMPLATE = """version: v1.1.0
|
||||
steps:
|
||||
- cmd: mcr.microsoft.com/oryx/cli:debian-buster-20230222.1 oryx dockerfile --bind-port {{target_port}} --output ./Dockerfile .
|
||||
timeout: 28800
|
||||
- build: -t $Registry/{{image_name}} -f Dockerfile .
|
||||
timeout: 28800
|
||||
- push: ["$Registry/{{image_name}}"]
|
||||
timeout: 1800
|
||||
"""
|
||||
DEFAULT_PORT = 8080  # used for no dockerfile scenario; not the hello world image

# Quickstart image deployed by `az containerapp up` when no source is given.
HELLO_WORLD_IMAGE = "mcr.microsoft.com/k8se/quickstart:latest"

# Diagnostic-settings payload enabling the console + system log categories.
LOGS_STRING = '[{"category":"ContainerAppConsoleLogs","categoryGroup":null,"enabled":true,"retentionPolicy":{"days":0,"enabled":false}},{"category":"ContainerAppSystemLogs","categoryGroup":null,"enabled":true,"retentionPolicy":{"days":0,"enabled":false}}]'  # pylint: disable=line-too-long
|
|
@ -0,0 +1,80 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument, expression-not-assigned, too-many-locals, logging-fstring-interpolation, broad-except, pointless-statement, bare-except
|
||||
import sys
|
||||
|
||||
from azure.cli.core.azclierror import (ValidationError)
|
||||
|
||||
from ._utils import safe_get
|
||||
|
||||
|
||||
def load_yaml_file(file_name):
    """Parse *file_name* as YAML and return the resulting object.

    Embedded NUL bytes are stripped before parsing (tolerates files saved
    with odd encodings). Raises ValidationError when the file is missing or
    cannot be parsed; other I/O errors are re-raised unchanged.
    """
    import yaml
    import errno

    try:
        with open(file_name) as stream:  # pylint: disable=unspecified-encoding
            return yaml.safe_load(stream.read().replace('\x00', ''))
    except (IOError, OSError) as ex:
        # only a missing file gets the friendly message; everything else propagates
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise ValidationError('{} does not exist'.format(file_name)) from ex
        raise
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise ValidationError('Error parsing {} ({})'.format(file_name, str(ex))) from ex
|
||||
|
||||
|
||||
def create_deserializer(models):
    """Build an msrest Deserializer over every class exported by *models*.

    *models* is a dotted module path (the vendored SDK models); it is
    imported and all of its top-level members are registered by name.
    """
    import inspect
    from importlib import import_module
    from msrest import Deserializer

    import_module(models)

    # getmembers yields (name, object) pairs — exactly the mapping the
    # Deserializer constructor expects
    class_map = dict(inspect.getmembers(sys.modules[models]))
    return Deserializer(class_map)
|
||||
|
||||
|
||||
def process_loaded_yaml(yaml_containerapp):
    """Normalize a user-supplied containerapp YAML dict into ARM request shape.

    Ensures a 'properties' sub-dict exists, strips read-only identity
    sub-fields, moves well-known top-level keys under 'properties', and
    renames the legacy 'managedEnvironmentId' to 'environmentId'.
    Mutates and returns *yaml_containerapp*. Raises ValidationError for
    non-dict input.
    """
    if not isinstance(yaml_containerapp, dict):  # pylint: disable=unidiomatic-typecheck
        raise ValidationError('Invalid YAML provided. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.')
    if not yaml_containerapp.get('properties'):
        yaml_containerapp['properties'] = {}

    if safe_get(yaml_containerapp, "identity", "userAssignedIdentities"):
        for identity in yaml_containerapp['identity']['userAssignedIdentities']:
            # properties (principalId and clientId) are readonly and create (PUT) will throw error if they are provided
            # Update (PATCH) ignores them, so it's okay to remove them as well
            yaml_containerapp['identity']['userAssignedIdentities'][identity] = {}

    # top-level keys that ARM expects nested under 'properties'
    nested_properties = ["provisioningState",
                         "managedEnvironmentId",
                         "environmentId",
                         "latestRevisionName",
                         "latestRevisionFqdn",
                         "customDomainVerificationId",
                         "configuration",
                         "template",
                         "outboundIPAddresses",
                         "workloadProfileName",
                         "latestReadyRevisionName",
                         "eventStreamEndpoint"]
    for nested_property in nested_properties:
        tmp = yaml_containerapp.get(nested_property)
        if nested_property in yaml_containerapp:
            yaml_containerapp['properties'][nested_property] = tmp
            del yaml_containerapp[nested_property]
    # remove property managedEnvironmentId, can not use safe_get()
    if "managedEnvironmentId" in yaml_containerapp['properties']:
        tmp = yaml_containerapp['properties']['managedEnvironmentId']
        if tmp:
            # legacy name: migrate its value to the current 'environmentId' key
            yaml_containerapp['properties']["environmentId"] = tmp
        del yaml_containerapp['properties']['managedEnvironmentId']

    return yaml_containerapp
|
|
@ -0,0 +1,138 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=consider-using-f-string
|
||||
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
from knack.log import get_logger
|
||||
from azure.cli.core.util import open_page_in_browser
|
||||
from azure.cli.core.auth.persistence import SecretStore, build_persistence
|
||||
from azure.cli.core.azclierror import (ValidationError, CLIInternalError, UnclassifiedUserFault)
|
||||
|
||||
from ._utils import repo_url_to_name
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
'''
|
||||
Get Github personal access token following Github oauth for command line tools
|
||||
https://docs.github.com/en/developers/apps/authorizing-oauth-apps#device-flow
|
||||
'''
|
||||
|
||||
|
||||
GITHUB_OAUTH_CLIENT_ID = "8d8e1f6000648c575489"
|
||||
GITHUB_OAUTH_SCOPES = [
|
||||
"admin:repo_hook",
|
||||
"repo",
|
||||
"workflow"
|
||||
]
|
||||
|
||||
|
||||
def _get_github_token_secret_store(cmd):
    """Return a SecretStore backed by a file cache under the CLI config dir."""
    location = os.path.join(cmd.cli_ctx.config.config_dir, "github_token_cache")
    # TODO use core CLI util to take care of this once it's merged and released
    encrypt = sys.platform.startswith('win32')  # encryption not supported on non-windows platforms
    file_persistence = build_persistence(location, encrypt)
    return SecretStore(file_persistence)
|
||||
|
||||
|
||||
def cache_github_token(cmd, token, repo):
    """Persist *token* in the local secret store, associating it with *repo*.

    If the token is already cached, *repo* is added to that entry's repo list
    and its timestamp refreshed; otherwise a new entry is prepended.
    """
    repo = repo_url_to_name(repo)
    secret_store = _get_github_token_secret_store(cmd)
    cache = secret_store.load()

    for entry in cache:
        if isinstance(entry, dict) and entry.get("value") == token:
            if repo not in entry.get("repos", []):
                entry["repos"] = [*entry.get("repos", []), repo]
            entry["last_modified_timestamp"] = datetime.utcnow().timestamp()
            break
    else:
        # for/else: no existing entry matched this token — add a fresh one at the front
        cache_entry = {"last_modified_timestamp": datetime.utcnow().timestamp(), "value": token, "repos": [repo]}
        cache = [cache_entry, *cache]

    secret_store.save(cache)
|
||||
|
||||
|
||||
def load_github_token_from_cache(cmd, repo):
    """Return the cached GitHub token associated with *repo*, or None."""
    repo = repo_url_to_name(repo)
    store = _get_github_token_secret_store(cmd)
    cached_entries = store.load()

    # a missing or corrupt cache may deserialize to something other than a list
    if not isinstance(cached_entries, list):
        return None

    for entry in cached_entries:
        if isinstance(entry, dict) and repo in entry.get("repos", []):
            return entry.get("value")
    return None
|
||||
|
||||
|
||||
def get_github_access_token(cmd, scope_list=None, token=None):  # pylint: disable=unused-argument
    """Obtain a GitHub personal access token via the OAuth device flow.

    If *token* is supplied it is returned as-is. Otherwise the user is sent
    to github.com/login/device to enter a one-time code, and this function
    polls GitHub until a token is issued, the flow expires, or an
    unrecoverable error occurs.

    Raises ValidationError for an unknown scope, UnclassifiedUserFault on
    timeout, and CLIInternalError for any other failure during the flow.
    """
    if token:
        return token
    if scope_list:
        for scope in scope_list:
            if scope not in GITHUB_OAUTH_SCOPES:
                raise ValidationError("Requested github oauth scope is invalid")
        scope_list = ' '.join(scope_list)

    authorize_url = 'https://github.com/login/device/code'
    authorize_url_data = {
        'scope': scope_list,
        'client_id': GITHUB_OAUTH_CLIENT_ID
    }

    import requests
    import time
    from urllib.parse import parse_qs

    try:
        # step 1: request a device + user code pair
        response = requests.post(authorize_url, data=authorize_url_data)
        parsed_response = parse_qs(response.content.decode('ascii'))

        device_code = parsed_response['device_code'][0]
        user_code = parsed_response['user_code'][0]
        verification_uri = parsed_response['verification_uri'][0]
        interval = int(parsed_response['interval'][0])
        expires_in_seconds = int(parsed_response['expires_in'][0])
        logger.warning('Please navigate to %s and enter the user code %s to activate and '
                       'retrieve your github personal access token', verification_uri, user_code)
        open_page_in_browser("https://github.com/login/device")

        timeout = time.time() + expires_in_seconds
        logger.warning("Waiting up to '%s' minutes for activation", str(expires_in_seconds // 60))

        confirmation_url = 'https://github.com/login/oauth/access_token'
        confirmation_url_data = {
            'client_id': GITHUB_OAUTH_CLIENT_ID,
            'device_code': device_code,
            'grant_type': 'urn:ietf:params:oauth:grant-type:device_code'
        }

        # step 2: poll until the user activates the code (or the flow expires)
        pending = True
        while pending:
            time.sleep(interval)

            if time.time() > timeout:
                raise UnclassifiedUserFault('Activation did not happen in time. Please try again')

            confirmation_response = requests.post(confirmation_url, data=confirmation_url_data)
            parsed_confirmation_response = parse_qs(confirmation_response.content.decode('ascii'))

            if 'error' in parsed_confirmation_response and parsed_confirmation_response['error'][0]:
                if parsed_confirmation_response['error'][0] == 'slow_down':
                    interval += 5  # if slow_down error is received, 5 seconds is added to minimum polling interval
                elif parsed_confirmation_response['error'][0] != 'authorization_pending':
                    # any other error is terminal; fall through to the final raise
                    pending = False

            if 'access_token' in parsed_confirmation_response and parsed_confirmation_response['access_token'][0]:
                return parsed_confirmation_response['access_token'][0]
    except Exception as e:
        raise CLIInternalError(
            'Error: {}. Please try again, or retrieve personal access token from the Github website'.format(e)) from e

    raise UnclassifiedUserFault('Activation did not happen in time. Please try again')
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,409 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
|
||||
# pylint: disable=line-too-long, too-many-statements, super-with-arguments
|
||||
|
||||
VnetConfiguration = {
|
||||
"infrastructureSubnetId": None,
|
||||
"dockerBridgeCidr": None,
|
||||
"platformReservedCidr": None,
|
||||
"platformReservedDnsIP": None
|
||||
}
|
||||
|
||||
ManagedEnvironment = {
|
||||
"location": None,
|
||||
"tags": None,
|
||||
"properties": {
|
||||
"daprAIInstrumentationKey": None,
|
||||
"vnetConfiguration": None, # VnetConfiguration
|
||||
"appLogsConfiguration": None,
|
||||
"customDomainConfiguration": None, # CustomDomainConfiguration,
|
||||
"workloadProfiles": None
|
||||
}
|
||||
}
|
||||
|
||||
CustomDomainConfiguration = {
|
||||
"dnsSuffix": None,
|
||||
"certificateValue": None,
|
||||
"certificatePassword": None
|
||||
}
|
||||
|
||||
AppLogsConfiguration = {
|
||||
"destination": None,
|
||||
"logAnalyticsConfiguration": None
|
||||
}
|
||||
|
||||
LogAnalyticsConfiguration = {
|
||||
"customerId": None,
|
||||
"sharedKey": None
|
||||
}
|
||||
|
||||
# Containerapp
|
||||
|
||||
Dapr = {
|
||||
"enabled": False,
|
||||
"appId": None,
|
||||
"appProtocol": None,
|
||||
"appPort": None,
|
||||
"httpReadBufferSize": None,
|
||||
"httpMaxRequestSize": None,
|
||||
"logLevel": None,
|
||||
"enableApiLogging": None
|
||||
}
|
||||
|
||||
EnvironmentVar = {
|
||||
"name": None,
|
||||
"value": None,
|
||||
"secretRef": None
|
||||
}
|
||||
|
||||
ContainerResources = {
|
||||
"cpu": None,
|
||||
"memory": None
|
||||
}
|
||||
|
||||
VolumeMount = {
|
||||
"volumeName": None,
|
||||
"mountPath": None,
|
||||
"subPath": None
|
||||
}
|
||||
|
||||
Container = {
|
||||
"image": None,
|
||||
"name": None,
|
||||
"command": None,
|
||||
"args": None,
|
||||
"env": None, # [EnvironmentVar]
|
||||
"resources": None, # ContainerResources
|
||||
"volumeMounts": None, # [VolumeMount]
|
||||
}
|
||||
|
||||
SecretVolumeItem = {
|
||||
"secretRef": None,
|
||||
"path": None,
|
||||
}
|
||||
|
||||
Volume = {
|
||||
"name": None,
|
||||
"storageType": "EmptyDir", # AzureFile, EmptyDir or Secret
|
||||
"storageName": None, # None for EmptyDir or Secret, otherwise name of storage resource
|
||||
"secrets": None, # [SecretVolumeItem]
|
||||
"mountOptions": None,
|
||||
}
|
||||
|
||||
ScaleRuleAuth = {
|
||||
"secretRef": None,
|
||||
"triggerParameter": None
|
||||
}
|
||||
|
||||
QueueScaleRule = {
|
||||
"queueName": None,
|
||||
"queueLength": None,
|
||||
"auth": None # ScaleRuleAuth
|
||||
}
|
||||
|
||||
CustomScaleRule = {
|
||||
"type": None,
|
||||
"metadata": {},
|
||||
"auth": None # ScaleRuleAuth
|
||||
}
|
||||
|
||||
HttpScaleRule = {
|
||||
"metadata": {},
|
||||
"auth": None # ScaleRuleAuth
|
||||
}
|
||||
|
||||
ScaleRule = {
|
||||
"name": None,
|
||||
"azureQueue": None, # QueueScaleRule
|
||||
"custom": None, # CustomScaleRule
|
||||
"http": None, # HttpScaleRule
|
||||
}
|
||||
|
||||
Secret = {
|
||||
"name": None,
|
||||
"value": None,
|
||||
"keyVaultUrl": None,
|
||||
"identity": None
|
||||
}
|
||||
|
||||
Scale = {
|
||||
"minReplicas": None,
|
||||
"maxReplicas": None,
|
||||
"rules": [] # list of ScaleRule
|
||||
}
|
||||
|
||||
ServiceBinding = {
|
||||
"serviceId": None,
|
||||
"name": None
|
||||
}
|
||||
|
||||
JobScale = {
|
||||
"minExecutions": None,
|
||||
"maxExecutions": None,
|
||||
"pollingInterval": None,
|
||||
"rules": [] # list of ScaleRule
|
||||
}
|
||||
|
||||
TrafficWeight = {
|
||||
"revisionName": None,
|
||||
"weight": None,
|
||||
"latestRevision": False
|
||||
}
|
||||
|
||||
BindingType = {
|
||||
|
||||
}
|
||||
|
||||
CustomDomain = {
|
||||
"name": None,
|
||||
"bindingType": None, # BindingType
|
||||
"certificateId": None
|
||||
}
|
||||
|
||||
Ingress = {
|
||||
"fqdn": None,
|
||||
"external": False,
|
||||
"targetPort": None,
|
||||
"transport": None, # 'auto', 'http', 'http2', 'tcp'
|
||||
"exposedPort": None,
|
||||
"allowInsecure": False,
|
||||
"traffic": None, # TrafficWeight
|
||||
"customDomains": None, # [CustomDomain]
|
||||
"ipSecurityRestrictions": None, # [IPSecurityRestrictions]
|
||||
"stickySessions": None # StickySessions
|
||||
}
|
||||
|
||||
RegistryCredentials = {
|
||||
"server": None,
|
||||
"username": None,
|
||||
"passwordSecretRef": None
|
||||
}
|
||||
|
||||
Template = {
|
||||
"revisionSuffix": None,
|
||||
"containers": None, # [Container]
|
||||
"initContainers": None, # [Container]
|
||||
"scale": Scale,
|
||||
"volumes": None, # [Volume]
|
||||
"serviceBinds": None # [ServiceBinding]
|
||||
}
|
||||
|
||||
Configuration = {
|
||||
"secrets": None, # [Secret]
|
||||
"activeRevisionsMode": None, # 'multiple' or 'single'
|
||||
"ingress": None, # Ingress
|
||||
"dapr": Dapr,
|
||||
"registries": None # [RegistryCredentials]
|
||||
}
|
||||
|
||||
JobTemplate = {
|
||||
"containers": None, # [Container]
|
||||
"initContainers": None, # [Container]
|
||||
"volumes": None # [Volume]
|
||||
}
|
||||
|
||||
# Added template for starting job executions
|
||||
JobExecutionTemplate = {
|
||||
"containers": None, # [Container]
|
||||
"initContainers": None # [Container]
|
||||
}
|
||||
|
||||
JobConfiguration = {
|
||||
"secrets": None, # [Secret]
|
||||
"triggerType": None, # 'manual' or 'schedule' or 'event'
|
||||
"replicaTimeout": None,
|
||||
"replicaRetryLimit": None,
|
||||
"manualTriggerConfig": None, # ManualTriggerConfig
|
||||
"scheduleTriggerConfig": None, # ScheduleTriggerConfig
|
||||
"eventTriggerConfig": None, # EventTriggerConfig
|
||||
"registries": None, # [RegistryCredentials]
|
||||
"dapr": None
|
||||
}
|
||||
|
||||
ManualTriggerConfig = {
|
||||
"replicaCompletionCount": None,
|
||||
"parallelism": None
|
||||
}
|
||||
|
||||
ScheduleTriggerConfig = {
|
||||
"replicaCompletionCount": None,
|
||||
"parallelism": None,
|
||||
"cronExpression": None
|
||||
}
|
||||
|
||||
EventTriggerConfig = {
|
||||
"replicaCompletionCount": None,
|
||||
"parallelism": None,
|
||||
"scale": None, # [JobScale]
|
||||
}
|
||||
|
||||
UserAssignedIdentity = {
|
||||
|
||||
}
|
||||
|
||||
ManagedServiceIdentity = {
|
||||
"type": None, # 'None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned'
|
||||
"userAssignedIdentities": None # {string: UserAssignedIdentity}
|
||||
}
|
||||
|
||||
ServiceConnector = {
|
||||
"properties": {
|
||||
"targetService": {
|
||||
"id": None,
|
||||
"type": "AzureResource"
|
||||
},
|
||||
"authInfo": {
|
||||
"authType": None,
|
||||
},
|
||||
"scope": None,
|
||||
}
|
||||
}
|
||||
|
||||
Service = {
|
||||
"type": None
|
||||
}
|
||||
|
||||
ContainerApp = {
|
||||
"location": None,
|
||||
"identity": None, # ManagedServiceIdentity
|
||||
"properties": {
|
||||
"environmentId": None,
|
||||
"configuration": None, # Configuration
|
||||
"template": None, # Template
|
||||
"workloadProfileName": None
|
||||
},
|
||||
"tags": None
|
||||
}
|
||||
|
||||
ContainerAppsJob = {
|
||||
"location": None,
|
||||
"identity": None, # ManagedServiceIdentity
|
||||
"properties": {
|
||||
"environmentId": None,
|
||||
"configuration": None, # JobConfiguration
|
||||
"template": None, # JobTemplate
|
||||
"workloadProfileName": None
|
||||
},
|
||||
"tags": None
|
||||
}
|
||||
|
||||
ContainerAppCertificateEnvelope = {
|
||||
"location": None,
|
||||
"properties": {
|
||||
"password": None,
|
||||
"value": None
|
||||
}
|
||||
}
|
||||
|
||||
DaprComponent = {
|
||||
"properties": {
|
||||
"componentType": None, # String
|
||||
"version": None,
|
||||
"ignoreErrors": None,
|
||||
"initTimeout": None,
|
||||
"secrets": None,
|
||||
"metadata": None,
|
||||
"scopes": None
|
||||
}
|
||||
}
|
||||
|
||||
DaprMetadata = {
|
||||
"key": None, # str
|
||||
"value": None, # str
|
||||
"secret_ref": None # str
|
||||
}
|
||||
|
||||
SourceControl = {
|
||||
"properties": {
|
||||
"repoUrl": None,
|
||||
"branch": None,
|
||||
"githubActionConfiguration": None # [GitHubActionConfiguration]
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
GitHubActionConfiguration = {
|
||||
"registryInfo": None, # [RegistryInfo]
|
||||
"azureCredentials": None, # [AzureCredentials]
|
||||
"image": None, # str
|
||||
"contextPath": None, # str
|
||||
"publishType": None, # str
|
||||
"os": None, # str
|
||||
"runtimeStack": None, # str
|
||||
"runtimeVersion": None # str
|
||||
}
|
||||
|
||||
RegistryInfo = {
|
||||
"registryUrl": None, # str
|
||||
"registryUserName": None, # str
|
||||
"registryPassword": None # str
|
||||
}
|
||||
|
||||
AzureCredentials = {
|
||||
"clientId": None, # str
|
||||
"clientSecret": None, # str
|
||||
"tenantId": None, # str
|
||||
"subscriptionId": None # str
|
||||
}
|
||||
|
||||
ContainerAppCustomDomainEnvelope = {
|
||||
"properties": {
|
||||
"configuration": {
|
||||
"ingress": {
|
||||
"customDomains": None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ContainerAppCustomDomain = {
|
||||
"name": None,
|
||||
"bindingType": "SniEnabled",
|
||||
"certificateId": None
|
||||
}
|
||||
|
||||
AzureFileProperties = {
|
||||
"accountName": None,
|
||||
"accountKey": None,
|
||||
"accessMode": None,
|
||||
"shareName": None
|
||||
}
|
||||
|
||||
ManagedCertificateEnvelop = {
|
||||
"location": None, # str
|
||||
"properties": {
|
||||
"subjectName": None, # str
|
||||
"validationMethod": None # str
|
||||
}
|
||||
}
|
||||
|
||||
# ContainerApp Patch
|
||||
ImageProperties = {
|
||||
"imageName": None,
|
||||
"targetContainerName": None,
|
||||
"targetContainerAppName": None,
|
||||
"revisionMode": None,
|
||||
}
|
||||
|
||||
ImagePatchableCheck = {
|
||||
"targetContainerAppName": None,
|
||||
"targetContainerName": None,
|
||||
"revisionMode": None,
|
||||
"targetImageName": None,
|
||||
"oldRunImage": None,
|
||||
"newRunImage": None,
|
||||
"id": None,
|
||||
"reason": None,
|
||||
}
|
||||
|
||||
OryxMarinerRunImgTagProperty = {
|
||||
"fullTag": None,
|
||||
"framework": None,
|
||||
"version": None,
|
||||
"marinerVersion": None,
|
||||
"architectures": None,
|
||||
"support": None,
|
||||
}
|
|
@ -0,0 +1,476 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, too-many-statements, consider-using-f-string
|
||||
|
||||
from knack.arguments import CLIArgumentType
|
||||
|
||||
from azure.cli.core.commands.parameters import (resource_group_name_type,
|
||||
get_location_type,
|
||||
file_type,
|
||||
get_three_state_flag, get_enum_type, tags_type)
|
||||
|
||||
from ._validators import (validate_memory, validate_cpu, validate_managed_env_name_or_id, validate_registry_server,
|
||||
validate_registry_user, validate_registry_pass, validate_target_port, validate_ingress,
|
||||
validate_storage_name_or_id, validate_cors_max_age, validate_allow_insecure)
|
||||
from ._constants import UNAUTHENTICATED_CLIENT_ACTION, FORWARD_PROXY_CONVENTION, MAXIMUM_CONTAINER_APP_NAME_LENGTH, LOG_TYPE_CONSOLE, LOG_TYPE_SYSTEM
|
||||
|
||||
|
||||
def load_arguments(self, _):
    """Register argument metadata for the `az containerapp` command group.

    Called by the CLI command loader; ``self`` is the loader instance and the
    second positional argument (the command string filter) is unused here.
    """

    # Reusable --name/-n argument definition shared by several commands below.
    name_type = CLIArgumentType(options_list=['--name', '-n'])

    with self.argument_context('containerapp') as c:
        # Base arguments
        c.argument('name', name_type, metavar='NAME', id_part='name', help=f"The name of the Containerapp. A name must consist of lower case alphanumeric characters or '-', start with a letter, end with an alphanumeric character, cannot have '--', and must be less than {MAXIMUM_CONTAINER_APP_NAME_LENGTH} characters.")
        c.argument('resource_group_name', arg_type=resource_group_name_type)
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        # Internal-only parameter; hidden from the CLI surface.
        c.ignore('disable_warnings')

    # Arguments shared by the create/update-style containerapp commands.
    with self.argument_context('containerapp') as c:
        c.argument('tags', arg_type=tags_type)
        c.argument('managed_env', validator=validate_managed_env_name_or_id, options_list=['--environment'], help="Name or resource ID of the container app's environment.")
        c.argument('yaml', type=file_type, help='Path to a .yaml file with the configuration of a container app. All other parameters will be ignored. For an example, see https://docs.microsoft.com/azure/container-apps/azure-resource-manager-api-spec#examples')
|
||||
|
||||
with self.argument_context('containerapp exec') as c:
|
||||
c.argument('container', help="The name of the container to ssh into")
|
||||
c.argument('replica', help="The name of the replica to ssh into. List replicas with 'az containerapp replica list'. A replica may not exist if there is not traffic to your app.")
|
||||
c.argument('revision', help="The name of the container app revision to ssh into. Defaults to the latest revision.")
|
||||
c.argument('startup_command', options_list=["--command"], help="The startup command (bash, zsh, sh, etc.).")
|
||||
c.argument('name', name_type, id_part=None, help="The name of the Containerapp.")
|
||||
c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None)
|
||||
|
||||
with self.argument_context('containerapp logs show') as c:
|
||||
c.argument('follow', help="Print logs in real time if present.", arg_type=get_three_state_flag())
|
||||
c.argument('tail', help="The number of past logs to print (0-300)", type=int, default=20)
|
||||
c.argument('container', help="The name of the container")
|
||||
c.argument('output_format', options_list=["--format"], help="Log output format", arg_type=get_enum_type(["json", "text"]), default="json")
|
||||
c.argument('replica', help="The name of the replica. List replicas with 'az containerapp replica list'. A replica may not exist if there is not traffic to your app.")
|
||||
c.argument('revision', help="The name of the container app revision. Defaults to the latest revision.")
|
||||
c.argument('name', name_type, id_part=None, help="The name of the Containerapp.")
|
||||
c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None)
|
||||
c.argument('kind', options_list=["--type", "-t"], help="Type of logs to stream", arg_type=get_enum_type([LOG_TYPE_CONSOLE, LOG_TYPE_SYSTEM]), default=LOG_TYPE_CONSOLE)
|
||||
|
||||
    # `az containerapp env logs show` — stream environment-level system logs.
    with self.argument_context('containerapp env logs show') as c:
        c.argument('follow', help="Print logs in real time if present.", arg_type=get_three_state_flag())
        c.argument('tail', help="The number of past logs to print (0-300)", type=int, default=20)

    # Replica
    with self.argument_context('containerapp replica') as c:
        c.argument('replica', help="The name of the replica. ")
        c.argument('revision', help="The name of the container app revision. Defaults to the latest revision.")
        # id_part=None: replica commands take explicit name/RG rather than --ids.
        c.argument('name', name_type, id_part=None, help="The name of the Containerapp.")
        c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None)
|
||||
|
||||
# Container
|
||||
with self.argument_context('containerapp', arg_group='Container') as c:
|
||||
c.argument('container_name', help="Name of the container.")
|
||||
c.argument('cpu', type=float, validator=validate_cpu, help="Required CPU in cores from 0.25 - 2.0, e.g. 0.5")
|
||||
c.argument('memory', validator=validate_memory, help="Required memory from 0.5 - 4.0 ending with \"Gi\", e.g. 1.0Gi")
|
||||
c.argument('env_vars', nargs='*', help="A list of environment variable(s) for the container. Space-separated values in 'key=value' format. Empty string to clear existing values. Prefix value with 'secretref:' to reference a secret.")
|
||||
c.argument('startup_command', nargs='*', options_list=['--command'], help="A list of supported commands on the container that will executed during startup. Space-separated values e.g. \"/bin/queue\" \"mycommand\". Empty string to clear existing values")
|
||||
c.argument('args', nargs='*', help="A list of container startup command argument(s). Space-separated values e.g. \"-c\" \"mycommand\". Empty string to clear existing values")
|
||||
c.argument('revision_suffix', help='User friendly suffix that is appended to the revision name')
|
||||
|
||||
# Env vars
|
||||
with self.argument_context('containerapp', arg_group='Environment variables') as c:
|
||||
c.argument('set_env_vars', nargs='*', help="Add or update environment variable(s) in container. Existing environment variables are not modified. Space-separated values in 'key=value' format. If stored as a secret, value must start with 'secretref:' followed by the secret name.")
|
||||
c.argument('remove_env_vars', nargs='*', help="Remove environment variable(s) from container. Space-separated environment variable names.")
|
||||
c.argument('replace_env_vars', nargs='*', help="Replace environment variable(s) in container. Other existing environment variables are removed. Space-separated values in 'key=value' format. If stored as a secret, value must start with 'secretref:' followed by the secret name.")
|
||||
c.argument('remove_all_env_vars', help="Remove all environment variable(s) from container..")
|
||||
|
||||
    # Scale
    with self.argument_context('containerapp', arg_group='Scale') as c:
        c.argument('min_replicas', type=int, help="The minimum number of replicas.")
        c.argument('max_replicas', type=int, help="The maximum number of replicas.")
        c.argument('scale_rule_name', options_list=['--scale-rule-name', '--srn'], help="The name of the scale rule.")
        c.argument('scale_rule_type', options_list=['--scale-rule-type', '--srt'], help="The type of the scale rule. Default: http. For more information please visit https://learn.microsoft.com/azure/container-apps/scale-app#scale-triggers")
        # Concurrency applies to both http and tcp rules, hence the tcp-flavored aliases.
        c.argument('scale_rule_http_concurrency', type=int, options_list=['--scale-rule-http-concurrency', '--srhc', '--srtc', '--scale-rule-tcp-concurrency'], help="The maximum number of concurrent requests before scale out. Only supported for http and tcp scale rules.")
        c.argument('scale_rule_metadata', nargs="+", options_list=['--scale-rule-metadata', '--srm'], help="Scale rule metadata. Metadata must be in format \"<key>=<value> <key>=<value> ...\".")
        c.argument('scale_rule_auth', nargs="+", options_list=['--scale-rule-auth', '--sra'], help="Scale rule auth parameters. Auth parameters must be in format \"<triggerParameter>=<secretRef> <triggerParameter>=<secretRef> ...\".")
|
||||
|
||||
# Dapr
|
||||
with self.argument_context('containerapp', arg_group='Dapr') as c:
|
||||
c.argument('dapr_enabled', options_list=['--enable-dapr'], default=False, arg_type=get_three_state_flag(), help="Boolean indicating if the Dapr side car is enabled.")
|
||||
c.argument('dapr_app_port', type=int, help="The port Dapr uses to talk to the application.")
|
||||
c.argument('dapr_app_id', help="The Dapr application identifier.")
|
||||
c.argument('dapr_app_protocol', arg_type=get_enum_type(['http', 'grpc']), help="The protocol Dapr uses to talk to the application.")
|
||||
c.argument('dapr_http_read_buffer_size', options_list=['--dapr-http-read-buffer-size', '--dhrbs'], type=int, help="Dapr max size of http header read buffer in KB to handle when sending multi-KB headers..")
|
||||
c.argument('dapr_http_max_request_size', options_list=['--dapr-http-max-request-size', '--dhmrs'], type=int, help="Increase max size of request body http and grpc servers parameter in MB to handle uploading of big files.")
|
||||
c.argument('dapr_log_level', arg_type=get_enum_type(["info", "debug", "warn", "error"]), help="Set the log level for the Dapr sidecar.")
|
||||
c.argument('dapr_enable_api_logging', options_list=['--dapr-enable-api-logging', '--dal'], help="Enable API logging for the Dapr sidecar.")
|
||||
|
||||
# Configuration
|
||||
with self.argument_context('containerapp', arg_group='Configuration') as c:
|
||||
c.argument('revisions_mode', arg_type=get_enum_type(['single', 'multiple']), help="The active revisions mode for the container app.")
|
||||
c.argument('registry_server', validator=validate_registry_server, help="The container registry server hostname, e.g. myregistry.azurecr.io.")
|
||||
c.argument('registry_pass', validator=validate_registry_pass, options_list=['--registry-password'], help="The password to log in to container registry. If stored as a secret, value must start with \'secretref:\' followed by the secret name.")
|
||||
c.argument('registry_user', validator=validate_registry_user, options_list=['--registry-username'], help="The username to log in to container registry.")
|
||||
c.argument('secrets', nargs='*', options_list=['--secrets', '-s'], help="A list of secret(s) for the container app. Space-separated values in 'key=value' format.")
|
||||
c.argument('registry_identity', help="A Managed Identity to authenticate with the registry server instead of username/password. Use a resource ID or 'system' for user-defined and system-defined identities, respectively. The registry must be an ACR. If possible, an 'acrpull' role assignemnt will be created for the identity automatically.")
|
||||
|
||||
    # Ingress
    with self.argument_context('containerapp', arg_group='Ingress') as c:
        c.argument('ingress', validator=validate_ingress, default=None, arg_type=get_enum_type(['internal', 'external']), help="The ingress type.")
        c.argument('target_port', type=int, validator=validate_target_port, help="The application port used for ingress traffic.")
        c.argument('transport', arg_type=get_enum_type(['auto', 'http', 'http2', 'tcp']), help="The transport protocol used for ingress traffic.")
        c.argument('exposed_port', type=int, help="Additional exposed port. Only supported by tcp transport protocol. Must be unique per environment if the app ingress is external.")

    # `az containerapp create` — creation-specific arguments.
    with self.argument_context('containerapp create') as c:
        c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help="Name of the workload profile to run the app on.")
        c.argument('secret_volume_mount', help="Path to mount all secrets e.g. mnt/secrets")
        c.argument('termination_grace_period', type=int, options_list=['--termination-grace-period', '--tgp'], help="Duration in seconds a replica is given to gracefully shut down before it is forcefully terminated. (Default: 30)")
        c.argument('allow_insecure', validator=validate_allow_insecure, arg_type=get_three_state_flag(), help='Allow insecure connections for ingress traffic.')

    with self.argument_context('containerapp create', arg_group='Identity') as c:
        c.argument('user_assigned', nargs='+', help="Space-separated user identities to be assigned.")
        c.argument('system_assigned', help="Boolean indicating whether to assign system-assigned identity.")

    with self.argument_context('containerapp create', arg_group='Container') as c:
        c.argument('image', options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.")

    with self.argument_context('containerapp show') as c:
        c.argument('show_secrets', help="Show Containerapp secrets.", action='store_true')

    # `az containerapp update` — update-specific container arguments.
    with self.argument_context('containerapp update', arg_group='Container') as c:
        c.argument('image', options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.")
        c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help='The friendly name for the workload profile')
        c.argument('secret_volume_mount', help="Path to mount all secrets e.g. mnt/secrets")
        c.argument('termination_grace_period', type=int, options_list=['--termination-grace-period', '--tgp'], help="Duration in seconds a replica is given to gracefully shut down before it is forcefully terminated. (Default: 30)")

    with self.argument_context('containerapp scale') as c:
        c.argument('min_replicas', type=int, help="The minimum number of replicas.")
        c.argument('max_replicas', type=int, help="The maximum number of replicas.")
|
||||
|
||||
    # `az containerapp env` — managed environment arguments.
    with self.argument_context('containerapp env') as c:
        c.argument('name', name_type, help='Name of the Container Apps environment.')
        c.argument('resource_group_name', arg_type=resource_group_name_type)
        c.argument('location', arg_type=get_location_type(self.cli_ctx), help='Location of resource. Examples: eastus2, northeurope')
        c.argument('tags', arg_type=tags_type)

    with self.argument_context('containerapp env', arg_group='Monitoring') as c:
        c.argument('logs_destination', arg_type=get_enum_type(["log-analytics", "azure-monitor", "none"]), help='Logs destination.')
        c.argument('logs_customer_id', options_list=['--logs-workspace-id'], help='Workspace ID of the Log Analytics workspace to send diagnostics logs to. Only works with logs destination "log-analytics". You can use \"az monitor log-analytics workspace create\" to create one. Extra billing may apply.')
        c.argument('logs_key', options_list=['--logs-workspace-key'], help='Log Analytics workspace key to configure your Log Analytics workspace. Only works with logs destination "log-analytics". You can use \"az monitor log-analytics workspace get-shared-keys\" to retrieve the key.')
        c.argument('storage_account', validator=validate_storage_name_or_id, help="Name or resource ID of the storage account used for Azure Monitor. If this value is provided, Azure Monitor Diagnostic Settings will be created automatically.")

    with self.argument_context('containerapp env', arg_group='Dapr') as c:
        c.argument('instrumentation_key', options_list=['--dapr-instrumentation-key'], help='Application Insights instrumentation key used by Dapr to export Service to Service communication telemetry')

    with self.argument_context('containerapp env', arg_group='Virtual Network') as c:
        c.argument('infrastructure_subnet_resource_id', options_list=['--infrastructure-subnet-resource-id', '-s'], help='Resource ID of a subnet for infrastructure components and user app containers.')
        c.argument('docker_bridge_cidr', options_list=['--docker-bridge-cidr'], help='CIDR notation IP range assigned to the Docker bridge. It must not overlap with any Subnet IP ranges or the IP range defined in Platform Reserved CIDR, if defined')
        c.argument('platform_reserved_cidr', options_list=['--platform-reserved-cidr'], help='IP range in CIDR notation that can be reserved for environment infrastructure IP addresses. It must not overlap with any other Subnet IP ranges')
        c.argument('platform_reserved_dns_ip', options_list=['--platform-reserved-dns-ip'], help='An IP address from the IP range defined by Platform Reserved CIDR that will be reserved for the internal DNS server.')
        c.argument('internal_only', arg_type=get_three_state_flag(), options_list=['--internal-only'], help='Boolean indicating the environment only has an internal load balancer. These environments do not have a public static IP resource, therefore must provide infrastructureSubnetResourceId if enabling this property')

    with self.argument_context('containerapp env', arg_group='Custom Domain') as c:
        c.argument('hostname', options_list=['--custom-domain-dns-suffix', '--dns-suffix'], help='The DNS suffix for the environment\'s custom domain.')
        c.argument('certificate_file', options_list=['--custom-domain-certificate-file', '--certificate-file'], help='The filepath of the certificate file (.pfx or .pem) for the environment\'s custom domain. To manage certificates for container apps, use `az containerapp env certificate`.')
        c.argument('certificate_password', options_list=['--custom-domain-certificate-password', '--certificate-password'], help='The certificate file password for the environment\'s custom domain.')

    with self.argument_context('containerapp env', arg_group='Peer Authentication') as c:
        c.argument('mtls_enabled', arg_type=get_three_state_flag(), options_list=['--enable-mtls'], help='Boolean indicating if mTLS peer authentication is enabled for the environment.')

    with self.argument_context('containerapp env create') as c:
        c.argument('zone_redundant', options_list=["--zone-redundant", "-z"], help="Enable zone redundancy on the environment. Cannot be used without --infrastructure-subnet-resource-id. If used with --location, the subnet's location must match")

    with self.argument_context('containerapp env update') as c:
        c.argument('name', name_type, help='Name of the Container Apps environment.')
        c.argument('tags', arg_type=tags_type)
        # c.argument('plan', help="The sku of the containerapp environment. Downgrading from premium to consumption is not supported. Environment must have a subnet to be upgraded to premium sku.", arg_type=get_enum_type(['consumption', 'premium', None], default=None))
        c.argument('workload_profile_type', help='The type of workload profile to add or update in this environment, --workload-profile-name required')
        c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help='The friendly name for the workload profile')
        c.argument('min_nodes', help='The minimum nodes for this workload profile, --workload-profile-name required')
        c.argument('max_nodes', help='The maximum nodes for this workload profile, --workload-profile-name required')

    with self.argument_context('containerapp env delete') as c:
        c.argument('name', name_type, help='Name of the Container Apps Environment.')

    with self.argument_context('containerapp env show') as c:
        c.argument('name', name_type, help='Name of the Container Apps Environment.')
|
||||
|
||||
    # `az containerapp env certificate` — environment certificate management.
    with self.argument_context('containerapp env certificate upload') as c:
        c.argument('certificate_file', options_list=['--certificate-file', '-f'], help='The filepath of the .pfx or .pem file')
        c.argument('certificate_name', options_list=['--certificate-name', '-c'], help='Name of the certificate which should be unique within the Container Apps environment.')
        c.argument('certificate_password', options_list=['--password', '-p'], help='The certificate file password')
        c.argument('prompt', options_list=['--show-prompt'], action='store_true', help='Show prompt to upload an existing certificate.')

    with self.argument_context('containerapp env certificate list') as c:
        c.argument('name', id_part=None)
        c.argument('certificate', options_list=['--certificate', '-c'], help='Name or resource id of the certificate.')
        c.argument('thumbprint', options_list=['--thumbprint', '-t'], help='Thumbprint of the certificate.')

    with self.argument_context('containerapp env certificate delete') as c:
        c.argument('certificate', options_list=['--certificate', '-c'], help='Name or resource id of the certificate.')
        c.argument('thumbprint', options_list=['--thumbprint', '-t'], help='Thumbprint of the certificate.')

    # `az containerapp env storage` — AzureFile storage mounts for the environment.
    with self.argument_context('containerapp env storage') as c:
        c.argument('name', id_part=None)
        c.argument('storage_name', help="Name of the storage.")
        c.argument('access_mode', id_part=None, arg_type=get_enum_type(["ReadWrite", "ReadOnly"]), help="Access mode for the AzureFile storage.")
        c.argument('azure_file_account_key', options_list=["--azure-file-account-key", "--storage-account-key", "-k"], help="Key of the AzureFile storage account.")
        c.argument('azure_file_share_name', options_list=["--azure-file-share-name", "--file-share", "-f"], help="Name of the share on the AzureFile storage.")
        c.argument('azure_file_account_name', options_list=["--azure-file-account-name", "--account-name", "-a"], help="Name of the AzureFile storage account.")

    # `az containerapp identity` — managed identity assignment.
    with self.argument_context('containerapp identity') as c:
        c.argument('user_assigned', nargs='+', help="Space-separated user identities.")
        c.argument('system_assigned', help="Boolean indicating whether to assign system-assigned identity.")

    with self.argument_context('containerapp identity remove') as c:
        c.argument('user_assigned', nargs='*', help="Space-separated user identities. If no user identities are specified, all user identities will be removed.")
|
||||
|
||||
    # `az containerapp github-action add` — wire up a GitHub Actions deployment workflow.
    with self.argument_context('containerapp github-action add') as c:
        c.argument('repo_url', help='The GitHub repository to which the workflow file will be added. In the format: https://github.com/<owner>/<repository-name>')
        c.argument('token', help='A Personal Access Token with write access to the specified repository. For more information: https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line')
        c.argument('branch', options_list=['--branch', '-b'], help='The branch of the Github repo. Assumed to be the Github repo\'s default branch if not specified.')
        c.argument('login_with_github', help='Interactively log in with Github to retrieve the Personal Access Token')
        c.argument('registry_url', help='The container registry server, e.g. myregistry.azurecr.io')
        c.argument('registry_username', help='The username of the registry. If using Azure Container Registry, we will try to infer the credentials if not supplied')
        c.argument('registry_password', help='The password of the registry. If using Azure Container Registry, we will try to infer the credentials if not supplied')
        c.argument('context_path', help='Path in the repo from which to run the docker build. Defaults to "./"')
        c.argument('service_principal_client_id', help='The service principal client ID. ')
        c.argument('service_principal_client_secret', help='The service principal client secret.')
        c.argument('service_principal_tenant_id', help='The service principal tenant ID.')
        c.argument('image', options_list=['--image', '-i'], help="Container image name that the Github Action should use. Defaults to the Container App name.")
        # Internal flag; not exposed on the command line.
        c.ignore('trigger_existing_workflow')

    with self.argument_context('containerapp github-action delete') as c:
        c.argument('token', help='A Personal Access Token with write access to the specified repository. For more information: https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line')
        c.argument('login_with_github', help='Interactively log in with Github to retrieve the Personal Access Token')

    # `az containerapp revision` — revision management.
    with self.argument_context('containerapp revision') as c:
        c.argument('revision_name', options_list=['--revision'], help='Name of the revision.')
        c.argument('all', help='Show inactive revisions.', action='store_true')

    with self.argument_context('containerapp revision copy') as c:
        c.argument('from_revision', help='Revision to copy from. Default: latest revision.')
        c.argument('image', options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.")
        c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help='The friendly name for the workload profile')
|
||||
|
||||
    # `az containerapp revision label` — traffic-label management.
    with self.argument_context('containerapp revision label') as c:
        c.argument('name', id_part=None)
        c.argument('revision', help='Name of the revision.')
        c.argument('label', help='Name of the label.')
        c.argument('yes', options_list=['--no-prompt', '--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')

    # Label-swap arguments (same command group, registered separately).
    with self.argument_context('containerapp revision label') as c:
        c.argument('source_label', options_list=['--source'], help='Source label to be swapped.')
        c.argument('target_label', options_list=['--target'], help='Target label to be swapped to.')

    # `az containerapp ingress` — ingress configuration.
    with self.argument_context('containerapp ingress') as c:
        c.argument('allow_insecure', arg_type=get_three_state_flag(), help='Allow insecure connections for ingress traffic.')
        c.argument('type', validator=validate_ingress, arg_type=get_enum_type(['internal', 'external']), help="The ingress type.")
        c.argument('transport', arg_type=get_enum_type(['auto', 'http', 'http2', 'tcp']), help="The transport protocol used for ingress traffic.")
        c.argument('target_port', type=int, validator=validate_target_port, help="The application port used for ingress traffic.")
        c.argument('exposed_port', type=int, help="Additional exposed port. Only supported by tcp transport protocol. Must be unique per environment if the app ingress is external.")
|
||||
|
||||
with self.argument_context('containerapp ingress access-restriction') as c:
|
||||
c.argument('action', arg_type=get_enum_type(['Allow', 'Deny']), help='Whether the IP security restriction allows or denies access. All restrictions must be use the same action. If no restrictions are set, all traffic is allowed.')
|
||||
c.argument('rule_name', help="The IP security restriction name.")
|
||||
c.argument('description', help="The description of the IP security restriction.")
|
||||
c.argument('ip_address', help="The address range of the IP security restriction in IPv4 CIDR notation. (for example, '198.51.100.14/24')")
|
||||
|
||||
with self.argument_context('containerapp ingress access-restriction list') as c:
|
||||
c.argument('name', id_part=None)
|
||||
|
||||
with self.argument_context('containerapp ingress traffic') as c:
|
||||
c.argument('revision_weights', nargs='+', options_list=['--revision-weight', c.deprecate(target='--traffic-weight', redirect='--revision-weight')], help="A list of revision weight(s) for the container app. Space-separated values in 'revision_name=weight' format. For latest revision, use 'latest=weight'")
|
||||
c.argument('label_weights', nargs='+', options_list=['--label-weight'], help="A list of label weight(s) for the container app. Space-separated values in 'label_name=weight' format.")
|
||||
|
||||
with self.argument_context('containerapp ingress sticky-sessions') as c:
|
||||
c.argument('affinity', arg_type=get_enum_type(['sticky', 'none']), help='Whether the affinity for the container app is Sticky or None.')
|
||||
|
||||
with self.argument_context('containerapp ingress cors') as c:
|
||||
c.argument('allowed_origins', nargs='*', options_list=['--allowed-origins', '-r'], help="A list of allowed origin(s) for the container app. Values are space-separated. Empty string to clear existing values.")
|
||||
c.argument('allowed_methods', nargs='*', options_list=['--allowed-methods', '-m'], help="A list of allowed method(s) for the container app. Values are space-separated. Empty string to clear existing values.")
|
||||
c.argument('allowed_headers', nargs='*', options_list=['--allowed-headers', '-a'], help="A list of allowed header(s) for the container app. Values are space-separated. Empty string to clear existing values.")
|
||||
c.argument('expose_headers', nargs='*', options_list=['--expose-headers', '-e'], help="A list of expose header(s) for the container app. Values are space-separated. Empty string to clear existing values.")
|
||||
c.argument('allow_credentials', options_list=['--allow-credentials'], arg_type=get_three_state_flag(), help='Whether the credential is allowed for the container app.')
|
||||
c.argument('max_age', nargs='?', const='', validator=validate_cors_max_age, help="The maximum age of the allowed origin in seconds. Only postive integer or empty string are allowed. Empty string resets max_age to null.")
|
||||
|
||||
    # `az containerapp secret` — secret management.
    with self.argument_context('containerapp secret') as c:
        c.argument('secrets', nargs='+', options_list=['--secrets', '-s'], help="A list of secret(s) for the container app. Space-separated values in 'key=value' or 'key=keyvaultref:keyvaulturl,identityref:identity' format (where 'key' cannot be longer than 20 characters).")
        c.argument('secret_name', help="The name of the secret to show.")
        c.argument('secret_names', nargs='+', help="A list of secret(s) for the container app. Space-separated secret values names.")
        c.argument('show_values', help='Show the secret values.')
        # Internal flag; not exposed on the command line.
        c.ignore('disable_max_length')

    # `az containerapp env dapr-component` — Dapr component management.
    with self.argument_context('containerapp env dapr-component') as c:
        c.argument('dapr_app_id', help="The Dapr app ID.")
        c.argument('dapr_app_port', help="The port of your app.")
        c.argument('dapr_app_protocol', help="Tell Dapr which protocol your application is using. Allowed values: grpc, http.")
        c.argument('dapr_component_name', help="The Dapr component name.")
        c.argument('environment_name', options_list=['--name', '-n'], help="The environment name.")

    with self.argument_context('containerapp revision set-mode') as c:
        c.argument('mode', arg_type=get_enum_type(['single', 'multiple']), help="The active revisions mode for the container app.")

    # `az containerapp registry` — registry credential management.
    with self.argument_context('containerapp registry') as c:
        c.argument('server', help="The container registry server, e.g. myregistry.azurecr.io")
        c.argument('username', help='The username of the registry. If using Azure Container Registry, we will try to infer the credentials if not supplied')
        c.argument('password', help='The password of the registry. If using Azure Container Registry, we will try to infer the credentials if not supplied')
        c.argument('identity', help="The managed identity with which to authenticate to the Azure Container Registry (instead of username/password). Use 'system' for a system-defined identity or a resource id for a user-defined identity. The managed identity should have been assigned acrpull permissions on the ACR before deployment (use 'az role assignment create --role acrpull ...').")

    # id_part=None on list commands: they take explicit --name/--resource-group, not --ids.
    with self.argument_context('containerapp registry list') as c:
        c.argument('name', id_part=None)

    with self.argument_context('containerapp secret list') as c:
        c.argument('name', id_part=None)

    with self.argument_context('containerapp revision list') as c:
        c.argument('name', id_part=None)
|
||||
|
||||
    # `az containerapp up` — create or update an app plus missing dependent resources.
    with self.argument_context('containerapp up') as c:
        # configured_default lets `az configure --defaults` supply these values.
        c.argument('resource_group_name', configured_default='resource_group_name', id_part=None)
        c.argument('location', configured_default='location')
        c.argument('name', configured_default='name', id_part=None)
        c.argument('managed_env', configured_default='managed_env')
        c.argument('registry_server', configured_default='registry_server')
        c.argument('source', help='Local directory path containing the application source and Dockerfile for building the container image. Preview: If no Dockerfile is present, a container image is generated using buildpacks. If Docker is not running or buildpacks cannot be used, Oryx will be used to generate the image. See the supported Oryx runtimes here: https://github.com/microsoft/Oryx/blob/main/doc/supportedRuntimeVersions.md.')
        c.argument('image', options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.")
        c.argument('browse', help='Open the app in a web browser after creation and deployment, if possible.')
        c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help='The friendly name for the workload profile')

    with self.argument_context('containerapp up', arg_group='Log Analytics (Environment)') as c:
        c.argument('logs_customer_id', options_list=['--logs-workspace-id'], help='Workspace ID of the Log Analytics workspace to send diagnostics logs to. You can use \"az monitor log-analytics workspace create\" to create one. Extra billing may apply.')
        c.argument('logs_key', options_list=['--logs-workspace-key'], help='Log Analytics workspace key to configure your Log Analytics workspace. You can use \"az monitor log-analytics workspace get-shared-keys\" to retrieve the key.')
        c.ignore('no_wait')
|
||||
|
||||
with self.argument_context('containerapp up', arg_group='Github Repo') as c:
|
||||
c.argument('repo', help='Create an app via Github Actions. In the format: https://github.com/<owner>/<repository-name> or <owner>/<repository-name>')
|
||||
c.argument('token', help='A Personal Access Token with write access to the specified repository. For more information: https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line. If not provided or not found in the cache (and using --repo), a browser page will be opened to authenticate with Github.')
|
||||
c.argument('branch', options_list=['--branch', '-b'], help='The branch of the Github repo. Assumed to be the Github repo\'s default branch if not specified.')
|
||||
c.argument('context_path', help='Path in the repo from which to run the docker build. Defaults to "./". Dockerfile is assumed to be named "Dockerfile" and in this directory.')
|
||||
c.argument('service_principal_client_id', help='The service principal client ID. Used by Github Actions to authenticate with Azure.', options_list=["--service-principal-client-id", "--sp-cid"])
|
||||
c.argument('service_principal_client_secret', help='The service principal client secret. Used by Github Actions to authenticate with Azure.', options_list=["--service-principal-client-secret", "--sp-sec"])
|
||||
c.argument('service_principal_tenant_id', help='The service principal tenant ID. Used by Github Actions to authenticate with Azure.', options_list=["--service-principal-tenant-id", "--sp-tid"])
|
||||
|
||||
with self.argument_context('containerapp auth') as c:
|
||||
# subgroup update
|
||||
c.argument('client_id', help='The Client ID of the app used for login.')
|
||||
c.argument('client_secret', help='The client secret.')
|
||||
c.argument('client_secret_setting_name', options_list=['--client-secret-name'], help='The app secret name that contains the client secret of the relying party application.')
|
||||
c.argument('issuer', help='The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.')
|
||||
c.argument('allowed_token_audiences', options_list=['--allowed-token-audiences', '--allowed-audiences'], help='The configuration settings of the allowed list of audiences from which to validate the JWT token.')
|
||||
c.argument('client_secret_certificate_thumbprint', options_list=['--thumbprint', '--client-secret-certificate-thumbprint'], help='Alternative to AAD Client Secret, thumbprint of a certificate used for signing purposes')
|
||||
c.argument('client_secret_certificate_san', options_list=['--san', '--client-secret-certificate-san'], help='Alternative to AAD Client Secret and thumbprint, subject alternative name of a certificate used for signing purposes')
|
||||
c.argument('client_secret_certificate_issuer', options_list=['--certificate-issuer', '--client-secret-certificate-issuer'], help='Alternative to AAD Client Secret and thumbprint, issuer of a certificate used for signing purposes')
|
||||
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
|
||||
c.argument('tenant_id', help='The tenant id of the application.')
|
||||
c.argument('app_id', help='The App ID of the app used for login.')
|
||||
c.argument('app_secret', help='The app secret.')
|
||||
c.argument('app_secret_setting_name', options_list=['--app-secret-name', '--secret-name'], help='The app secret name that contains the app secret.')
|
||||
c.argument('graph_api_version', help='The version of the Facebook api to be used while logging in.')
|
||||
c.argument('scopes', help='A list of the scopes that should be requested while authenticating.')
|
||||
c.argument('consumer_key', help='The OAuth 1.0a consumer key of the Twitter application used for sign-in.')
|
||||
c.argument('consumer_secret', help='The consumer secret.')
|
||||
c.argument('consumer_secret_setting_name', options_list=['--consumer-secret-name', '--secret-name'], help='The consumer secret name that contains the app secret.')
|
||||
c.argument('provider_name', required=True, help='The name of the custom OpenID Connect provider.')
|
||||
c.argument('openid_configuration', help='The endpoint that contains all the configuration endpoints for the provider.')
|
||||
# auth update
|
||||
c.argument('set_string', options_list=['--set'], help='Value of a specific field within the configuration settings for the Azure App Service Authentication / Authorization feature.')
|
||||
c.argument('config_file_path', help='The path of the config file containing auth settings if they come from a file.')
|
||||
c.argument('unauthenticated_client_action', options_list=['--unauthenticated-client-action', '--action'], arg_type=get_enum_type(UNAUTHENTICATED_CLIENT_ACTION), help='The action to take when an unauthenticated client attempts to access the app.')
|
||||
c.argument('redirect_provider', help='The default authentication provider to use when multiple providers are configured.')
|
||||
c.argument('require_https', arg_type=get_three_state_flag(), help='false if the authentication/authorization responses not having the HTTPS scheme are permissible; otherwise, true.')
|
||||
c.argument('proxy_convention', arg_type=get_enum_type(FORWARD_PROXY_CONVENTION), help='The convention used to determine the url of the request made.')
|
||||
c.argument('proxy_custom_host_header', options_list=['--proxy-custom-host-header', '--custom-host-header'], help='The name of the header containing the host of the request.')
|
||||
c.argument('proxy_custom_proto_header', options_list=['--proxy-custom-proto-header', '--custom-proto-header'], help='The name of the header containing the scheme of the request.')
|
||||
c.argument('excluded_paths', help='The list of paths that should be excluded from authentication rules.')
|
||||
c.argument('enabled', arg_type=get_three_state_flag(), help='true if the Authentication / Authorization feature is enabled for the current app; otherwise, false.')
|
||||
c.argument('runtime_version', help='The RuntimeVersion of the Authentication / Authorization feature in use for the current app.')
|
||||
|
||||
with self.argument_context('containerapp ssl upload') as c:
|
||||
c.argument('hostname', help='The custom domain name.')
|
||||
c.argument('environment', options_list=['--environment', '-e'], help='Name or resource id of the Container App environment.')
|
||||
c.argument('certificate_file', options_list=['--certificate-file', '-f'], help='The filepath of the .pfx or .pem file')
|
||||
c.argument('certificate_password', options_list=['--password', '-p'], help='The certificate file password')
|
||||
c.argument('certificate_name', options_list=['--certificate-name', '-c'], help='Name of the certificate which should be unique within the Container Apps environment.')
|
||||
|
||||
with self.argument_context('containerapp hostname bind') as c:
|
||||
c.argument('hostname', help='The custom domain name.')
|
||||
c.argument('thumbprint', options_list=['--thumbprint', '-t'], help='Thumbprint of the certificate.')
|
||||
c.argument('certificate', options_list=['--certificate', '-c'], help='Name or resource id of the certificate.')
|
||||
c.argument('environment', options_list=['--environment', '-e'], help='Name or resource id of the Container App environment.')
|
||||
c.argument('validation_method', options_list=['--validation-method', '-v'], help='Validation method of custom domain ownership.')
|
||||
|
||||
with self.argument_context('containerapp hostname add') as c:
|
||||
c.argument('hostname', help='The custom domain name.')
|
||||
c.argument('location', arg_type=get_location_type(self.cli_ctx))
|
||||
|
||||
with self.argument_context('containerapp hostname list') as c:
|
||||
c.argument('name', id_part=None)
|
||||
c.argument('location', arg_type=get_location_type(self.cli_ctx))
|
||||
|
||||
with self.argument_context('containerapp hostname delete') as c:
|
||||
c.argument('hostname', help='The custom domain name.')
|
||||
|
||||
# Compose
|
||||
with self.argument_context('containerapp compose create') as c:
|
||||
c.argument('environment', options_list=['--environment', '-e'], help='Name or resource id of the Container App environment.')
|
||||
c.argument('compose_file_path', options_list=['--compose-file-path', '-f'], help='Path to a Docker Compose file with the configuration to import to Azure Container Apps.')
|
||||
c.argument('transport_mapping', options_list=['--transport-mapping', c.deprecate(target='--transport', redirect='--transport-mapping')], action='append', nargs='+', help="Transport options per Container App instance (servicename=transportsetting).")
|
||||
|
||||
with self.argument_context('containerapp env workload-profile') as c:
|
||||
c.argument('env_name', options_list=['--name', '-n'], help="The name of the Container App environment")
|
||||
c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help='The friendly name for the workload profile')
|
||||
|
||||
with self.argument_context('containerapp env workload-profile add') as c:
|
||||
c.argument('workload_profile_type', help="The type of workload profile to add to this environment. Run 'az containerapp env workload-profile list-supported -l <region>' to check the options for your region.")
|
||||
c.argument('min_nodes', help="The minimum node count for the workload profile")
|
||||
c.argument('max_nodes', help="The maximum node count for the workload profile")
|
||||
|
||||
with self.argument_context('containerapp env workload-profile update') as c:
|
||||
c.argument('workload_profile_type', help="The type of workload profile to update. Run 'az containerapp env workload-profile list-supported -l <region>' to check the options for your region.")
|
||||
c.argument('min_nodes', help="The minimum node count for the workload profile")
|
||||
c.argument('max_nodes', help="The maximum node count for the workload profile")
|
||||
|
||||
# Container App job
|
||||
with self.argument_context('containerapp job') as c:
|
||||
c.argument('name', name_type, metavar='NAME', id_part='name', help=f"The name of the Container Apps Job. A name must consist of lower case alphanumeric characters or '-', start with a letter, end with an alphanumeric character, cannot have '--', and must be less than {MAXIMUM_CONTAINER_APP_NAME_LENGTH} characters.")
|
||||
c.argument('cron_expression', help='Cron expression. Only supported for trigger type "Schedule"')
|
||||
c.argument('image', help="Container image, e.g. publisher/image-name:tag.")
|
||||
c.argument('replica_completion_count', type=int, options_list=['--replica-completion-count', '--rcc'], help='Number of replicas that need to complete successfully for execution to succeed.')
|
||||
c.argument('replica_retry_limit', type=int, help='Maximum number of retries before the replica fails.')
|
||||
c.argument('replica_timeout', type=int, help='Maximum number of seconds a replica can execute.')
|
||||
c.argument('parallelism', type=int, help='Maximum number of replicas to run per execution.')
|
||||
c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help='The friendly name for the workload profile')
|
||||
c.argument('min_executions', type=int, help="Minimum number of job executions that are created for a trigger, default 0.")
|
||||
c.argument('max_executions', type=int, help="Maximum number of job executions that are created for a trigger, default 100.")
|
||||
c.argument('polling_interval', type=int, help="Interval to check each event source in seconds. Defaults to 30s.", default=30)
|
||||
|
||||
with self.argument_context('containerapp job create') as c:
|
||||
c.argument('system_assigned', options_list=['--mi-system-assigned', c.deprecate(target='--system-assigned', redirect='--mi-system-assigned', hide=True)], help='Boolean indicating whether to assign system-assigned identity.', action='store_true')
|
||||
c.argument('trigger_type', help='Trigger type. Schedule | Event | Manual')
|
||||
c.argument('user_assigned', options_list=['--mi-user-assigned', c.deprecate(target='--user-assigned', redirect='--mi-user-assigned', hide=True)], nargs='+', help='Space-separated user identities to be assigned.')
|
||||
|
||||
with self.argument_context('containerapp job', arg_group='Scale') as c:
|
||||
c.argument('min_executions', type=int, help="Minimum number of job executions to run per polling interval.")
|
||||
c.argument('max_executions', type=int, help="Maximum number of job executions to run per polling interval.")
|
||||
c.argument('polling_interval', type=int, help="Interval to check each event source in seconds. Defaults to 30s.")
|
||||
c.argument('scale_rule_type', options_list=['--scale-rule-type', '--srt'], help="The type of the scale rule.")
|
||||
|
||||
with self.argument_context('containerapp job stop') as c:
|
||||
c.argument('job_execution_name', help='name of the specific job execution which needs to be stopped.')
|
||||
c.argument('execution_name_list', help='comma separated list of job execution names.')
|
||||
|
||||
with self.argument_context('containerapp job execution') as c:
|
||||
c.argument('name', id_part=None)
|
||||
c.argument('job_execution_name', help='name of the specific job execution.')
|
||||
|
||||
with self.argument_context('containerapp job secret') as c:
|
||||
c.argument('secrets', nargs='+', options_list=['--secrets', '-s'], help="A list of secret(s) for the container app job. Space-separated values in 'key=value' or 'key=keyvaultref:keyvaulturl,identityref:identity' format (where 'key' cannot be longer than 20 characters).")
|
||||
c.argument('name', id_part=None, help="The name of the container app job for which the secret needs to be retrieved.")
|
||||
c.argument('secret_name', id_part=None, help="The name of the secret to show.")
|
||||
c.argument('secret_names', id_part=None, nargs='+', help="A list of secret(s) for the container app job. Space-separated secret values names.")
|
||||
c.argument('show_values', action='store_true', help='Show the secret values.')
|
||||
c.ignore('disable_max_length')
|
||||
|
||||
with self.argument_context('containerapp job identity') as c:
|
||||
c.argument('user_assigned', nargs='+', help="Space-separated user identities.")
|
||||
c.argument('system_assigned', help="Boolean indicating whether to assign system-assigned identity.", action='store_true')
|
||||
|
||||
with self.argument_context('containerapp job identity remove') as c:
|
||||
c.argument('user_assigned', nargs='*', help="Space-separated user identities. If no user identities are specified, all user identities will be removed.")
|
|
@ -0,0 +1,303 @@
|
|||
# coding=utf-8
|
||||
# --------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
# --------------------------------------------------------------------------
|
||||
|
||||
from enum import Enum
|
||||
from azure.core import CaseInsensitiveEnumMeta
|
||||
|
||||
|
||||
class AccessMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Access mode for storage: ReadOnly or ReadWrite."""

    READ_ONLY = "ReadOnly"
    READ_WRITE = "ReadWrite"
|
||||
|
||||
|
||||
class Action(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Allow or Deny rule action for an incoming IP. Note: rules can only consist of ALL Allow
    or ALL Deny.
    """

    ALLOW = "Allow"
    DENY = "Deny"
|
||||
|
||||
|
||||
class ActiveRevisionsMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """ActiveRevisionsMode controls how active revisions are handled for the Container app.

    Multiple: multiple revisions can be active. Single: only one revision can be active at a
    time; revision weights cannot be used in this mode. If no value is provided, this is the
    default.
    """

    MULTIPLE = "Multiple"
    SINGLE = "Single"
|
||||
|
||||
|
||||
class Affinity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Sticky session affinity setting."""

    STICKY = "sticky"
    NONE = "none"
|
||||
|
||||
|
||||
class Applicability(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Indicates whether the profile is default for the location."""

    LOCATION_DEFAULT = "LocationDefault"
    CUSTOM = "Custom"
|
||||
|
||||
|
||||
class AppProtocol(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Tells Dapr which protocol your application is using. Valid options are http and grpc.
    Default is http.
    """

    HTTP = "http"
    GRPC = "grpc"
|
||||
|
||||
|
||||
class BindingType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Custom Domain binding type: disabled or SNI-enabled."""

    DISABLED = "Disabled"
    SNI_ENABLED = "SniEnabled"
|
||||
|
||||
|
||||
class CertificateProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Provisioning state of the certificate resource."""

    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELED = "Canceled"
    DELETE_FAILED = "DeleteFailed"
    PENDING = "Pending"
|
||||
|
||||
|
||||
class CheckNameAvailabilityReason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The reason why the given name is not available (invalid or already in use)."""

    INVALID = "Invalid"
    ALREADY_EXISTS = "AlreadyExists"
|
||||
|
||||
|
||||
class ConnectedEnvironmentProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Provisioning state of the connected Kubernetes Environment."""

    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELED = "Canceled"
    WAITING = "Waiting"
    INITIALIZATION_IN_PROGRESS = "InitializationInProgress"
    INFRASTRUCTURE_SETUP_IN_PROGRESS = "InfrastructureSetupInProgress"
    INFRASTRUCTURE_SETUP_COMPLETE = "InfrastructureSetupComplete"
    SCHEDULED_FOR_DELETE = "ScheduledForDelete"
|
||||
|
||||
|
||||
class ContainerAppProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Provisioning state of the Container App resource."""

    IN_PROGRESS = "InProgress"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELED = "Canceled"
    DELETING = "Deleting"
|
||||
|
||||
|
||||
class CookieExpirationConvention(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The convention used when determining the session cookie's expiration time."""

    FIXED_TIME = "FixedTime"
    IDENTITY_PROVIDER_DERIVED = "IdentityProviderDerived"
|
||||
|
||||
|
||||
class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The kind of identity that created the resource."""

    USER = "User"
    APPLICATION = "Application"
    MANAGED_IDENTITY = "ManagedIdentity"
    KEY = "Key"
|
||||
|
||||
|
||||
class DnsVerificationTestResult(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Result of a DNS verification test."""

    PASSED = "Passed"
    FAILED = "Failed"
    SKIPPED = "Skipped"
|
||||
|
||||
|
||||
class EnvironmentProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Provisioning state of the (managed) Environment."""

    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELED = "Canceled"
    WAITING = "Waiting"
    INITIALIZATION_IN_PROGRESS = "InitializationInProgress"
    INFRASTRUCTURE_SETUP_IN_PROGRESS = "InfrastructureSetupInProgress"
    INFRASTRUCTURE_SETUP_COMPLETE = "InfrastructureSetupComplete"
    SCHEDULED_FOR_DELETE = "ScheduledForDelete"
    UPGRADE_REQUESTED = "UpgradeRequested"
    UPGRADE_FAILED = "UpgradeFailed"
|
||||
|
||||
|
||||
class ExtendedLocationTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The type of an extendedLocation."""

    CUSTOM_LOCATION = "CustomLocation"
|
||||
|
||||
|
||||
class ForwardProxyConvention(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The convention used to determine the URL of the request made."""

    NO_PROXY = "NoProxy"
    STANDARD = "Standard"
    CUSTOM = "Custom"
|
||||
|
||||
|
||||
class IngressClientCertificateMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Client certificate mode for mTLS authentication. Ignore indicates the server drops the
    client certificate on forwarding. Accept indicates the server forwards the client
    certificate but does not require one. Require indicates the server requires a client
    certificate.
    """

    IGNORE = "ignore"
    ACCEPT = "accept"
    REQUIRE = "require"
|
||||
|
||||
|
||||
class IngressTransportMethod(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Ingress transport protocol."""

    AUTO = "auto"
    HTTP = "http"
    HTTP2 = "http2"
    TCP = "tcp"
|
||||
|
||||
|
||||
class JobExecutionRunningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Current running state of the job execution."""

    RUNNING = "Running"
    PROCESSING = "Processing"
    STOPPED = "Stopped"
    DEGRADED = "Degraded"
    FAILED = "Failed"
    UNKNOWN = "Unknown"
    SUCCEEDED = "Succeeded"
|
||||
|
||||
|
||||
class JobProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Provisioning state of the Container Apps Job."""

    IN_PROGRESS = "InProgress"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELED = "Canceled"
    DELETING = "Deleting"
|
||||
|
||||
|
||||
class LogLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Sets the log level for the Dapr sidecar. Allowed values are debug, info, warn, error.
    Default is info.
    """

    INFO = "info"
    DEBUG = "debug"
    WARN = "warn"
    ERROR = "error"
|
||||
|
||||
|
||||
class ManagedCertificateDomainControlValidation(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Selected type of domain-control validation for managed certificates."""

    CNAME = "CNAME"
    HTTP = "HTTP"
    TXT = "TXT"
|
||||
|
||||
|
||||
class ManagedServiceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Type of managed service identity (where both SystemAssigned and UserAssigned types are
    allowed).
    """

    NONE = "None"
    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    # Combined value: both a system-assigned and user-assigned identities are present.
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
|
||||
|
||||
|
||||
class RevisionHealthState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Current health state of the revision."""

    HEALTHY = "Healthy"
    UNHEALTHY = "Unhealthy"
    NONE = "None"
|
||||
|
||||
|
||||
class RevisionProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Current provisioning state of the revision."""

    PROVISIONING = "Provisioning"
    PROVISIONED = "Provisioned"
    FAILED = "Failed"
    DEPROVISIONING = "Deprovisioning"
    DEPROVISIONED = "Deprovisioned"
|
||||
|
||||
|
||||
class Scheme(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Scheme to use for connecting to the host. Defaults to HTTP."""

    HTTP = "HTTP"
    HTTPS = "HTTPS"
|
||||
|
||||
|
||||
class SourceControlOperationState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Current provisioning state of the source-control operation."""

    IN_PROGRESS = "InProgress"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELED = "Canceled"
|
||||
|
||||
|
||||
class StorageType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Storage type for the volume. If not provided, EmptyDir is used."""

    AZURE_FILE = "AzureFile"
    EMPTY_DIR = "EmptyDir"
    SECRET = "Secret"
|
||||
|
||||
|
||||
class TriggerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Trigger type of the job."""

    SCHEDULED = "Scheduled"
    EVENT = "Event"
    MANUAL = "Manual"
|
||||
|
||||
|
||||
class Type(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The type of health probe."""

    LIVENESS = "Liveness"
    READINESS = "Readiness"
    STARTUP = "Startup"
|
||||
|
||||
|
||||
class UnauthenticatedClientActionV2(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The action to take when an unauthenticated client attempts to access the app."""

    REDIRECT_TO_LOGIN_PAGE = "RedirectToLoginPage"
    ALLOW_ANONYMOUS = "AllowAnonymous"
    RETURN401 = "Return401"
    RETURN403 = "Return403"
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,193 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=logging-fstring-interpolation
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import threading
|
||||
import urllib
|
||||
import requests
|
||||
import websocket
|
||||
|
||||
from knack.log import get_logger
|
||||
from azure.cli.core.azclierror import CLIInternalError, ValidationError
|
||||
from azure.cli.core.commands.client_factory import get_subscription_id
|
||||
|
||||
from ._clients import ContainerAppClient
|
||||
from ._utils import safe_get, is_platform_windows
|
||||
|
||||
# pylint: disable=import-error,ungrouped-imports
|
||||
if is_platform_windows():
|
||||
import msvcrt
|
||||
from azure.cli.command_modules.container._vt_helper import (enable_vt_mode, _get_conout_mode,
|
||||
_set_conout_mode, _get_conin_mode, _set_conin_mode)
|
||||
|
||||
logger = get_logger(__name__)

# SSH control byte values for container app proxy.
# First byte of every websocket frame (see read_ssh: response[0]).
SSH_PROXY_FORWARD = 0
SSH_PROXY_INFO = 1
SSH_PROXY_ERROR = 2

# SSH control byte values for container app cluster.
# Second byte of a forwarded frame, identifying the stream (see read_ssh: response[1]).
SSH_CLUSTER_STDIN = 0
SSH_CLUSTER_STDOUT = 1
SSH_CLUSTER_STDERR = 2

# forward byte + stdin byte
SSH_INPUT_PREFIX = b"\x00\x00"

# forward byte + terminal resize byte
SSH_TERM_RESIZE_PREFIX = b"\x00\x04"

# Encodings tried when decoding forwarded output (latin_1 is the lossless fallback).
SSH_DEFAULT_ENCODING = "utf-8"
SSH_BACKUP_ENCODING = "latin_1"

# forward byte + stdin byte + 0x03 (ETX, i.e. Ctrl+C)
SSH_CTRL_C_MSG = b"\x00\x00\x03"
|
||||
|
||||
|
||||
class WebSocketConnection:
    """Authenticated websocket session to a container app replica's exec endpoint.

    Construction fetches an auth token for the app, resolves the replica's
    log-stream endpoint to derive the proxy host, builds the wss:// exec URL,
    and opens the socket. On Windows the original console modes are captured so
    disconnect() can restore them.
    """

    def __init__(self, cmd, resource_group_name, name, revision, replica, container, startup_command):
        token_response = ContainerAppClient.get_auth_token(cmd, resource_group_name, name)
        self._token = token_response["properties"]["token"]
        self._logstream_endpoint = self._get_logstream_endpoint(cmd, resource_group_name, name,
                                                                revision, replica, container)
        self._url = self._get_url(cmd=cmd, resource_group_name=resource_group_name, name=name, revision=revision,
                                  replica=replica, container=container, startup_command=startup_command)
        # enable_multithread: the reader loop and the stdin-writer thread share this socket.
        self._socket = websocket.WebSocket(enable_multithread=True)
        logger.info("Attempting to connect to %s", self._url)
        self._socket.connect(self._url, header=[f"Authorization: Bearer {self._token}"])

        self.is_connected = True
        # Saved Windows console modes (None on other platforms); restored in disconnect().
        self._windows_conout_mode = None
        self._windows_conin_mode = None
        if is_platform_windows():
            self._windows_conout_mode = _get_conout_mode()
            self._windows_conin_mode = _get_conin_mode()

    @classmethod
    def _get_logstream_endpoint(cls, cmd, resource_group_name, name, revision, replica, container):
        """Return the logStreamEndpoint of the named container within the replica.

        :raises ValidationError: if the replica has no container with that name.
        """
        containers = ContainerAppClient.get_replica(cmd,
                                                    resource_group_name,
                                                    name, revision, replica)["properties"]["containers"]
        container_info = [c for c in containers if c["name"] == container]
        if not container_info:
            raise ValidationError(f"No such container: {container}")
        return container_info[0]["logStreamEndpoint"]

    def _get_url(self, cmd, resource_group_name, name, revision, replica, container, startup_command):
        """Build the wss:// exec URL, reusing the log-stream endpoint's proxy host."""
        sub = get_subscription_id(cmd.cli_ctx)
        base_url = self._logstream_endpoint
        # Host portion of the endpoint: everything before "/subscriptions/", scheme stripped.
        proxy_api_url = base_url[:base_url.index("/subscriptions/")].replace("https://", "")
        encoded_cmd = urllib.parse.quote_plus(startup_command)

        return (f"wss://{proxy_api_url}/subscriptions/{sub}/resourceGroups/{resource_group_name}/containerApps/{name}"
                f"/revisions/{revision}/replicas/{replica}/containers/{container}/exec"
                f"?command={encoded_cmd}")

    def disconnect(self):
        """Close the socket and restore the saved Windows console modes, if any."""
        logger.warning("Disconnecting...")
        self.is_connected = False
        self._socket.close()
        if self._windows_conout_mode and self._windows_conin_mode:
            _set_conout_mode(self._windows_conout_mode)
            _set_conin_mode(self._windows_conin_mode)

    def send(self, *args, **kwargs):
        # Thin pass-through to the underlying websocket.
        return self._socket.send(*args, **kwargs)

    def recv(self, *args, **kwargs):
        # Thin pass-through to the underlying websocket.
        return self._socket.recv(*args, **kwargs)
|
||||
|
||||
|
||||
def _decode_and_output_to_terminal(connection: WebSocketConnection, response, encodings):
    """Decode a forwarded stdout/stderr frame and write it to the local terminal.

    The first two bytes of *response* are the proxy and cluster control bytes
    and are skipped. *encodings* is the ordered list of encodings to try; if
    none can decode the payload, the connection is closed, diagnostics are
    logged, and CLIInternalError is raised (chained to the UnicodeDecodeError).
    """
    for i, encoding in enumerate(encodings):
        try:
            print(response[2:].decode(encoding), end="", flush=True)
            break
        except UnicodeDecodeError as e:
            if i == len(encodings) - 1:  # ran out of encodings to try
                connection.disconnect()
                logger.info("Proxy Control Byte: %s", response[0])
                logger.info("Cluster Control Byte: %s", response[1])
                logger.info("Hexdump: %s", response[2:].hex())
                raise CLIInternalError("Failed to decode server data") from e
            # Fix: the failure being logged is a *decode* failure, not an encode failure.
            logger.info("Failed to decode with encoding %s", encoding)
|
||||
|
||||
|
||||
def read_ssh(connection: WebSocketConnection, response_encodings):
    """Pump messages from the websocket to the local terminal until disconnect.

    *response_encodings* is the ordered list of Unicode encodings tried when
    decoding forwarded stdout/stderr data before raising an exception.
    """
    # Terminal size only needs to be communicated once for the whole session.
    _resize_terminal(connection)

    while connection.is_connected:
        response = connection.recv()
        if not response:
            # Empty frame means the remote side went away.
            connection.disconnect()
            continue

        logger.info("Received raw response %s", response.hex())
        proxy_status = response[0]
        if proxy_status == SSH_PROXY_INFO:
            print(f"INFO: {response[1:].decode(SSH_DEFAULT_ENCODING)}")
        elif proxy_status == SSH_PROXY_ERROR:
            print(f"ERROR: {response[1:].decode(SSH_DEFAULT_ENCODING)}")
        elif proxy_status == SSH_PROXY_FORWARD:
            control_byte = response[1]
            if control_byte not in (SSH_CLUSTER_STDOUT, SSH_CLUSTER_STDERR):
                connection.disconnect()
                raise CLIInternalError("Unexpected message received")
            _decode_and_output_to_terminal(connection, response, response_encodings)
|
||||
|
||||
|
||||
def _send_stdin(connection: WebSocketConnection, getch_fn):
    """Forward keystrokes from getch_fn to the websocket until the connection closes."""
    while connection.is_connected:
        ch = getch_fn()
        # the connection may have dropped while blocked inside getch_fn; re-check
        if connection.is_connected:
            connection.send(b"".join([SSH_INPUT_PREFIX, ch]))
|
||||
|
||||
|
||||
def _resize_terminal(connection: WebSocketConnection):
    """Report the local terminal's dimensions to the server as a JSON resize message."""
    size = os.get_terminal_size()
    if connection.is_connected:
        # payload: resize prefix byte(s) + '{"Width": W, "Height": H}'
        connection.send(b"".join([SSH_TERM_RESIZE_PREFIX,
                                  f'{{"Width": {size.columns}, '
                                  f'"Height": {size.lines}}}'.encode(SSH_DEFAULT_ENCODING)]))
|
||||
|
||||
|
||||
def _getch_unix():
    # Blocking single-character read from stdin, returned as bytes.
    # (The terminal is put into cbreak mode by get_stdin_writer.)
    return sys.stdin.read(1).encode(SSH_DEFAULT_ENCODING)
|
||||
|
||||
|
||||
def _getch_windows():
    # Poll for a keypress so the loop can wake up periodically instead of
    # blocking indefinitely inside msvcrt.getch().
    while not msvcrt.kbhit():
        time.sleep(0.01)
    return msvcrt.getch()
|
||||
|
||||
|
||||
def ping_container_app(app):
    """Best-effort GET against the app's ingress FQDN (used to wake an alive replica).

    Failures are logged at info level and never raised, except for non-timeout
    requests errors which propagate to the caller.
    """
    site = safe_get(app, "properties", "configuration", "ingress", "fqdn")
    if site:
        try:
            resp = requests.get(f'https://{site}', timeout=30)
            if not resp.ok:
                # lazy %-args for logging, consistent with the rest of this module
                logger.info("Got bad status pinging app: %s", resp.status_code)
        except requests.exceptions.ReadTimeout:
            logger.info("Timed out while pinging app external URL")
    else:
        logger.info("Could not fetch site external URL")
|
||||
|
||||
|
||||
def get_stdin_writer(connection: WebSocketConnection):
    """Build (but do not start) a thread that pumps local stdin to the websocket.

    Platform-specific setup happens here as a side effect: cbreak mode on Unix,
    VT mode on Windows.
    """
    if not is_platform_windows():
        import tty
        tty.setcbreak(sys.stdin.fileno())  # needed to prevent printing arrow key characters
        writer = threading.Thread(target=_send_stdin, args=(connection, _getch_unix))
    else:
        enable_vt_mode()  # needed for interactive commands (ie vim)
        writer = threading.Thread(target=_send_stdin, args=(connection, _getch_windows))

    return writer
|
|
@ -0,0 +1,51 @@
|
|||
# coding=utf-8
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=bare-except
|
||||
|
||||
|
||||
def transform_containerapp_output(app):
    """Project a container app payload down to the summary columns for table output."""
    wanted = ('name', 'location', 'resourceGroup', 'provisioningState')
    summary = {key: value for key, value in app.items() if key in wanted}

    try:
        summary['fqdn'] = app['properties']['configuration']['ingress']['fqdn']
    except:  # any missing key along the path means the app exposes no ingress FQDN
        summary['fqdn'] = None

    return summary
|
||||
|
||||
|
||||
def transform_containerapp_list_output(apps):
    """Apply the single-app table projection to every app in the list."""
    return [transform_containerapp_output(item) for item in apps]
|
||||
|
||||
|
||||
def transform_revision_output(rev):
    """Flatten a revision payload into the columns used for table output."""
    wanted = ('name', 'active', 'createdTime', 'trafficWeight', 'healthState', 'provisioningState', 'replicas')
    properties = rev['properties']
    summary = {key: properties[key] for key in properties if key in wanted}

    # the top-level resource name wins over any 'name' inside properties
    if 'name' in rev:
        summary['name'] = rev['name']

    template = properties['template']
    if 'fqdn' in template:
        summary['fqdn'] = template['fqdn']

    return summary
|
||||
|
||||
|
||||
def transform_revision_list_output(revs):
    """Apply the single-revision table projection to every revision in the list."""
    return [transform_revision_output(item) for item in revs]
|
||||
|
||||
|
||||
def transform_job_execution_show_output(execution):
    """Summarize a job execution as name/startTime/status for table output."""
    properties = execution['properties']
    summary = {'name': execution['name']}
    summary['startTime'] = properties['startTime']
    summary['status'] = properties['status']
    return summary
|
||||
|
||||
|
||||
def transform_job_execution_list_output(executions):
    """Apply the single-execution table projection to every execution in the list."""
    return [transform_job_execution_show_output(item) for item in executions]
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,229 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, unused-argument
|
||||
|
||||
import re
|
||||
from azure.cli.core.azclierror import (ValidationError, ResourceNotFoundError, InvalidArgumentValueError,
|
||||
MutuallyExclusiveArgumentError)
|
||||
from msrestazure.tools import is_valid_resource_id
|
||||
from knack.log import get_logger
|
||||
|
||||
from ._clients import ContainerAppClient
|
||||
from ._ssh_utils import ping_container_app
|
||||
from ._utils import safe_get, is_registry_msi_system
|
||||
from ._constants import ACR_IMAGE_SUFFIX, LOG_TYPE_SYSTEM
|
||||
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
# called directly from custom method bc otherwise it disrupts the --environment auto RID functionality
|
||||
# called directly from custom method bc otherwise it disrupts the --environment auto RID functionality
def validate_create(registry_identity, registry_pass, registry_user, registry_server, no_wait):
    """Cross-argument checks for registry options on `containerapp create`.

    Check order matters: mutually-exclusive combinations are rejected before
    the identity's format and the registry server's suffix are examined.
    """
    identity = registry_identity
    if identity and (registry_pass or registry_user):
        raise MutuallyExclusiveArgumentError("Cannot provide both registry identity and username/password")
    if is_registry_msi_system(identity) and no_wait:
        raise MutuallyExclusiveArgumentError("--no-wait is not supported with system registry identity")
    if identity and not is_valid_resource_id(identity) and not is_registry_msi_system(identity):
        raise InvalidArgumentValueError("--registry-identity must be an identity resource ID or 'system'")
    if identity and ACR_IMAGE_SUFFIX not in (registry_server or ""):
        raise InvalidArgumentValueError("--registry-identity: expected an ACR registry (*.azurecr.io) for --registry-server")
|
||||
|
||||
|
||||
def _is_number(s):
    """Return True when *s* parses as a float (e.g. "0.5", "2"), else False.

    Also treats non-string/non-numeric inputs (e.g. None) as "not a number"
    instead of letting float() raise TypeError — backward compatible for all
    string inputs.
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False
|
||||
|
||||
|
||||
def validate_revision_suffix(value):
    """Validate a --revision-suffix value; None is a no-op.

    The pattern enforces:
      ^[a-z0-9]              starts with a lowercase letter or digit
      (?!.*-{2})             never contains '--'
      ([-a-z0-9]*[a-z0-9])?  optional tail of [a-z0-9-] ending alphanumeric
    """
    if value is None:
        return
    if not re.match(r"^[a-z0-9](?!.*-{2})([-a-z0-9]*[a-z0-9])?$", value):
        raise ValidationError(f"Invalid Container App revision suffix '{value}'. A revision suffix must consist of lower case alphanumeric characters or '-', start with a letter or number, end with an alphanumeric character and cannot have '--'.")
|
||||
|
||||
|
||||
def validate_memory(namespace):
    """Normalize --memory to end in "Gi" and verify its numeric part.

    Mutates namespace.memory in place when the unit suffix is missing.
    """
    memory = namespace.memory
    if memory is None:
        return

    if not memory.endswith("Gi"):
        # tolerate trailing whitespace before appending the expected unit
        memory = memory.rstrip() + "Gi"
        namespace.memory = memory

    if not _is_number(memory[:-2]):
        raise ValidationError("Usage error: --memory must be a number ending with \"Gi\"")
|
||||
|
||||
|
||||
def validate_cpu(namespace):
    """Ensure --cpu, when given, parses as a number (e.g. "0.5")."""
    if not namespace.cpu:
        return
    try:
        float(namespace.cpu)
    except ValueError as e:
        raise ValidationError("Usage error: --cpu must be a number eg. \"0.5\"") from e
|
||||
|
||||
|
||||
def validate_managed_env_name_or_id(cmd, namespace):
    """Expand a bare managed-environment name into a full ARM resource ID in place."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import resource_id

    env = namespace.managed_env
    # already a full resource ID (or not provided): nothing to do
    if env and not is_valid_resource_id(env):
        namespace.managed_env = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.App',
            type='managedEnvironments',
            name=env
        )
|
||||
|
||||
|
||||
def validate_storage_name_or_id(cmd, namespace):
    """Expand a bare storage account name into a full ARM resource ID in place."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import resource_id

    account = namespace.storage_account
    # already a full resource ID (or not provided): nothing to do
    if account and not is_valid_resource_id(account):
        namespace.storage_account = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=account
        )
|
||||
|
||||
|
||||
def validate_registry_server(namespace):
    """On create: a non-ACR --registry-server requires both username and password."""
    if "create" not in namespace.command.lower():
        return
    server = namespace.registry_server
    if server and (not namespace.registry_user or not namespace.registry_pass):
        # ACR can authenticate without explicit credentials; anything else cannot
        if ACR_IMAGE_SUFFIX not in server:
            raise ValidationError("Usage error: --registry-server, --registry-password and --registry-username are required together if not using Azure Container Registry")
|
||||
|
||||
|
||||
def validate_registry_user(namespace):
    """On create: --registry-username needs a server, and a password unless the server is ACR."""
    if "create" not in namespace.command.lower():
        return
    if not namespace.registry_user:
        return
    if not namespace.registry_server or (not namespace.registry_pass and ACR_IMAGE_SUFFIX not in namespace.registry_server):
        raise ValidationError("Usage error: --registry-server, --registry-password and --registry-username are required together if not using Azure Container Registry")
|
||||
|
||||
|
||||
def validate_registry_pass(namespace):
    """On create: --registry-password needs a server, and a username unless the server is ACR."""
    if "create" not in namespace.command.lower():
        return
    if not namespace.registry_pass:
        return
    if not namespace.registry_server or (not namespace.registry_user and ACR_IMAGE_SUFFIX not in namespace.registry_server):
        raise ValidationError("Usage error: --registry-server, --registry-password and --registry-username are required together if not using Azure Container Registry")
|
||||
|
||||
|
||||
def validate_target_port(namespace):
    """On create: --target-port only makes sense together with --ingress."""
    if "create" in namespace.command.lower() and namespace.target_port and not namespace.ingress:
        raise ValidationError("Usage error: must specify --ingress with --target-port")
|
||||
|
||||
|
||||
def validate_ingress(namespace):
    """On create: --ingress only makes sense together with --target-port."""
    if "create" in namespace.command.lower() and namespace.ingress and not namespace.target_port:
        raise ValidationError("Usage error: must specify --target-port with --ingress")
|
||||
|
||||
|
||||
def validate_allow_insecure(namespace):
    """On create: --allow-insecure requires HTTP ingress (--ingress + --target-port, not TCP)."""
    if "create" not in namespace.command.lower():
        return
    if not namespace.allow_insecure:
        return
    if not namespace.ingress or not namespace.target_port:
        raise ValidationError("Usage error: must specify --ingress and --target-port with --allow-insecure")
    if namespace.transport == "tcp":
        raise ValidationError("Usage error: --allow-insecure is not supported for TCP ingress")
|
||||
|
||||
|
||||
def _set_ssh_defaults(cmd, namespace):
    """Fill in missing ssh/logstream target arguments on the namespace.

    Resolves, in order: revision (defaults to the app's latestRevisionName),
    replica (first replica of the revision, after pinging the app to wake one),
    and container (first container of the revision's template).

    :raises ResourceNotFoundError: when the app, revision, or a replica cannot be found
    """
    app = ContainerAppClient.show(cmd, namespace.resource_group_name, namespace.name)
    if not app:
        raise ResourceNotFoundError("Could not find a container app")
    replicas = []
    if not namespace.revision:
        namespace.revision = app.get("properties", {}).get("latestRevisionName")
        if not namespace.revision:
            raise ResourceNotFoundError("Could not find a revision")
    if not namespace.replica:
        # VVV this may not be necessary according to Anthony Chu
        try:
            ping_container_app(app)  # needed to get an alive replica
        except Exception as e:  # pylint: disable=broad-except
            # best-effort: a failed ping only means there may be no warm replica
            logger.warning("Failed to ping container app with error '%s' \nPlease ensure there is an alive replica. ", str(e))
        replicas = ContainerAppClient.list_replicas(cmd=cmd,
                                                    resource_group_name=namespace.resource_group_name,
                                                    container_app_name=namespace.name,
                                                    revision_name=namespace.revision)
        if not replicas:
            raise ResourceNotFoundError("Could not find a replica for this app")
        namespace.replica = replicas[0]["name"]
    if not namespace.container:
        revision = ContainerAppClient.show_revision(cmd, resource_group_name=namespace.resource_group_name,
                                                    container_app_name=namespace.name,
                                                    name=namespace.revision)
        revision_containers = safe_get(revision, "properties", "template", "containers")
        if revision_containers:
            namespace.container = revision_containers[0]["name"]
|
||||
|
||||
|
||||
def _validate_revision_exists(cmd, namespace):
    """Fail fast when the targeted revision cannot be fetched."""
    found = ContainerAppClient.show_revision(cmd, resource_group_name=namespace.resource_group_name,
                                             container_app_name=namespace.name,
                                             name=namespace.revision)
    if not found:
        raise ResourceNotFoundError("Could not find revision")
|
||||
|
||||
|
||||
def _validate_replica_exists(cmd, namespace):
    """Fail fast when the targeted replica cannot be fetched."""
    found = ContainerAppClient.get_replica(
        cmd=cmd,
        resource_group_name=namespace.resource_group_name,
        container_app_name=namespace.name,
        revision_name=namespace.revision,
        replica_name=namespace.replica)
    if not found:
        raise ResourceNotFoundError("Could not find replica")
|
||||
|
||||
|
||||
def _validate_container_exists(cmd, namespace):
    """Fail fast when the requested container is not present in the chosen replica."""
    replica = ContainerAppClient.get_replica(
        cmd=cmd,
        resource_group_name=namespace.resource_group_name,
        container_app_name=namespace.name,
        revision_name=namespace.revision,
        replica_name=namespace.replica)
    containers = replica["properties"]["containers"]
    target = namespace.container.lower()
    # container names are compared case-insensitively
    if not any(c["name"].lower() == target for c in containers):
        raise ResourceNotFoundError("Could not find container")
|
||||
|
||||
|
||||
# also used to validate logstream
def validate_ssh(cmd, namespace):
    """Resolve defaults and verify revision/replica/container before ssh or logstream.

    Skipped when kind == system (environment-level logs have no app context),
    and also when a 'kind' attribute exists but is falsy — matching how the
    commands that share this validator populate the namespace.
    """
    needs_app_context = (not hasattr(namespace, "kind")
                         or (namespace.kind and namespace.kind.lower() != LOG_TYPE_SYSTEM))
    if needs_app_context:
        _set_ssh_defaults(cmd, namespace)
        _validate_revision_exists(cmd, namespace)
        _validate_replica_exists(cmd, namespace)
        _validate_container_exists(cmd, namespace)
|
||||
|
||||
|
||||
def validate_cors_max_age(cmd, namespace):
    """Validate that --max-age, when provided, is a non-negative integer string.

    :raises InvalidArgumentValueError: when the value is not an integer or is negative
    """
    if not namespace.max_age:
        # None and "" both mean "not provided"; the former code also had an
        # inner `== ""` check, but it was unreachable behind this truthiness guard
        return
    try:
        max_age = int(namespace.max_age)
    except ValueError as e:
        # chain the parse failure for debuggability (pylint raise-missing-from)
        raise InvalidArgumentValueError("max-age must be an integer.") from e
    if max_age < 0:
        raise InvalidArgumentValueError("max-age must be a positive integer.")
|
|
@ -0,0 +1,6 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
#
|
||||
# Code generated by aaz-dev-tools
|
||||
# --------------------------------------------------------------------------------------------
|
|
@ -0,0 +1,6 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
#
|
||||
# Code generated by aaz-dev-tools
|
||||
# --------------------------------------------------------------------------------------------
|
|
@ -0,0 +1,20 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
#
|
||||
# Code generated by aaz-dev-tools
|
||||
# --------------------------------------------------------------------------------------------
|
||||
|
||||
# pylint: skip-file
|
||||
# flake8: noqa
|
||||
|
||||
from azure.cli.core.aaz import *
|
||||
|
||||
|
||||
# NOTE: generated by aaz-dev-tools — do not edit by hand.
class __CMDGroup(AAZCommandGroup):
    """Manage Azure Network resources.
    """
    pass


# Only the command group is part of this module's public surface.
__all__ = ["__CMDGroup"]
|
|
@ -0,0 +1,11 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
#
|
||||
# Code generated by aaz-dev-tools
|
||||
# --------------------------------------------------------------------------------------------
|
||||
|
||||
# pylint: skip-file
|
||||
# flake8: noqa
|
||||
|
||||
from .__cmd_group import *
|
|
@ -0,0 +1,22 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
#
|
||||
# Code generated by aaz-dev-tools
|
||||
# --------------------------------------------------------------------------------------------
|
||||
|
||||
# pylint: skip-file
|
||||
# flake8: noqa
|
||||
|
||||
from azure.cli.core.aaz import *
|
||||
|
||||
|
||||
# NOTE: generated by aaz-dev-tools — do not edit by hand.
class __CMDGroup(AAZCommandGroup):
    """Check if a private IP address is available for use within a virtual network.

    To learn more about Virtual Networks visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-network.
    """
    pass


# Only the command group is part of this module's public surface.
__all__ = ["__CMDGroup"]
|
|
@ -0,0 +1,12 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
#
|
||||
# Code generated by aaz-dev-tools
|
||||
# --------------------------------------------------------------------------------------------
|
||||
|
||||
# pylint: skip-file
|
||||
# flake8: noqa
|
||||
|
||||
from .__cmd_group import *
|
||||
from ._show import *
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,66 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=broad-exception-caught, line-too-long, no-else-return
|
||||
from typing import Any, Dict
|
||||
|
||||
from azure.cli.core.commands import AzCliCommand
|
||||
from knack.util import CLIError
|
||||
|
||||
from ._client_factory import handle_raw_exception
|
||||
from ._utils import register_provider_if_needed, _validate_subscription_registered
|
||||
|
||||
|
||||
class BaseResource:
    """Common CRUD plumbing shared by containerapp resource decorators.

    Holds the AzCliCommand, the typed client, and the raw argument dict;
    subclasses read arguments through the get_param/get_argument_* accessors
    so missing arguments resolve to None instead of raising.
    """

    def __init__(
        self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str
    ):
        self.raw_param = raw_parameters  # raw CLI arguments, keyed by dest name
        self.cmd = cmd
        self.client = client
        self.models = models

    def register_provider(self, *rp_name_list):
        """Register each given resource provider on the subscription if needed."""
        for rp in rp_name_list:
            register_provider_if_needed(self.cmd, rp)

    def validate_subscription_registered(self, *rp_name_list):
        """Fail when any required resource provider is not registered."""
        for rp in rp_name_list:
            _validate_subscription_registered(self.cmd, rp)

    def list(self):
        """List subscription-wide, or by resource group when one was supplied."""
        try:
            if self.get_argument_resource_group_name() is None:
                return self.client.list_by_subscription(cmd=self.cmd)
            else:
                return self.client.list_by_resource_group(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name())
        except CLIError as e:
            # normalize raw service errors into CLI-friendly ones
            handle_raw_exception(e)

    def show(self):
        """Fetch a single resource by resource group and name."""
        try:
            return self.client.show(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name())
        except CLIError as e:
            handle_raw_exception(e)

    def delete(self):
        """Delete the resource; honors --no-wait."""
        try:
            return self.client.delete(cmd=self.cmd, name=self.get_argument_name(), resource_group_name=self.get_argument_resource_group_name(), no_wait=self.get_argument_no_wait())
        except CLIError as e:
            handle_raw_exception(e)

    def get_param(self, key) -> Any:
        # Returns None when the argument was not supplied.
        return self.raw_param.get(key)

    def set_param(self, key, value):
        self.raw_param[key] = value

    def get_argument_name(self):
        return self.get_param("name")

    def get_argument_resource_group_name(self):
        return self.get_param("resource_group_name")

    def get_argument_no_wait(self):
        return self.get_param("no_wait")
|
|
@ -0,0 +1,204 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
|
||||
# pylint: disable=line-too-long, too-many-statements, bare-except
|
||||
# from azure.cli.core.commands import CliCommandType
|
||||
# from msrestazure.tools import is_valid_resource_id, parse_resource_id
|
||||
from azure.cli.command_modules.containerapp._client_factory import ex_handler_factory
|
||||
from ._validators import validate_ssh
|
||||
from ._transformers import (transform_containerapp_output,
|
||||
transform_containerapp_list_output,
|
||||
transform_job_execution_list_output,
|
||||
transform_job_execution_show_output,
|
||||
transform_revision_list_output,
|
||||
transform_revision_output)
|
||||
|
||||
|
||||
def load_command_table(self, _):
    """Register every `az containerapp ...` command group and command.

    Purely declarative: each block binds a command group to its custom
    implementation functions, table transformers, validators, and exception
    handlers. Registration only — no command logic lives here.
    """
    # core app lifecycle
    with self.command_group('containerapp') as g:
        g.custom_show_command('show', 'show_containerapp', table_transformer=transform_containerapp_output)
        g.custom_command('list', 'list_containerapp', table_transformer=transform_containerapp_list_output)
        g.custom_command('create', 'create_containerapp', supports_no_wait=True, exception_handler=ex_handler_factory(), table_transformer=transform_containerapp_output)
        g.custom_command('update', 'update_containerapp', supports_no_wait=True, exception_handler=ex_handler_factory(), table_transformer=transform_containerapp_output)
        g.custom_command('delete', 'delete_containerapp', supports_no_wait=True, confirmation=True, exception_handler=ex_handler_factory())
        g.custom_command('exec', 'containerapp_ssh', validator=validate_ssh)
        g.custom_command('up', 'containerapp_up', supports_no_wait=False, exception_handler=ex_handler_factory())
        g.custom_command('browse', 'open_containerapp_in_browser')

    with self.command_group('containerapp replica') as g:
        g.custom_show_command('show', 'get_replica')  # TODO implement the table transformer
        g.custom_command('list', 'list_replicas')

    # log streaming (app-level and environment-level)
    with self.command_group('containerapp logs') as g:
        g.custom_show_command('show', 'stream_containerapp_logs', validator=validate_ssh)
    with self.command_group('containerapp env logs') as g:
        g.custom_show_command('show', 'stream_environment_logs')

    # managed environment lifecycle
    with self.command_group('containerapp env') as g:
        g.custom_show_command('show', 'show_managed_environment')
        g.custom_command('list', 'list_managed_environments')
        g.custom_command('create', 'create_managed_environment', supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_command('delete', 'delete_managed_environment', supports_no_wait=True, confirmation=True, exception_handler=ex_handler_factory())
        g.custom_command('update', 'update_managed_environment', supports_no_wait=True, exception_handler=ex_handler_factory())

    # container apps jobs
    with self.command_group('containerapp job') as g:
        g.custom_show_command('show', 'show_containerappsjob')
        g.custom_command('list', 'list_containerappsjob')
        g.custom_command('create', 'create_containerappsjob', supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_command('delete', 'delete_containerappsjob', supports_no_wait=True, confirmation=True, exception_handler=ex_handler_factory())
        g.custom_command('update', 'update_containerappsjob', supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_command('start', 'start_containerappsjob', supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_command('stop', 'stop_containerappsjob', supports_no_wait=True, exception_handler=ex_handler_factory())

    with self.command_group('containerapp job execution') as g:
        g.custom_show_command('list', 'listexecution_containerappsjob', table_transformer=transform_job_execution_list_output)
        g.custom_show_command('show', 'getSingleExecution_containerappsjob', table_transformer=transform_job_execution_show_output)

    with self.command_group('containerapp job secret') as g:
        g.custom_command('list', 'list_secrets_job')
        g.custom_show_command('show', 'show_secret_job')
        g.custom_command('remove', 'remove_secrets_job', confirmation=True, exception_handler=ex_handler_factory())
        g.custom_command('set', 'set_secrets_job', exception_handler=ex_handler_factory())

    with self.command_group('containerapp job identity') as g:
        g.custom_command('assign', 'assign_managed_identity_job', supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_command('remove', 'remove_managed_identity_job', confirmation=True, supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_show_command('show', 'show_managed_identity_job')

    # environment-scoped sub-resources
    with self.command_group('containerapp env dapr-component') as g:
        g.custom_command('list', 'list_dapr_components')
        g.custom_show_command('show', 'show_dapr_component')
        g.custom_command('set', 'create_or_update_dapr_component')
        g.custom_command('remove', 'remove_dapr_component')

    with self.command_group('containerapp env certificate') as g:
        g.custom_command('list', 'list_certificates')
        g.custom_command('upload', 'upload_certificate')
        g.custom_command('delete', 'delete_certificate', confirmation=True, exception_handler=ex_handler_factory())

    with self.command_group('containerapp env storage') as g:
        g.custom_show_command('show', 'show_storage')
        g.custom_command('list', 'list_storage')
        g.custom_command('set', 'create_or_update_storage', supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_command('remove', 'remove_storage', confirmation=True, exception_handler=ex_handler_factory())

    # app identity and CI/CD
    with self.command_group('containerapp identity') as g:
        g.custom_command('assign', 'assign_managed_identity', supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_command('remove', 'remove_managed_identity', supports_no_wait=True, exception_handler=ex_handler_factory())
        g.custom_show_command('show', 'show_managed_identity')

    with self.command_group('containerapp github-action') as g:
        g.custom_command('add', 'create_or_update_github_action', exception_handler=ex_handler_factory())
        g.custom_show_command('show', 'show_github_action', exception_handler=ex_handler_factory())
        g.custom_command('delete', 'delete_github_action', exception_handler=ex_handler_factory())

    # revisions
    with self.command_group('containerapp revision') as g:
        g.custom_command('activate', 'activate_revision')
        g.custom_command('deactivate', 'deactivate_revision')
        g.custom_command('list', 'list_revisions', table_transformer=transform_revision_list_output, exception_handler=ex_handler_factory())
        g.custom_command('restart', 'restart_revision')
        g.custom_show_command('show', 'show_revision', table_transformer=transform_revision_output, exception_handler=ex_handler_factory())
        g.custom_command('copy', 'copy_revision', exception_handler=ex_handler_factory())
        g.custom_command('set-mode', 'set_revision_mode', exception_handler=ex_handler_factory())

    with self.command_group('containerapp revision label') as g:
        g.custom_command('add', 'add_revision_label')
        g.custom_command('remove', 'remove_revision_label')
        g.custom_command('swap', 'swap_revision_label')

    # ingress and its sub-features
    with self.command_group('containerapp ingress') as g:
        g.custom_command('enable', 'enable_ingress', exception_handler=ex_handler_factory())
        g.custom_command('disable', 'disable_ingress', exception_handler=ex_handler_factory())
        g.custom_command('update', 'update_ingress', exception_handler=ex_handler_factory())
        g.custom_show_command('show', 'show_ingress')

    with self.command_group('containerapp ingress traffic') as g:
        g.custom_command('set', 'set_ingress_traffic', exception_handler=ex_handler_factory())
        g.custom_show_command('show', 'show_ingress_traffic')

    with self.command_group('containerapp ingress sticky-sessions') as g:
        g.custom_command('set', 'set_ingress_sticky_session', exception_handler=ex_handler_factory())
        g.custom_show_command('show', 'show_ingress_sticky_session')

    with self.command_group('containerapp ingress access-restriction') as g:
        g.custom_command('set', 'set_ip_restriction', exception_handler=ex_handler_factory())
        g.custom_command('remove', 'remove_ip_restriction')
        g.custom_show_command('list', 'show_ip_restrictions')

    with self.command_group('containerapp ingress cors') as g:
        g.custom_command('enable', 'enable_cors_policy', exception_handler=ex_handler_factory())
        g.custom_command('disable', 'disable_cors_policy', exception_handler=ex_handler_factory())
        g.custom_command('update', 'update_cors_policy', exception_handler=ex_handler_factory())
        g.custom_show_command('show', 'show_cors_policy')

    # registries, secrets, dapr
    with self.command_group('containerapp registry') as g:
        g.custom_command('set', 'set_registry', exception_handler=ex_handler_factory())
        g.custom_show_command('show', 'show_registry')
        g.custom_command('list', 'list_registry')
        g.custom_command('remove', 'remove_registry', exception_handler=ex_handler_factory())

    with self.command_group('containerapp secret') as g:
        g.custom_command('list', 'list_secrets')
        g.custom_show_command('show', 'show_secret')
        g.custom_command('remove', 'remove_secrets', exception_handler=ex_handler_factory())
        g.custom_command('set', 'set_secrets', exception_handler=ex_handler_factory())

    with self.command_group('containerapp dapr') as g:
        g.custom_command('enable', 'enable_dapr', exception_handler=ex_handler_factory())
        g.custom_command('disable', 'disable_dapr', exception_handler=ex_handler_factory())

    # authentication providers
    with self.command_group('containerapp auth') as g:
        g.custom_show_command('show', 'show_auth_config')
        g.custom_command('update', 'update_auth_config', exception_handler=ex_handler_factory())

    with self.command_group('containerapp auth microsoft') as g:
        g.custom_show_command('show', 'get_aad_settings')
        g.custom_command('update', 'update_aad_settings', exception_handler=ex_handler_factory())

    with self.command_group('containerapp auth facebook') as g:
        g.custom_show_command('show', 'get_facebook_settings')
        g.custom_command('update', 'update_facebook_settings', exception_handler=ex_handler_factory())

    with self.command_group('containerapp auth github') as g:
        g.custom_show_command('show', 'get_github_settings')
        g.custom_command('update', 'update_github_settings', exception_handler=ex_handler_factory())

    with self.command_group('containerapp auth google') as g:
        g.custom_show_command('show', 'get_google_settings')
        g.custom_command('update', 'update_google_settings', exception_handler=ex_handler_factory())

    with self.command_group('containerapp auth twitter') as g:
        g.custom_show_command('show', 'get_twitter_settings')
        g.custom_command('update', 'update_twitter_settings', exception_handler=ex_handler_factory())

    with self.command_group('containerapp auth apple') as g:
        g.custom_show_command('show', 'get_apple_settings')
        g.custom_command('update', 'update_apple_settings', exception_handler=ex_handler_factory())

    with self.command_group('containerapp auth openid-connect') as g:
        g.custom_show_command('show', 'get_openid_connect_provider_settings')
        g.custom_command('add', 'add_openid_connect_provider_settings', exception_handler=ex_handler_factory())
        g.custom_command('update', 'update_openid_connect_provider_settings', exception_handler=ex_handler_factory())
        g.custom_command('remove', 'remove_openid_connect_provider_settings', confirmation=True)

    # TLS and custom hostnames
    with self.command_group('containerapp ssl') as g:
        g.custom_command('upload', 'upload_ssl', exception_handler=ex_handler_factory())

    with self.command_group('containerapp hostname') as g:
        g.custom_command('add', 'add_hostname', exception_handler=ex_handler_factory())
        g.custom_command('bind', 'bind_hostname', exception_handler=ex_handler_factory())
        g.custom_command('list', 'list_hostname')
        g.custom_command('delete', 'delete_hostname', confirmation=True, exception_handler=ex_handler_factory())

    with self.command_group('containerapp compose') as g:
        g.custom_command('create', 'create_containerapps_from_compose')

    with self.command_group('containerapp env workload-profile') as g:
        g.custom_command('list-supported', 'list_supported_workload_profiles')
        g.custom_command('list', 'list_workload_profiles')
        g.custom_show_command('show', 'show_workload_profile')
        g.custom_command('add', 'add_workload_profile')
        g.custom_command('update', 'update_workload_profile')
        g.custom_command('delete', 'delete_workload_profile')
|
|
@ -0,0 +1,114 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, broad-exception-caught, bare-except, too-many-boolean-expressions, useless-parent-delegation, expression-not-assigned
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from azure.cli.core.commands import AzCliCommand
|
||||
|
||||
from ._client_factory import handle_raw_exception
|
||||
from .base_resource import BaseResource
|
||||
|
||||
|
||||
class ContainerAppAuthDecorator(BaseResource):
    """Decorator backing `az containerapp auth` commands.

    Fetches the app's "current" auth config, merges CLI arguments into it
    (construct_payload) and writes the result back (create_or_update).
    """

    def __init__(self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str):
        super().__init__(cmd, client, raw_parameters, models)
        # Working copy of the auth config "properties"; filled by construct_payload().
        self.existing_auth = {}

    def show(self):
        """Return the current auth settings, or {} when none can be fetched."""
        try:
            return self.client.get(
                cmd=self.cmd,
                resource_group_name=self.get_argument_resource_group_name(),
                container_app_name=self.get_argument_name(),
                auth_config_name="current")["properties"]
        except:  # deliberate best-effort: any failure is treated as "no auth config"
            return {}

    def construct_payload(self):
        """Merge the CLI arguments into self.existing_auth."""
        from ._utils import set_field_in_auth_settings, update_http_settings_in_auth_settings

        try:
            self.existing_auth = self.client.get(
                cmd=self.cmd,
                resource_group_name=self.get_argument_resource_group_name(),
                container_app_name=self.get_argument_name(),
                auth_config_name="current")["properties"]
        except:  # no existing config: start from an enabled, empty skeleton
            self.existing_auth = {"platform": {"enabled": True}, "globalValidation": {}, "login": {}}

        self.existing_auth = set_field_in_auth_settings(self.existing_auth, self.get_argument_set_string())

        # Platform-level flags map 1:1 onto existing_auth["platform"].
        for field, value in (
                ("enabled", self.get_argument_enabled()),
                ("runtimeVersion", self.get_argument_runtime_version()),
                ("configFilePath", self.get_argument_config_file_path())):
            if value is not None:
                self.existing_auth.setdefault("platform", {})[field] = value

        # Global-validation flags map onto existing_auth["globalValidation"].
        excluded_paths = self.get_argument_excluded_paths()
        for field, value in (
                ("unauthenticatedClientAction", self.get_argument_unauthenticated_client_action()),
                ("redirectToProvider", self.get_argument_redirect_provider()),
                ("excludedPaths", excluded_paths.split(",") if excluded_paths is not None else None)):
            if value is not None:
                self.existing_auth.setdefault("globalValidation", {})[field] = value

        self.existing_auth = update_http_settings_in_auth_settings(
            self.existing_auth,
            self.get_argument_require_https(),
            self.get_argument_proxy_convention(),
            self.get_argument_proxy_custom_host_header(),
            self.get_argument_proxy_custom_proto_header())

    def create_or_update(self):
        """Persist self.existing_auth as the app's "current" auth config."""
        try:
            return self.client.create_or_update(
                cmd=self.cmd,
                resource_group_name=self.get_argument_resource_group_name(),
                container_app_name=self.get_argument_name(),
                auth_config_name="current",
                auth_config_envelope=self.existing_auth)
        except Exception as e:
            handle_raw_exception(e)

    # --- raw CLI argument accessors -----------------------------------------

    def get_argument_set_string(self):
        return self.get_param("set_string")

    def get_argument_enabled(self):
        return self.get_param("enabled")

    def get_argument_runtime_version(self):
        return self.get_param("runtime_version")

    def get_argument_config_file_path(self):
        return self.get_param("config_file_path")

    def get_argument_unauthenticated_client_action(self):
        return self.get_param("unauthenticated_client_action")

    def get_argument_redirect_provider(self):
        return self.get_param("redirect_provider")

    def get_argument_require_https(self):
        return self.get_param("require_https")

    def get_argument_proxy_convention(self):
        return self.get_param("proxy_convention")

    def get_argument_proxy_custom_host_header(self):
        return self.get_param("proxy_custom_host_header")

    def get_argument_proxy_custom_proto_header(self):
        return self.get_param("proxy_custom_proto_header")

    def get_argument_excluded_paths(self):
        return self.get_param("excluded_paths")
|
|
@ -0,0 +1,648 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument, expression-not-assigned, too-many-locals, logging-fstring-interpolation, broad-except, pointless-statement, bare-except, too-many-public-methods, logging-format-interpolation, too-many-boolean-expressions, too-many-branches, useless-parent-delegation
|
||||
from typing import Dict, Any
|
||||
|
||||
from azure.cli.core.commands import AzCliCommand
|
||||
|
||||
import time
|
||||
|
||||
from azure.cli.core.azclierror import (
|
||||
RequiredArgumentMissingError,
|
||||
ValidationError)
|
||||
from azure.cli.core.commands.client_factory import get_subscription_id
|
||||
|
||||
from knack.log import get_logger
|
||||
from knack.util import CLIError
|
||||
|
||||
from msrestazure.tools import parse_resource_id, is_valid_resource_id
|
||||
from msrest.exceptions import DeserializationError
|
||||
|
||||
from .base_resource import BaseResource
|
||||
from ._clients import ManagedEnvironmentClient
|
||||
from ._client_factory import handle_raw_exception, handle_non_404_status_code_exception
|
||||
|
||||
from ._models import (
|
||||
Ingress as IngressModel,
|
||||
Configuration as ConfigurationModel,
|
||||
Template as TemplateModel,
|
||||
RegistryCredentials as RegistryCredentialsModel,
|
||||
ContainerApp as ContainerAppModel,
|
||||
Dapr as DaprModel,
|
||||
ContainerResources as ContainerResourcesModel,
|
||||
Scale as ScaleModel,
|
||||
Container as ContainerModel,
|
||||
ManagedServiceIdentity as ManagedServiceIdentityModel,
|
||||
ScaleRule as ScaleRuleModel,
|
||||
Volume as VolumeModel,
|
||||
VolumeMount as VolumeMountModel)
|
||||
|
||||
from ._decorator_utils import (create_deserializer,
|
||||
process_loaded_yaml,
|
||||
load_yaml_file)
|
||||
from ._utils import (_ensure_location_allowed,
|
||||
parse_secret_flags, store_as_secret_and_return_secret_ref, parse_env_var_flags,
|
||||
_convert_object_from_snake_to_camel_case,
|
||||
_object_to_dict, _remove_additional_attributes,
|
||||
_remove_readonly_attributes,
|
||||
_infer_acr_credentials,
|
||||
_ensure_identity_resource_id,
|
||||
validate_container_app_name,
|
||||
set_managed_identity,
|
||||
create_acrpull_role_assignment, is_registry_msi_system,
|
||||
safe_set, parse_metadata_flags, parse_auth_flags,
|
||||
get_default_workload_profile_name_from_env,
|
||||
ensure_workload_profile_supported, _generate_secret_volume_name,
|
||||
AppType,
|
||||
safe_get)
|
||||
from ._validators import validate_create, validate_revision_suffix
|
||||
|
||||
from ._constants import (CONTAINER_APPS_RP,
|
||||
HELLO_WORLD_IMAGE)
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class BaseContainerAppDecorator(BaseResource):
    """Shared base for `az containerapp` command decorators.

    Exposes the raw CLI parameter dict (self.raw_param) through named
    accessors and implements the list/show behaviour common to the
    container app commands.
    """

    def __init__(self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str):
        super().__init__(cmd, client, raw_parameters, models)

    def list(self):
        """List container apps, optionally filtered by --environment.

        When the environment id carries a resource group, apps are matched
        on the full environment id; otherwise on the environment name only.
        """
        containerapps = super().list()
        managed_env = self.get_argument_managed_env()
        if managed_env:
            # Parse once instead of re-parsing the same id three times.
            parsed_env = parse_resource_id(managed_env)
            env_name = parsed_env["name"].lower()
            if "resource_group" in parsed_env:
                # NOTE(review): result unused — presumably called so a missing
                # environment raises before filtering; confirm against client.
                self.get_environment_client().show(self.cmd, parsed_env["resource_group"],
                                                   parsed_env["name"])
                containerapps = [c for c in containerapps if
                                 c["properties"]["environmentId"].lower() == managed_env.lower()]
            else:
                containerapps = [c for c in containerapps if
                                 parse_resource_id(c["properties"]["environmentId"])["name"].lower() == env_name]

        return containerapps

    def show(self):
        """Show a container app; resolves secret values when show_secrets is set."""
        try:
            r = super().show()
            if self.get_param("show_secrets"):
                self.set_up_get_existing_secrets(r)
            return r
        except CLIError as e:
            handle_raw_exception(e)

    def get_environment_client(self):
        """Client used to resolve managed environments."""
        return ManagedEnvironmentClient

    def set_up_get_existing_secrets(self, containerapp_def):
        """Fill containerapp_def's configuration.secrets with real values.

        NOTE(review): presumably the plain GET omits secret values, so
        list_secrets() is used to fetch them — confirm against the API.
        """
        if "secrets" not in containerapp_def["properties"]["configuration"]:
            containerapp_def["properties"]["configuration"]["secrets"] = []
        else:
            secrets = None
            try:
                secrets = self.client.list_secrets(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name())
            except Exception as e:  # pylint: disable=broad-except
                handle_raw_exception(e)

            # Write once via safe_set; the previous direct assignment to
            # containerapp_def[...]["secrets"] duplicated this exact write.
            safe_set(containerapp_def, "properties", "configuration", "secrets", value=secrets["value"])

    def get_param(self, key) -> Any:
        """Read a raw CLI parameter by name (None when not supplied)."""
        return self.raw_param.get(key)

    def set_param(self, key, value):
        """Override a raw CLI parameter by name."""
        self.raw_param[key] = value

    # --- raw CLI argument accessors (thin wrappers over get_param/set_param) ---

    def get_argument_name(self):
        return self.get_param("name")

    def get_argument_resource_group_name(self):
        return self.get_param("resource_group_name")

    def get_argument_no_wait(self):
        return self.get_param("no_wait")

    def get_argument_yaml(self):
        return self.get_param("yaml")

    def get_argument_image(self):
        return self.get_param("image")

    def set_argument_image(self, image):
        self.set_param("image", image)

    def get_argument_container_name(self):
        return self.get_param("container_name")

    def get_argument_managed_env(self):
        return self.get_param("managed_env")

    def set_argument_managed_env(self, managed_env):
        self.set_param("managed_env", managed_env)

    def get_argument_min_replicas(self):
        return self.get_param("min_replicas")

    def get_argument_max_replicas(self):
        return self.get_param("max_replicas")

    def get_argument_scale_rule_name(self):
        return self.get_param("scale_rule_name")

    def get_argument_scale_rule_type(self):
        return self.get_param("scale_rule_type")

    def set_argument_scale_rule_type(self, scale_rule_type):
        self.set_param("scale_rule_type", scale_rule_type)

    def get_argument_scale_rule_http_concurrency(self):
        return self.get_param("scale_rule_http_concurrency")

    def get_argument_scale_rule_metadata(self):
        return self.get_param("scale_rule_metadata")

    def get_argument_scale_rule_auth(self):
        return self.get_param("scale_rule_auth")

    def get_argument_target_port(self):
        return self.get_param("target_port")

    def get_argument_exposed_port(self):
        return self.get_param("exposed_port")

    def get_argument_transport(self):
        return self.get_param("transport")

    def get_argument_ingress(self):
        return self.get_param("ingress")

    def get_argument_allow_insecure(self):
        return self.get_param("allow_insecure")

    def get_argument_revisions_mode(self):
        return self.get_param("revisions_mode")

    def get_argument_secrets(self):
        return self.get_param("secrets")

    def get_argument_env_vars(self):
        return self.get_param("env_vars")

    def get_argument_cpu(self):
        return self.get_param("cpu")

    def get_argument_memory(self):
        return self.get_param("memory")

    def get_argument_registry_server(self):
        return self.get_param("registry_server")

    def get_argument_registry_user(self):
        return self.get_param("registry_user")

    def set_argument_registry_user(self, registry_user):
        self.set_param("registry_user", registry_user)

    def get_argument_registry_pass(self):
        return self.get_param("registry_pass")

    def set_argument_registry_pass(self, registry_pass):
        self.set_param("registry_pass", registry_pass)

    def get_argument_dapr_enabled(self):
        return self.get_param("dapr_enabled")

    def get_argument_dapr_app_port(self):
        return self.get_param("dapr_app_port")

    def get_argument_dapr_app_id(self):
        return self.get_param("dapr_app_id")

    def get_argument_dapr_app_protocol(self):
        return self.get_param("dapr_app_protocol")

    def get_argument_dapr_http_read_buffer_size(self):
        return self.get_param("dapr_http_read_buffer_size")

    def get_argument_dapr_http_max_request_size(self):
        return self.get_param("dapr_http_max_request_size")

    def get_argument_dapr_log_level(self):
        return self.get_param("dapr_log_level")

    def get_argument_dapr_enable_api_logging(self):
        return self.get_param("dapr_enable_api_logging")

    def get_argument_revision_suffix(self):
        return self.get_param("revision_suffix")

    def get_argument_startup_command(self):
        return self.get_param("startup_command")

    def get_argument_args(self):
        return self.get_param("args")

    def get_argument_tags(self):
        return self.get_param("tags")

    def get_argument_system_assigned(self):
        return self.get_param("system_assigned")

    def get_argument_disable_warnings(self):
        return self.get_param("disable_warnings")

    def get_argument_user_assigned(self):
        return self.get_param("user_assigned")

    def get_argument_registry_identity(self):
        return self.get_param("registry_identity")

    def get_argument_workload_profile_name(self):
        return self.get_param("workload_profile_name")

    def set_argument_workload_profile_name(self, workload_profile_name):
        self.set_param("workload_profile_name", workload_profile_name)

    def get_argument_secret_volume_mount(self):
        return self.get_param("secret_volume_mount")

    def get_argument_termination_grace_period(self):
        return self.get_param("termination_grace_period")
|
||||
|
||||
|
||||
class ContainerAppCreateDecorator(BaseContainerAppDecorator):
|
||||
def __init__(
|
||||
self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str
|
||||
):
|
||||
super().__init__(cmd, client, raw_parameters, models)
|
||||
self.containerapp_def = ContainerAppModel
|
||||
|
||||
def validate_arguments(self):
|
||||
validate_container_app_name(self.get_argument_name(), AppType.ContainerApp.name)
|
||||
validate_create(self.get_argument_registry_identity(), self.get_argument_registry_pass(), self.get_argument_registry_user(), self.get_argument_registry_server(), self.get_argument_no_wait())
|
||||
validate_revision_suffix(self.get_argument_revision_suffix())
|
||||
|
||||
    def construct_payload(self):
        """Build self.containerapp_def from the CLI arguments.

        Returns early (delegating to set_up_create_containerapp_yaml) when
        --yaml was supplied. Raises RequiredArgumentMissingError when no
        environment is given and ValidationError when it does not exist.

        NOTE(review): IngressModel, ConfigurationModel, DaprModel, etc. are
        imported module-level templates; the local *_def names alias them
        (no copy), so the item assignments below mutate the shared objects.
        """
        if self.get_argument_registry_identity() and not is_registry_msi_system(self.get_argument_registry_identity()):
            logger.info("Creating an acrpull role assignment for the registry identity")
            create_acrpull_role_assignment(self.cmd, self.get_argument_registry_server(), self.get_argument_registry_identity(), skip_error=True)

        # --yaml takes over completely; other flags are ignored by that path.
        if self.get_argument_yaml():
            return self.set_up_create_containerapp_yaml(name=self.get_argument_name(), file_name=self.get_argument_yaml())

        if not self.get_argument_image():
            self.set_argument_image(HELLO_WORLD_IMAGE)

        if self.get_argument_managed_env() is None:
            raise RequiredArgumentMissingError('Usage error: --environment is required if not using --yaml')

        # Validate managed environment
        parsed_managed_env = parse_resource_id(self.get_argument_managed_env())
        managed_env_name = parsed_managed_env['name']
        managed_env_rg = parsed_managed_env['resource_group']
        managed_env_info = None

        try:
            managed_env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=managed_env_rg, name=managed_env_name)
        except Exception as e:
            handle_non_404_status_code_exception(e)

        if not managed_env_info:
            raise ValidationError("The environment '{}' does not exist. Specify a valid environment".format(self.get_argument_managed_env()))

        # Poll (5s interval) until the environment leaves InProgress/Updating,
        # unless --no-wait was requested.
        while not self.get_argument_no_wait() and safe_get(managed_env_info, "properties", "provisioningState", default="").lower() in ["inprogress", "updating"]:
            logger.info("Waiting for environment provisioning to finish before creating container app")
            time.sleep(5)
            managed_env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=managed_env_rg, name=managed_env_name)

        # The app inherits the environment's location.
        location = managed_env_info["location"]
        _ensure_location_allowed(self.cmd, location, CONTAINER_APPS_RP, "containerApps")

        if not self.get_argument_workload_profile_name() and "workloadProfiles" in managed_env_info:
            workload_profile_name = get_default_workload_profile_name_from_env(self.cmd, managed_env_info, managed_env_rg)
            self.set_argument_workload_profile_name(workload_profile_name)

        # --ingress internal/external -> external flag (None when unrecognized).
        external_ingress = None
        if self.get_argument_ingress() is not None:
            if self.get_argument_ingress().lower() == "internal":
                external_ingress = False
            elif self.get_argument_ingress().lower() == "external":
                external_ingress = True

        # Ingress requires both --ingress and --target-port.
        ingress_def = None
        if self.get_argument_target_port() is not None and self.get_argument_ingress() is not None:
            ingress_def = IngressModel
            ingress_def["external"] = external_ingress
            ingress_def["targetPort"] = self.get_argument_target_port()
            ingress_def["transport"] = self.get_argument_transport()
            # exposedPort only applies to tcp transport.
            ingress_def["exposedPort"] = self.get_argument_exposed_port() if self.get_argument_transport() == "tcp" else None
            ingress_def["allowInsecure"] = self.get_argument_allow_insecure()

        secrets_def = None
        if self.get_argument_secrets() is not None:
            secrets_def = parse_secret_flags(self.get_argument_secrets())

        registries_def = None
        if self.get_argument_registry_server() is not None and not is_registry_msi_system(self.get_argument_registry_identity()):
            registries_def = RegistryCredentialsModel
            registries_def["server"] = self.get_argument_registry_server()

            # Infer credentials if not supplied and it's azurecr
            if (self.get_argument_registry_user() is None or self.get_argument_registry_pass() is None) and self.get_argument_registry_identity() is None:
                registry_user, registry_pass = _infer_acr_credentials(self.cmd, self.get_argument_registry_server(), self.get_argument_disable_warnings())
                self.set_argument_registry_user(registry_user)
                self.set_argument_registry_pass(registry_pass)

            if not self.get_argument_registry_identity():
                registries_def["username"] = self.get_argument_registry_user()

                # Registry password is stored as an app secret and referenced.
                if secrets_def is None:
                    secrets_def = []
                registries_def["passwordSecretRef"] = store_as_secret_and_return_secret_ref(secrets_def, self.get_argument_registry_user(),
                                                                                           self.get_argument_registry_server(),
                                                                                           self.get_argument_registry_pass(),
                                                                                           disable_warnings=self.get_argument_disable_warnings())
            else:
                registries_def["identity"] = self.get_argument_registry_identity()

        dapr_def = None
        if self.get_argument_dapr_enabled():
            dapr_def = DaprModel
            dapr_def["enabled"] = True
            dapr_def["appId"] = self.get_argument_dapr_app_id()
            dapr_def["appPort"] = self.get_argument_dapr_app_port()
            dapr_def["appProtocol"] = self.get_argument_dapr_app_protocol()
            dapr_def["httpReadBufferSize"] = self.get_argument_dapr_http_read_buffer_size()
            dapr_def["httpMaxRequestSize"] = self.get_argument_dapr_http_max_request_size()
            dapr_def["logLevel"] = self.get_argument_dapr_log_level()
            dapr_def["enableApiLogging"] = self.get_argument_dapr_enable_api_logging()

        config_def = ConfigurationModel
        config_def["secrets"] = secrets_def
        config_def["activeRevisionsMode"] = self.get_argument_revisions_mode()
        config_def["ingress"] = ingress_def
        config_def["registries"] = [registries_def] if registries_def is not None else None
        config_def["dapr"] = dapr_def

        # Identity actions
        identity_def = ManagedServiceIdentityModel
        identity_def["type"] = "None"

        assign_system_identity = self.get_argument_system_assigned()
        if self.get_argument_user_assigned():
            assign_user_identities = [x.lower() for x in self.get_argument_user_assigned()]
        else:
            assign_user_identities = []

        if assign_system_identity and assign_user_identities:
            identity_def["type"] = "SystemAssigned, UserAssigned"
        elif assign_system_identity:
            identity_def["type"] = "SystemAssigned"
        elif assign_user_identities:
            identity_def["type"] = "UserAssigned"

        if assign_user_identities:
            identity_def["userAssignedIdentities"] = {}
            subscription_id = get_subscription_id(self.cmd.cli_ctx)

            for r in assign_user_identities:
                # Expand bare identity names to full resource ids.
                r = _ensure_identity_resource_id(subscription_id, self.get_argument_resource_group_name(), r)
                identity_def["userAssignedIdentities"][r] = {}  # pylint: disable=unsupported-assignment-operation

        scale_def = self.set_up_scale_rule()

        resources_def = None
        if self.get_argument_cpu() is not None or self.get_argument_memory() is not None:
            resources_def = ContainerResourcesModel
            resources_def["cpu"] = self.get_argument_cpu()
            resources_def["memory"] = self.get_argument_memory()

        container_def = ContainerModel
        container_def["name"] = self.get_argument_container_name() if self.get_argument_container_name() else self.get_argument_name()
        # With a system-assigned registry identity the real image is deployed
        # later (see construct_for_post_process); start with the placeholder.
        container_def["image"] = self.get_argument_image() if not is_registry_msi_system(self.get_argument_registry_identity()) else HELLO_WORLD_IMAGE
        if self.get_argument_env_vars() is not None:
            container_def["env"] = parse_env_var_flags(self.get_argument_env_vars())
        if self.get_argument_startup_command() is not None:
            container_def["command"] = self.get_argument_startup_command()
        if self.get_argument_args() is not None:
            container_def["args"] = self.get_argument_args()
        if resources_def is not None:
            container_def["resources"] = resources_def

        template_def = TemplateModel

        template_def["containers"] = [container_def]
        template_def["scale"] = scale_def

        if self.get_argument_secret_volume_mount() is not None:
            volume_def = VolumeModel
            volume_mount_def = VolumeMountModel
            # generate a volume name
            volume_def["name"] = _generate_secret_volume_name()
            volume_def["storageType"] = "Secret"

            # mount the volume to the container
            volume_mount_def["volumeName"] = volume_def["name"]
            volume_mount_def["mountPath"] = self.get_argument_secret_volume_mount()
            container_def["volumeMounts"] = [volume_mount_def]
            template_def["volumes"] = [volume_def]

        if self.get_argument_revision_suffix() is not None and not is_registry_msi_system(self.get_argument_registry_identity()):
            template_def["revisionSuffix"] = self.get_argument_revision_suffix()

        if self.get_argument_termination_grace_period() is not None:
            template_def["terminationGracePeriodSeconds"] = self.get_argument_termination_grace_period()

        self.containerapp_def["location"] = location
        self.containerapp_def["identity"] = identity_def
        self.containerapp_def["properties"]["environmentId"] = self.get_argument_managed_env()
        self.containerapp_def["properties"]["configuration"] = config_def
        self.containerapp_def["properties"]["template"] = template_def
        self.containerapp_def["tags"] = self.get_argument_tags()

        if self.get_argument_workload_profile_name():
            self.containerapp_def["properties"]["workloadProfileName"] = self.get_argument_workload_profile_name()
            ensure_workload_profile_supported(self.cmd, managed_env_name, managed_env_rg, self.get_argument_workload_profile_name(),
                                              managed_env_info)

        if self.get_argument_registry_identity():
            if is_registry_msi_system(self.get_argument_registry_identity()):
                set_managed_identity(self.cmd, self.get_argument_resource_group_name(), self.containerapp_def, system_assigned=True)
            else:
                set_managed_identity(self.cmd, self.get_argument_resource_group_name(), self.containerapp_def, user_assigned=[self.get_argument_registry_identity()])
|
||||
|
||||
def create(self):
|
||||
try:
|
||||
r = self.client.create_or_update(
|
||||
cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name(), container_app_envelope=self.containerapp_def,
|
||||
no_wait=self.get_argument_no_wait())
|
||||
|
||||
return r
|
||||
except Exception as e:
|
||||
handle_raw_exception(e)
|
||||
|
||||
    def construct_for_post_process(self, r):
        """Prepare self.containerapp_def for the second create pass used with a
        system-assigned registry identity (see post_process).

        The first create ran with the HELLO_WORLD_IMAGE placeholder (see
        construct_payload); once the system identity exists and has acrpull
        rights, the real image, revision suffix and registry entry are put
        back into the payload.
        """
        if is_registry_msi_system(self.get_argument_registry_identity()):
            # Poll (10s interval) until the first create finishes so the
            # system identity's principalId is available.
            while r["properties"]["provisioningState"] == "InProgress":
                r = self.client.show(self.cmd, self.get_argument_resource_group_name(), self.get_argument_name())
                time.sleep(10)
            logger.info("Creating an acrpull role assignment for the system identity")
            system_sp = r["identity"]["principalId"]
            create_acrpull_role_assignment(self.cmd, self.get_argument_registry_server(), registry_identity=None, service_principal=system_sp)
            # Swap the placeholder image for the user-requested one.
            containers_def = safe_get(self.containerapp_def, "properties", "template", "containers")
            containers_def[0]["image"] = self.get_argument_image()

            safe_set(self.containerapp_def, "properties", "template", "revisionSuffix", value=self.get_argument_revision_suffix())

            # Registry now authenticates via the system identity, not a password.
            registries_def = RegistryCredentialsModel
            registries_def["server"] = self.get_argument_registry_server()
            registries_def["identity"] = self.get_argument_registry_identity()
            safe_set(self.containerapp_def, "properties", "configuration", "registries", value=[registries_def])
|
||||
|
||||
def post_process(self, r):
|
||||
if is_registry_msi_system(self.get_argument_registry_identity()):
|
||||
r = self.create()
|
||||
|
||||
if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" and not self.get_argument_no_wait():
|
||||
not self.get_argument_disable_warnings() and logger.warning('Containerapp creation in progress. Please monitor the creation using `az containerapp show -n {} -g {}`'.format(self.get_argument_name(), self.get_argument_resource_group_name()))
|
||||
|
||||
if "configuration" in r["properties"] and "ingress" in r["properties"]["configuration"] and r["properties"]["configuration"]["ingress"] and "fqdn" in r["properties"]["configuration"]["ingress"]:
|
||||
not self.get_argument_disable_warnings() and logger.warning("\nContainer app created. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"]))
|
||||
else:
|
||||
target_port = self.get_argument_target_port() or "<port>"
|
||||
not self.get_argument_disable_warnings() and logger.warning("\nContainer app created. To access it over HTTPS, enable ingress: "
|
||||
"az containerapp ingress enable -n %s -g %s --type external --target-port %s"
|
||||
" --transport auto\n", self.get_argument_name(), self.get_argument_resource_group_name(), target_port)
|
||||
|
||||
return r
|
||||
|
||||
def set_up_create_containerapp_yaml(self, name, file_name):
    """Build self.containerapp_def from a --yaml file.

    Loads and normalizes the YAML, validates name/type, round-trips the payload
    through the SDK-style deserializer to validate its shape, then resolves and
    validates the managed environment and location. The statement order below is
    load-bearing (tags removal, camel-casing, attribute stripping); do not reorder.
    """
    # Any imperative flag passed together with --yaml is ignored; warn so the
    # user is not surprised that the YAML wins.
    if self.get_argument_image() or self.get_argument_min_replicas() or self.get_argument_max_replicas() or self.get_argument_target_port() or self.get_argument_ingress() or \
            self.get_argument_revisions_mode() or self.get_argument_secrets() or self.get_argument_env_vars() or self.get_argument_cpu() or self.get_argument_memory() or self.get_argument_registry_server() or \
            self.get_argument_registry_user() or self.get_argument_registry_pass() or self.get_argument_dapr_enabled() or self.get_argument_dapr_app_port() or self.get_argument_dapr_app_id() or \
            self.get_argument_startup_command() or self.get_argument_args() or self.get_argument_tags():
        not self.get_argument_disable_warnings() and logger.warning(
            'Additional flags were passed along with --yaml. These flags will be ignored, and the configuration defined in the yaml will be used instead')

    yaml_containerapp = process_loaded_yaml(load_yaml_file(file_name))

    # The name in the YAML takes precedence over --name; fill it in when absent.
    if not yaml_containerapp.get('name'):
        yaml_containerapp['name'] = name
    elif yaml_containerapp.get('name').lower() != name.lower():
        logger.warning(
            'The app name provided in the --yaml file "{}" does not match the one provided in the --name flag "{}". The one provided in the --yaml file will be used.'.format(
                yaml_containerapp.get('name'), name))
    name = yaml_containerapp.get('name')

    if not yaml_containerapp.get('type'):
        yaml_containerapp['type'] = 'Microsoft.App/containerApps'
    elif yaml_containerapp.get('type').lower() != "microsoft.app/containerapps":
        raise ValidationError('Containerapp type must be \"Microsoft.App/ContainerApps\"')

    # Deserialize the yaml into a ContainerApp object. Need this since we're not using SDK
    try:
        deserializer = create_deserializer(self.models)

        self.containerapp_def = deserializer('ContainerApp', yaml_containerapp)
    except DeserializationError as ex:
        raise ValidationError(
            'Invalid YAML provided. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.') from ex

    # Remove tags before converting from snake case to camel case, then re-add tags. We don't want to change the case of the tags. Need this since we're not using SDK
    tags = None
    if yaml_containerapp.get('tags'):
        tags = yaml_containerapp.get('tags')
        del yaml_containerapp['tags']

    self.containerapp_def = _convert_object_from_snake_to_camel_case(_object_to_dict(self.containerapp_def))
    self.containerapp_def['tags'] = tags

    # After deserializing, some properties may need to be moved under the "properties" attribute. Need this since we're not using SDK
    self.containerapp_def = process_loaded_yaml(self.containerapp_def)

    # Remove "additionalProperties" and read-only attributes that are introduced in the deserialization. Need this since we're not using SDK
    _remove_additional_attributes(self.containerapp_def)
    _remove_readonly_attributes(self.containerapp_def)

    # Remove extra workloadProfileName introduced in deserialization
    if "workloadProfileName" in self.containerapp_def:
        del self.containerapp_def["workloadProfileName"]

    # Validate managed environment
    env_id = self.containerapp_def["properties"]['environmentId']
    env_info = None
    if self.get_argument_managed_env():
        # YAML environmentId wins over --environment; only fall back to the flag
        # when the YAML did not specify one.
        if not self.get_argument_disable_warnings() and env_id is not None and env_id != self.get_argument_managed_env():
            logger.warning('The environmentId was passed along with --yaml. The value entered with --environment will be ignored, and the configuration defined in the yaml will be used instead')
        if env_id is None:
            env_id = self.get_argument_managed_env()
            safe_set(self.containerapp_def, "properties", "environmentId", value=env_id)

    if not self.containerapp_def["properties"].get('environmentId'):
        raise RequiredArgumentMissingError(
            'environmentId is required. This can be retrieved using the `az containerapp env show -g MyResourceGroup -n MyContainerappEnvironment --query id` command. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.')

    if is_valid_resource_id(env_id):
        parsed_managed_env = parse_resource_id(env_id)
        env_name = parsed_managed_env['name']
        env_rg = parsed_managed_env['resource_group']
    else:
        raise ValidationError('Invalid environmentId specified. Environment not found')

    # Confirm the environment actually exists; 404 falls through to env_info=None.
    try:
        env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=env_rg, name=env_name)
    except Exception as e:
        handle_non_404_status_code_exception(e)

    if not env_info:
        raise ValidationError("The environment '{}' in resource group '{}' was not found".format(env_name, env_rg))

    # Validate location
    if not self.containerapp_def.get('location'):
        self.containerapp_def['location'] = env_info['location']
||||
# pylint: disable=unsupported-assignment-operation
def set_up_scale_rule(self):
    """Build and return the scale section of the template, or None when no
    scale arguments were given.

    Returns a dict with optional "minReplicas"/"maxReplicas" and, when
    --scale-rule-name is set, a single-entry "rules" list. The rule type
    defaults to "http"; http/tcp rules carry --scale-rule-http-concurrency as
    the "concurrentRequests" metadata entry.

    NOTE(review): ScaleModel and ScaleRuleModel appear to be module-level dict
    templates that are mutated in place here (not copied) — presumably shared
    across calls; confirm against _models before restructuring.
    """
    scale_def = None
    if self.get_argument_min_replicas() is not None or self.get_argument_max_replicas() is not None:
        scale_def = ScaleModel
        scale_def["minReplicas"] = self.get_argument_min_replicas()
        scale_def["maxReplicas"] = self.get_argument_max_replicas()

    scale_rule_type = self.get_argument_scale_rule_type()
    scale_rule_name = self.get_argument_scale_rule_name()
    scale_rule_auth = self.get_argument_scale_rule_auth()
    scale_rule_metadata = self.get_argument_scale_rule_metadata()
    scale_rule_http_concurrency = self.get_argument_scale_rule_http_concurrency()
    if self.get_argument_scale_rule_name():
        if not scale_rule_type:
            scale_rule_type = "http"
        scale_rule_type = scale_rule_type.lower()
        scale_rule_def = ScaleRuleModel
        curr_metadata = {}
        # Concurrency only applies to the built-in http/tcp rule types; it is
        # silently ignored for custom (KEDA) rule types.
        if self.get_argument_scale_rule_http_concurrency():
            if scale_rule_type in ('http', 'tcp'):
                curr_metadata["concurrentRequests"] = str(scale_rule_http_concurrency)
        metadata_def = parse_metadata_flags(scale_rule_metadata, curr_metadata)
        auth_def = parse_auth_flags(scale_rule_auth)
        if scale_rule_type == "http":
            # http rules use the dedicated "http" payload shape ...
            scale_rule_def["name"] = scale_rule_name
            scale_rule_def["custom"] = None
            scale_rule_def["http"] = {}
            scale_rule_def["http"]["metadata"] = metadata_def
            scale_rule_def["http"]["auth"] = auth_def
        else:
            # ... everything else (tcp included) goes through the "custom" shape.
            scale_rule_def["name"] = scale_rule_name
            scale_rule_def["http"] = None
            scale_rule_def["custom"] = {}
            scale_rule_def["custom"]["type"] = scale_rule_type
            scale_rule_def["custom"]["metadata"] = metadata_def
            scale_rule_def["custom"]["auth"] = auth_def
        if not scale_def:
            scale_def = ScaleModel
        scale_def["rules"] = [scale_rule_def]

    return scale_def
|
|
@ -0,0 +1,343 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long, consider-using-f-string, logging-format-interpolation, inconsistent-return-statements, broad-except, bare-except, too-many-statements, too-many-locals, too-many-boolean-expressions, too-many-branches, too-many-nested-blocks, pointless-statement, expression-not-assigned, unbalanced-tuple-unpacking, unsupported-assignment-operation, too-many-public-methods, broad-exception-caught, expression-not-assigned, ungrouped-imports
|
||||
|
||||
from typing import Any, Dict
|
||||
from knack.log import get_logger
|
||||
|
||||
from azure.cli.command_modules.appservice.utils import _normalize_location
|
||||
from azure.cli.core.azclierror import RequiredArgumentMissingError, ValidationError
|
||||
from azure.cli.core.commands import AzCliCommand
|
||||
from knack.util import CLIError
|
||||
from msrestazure.tools import is_valid_resource_id
|
||||
|
||||
from ._constants import CONTAINER_APPS_RP
|
||||
from ._utils import (get_vnet_location,
|
||||
validate_environment_location,
|
||||
_ensure_location_allowed,
|
||||
_generate_log_analytics_if_not_provided,
|
||||
load_cert_file,
|
||||
safe_set,
|
||||
get_default_workload_profiles,
|
||||
_azure_monitor_quickstart)
|
||||
from ._client_factory import handle_raw_exception
|
||||
from .base_resource import BaseResource
|
||||
from ._models import (
|
||||
ManagedEnvironment as ManagedEnvironmentModel,
|
||||
VnetConfiguration as VnetConfigurationModel,
|
||||
AppLogsConfiguration as AppLogsConfigurationModel,
|
||||
LogAnalyticsConfiguration as LogAnalyticsConfigurationModel,
|
||||
CustomDomainConfiguration as CustomDomainConfigurationModel)
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class ContainerAppEnvDecorator(BaseResource):
    """Common argument accessors for `az containerapp env` commands.

    Each accessor is a thin, typed-by-convention view over the raw parameter
    dict held by BaseResource (via get_param/set_param).
    """

    # --- logging / monitoring -------------------------------------------------

    def get_argument_logs_destination(self):
        """Value of --logs-destination."""
        destination = self.get_param("logs_destination")
        return destination

    def get_argument_storage_account(self):
        """Value of --storage-account."""
        return self.get_param("storage_account")

    def get_argument_logs_customer_id(self):
        """Log Analytics workspace customer id (--logs-workspace-id)."""
        return self.get_param("logs_customer_id")

    def set_argument_logs_customer_id(self, logs_customer_id):
        """Overwrite the Log Analytics workspace customer id."""
        self.set_param("logs_customer_id", logs_customer_id)

    def get_argument_logs_key(self):
        """Log Analytics workspace shared key (--logs-workspace-key)."""
        return self.get_param("logs_key")

    def set_argument_logs_key(self, logs_key):
        """Overwrite the Log Analytics workspace shared key."""
        self.set_param("logs_key", logs_key)

    def get_argument_instrumentation_key(self):
        """Dapr Application Insights instrumentation key."""
        return self.get_param("instrumentation_key")

    # --- placement ------------------------------------------------------------

    def get_argument_location(self):
        """Value of --location/-l."""
        location = self.get_param("location")
        return location

    def set_argument_location(self, location):
        """Overwrite the resolved location."""
        self.set_param("location", location)

    # --- networking -----------------------------------------------------------

    def get_argument_infrastructure_subnet_resource_id(self):
        """Value of --infrastructure-subnet-resource-id/-s."""
        return self.get_param("infrastructure_subnet_resource_id")

    def get_argument_docker_bridge_cidr(self):
        """Value of --docker-bridge-cidr."""
        return self.get_param("docker_bridge_cidr")

    def get_argument_platform_reserved_cidr(self):
        """Value of --platform-reserved-cidr."""
        return self.get_param("platform_reserved_cidr")

    def get_argument_platform_reserved_dns_ip(self):
        """Value of --platform-reserved-dns-ip."""
        return self.get_param("platform_reserved_dns_ip")

    def get_argument_internal_only(self):
        """Value of --internal-only."""
        return self.get_param("internal_only")

    # --- misc flags -----------------------------------------------------------

    def get_argument_tags(self):
        """Value of --tags."""
        return self.get_param("tags")

    def get_argument_disable_warnings(self):
        """Value of --disable-warnings."""
        return self.get_param("disable_warnings")

    def get_argument_zone_redundant(self):
        """Value of --zone-redundant/-z."""
        return self.get_param("zone_redundant")

    # --- custom domain / TLS --------------------------------------------------

    def get_argument_hostname(self):
        """Custom DNS suffix (--dns-suffix)."""
        return self.get_param("hostname")

    def get_argument_certificate_file(self):
        """Path to the custom-domain certificate file."""
        return self.get_param("certificate_file")

    def get_argument_certificate_password(self):
        """Password for the custom-domain certificate file."""
        return self.get_param("certificate_password")

    def get_argument_mtls_enabled(self):
        """Value of --enable-mtls."""
        return self.get_param("mtls_enabled")

    # --- workload profiles ----------------------------------------------------

    def get_argument_workload_profile_type(self):
        """Value of --workload-profile-type."""
        return self.get_param("workload_profile_type")

    def get_argument_workload_profile_name(self):
        """Value of --workload-profile-name."""
        return self.get_param("workload_profile_name")

    def get_argument_min_nodes(self):
        """Minimum node count for a workload profile (--min-nodes)."""
        return self.get_param("min_nodes")

    def get_argument_max_nodes(self):
        """Maximum node count for a workload profile (--max-nodes)."""
        return self.get_param("max_nodes")
|
||||
|
||||
|
||||
class ContainerAppEnvCreateDecorator(ContainerAppEnvDecorator):
    """Builds, validates and submits the payload for `az containerapp env create`."""

    def __init__(self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str):
        super().__init__(cmd, client, raw_parameters, models)
        # NOTE(review): ManagedEnvironmentModel looks like a module-level dict
        # template that is mutated in place below (not copied) — confirm in _models.
        self.managed_env_def = ManagedEnvironmentModel

    def validate_arguments(self):
        """Resolve and validate the target location.

        Raises:
            RequiredArgumentMissingError: --zone-redundant without --infrastructure-subnet-resource-id.
            ValidationError: malformed subnet id, or --location disagreeing with the subnet's location.
        """
        location = self.get_argument_location()
        if self.get_argument_zone_redundant():
            # Zone redundancy requires a custom VNet subnet in the same location.
            if not self.get_argument_infrastructure_subnet_resource_id():
                raise RequiredArgumentMissingError("Cannot use --zone-redundant/-z without "
                                                   "--infrastructure-subnet-resource-id/-s")
            if not is_valid_resource_id(self.get_argument_infrastructure_subnet_resource_id()):
                raise ValidationError("--infrastructure-subnet-resource-id must be a valid resource id")
            vnet_location = get_vnet_location(self.cmd, self.get_argument_infrastructure_subnet_resource_id())
            if location:
                if _normalize_location(self.cmd, location) != vnet_location:
                    raise ValidationError(
                        f"Location '{location}' does not match the subnet's location: '{vnet_location}'. "
                        "Please change either --location/-l or --infrastructure-subnet-resource-id/-s")
            else:
                # No explicit location given: inherit the subnet's location.
                location = vnet_location

        location = validate_environment_location(self.cmd, location)
        _ensure_location_allowed(self.cmd, location, CONTAINER_APPS_RP, "managedEnvironments")
        self.set_argument_location(location)

    def create(self):
        """Send the create request; service errors are translated by handle_raw_exception."""
        try:
            return self.client.create(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(),
                                      name=self.get_argument_name(), managed_environment_envelope=self.managed_env_def, no_wait=self.get_argument_no_wait())
        except Exception as e:
            handle_raw_exception(e)

    def post_process(self, r):
        """Run the Azure Monitor quickstart, emit progress/success hints, and return the response."""
        _azure_monitor_quickstart(self.cmd, self.get_argument_name(), self.get_argument_resource_group_name(), self.get_argument_storage_account(), self.get_argument_logs_destination())

        # return ENV
        if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() != "succeeded" and not self.get_argument_no_wait():
            not self.get_argument_disable_warnings() and logger.warning('Containerapp environment creation in progress. Please monitor the creation using `az containerapp env show -n {} -g {}`'.format(self.get_argument_name(), self.get_argument_resource_group_name()))

        if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "succeeded":
            not self.get_argument_disable_warnings() and logger.warning("\nContainer Apps environment created. To deploy a container app, use: az containerapp create --help\n")

        return r

    def construct_payload(self):
        """Assemble self.managed_env_def from the CLI arguments."""
        self.set_up_app_log_configuration()

        self.managed_env_def["location"] = self.get_argument_location()
        self.managed_env_def["tags"] = self.get_argument_tags()
        self.managed_env_def["properties"]["zoneRedundant"] = self.get_argument_zone_redundant()

        self.set_up_workload_profiles()

        # Custom domains
        if self.get_argument_hostname():
            custom_domain = CustomDomainConfigurationModel
            # NOTE(review): called even when --certificate-file is absent; assumes
            # load_cert_file tolerates a None path — confirm.
            blob, _ = load_cert_file(self.get_argument_certificate_file(), self.get_argument_certificate_password())
            custom_domain["dnsSuffix"] = self.get_argument_hostname()
            custom_domain["certificatePassword"] = self.get_argument_certificate_password()
            custom_domain["certificateValue"] = blob
            self.managed_env_def["properties"]["customDomainConfiguration"] = custom_domain

        if self.get_argument_instrumentation_key() is not None:
            self.managed_env_def["properties"]["daprAIInstrumentationKey"] = self.get_argument_instrumentation_key()

        # Vnet
        self.set_up_vnet_configuration()

        if self.get_argument_mtls_enabled() is not None:
            safe_set(self.managed_env_def, "properties", "peerAuthentication", "mtls", "enabled", value=self.get_argument_mtls_enabled())

    def set_up_workload_profiles(self):
        """Seed the environment with the default workload profiles for the location."""
        self.managed_env_def["properties"]["workloadProfiles"] = get_default_workload_profiles(self.cmd, self.get_argument_location())

    def set_up_app_log_configuration(self):
        """Populate appLogsConfiguration, generating a Log Analytics workspace when needed."""
        if (self.get_argument_logs_customer_id() is None or self.get_argument_logs_key() is None) and self.get_argument_logs_destination() == "log-analytics":
            logs_customer_id, logs_key = _generate_log_analytics_if_not_provided(self.cmd, self.get_argument_logs_customer_id(), self.get_argument_logs_key(),
                                                                                 self.get_argument_location(), self.get_argument_resource_group_name())
            self.set_argument_logs_customer_id(logs_customer_id)
            self.set_argument_logs_key(logs_key)

        if self.get_argument_logs_destination() == "log-analytics":
            log_analytics_config_def = LogAnalyticsConfigurationModel
            log_analytics_config_def["customerId"] = self.get_argument_logs_customer_id()
            log_analytics_config_def["sharedKey"] = self.get_argument_logs_key()
        else:
            log_analytics_config_def = None

        app_logs_config_def = AppLogsConfigurationModel
        # "none" means logging disabled: send a null destination to the service.
        app_logs_config_def["destination"] = self.get_argument_logs_destination() if self.get_argument_logs_destination() != "none" else None
        app_logs_config_def["logAnalyticsConfiguration"] = log_analytics_config_def

        self.managed_env_def["properties"]["appLogsConfiguration"] = app_logs_config_def

    def set_up_vnet_configuration(self):
        """Populate vnetConfiguration from the VNet-related arguments.

        Fix: the original compared bound methods to None
        (`self.get_argument_infrastructure_subnet_resource_id is not None` and
        `self.get_argument_docker_bridge_cidr is not None`), which is always
        true, so those fields could be written with a None value. The accessors
        are now called, matching the sibling platform_reserved_* checks.

        Raises:
            ValidationError: --internal-only without --infrastructure-subnet-resource-id.
        """
        if self.get_argument_infrastructure_subnet_resource_id() or self.get_argument_docker_bridge_cidr() or self.get_argument_platform_reserved_cidr() or self.get_argument_platform_reserved_dns_ip():
            vnet_config_def = VnetConfigurationModel

            if self.get_argument_infrastructure_subnet_resource_id() is not None:
                vnet_config_def["infrastructureSubnetId"] = self.get_argument_infrastructure_subnet_resource_id()

            if self.get_argument_docker_bridge_cidr() is not None:
                vnet_config_def["dockerBridgeCidr"] = self.get_argument_docker_bridge_cidr()

            if self.get_argument_platform_reserved_cidr() is not None:
                vnet_config_def["platformReservedCidr"] = self.get_argument_platform_reserved_cidr()

            if self.get_argument_platform_reserved_dns_ip() is not None:
                vnet_config_def["platformReservedDnsIP"] = self.get_argument_platform_reserved_dns_ip()

            self.managed_env_def["properties"]["vnetConfiguration"] = vnet_config_def

        if self.get_argument_internal_only():
            # Internal-only environments are only meaningful inside a custom VNet.
            if not self.get_argument_infrastructure_subnet_resource_id():
                raise ValidationError(
                    'Infrastructure subnet resource ID needs to be supplied for internal only environments.')
            self.managed_env_def["properties"]["vnetConfiguration"]["internal"] = True
|
||||
|
||||
|
||||
class ContainerAppEnvUpdateDecorator(ContainerAppEnvDecorator):
    """Builds and submits the patch payload for `az containerapp env update`."""

    def __init__(self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str):
        super().__init__(cmd, client, raw_parameters, models)
        # Patch payload built incrementally by construct_payload; starts empty so
        # only explicitly changed properties are sent.
        self.managed_env_def = {}

    def validate_arguments(self):
        """Require a complete Log Analytics triple when any log setting is updated.

        Raises:
            ValidationError: workspace id/key given with a non-log-analytics
                destination, or only one half of the id/key pair supplied.
        """
        if self.get_argument_logs_destination() == "log-analytics" or self.get_argument_logs_customer_id() or self.get_argument_logs_key():
            if self.get_argument_logs_destination() != "log-analytics":
                raise ValidationError(
                    "When configuring Log Analytics workspace, --logs-destination should be \"log-analytics\"")
            if not self.get_argument_logs_customer_id() or not self.get_argument_logs_key():
                raise ValidationError(
                    "Must provide --logs-workspace-id and --logs-workspace-key if updating logs destination to type 'log-analytics'.")

    def construct_payload(self):
        """Fetch the current environment and assemble the update payload from it."""
        try:
            r = self.client.show(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name())
        except CLIError as e:
            handle_raw_exception(e)

        # General setup
        safe_set(self.managed_env_def, "location", value=r["location"])  # required for API
        if self.get_argument_tags():
            safe_set(self.managed_env_def, "tags", value=self.get_argument_tags())

        # Logs
        self.set_up_app_log_configuration()

        # Custom domains
        self.set_up_custom_domain_configuration()

        # workload Profiles
        self.set_up_workload_profiles(r)

        if self.get_argument_mtls_enabled() is not None:
            safe_set(self.managed_env_def, "properties", "peerAuthentication", "mtls", "enabled", value=self.get_argument_mtls_enabled())

    def set_up_app_log_configuration(self):
        """Patch appLogsConfiguration; "none" clears the destination, non-log-analytics
        destinations null out any existing Log Analytics configuration."""
        logs_destination = self.get_argument_logs_destination()

        if logs_destination:
            logs_destination = None if logs_destination == "none" else logs_destination
            safe_set(self.managed_env_def, "properties", "appLogsConfiguration", "destination", value=logs_destination)

        if logs_destination == "log-analytics":
            safe_set(self.managed_env_def, "properties", "appLogsConfiguration", "logAnalyticsConfiguration", "customerId",
                     value=self.get_argument_logs_customer_id())
            safe_set(self.managed_env_def, "properties", "appLogsConfiguration", "logAnalyticsConfiguration", "sharedKey",
                     value=self.get_argument_logs_key())
        elif logs_destination:
            safe_set(self.managed_env_def, "properties", "appLogsConfiguration", "logAnalyticsConfiguration", value=None)

    def set_up_custom_domain_configuration(self):
        """Patch customDomainConfiguration when --dns-suffix is supplied."""
        if self.get_argument_hostname():
            safe_set(self.managed_env_def, "properties", "customDomainConfiguration", value={})
            cert_def = self.managed_env_def["properties"]["customDomainConfiguration"]
            if self.get_argument_certificate_file():
                blob, _ = load_cert_file(self.get_argument_certificate_file(), self.get_argument_certificate_password())
                safe_set(cert_def, "certificateValue", value=blob)
            safe_set(cert_def, "dnsSuffix", value=self.get_argument_hostname())
            if self.get_argument_certificate_password():
                safe_set(cert_def, "certificatePassword", value=self.get_argument_certificate_password())

    def set_up_workload_profiles(self, r):
        """Add or update one workload profile in the environment's existing list.

        `r` is the current environment as returned by the service. Matching is
        by case-insensitive profile name; an existing profile is replaced in
        place, otherwise a new entry is appended.

        Raises:
            ValidationError: the environment has no workload-profile support.
        """
        workload_profile_name = self.get_argument_workload_profile_name()
        workload_profile_type = self.get_argument_workload_profile_type()

        if workload_profile_name:
            if "workloadProfiles" not in r["properties"] or not r["properties"]["workloadProfiles"]:
                raise ValidationError(
                    "This environment does not allow for workload profiles. Can create a compatible environment with 'az containerapp env create --enable-workload-profiles'")

            if workload_profile_type:
                # Profile type names are upper-case on the service side.
                workload_profile_type = workload_profile_type.upper()
            workload_profiles = r["properties"]["workloadProfiles"]
            profile = [p for p in workload_profiles if p["name"].lower() == workload_profile_name.lower()]
            update = False  # flag for updating an existing profile
            if profile:
                profile = profile[0]
                update = True
            else:
                profile = {"name": workload_profile_name}

            # Only overwrite the fields the user actually supplied.
            if workload_profile_type:
                profile["workloadProfileType"] = workload_profile_type
            if self.get_argument_max_nodes():
                profile["maximumCount"] = self.get_argument_max_nodes()
            if self.get_argument_min_nodes():
                profile["minimumCount"] = self.get_argument_min_nodes()

            if not update:
                workload_profiles.append(profile)
            else:
                idx = [i for i, p in enumerate(workload_profiles) if p["name"].lower() == workload_profile_name.lower()][0]
                workload_profiles[idx] = profile

            safe_set(self.managed_env_def, "properties", "workloadProfiles", value=workload_profiles)

    def update(self):
        """Send the update request; service errors are translated by handle_raw_exception."""
        try:
            return self.client.update(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(),
                                      name=self.get_argument_name(), managed_environment_envelope=self.managed_env_def, no_wait=self.get_argument_no_wait())
        except Exception as e:
            handle_raw_exception(e)

    def post_process(self, r):
        """Run the Azure Monitor quickstart and return the service response unchanged."""
        _azure_monitor_quickstart(self.cmd, self.get_argument_name(), self.get_argument_resource_group_name(), self.get_argument_storage_account(), self.get_argument_logs_destination())

        return r
|
|
@ -0,0 +1,494 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# pylint: disable=line-too-long
|
||||
# pylint: disable=ungrouped-imports
|
||||
# pylint: disable=unused-argument, unused-variable
|
||||
# pylint: disable=broad-exception-caught
|
||||
# pylint: disable=logging-format-interpolation
|
||||
# pylint: disable=too-many-statements, too-many-locals, too-many-branches, too-many-public-methods, too-many-boolean-expressions, expression-not-assigned
|
||||
from typing import Dict, Any
|
||||
|
||||
from azure.cli.core.commands import AzCliCommand
|
||||
|
||||
import time
|
||||
|
||||
from azure.cli.core.azclierror import (
|
||||
RequiredArgumentMissingError,
|
||||
ValidationError)
|
||||
from azure.cli.core.commands.client_factory import get_subscription_id
|
||||
|
||||
from knack.log import get_logger
|
||||
|
||||
from msrestazure.tools import parse_resource_id, is_valid_resource_id
|
||||
from msrest.exceptions import DeserializationError
|
||||
|
||||
from ._decorator_utils import process_loaded_yaml, load_yaml_file, create_deserializer
|
||||
from ._constants import HELLO_WORLD_IMAGE, CONTAINER_APPS_RP
|
||||
from ._validators import validate_create
|
||||
from .base_resource import BaseResource
|
||||
from ._clients import ManagedEnvironmentClient
|
||||
from ._client_factory import handle_raw_exception
|
||||
|
||||
from ._models import (
|
||||
JobConfiguration as JobConfigurationModel,
|
||||
ManualTriggerConfig as ManualTriggerModel,
|
||||
ScheduleTriggerConfig as ScheduleTriggerModel,
|
||||
EventTriggerConfig as EventTriggerModel,
|
||||
JobTemplate as JobTemplateModel,
|
||||
RegistryCredentials as RegistryCredentialsModel,
|
||||
ContainerAppsJob as ContainerAppsJobModel,
|
||||
ContainerResources as ContainerResourcesModel,
|
||||
JobScale as JobScaleModel,
|
||||
Container as ContainerModel,
|
||||
ManagedServiceIdentity as ManagedServiceIdentityModel,
|
||||
ScaleRule as ScaleRuleModel)
|
||||
|
||||
from ._utils import (_ensure_location_allowed,
|
||||
parse_secret_flags, store_as_secret_and_return_secret_ref, parse_env_var_flags,
|
||||
_convert_object_from_snake_to_camel_case,
|
||||
_object_to_dict, _remove_additional_attributes,
|
||||
_remove_readonly_attributes,
|
||||
_infer_acr_credentials,
|
||||
_ensure_identity_resource_id,
|
||||
validate_container_app_name,
|
||||
set_managed_identity,
|
||||
create_acrpull_role_assignment, is_registry_msi_system,
|
||||
safe_set, parse_metadata_flags, parse_auth_flags,
|
||||
get_default_workload_profile_name_from_env,
|
||||
ensure_workload_profile_supported,
|
||||
AppType,
|
||||
safe_get)
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class ContainerAppJobDecorator(BaseResource):
    """Common argument accessors for `az containerapp job` commands.

    Thin wrappers over the raw parameter dict held by BaseResource
    (get_param/set_param), one per CLI flag.
    """

    def get_environment_client(self):
        """Client used to resolve/validate the target managed environment."""
        return ManagedEnvironmentClient

    def get_argument_yaml(self):
        return self.get_param("yaml")

    def get_argument_image(self):
        return self.get_param("image")

    def get_argument_container_name(self):
        return self.get_param("container_name")

    def set_argument_image(self, image):
        self.set_param("image", image)

    def get_argument_managed_env(self):
        return self.get_param("managed_env")

    def get_argument_trigger_type(self):
        return self.get_param("trigger_type")

    def get_argument_replica_timeout(self):
        return self.get_param("replica_timeout")

    def get_argument_replica_retry_limit(self):
        return self.get_param("replica_retry_limit")

    def get_argument_replica_completion_count(self):
        return self.get_param("replica_completion_count")

    def get_argument_parallelism(self):
        return self.get_param("parallelism")

    def get_argument_cron_expression(self):
        return self.get_param("cron_expression")

    def get_argument_cpu(self):
        return self.get_param("cpu")

    def get_argument_memory(self):
        return self.get_param("memory")

    def get_argument_secrets(self):
        return self.get_param("secrets")

    def get_argument_env_vars(self):
        return self.get_param("env_vars")

    def get_argument_startup_command(self):
        return self.get_param("startup_command")

    def get_argument_args(self):
        return self.get_param("args")

    def get_argument_scale_rule_metadata(self):
        return self.get_param("scale_rule_metadata")

    def get_argument_scale_rule_name(self):
        return self.get_param("scale_rule_name")

    def get_argument_scale_rule_type(self):
        return self.get_param("scale_rule_type")

    def get_argument_scale_rule_auth(self):
        return self.get_param("scale_rule_auth")

    def get_argument_polling_interval(self):
        return self.get_param("polling_interval")

    def get_argument_min_executions(self):
        return self.get_param("min_executions")

    def get_argument_max_executions(self):
        return self.get_param("max_executions")

    def get_argument_disable_warnings(self):
        return self.get_param("disable_warnings")

    def get_argument_registry_pass(self):
        return self.get_param("registry_pass")

    def set_argument_registry_pass(self, registry_pass):
        self.set_param("registry_pass", registry_pass)

    def get_argument_registry_server(self):
        return self.get_param("registry_server")

    def get_argument_registry_user(self):
        return self.get_param("registry_user")

    def set_argument_registry_user(self, registry_user):
        self.set_param("registry_user", registry_user)

    def get_argument_tags(self):
        return self.get_param("tags")

    def get_argument_system_assigned(self):
        return self.get_param("system_assigned")

    def get_argument_user_assigned(self):
        return self.get_param("user_assigned")

    def get_argument_registry_identity(self):
        return self.get_param("registry_identity")

    def get_argument_workload_profile_name(self):
        return self.get_param("workload_profile_name")

    def set_argument_workload_profile_name(self, workload_profile_name):
        """Overwrite the workload profile name (correctly spelled accessor)."""
        self.set_param("workload_profile_name", workload_profile_name)

    def set_augument_workload_profile_name(self, workload_profile_name):
        """Deprecated misspelling of set_argument_workload_profile_name.

        Kept for backward compatibility with existing callers; delegates to the
        correctly spelled method.
        """
        self.set_argument_workload_profile_name(workload_profile_name)
|
||||
|
||||
|
||||
class ContainerAppJobCreateDecorator(ContainerAppJobDecorator):
    """Builds, validates and submits the payload for `az containerapp job create`."""

    def __init__(self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str):
        super().__init__(cmd, client, raw_parameters, models)
        # NOTE(review): ContainerAppsJobModel looks like a module-level dict
        # template assigned (not copied) here — mutations presumably write into
        # the shared object; confirm in _models.
        self.containerappjob_def = ContainerAppsJobModel
|
||||
|
||||
def validate_arguments(self):
    """Validate CLI arguments before the job payload is constructed.

    Raises RequiredArgumentMissingError when --yaml is absent and one of the
    mandatory flags (--replica-timeout, --replica-retry-limit, --environment)
    was not supplied.
    """
    validate_container_app_name(self.get_argument_name(), AppType.ContainerAppJob.name)
    validate_create(self.get_argument_registry_identity(), self.get_argument_registry_pass(),
                    self.get_argument_registry_user(), self.get_argument_registry_server(),
                    self.get_argument_no_wait())

    if self.get_argument_yaml() is not None:
        # A YAML definition supplies the remaining settings itself.
        return

    if self.get_argument_replica_timeout() is None:
        raise RequiredArgumentMissingError('Usage error: --replica-timeout is required')

    if self.get_argument_replica_retry_limit() is None:
        raise RequiredArgumentMissingError('Usage error: --replica-retry-limit is required')

    if self.get_argument_managed_env() is None:
        raise RequiredArgumentMissingError('Usage error: --environment is required if not using --yaml')
|
||||
|
||||
def create(self):
    """Send the create-or-update request for the assembled job payload.

    Returns the service response; raw service errors are translated by
    handle_raw_exception (which raises a CLI-friendly error).
    """
    try:
        return self.client.create_or_update(
            cmd=self.cmd,
            resource_group_name=self.get_argument_resource_group_name(),
            name=self.get_argument_name(),
            containerapp_job_envelope=self.containerappjob_def,
            no_wait=self.get_argument_no_wait())
    except Exception as e:
        handle_raw_exception(e)
|
||||
|
||||
def construct_for_post_process(self, r):
    """Prepare the payload for a second create pass when the job pulls from ACR
    with its system-assigned identity.

    Waits for provisioning to finish, grants the system principal acrpull on the
    registry, then swaps the placeholder image for the real one and records the
    registry with identity-based auth.
    """
    if not is_registry_msi_system(self.get_argument_registry_identity()):
        return

    # Poll until the job leaves the InProgress state so the system identity exists.
    while r["properties"]["provisioningState"] == "InProgress":
        r = self.client.show(self.cmd, self.get_argument_resource_group_name(), self.get_argument_name())
        time.sleep(10)

    logger.info("Creating an acrpull role assignment for the system identity")
    system_sp = r["identity"]["principalId"]
    create_acrpull_role_assignment(self.cmd, self.get_argument_registry_server(),
                                   registry_identity=None, service_principal=system_sp)

    # Replace the placeholder image set during the first pass with the real one.
    containers_def = safe_get(self.containerappjob_def, "properties", "template", "containers")
    containers_def[0]["image"] = self.get_argument_image()

    registries_def = RegistryCredentialsModel
    registries_def["server"] = self.get_argument_registry_server()
    registries_def["identity"] = self.get_argument_registry_identity()
    safe_set(self.containerappjob_def, "properties", "configuration", "registries", value=[registries_def])
|
||||
|
||||
def post_process(self, r):
    """Finish creation after the initial request.

    Re-issues the create when a system-assigned registry identity required a
    two-pass setup, and warns (unless suppressed) while provisioning is still
    in the "waiting" state. Returns the (possibly refreshed) response.
    """
    if is_registry_msi_system(self.get_argument_registry_identity()):
        # Second pass: payload was patched by construct_for_post_process.
        r = self.create()

    if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" and not self.get_argument_no_wait():
        if not self.get_argument_disable_warnings():
            # BUGFIX: get_argument_name was referenced without calling it, so the
            # warning printed a bound-method repr instead of the job name.
            logger.warning('Containerapp job creation in progress. Please monitor the creation using `az containerapp job show -n {} -g {}`'.format(
                self.get_argument_name(), self.get_argument_resource_group_name()))
    return r
|
||||
|
||||
def construct_payload(self):
    """Assemble self.containerappjob_def from the CLI arguments.

    Delegates entirely to the YAML path when --yaml is given. Otherwise
    validates the managed environment, resolves the workload profile, and
    builds the trigger, secret, registry, identity and container sections of
    the job envelope. Returns None (the payload is stored on the decorator);
    the YAML path returns whatever set_up_create_containerapp_job_yaml returns.
    """
    # Grant acrpull up front when a user-assigned registry identity is used.
    if self.get_argument_registry_identity() and not is_registry_msi_system(self.get_argument_registry_identity()):
        logger.info("Creating an acrpull role assignment for the registry identity")
        create_acrpull_role_assignment(self.cmd, self.get_argument_registry_server(), self.get_argument_registry_identity(), skip_error=True)

    # A YAML file overrides every other flag.
    if self.get_argument_yaml():
        return self.set_up_create_containerapp_job_yaml(name=self.get_argument_name(), file_name=self.get_argument_yaml())

    if not self.get_argument_image():
        self.set_argument_image(HELLO_WORLD_IMAGE)

    # Validate managed environment
    parsed_managed_env = parse_resource_id(self.get_argument_managed_env())
    managed_env_name = parsed_managed_env['name']
    managed_env_rg = parsed_managed_env['resource_group']
    managed_env_info = None

    try:
        managed_env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=managed_env_rg, name=managed_env_name)
    except:  # pylint: disable=bare-except
        # Best-effort lookup; a missing environment is reported just below.
        pass

    if not managed_env_info:
        raise ValidationError(
            "The environment '{}' does not exist. Specify a valid environment".format(self.get_argument_managed_env()))

    location = managed_env_info["location"]
    _ensure_location_allowed(self.cmd, location, CONTAINER_APPS_RP, "jobs")

    # Default the workload profile from the environment when it supports profiles.
    if not self.get_argument_workload_profile_name() and "workloadProfiles" in managed_env_info:
        workload_profile_name = get_default_workload_profile_name_from_env(self.cmd, managed_env_info, managed_env_rg)
        self.set_augument_workload_profile_name(workload_profile_name)

    # BUGFIX: the "schedule" and "event" branches below originally compared the
    # *bound method* self.get_argument_trigger_type (always truthy) against
    # None instead of calling it, as the "manual" branch does. Hoist one call
    # and use it consistently.
    trigger_type = self.get_argument_trigger_type()

    manualTriggerConfig_def = None
    if trigger_type is not None and trigger_type.lower() == "manual":
        manualTriggerConfig_def = ManualTriggerModel
        manualTriggerConfig_def[
            "replicaCompletionCount"] = 0 if self.get_argument_replica_completion_count() is None else self.get_argument_replica_completion_count()
        manualTriggerConfig_def["parallelism"] = 0 if self.get_argument_parallelism() is None else self.get_argument_parallelism()

    scheduleTriggerConfig_def = None
    if trigger_type is not None and trigger_type.lower() == "schedule":
        scheduleTriggerConfig_def = ScheduleTriggerModel
        scheduleTriggerConfig_def[
            "replicaCompletionCount"] = 0 if self.get_argument_replica_completion_count() is None else self.get_argument_replica_completion_count()
        scheduleTriggerConfig_def["parallelism"] = 0 if self.get_argument_parallelism() is None else self.get_argument_parallelism()
        scheduleTriggerConfig_def["cronExpression"] = self.get_argument_cron_expression()

    eventTriggerConfig_def = None
    if trigger_type is not None and trigger_type.lower() == "event":
        scale_def = None
        if self.get_argument_min_executions() is not None or self.get_argument_max_executions() is not None or self.get_argument_polling_interval() is not None:
            scale_def = JobScaleModel
            scale_def["pollingInterval"] = self.get_argument_polling_interval()
            scale_def["minExecutions"] = self.get_argument_min_executions()
            scale_def["maxExecutions"] = self.get_argument_max_executions()

        if self.get_argument_scale_rule_name():
            scale_rule_type = self.get_argument_scale_rule_type().lower()
            scale_rule_def = ScaleRuleModel
            curr_metadata = {}
            metadata_def = parse_metadata_flags(self.get_argument_scale_rule_metadata(), curr_metadata)
            auth_def = parse_auth_flags(self.get_argument_scale_rule_auth())
            scale_rule_def["name"] = self.get_argument_scale_rule_name()
            scale_rule_def["type"] = scale_rule_type
            scale_rule_def["metadata"] = metadata_def
            scale_rule_def["auth"] = auth_def

            if not scale_def:
                scale_def = JobScaleModel
            scale_def["rules"] = [scale_rule_def]

        eventTriggerConfig_def = EventTriggerModel
        eventTriggerConfig_def["replicaCompletionCount"] = self.get_argument_replica_completion_count()
        eventTriggerConfig_def["parallelism"] = self.get_argument_parallelism()
        eventTriggerConfig_def["scale"] = scale_def

    secrets_def = None
    if self.get_argument_secrets() is not None:
        secrets_def = parse_secret_flags(self.get_argument_secrets())

    registries_def = None
    if self.get_argument_registry_server() is not None and not is_registry_msi_system(self.get_argument_registry_identity()):
        registries_def = RegistryCredentialsModel
        registries_def["server"] = self.get_argument_registry_server()

        # Infer credentials if not supplied and its azurecr
        if (self.get_argument_registry_user() is None or self.get_argument_registry_pass() is None) and self.get_argument_registry_identity() is None:
            registry_user, registry_pass = _infer_acr_credentials(self.cmd, self.get_argument_registry_server(), self.get_argument_disable_warnings())
            self.set_argument_registry_user(registry_user)
            self.set_argument_registry_pass(registry_pass)

        if not self.get_argument_registry_identity():
            registries_def["username"] = self.get_argument_registry_user()

            if secrets_def is None:
                secrets_def = []
            # The password is stored as a secret and referenced, never embedded directly.
            registries_def["passwordSecretRef"] = store_as_secret_and_return_secret_ref(secrets_def, self.get_argument_registry_user(),
                                                                                        self.get_argument_registry_server(),
                                                                                        self.get_argument_registry_pass(),
                                                                                        disable_warnings=self.get_argument_disable_warnings())
        else:
            registries_def["identity"] = self.get_argument_registry_identity()

    config_def = JobConfigurationModel
    config_def["secrets"] = secrets_def
    config_def["triggerType"] = trigger_type
    config_def["replicaTimeout"] = self.get_argument_replica_timeout()
    config_def["replicaRetryLimit"] = self.get_argument_replica_retry_limit()
    config_def["manualTriggerConfig"] = manualTriggerConfig_def
    config_def["scheduleTriggerConfig"] = scheduleTriggerConfig_def
    config_def["eventTriggerConfig"] = eventTriggerConfig_def
    config_def["registries"] = [registries_def] if registries_def is not None else None

    # Identity actions
    identity_def = ManagedServiceIdentityModel
    identity_def["type"] = "None"

    assign_system_identity = self.get_argument_system_assigned()
    if self.get_argument_user_assigned():
        assign_user_identities = [x.lower() for x in self.get_argument_user_assigned()]
    else:
        assign_user_identities = []

    if assign_system_identity and assign_user_identities:
        identity_def["type"] = "SystemAssigned, UserAssigned"
    elif assign_system_identity:
        identity_def["type"] = "SystemAssigned"
    elif assign_user_identities:
        identity_def["type"] = "UserAssigned"

    if assign_user_identities:
        identity_def["userAssignedIdentities"] = {}
        subscription_id = get_subscription_id(self.cmd.cli_ctx)

        for r in assign_user_identities:
            r = _ensure_identity_resource_id(subscription_id, self.get_argument_resource_group_name(), r)
            identity_def["userAssignedIdentities"][r] = {}  # pylint: disable=unsupported-assignment-operation

    resources_def = None
    if self.get_argument_cpu() is not None or self.get_argument_memory() is not None:
        resources_def = ContainerResourcesModel
        resources_def["cpu"] = self.get_argument_cpu()
        resources_def["memory"] = self.get_argument_memory()

    container_def = ContainerModel
    container_def["name"] = self.get_argument_container_name() if self.get_argument_container_name() else self.get_argument_name()
    # Use the placeholder image while the system identity is being set up; the
    # real image is patched in construct_for_post_process.
    container_def["image"] = self.get_argument_image() if not is_registry_msi_system(self.get_argument_registry_identity()) else HELLO_WORLD_IMAGE
    if self.get_argument_env_vars() is not None:
        container_def["env"] = parse_env_var_flags(self.get_argument_env_vars())
    if self.get_argument_startup_command() is not None:
        container_def["command"] = self.get_argument_startup_command()
    if self.get_argument_args() is not None:
        container_def["args"] = self.get_argument_args()
    if resources_def is not None:
        container_def["resources"] = resources_def

    template_def = JobTemplateModel
    template_def["containers"] = [container_def]

    self.containerappjob_def["location"] = location
    self.containerappjob_def["identity"] = identity_def
    self.containerappjob_def["properties"]["environmentId"] = self.get_argument_managed_env()
    self.containerappjob_def["properties"]["configuration"] = config_def
    self.containerappjob_def["properties"]["template"] = template_def
    self.containerappjob_def["tags"] = self.get_argument_tags()

    if self.get_argument_workload_profile_name():
        self.containerappjob_def["properties"]["workloadProfileName"] = self.get_argument_workload_profile_name()
        ensure_workload_profile_supported(self.cmd, managed_env_name, managed_env_rg, self.get_argument_workload_profile_name(),
                                          managed_env_info)

    if self.get_argument_registry_identity():
        if is_registry_msi_system(self.get_argument_registry_identity()):
            set_managed_identity(self.cmd, self.get_argument_resource_group_name(), self.containerappjob_def, system_assigned=True)
        else:
            set_managed_identity(self.cmd, self.get_argument_resource_group_name(), self.containerappjob_def, user_assigned=[self.get_argument_registry_identity()])
|
||||
|
||||
def set_up_create_containerapp_job_yaml(self, name, file_name):
|
||||
if self.get_argument_image() or self.get_argument_trigger_type() or self.get_argument_replica_timeout() or self.get_argument_replica_retry_limit() or \
|
||||
self.get_argument_replica_completion_count() or self.get_argument_parallelism() or self.get_argument_cron_expression() or self.get_argument_cpu() or self.get_argument_memory() or self.get_argument_registry_server() or \
|
||||
self.get_argument_registry_user() or self.get_argument_registry_pass() or self.get_argument_secrets() or self.get_argument_env_vars() or \
|
||||
self.get_argument_startup_command() or self.get_argument_args() or self.get_argument_tags():
|
||||
not self.get_argument_disable_warnings() and logger.warning(
|
||||
'Additional flags were passed along with --yaml. These flags will be ignored, and the configuration defined in the yaml will be used instead')
|
||||
|
||||
yaml_containerappsjob = process_loaded_yaml(load_yaml_file(file_name))
|
||||
|
||||
if not yaml_containerappsjob.get('name'):
|
||||
yaml_containerappsjob['name'] = name
|
||||
elif yaml_containerappsjob.get('name').lower() != name.lower():
|
||||
logger.warning(
|
||||
'The job name provided in the --yaml file "{}" does not match the one provided in the --name flag "{}". The one provided in the --yaml file will be used.'.format(
|
||||
yaml_containerappsjob.get('name'), name))
|
||||
name = yaml_containerappsjob.get('name')
|
||||
|
||||
if not yaml_containerappsjob.get('type'):
|
||||
yaml_containerappsjob['type'] = 'Microsoft.App/jobs'
|
||||
elif yaml_containerappsjob.get('type').lower() != "microsoft.app/jobs":
|
||||
raise ValidationError('Containerapp job type must be \"Microsoft.App/jobs\"')
|
||||
|
||||
# Deserialize the yaml into a ContainerAppsJob object. Need this since we're not using SDK
|
||||
try:
|
||||
deserializer = create_deserializer(self.models)
|
||||
|
||||
self.containerappjob_def = deserializer('ContainerAppsJob', yaml_containerappsjob)
|
||||
except DeserializationError as ex:
|
||||
raise ValidationError(
|
||||
'Invalid YAML provided. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.') from ex
|
||||
|
||||
# Remove tags before converting from snake case to camel case, then re-add tags. We don't want to change the case of the tags. Need this since we're not using SDK
|
||||
tags = None
|
||||
if yaml_containerappsjob.get('tags'):
|
||||
tags = yaml_containerappsjob.get('tags')
|
||||
del yaml_containerappsjob['tags']
|
||||
|
||||
self.containerappjob_def = _convert_object_from_snake_to_camel_case(_object_to_dict(self.containerappjob_def))
|
||||
self.containerappjob_def['tags'] = tags
|
||||
|
||||
# After deserializing, some properties may need to be moved under the "properties" attribute. Need this since we're not using SDK
|
||||
self.containerappjob_def = process_loaded_yaml(self.containerappjob_def)
|
||||
|
||||
# Remove "additionalProperties" and read-only attributes that are introduced in the deserialization. Need this since we're not using SDK
|
||||
_remove_additional_attributes(self.containerappjob_def)
|
||||
_remove_readonly_attributes(self.containerappjob_def)
|
||||
|
||||
# Remove extra workloadProfileName introduced in deserialization
|
||||
if "workloadProfileName" in self.containerappjob_def:
|
||||
del self.containerappjob_def["workloadProfileName"]
|
||||
|
||||
# Validate managed environment
|
||||
env_id = self.containerappjob_def["properties"]['environmentId']
|
||||
env_info = None
|
||||
if self.get_argument_managed_env():
|
||||
if not self.get_argument_disable_warnings() and env_id is not None and env_id != self.get_argument_managed_env():
|
||||
logger.warning('The environmentId was passed along with --yaml. The value entered with --environment will be ignored, and the configuration defined in the yaml will be used instead')
|
||||
if env_id is None:
|
||||
env_id = self.get_argument_managed_env()
|
||||
safe_set(self.containerappjob_def, "properties", "environmentId", value=env_id)
|
||||
|
||||
if not self.containerappjob_def["properties"].get('environmentId'):
|
||||
raise RequiredArgumentMissingError(
|
||||
'environmentId is required. This can be retrieved using the `az containerapp env show -g MyResourceGroup -n MyContainerappEnvironment --query id` command. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.')
|
||||
|
||||
if is_valid_resource_id(env_id):
|
||||
parsed_managed_env = parse_resource_id(env_id)
|
||||
env_name = parsed_managed_env['name']
|
||||
env_rg = parsed_managed_env['resource_group']
|
||||
else:
|
||||
raise ValidationError('Invalid environmentId specified. Environment not found')
|
||||
|
||||
try:
|
||||
env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=env_rg, name=env_name)
|
||||
except: # pylint: disable=bare-except
|
||||
pass
|
||||
|
||||
if not env_info:
|
||||
raise ValidationError("The environment '{}' in resource group '{}' was not found".format(env_name, env_rg))
|
||||
|
||||
# Validate location
|
||||
if not self.containerappjob_def.get('location'):
|
||||
self.containerappjob_def['location'] = env_info['location']
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,79 @@
|
|||
---
|
||||
# exclusions for the containerapp module
|
||||
|
||||
containerapp env create:
|
||||
parameters:
|
||||
infrastructure_subnet_resource_id:
|
||||
rule_exclusions:
|
||||
- option_length_too_long
|
||||
instrumentation_key:
|
||||
rule_exclusions:
|
||||
- option_length_too_long
|
||||
platform_reserved_dns_ip:
|
||||
rule_exclusions:
|
||||
- option_length_too_long
|
||||
|
||||
containerapp exec:
|
||||
rule_exclusions:
|
||||
- missing_command_test_coverage
|
||||
parameters:
|
||||
container:
|
||||
rule_exclusions:
|
||||
- missing_parameter_test_coverage
|
||||
replica:
|
||||
rule_exclusions:
|
||||
- missing_parameter_test_coverage
|
||||
revision:
|
||||
rule_exclusions:
|
||||
- missing_parameter_test_coverage
|
||||
command:
|
||||
rule_exclusions:
|
||||
- missing_parameter_test_coverage
|
||||
name:
|
||||
rule_exclusions:
|
||||
- missing_parameter_test_coverage
|
||||
resource-group-name:
|
||||
rule_exclusions:
|
||||
- missing_parameter_test_coverage
|
||||
|
||||
containerapp github-action show:
|
||||
rule_exclusions:
|
||||
- missing_command_test_coverage
|
||||
|
||||
containerapp github-action delete:
|
||||
rule_exclusions:
|
||||
- missing_command_test_coverage
|
||||
|
||||
containerapp github-action add:
|
||||
rule_exclusions:
|
||||
- missing_command_test_coverage
|
||||
parameters:
|
||||
service_principal_client_id:
|
||||
rule_exclusions:
|
||||
- option_length_too_long
|
||||
service_principal_client_secret:
|
||||
rule_exclusions:
|
||||
- option_length_too_long
|
||||
service_principal_tenant_id:
|
||||
rule_exclusions:
|
||||
- option_length_too_long
|
||||
|
||||
containerapp:
|
||||
rule_exclusions:
|
||||
- require_wait_command_if_no_wait
|
||||
containerapp env storage:
|
||||
rule_exclusions:
|
||||
- require_wait_command_if_no_wait
|
||||
containerapp env:
|
||||
rule_exclusions:
|
||||
- require_wait_command_if_no_wait
|
||||
containerapp identity:
|
||||
rule_exclusions:
|
||||
- require_wait_command_if_no_wait
|
||||
containerapp job identity:
|
||||
rule_exclusions:
|
||||
- require_wait_command_if_no_wait
|
||||
containerapp job:
|
||||
rule_exclusions:
|
||||
- require_wait_command_if_no_wait
|
||||
...
|
|
@ -0,0 +1,8 @@
|
|||
# -----------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for
|
||||
# license information.
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
# -----------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for
|
||||
# license information.
|
||||
# -----------------------------------------------------------------------------
|
|
@ -0,0 +1,25 @@
|
|||
# --------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See License.txt in the project root for license information.
|
||||
# --------------------------------------------------------------------------------------------
|
||||
|
||||
import os
|
||||
from azure.cli.testsdk import (ScenarioTest)
|
||||
|
||||
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
|
||||
TEST_LOCATION = os.getenv("CLITestLocation") if os.getenv("CLITestLocation") else "eastus"
|
||||
|
||||
|
||||
def write_test_file(filename, content):
    """Write *content* to *filename* as UTF-8, creating or truncating the file.

    Uses a context manager so the handle is closed even if the write raises
    (the original open/write/close sequence leaked the handle on failure).
    """
    with open(filename, "w", encoding='utf-8') as test_file:
        test_file.write(content)
|
||||
|
||||
|
||||
def clean_up_test_file(filename):
    """Delete *filename* from disk; silently do nothing if it does not exist."""
    if not os.path.exists(filename):
        return
    os.remove(filename)
|
||||
|
||||
|
||||
class ContainerappComposePreviewScenarioTest(ScenarioTest):
    """Base scenario-test class for `az containerapp compose` preview cases.

    Intentionally empty: shared fixtures/helpers are expected to be added here
    as test scenarios are ported over.
    """
|
|
@ -0,0 +1,13 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIB4jCCAYigAwIBAgIJAP7PvAbawuyKMAoGCCqGSM49BAMCMEwxCzAJBgNVBAYT
|
||||
AlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHUmVkbW9uZDELMAkGA1UECgwCTVMx
|
||||
ETAPBgNVBAMMCHRlc3QgRUNDMB4XDTIyMDMzMDAzMzgwOVoXDTIzMDMyNTAzMzgw
|
||||
OVowTDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMRAwDgYDVQQHDAdSZWRtb25k
|
||||
MQswCQYDVQQKDAJNUzERMA8GA1UEAwwIdGVzdCBFQ0MwWTATBgcqhkjOPQIBBggq
|
||||
hkjOPQMBBwNCAAQwjPFJZIKDKti/CIF/3Q6N3TlhsFGqd298ntf+R5Y086hpwwHZ
|
||||
12g/V5FO6Egju3O1kzU47ZVHtpjV5idGp9+uo1MwUTAdBgNVHQ4EFgQUvu4PsfwU
|
||||
+jSoHjtSH/d+txuwrU0wHwYDVR0jBBgwFoAUvu4PsfwU+jSoHjtSH/d+txuwrU0w
|
||||
DwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNIADBFAiAZjExYDs6zRCwQBqIZ
|
||||
wSQhN4s0JMAaL68vbsYaiEmP5AIhANXvE21kv4FmJDUBLhKTTVtTuCILNiKNiXjr
|
||||
l+yC2sCb
|
||||
-----END CERTIFICATE-----
|
Двоичные данные
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/data/cert.pfx
Normal file
Двоичные данные
src/azure-cli/azure/cli/command_modules/containerapp/tests/latest/data/cert.pfx
Normal file
Двоичный файл не отображается.
|
@ -0,0 +1 @@
|
|||
testing
|
|
@ -0,0 +1,44 @@
|
|||
{
|
||||
"address1": {
|
||||
"value": "One Microsoft Way"
|
||||
},
|
||||
"address2": {
|
||||
"value": ""
|
||||
},
|
||||
"city": {
|
||||
"value": "Seattle"
|
||||
},
|
||||
"country": {
|
||||
"value": "US"
|
||||
},
|
||||
"postal_code": {
|
||||
"value": "98109"
|
||||
},
|
||||
"state": {
|
||||
"value": "WA"
|
||||
},
|
||||
"email": {
|
||||
"value": "testemail@hotmail.com"
|
||||
},
|
||||
"fax": {
|
||||
"value": ""
|
||||
},
|
||||
"job_title": {
|
||||
"value": ""
|
||||
},
|
||||
"name_first": {
|
||||
"value": "Jane"
|
||||
},
|
||||
"name_last": {
|
||||
"value": "Doe"
|
||||
},
|
||||
"name_middle": {
|
||||
"value": ""
|
||||
},
|
||||
"organization": {
|
||||
"value": ""
|
||||
},
|
||||
"phone": {
|
||||
"value": "+1.2061234567"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
// Express application for the containerapp test fixture: wires standard
// middleware, the two routers, and the 404/error handlers.
const createError = require('http-errors');
const express = require('express');
const path = require('path');
const cookieParser = require('cookie-parser');
const logger = require('morgan');

const indexRouter = require('./routes/index');
const usersRouter = require('./routes/users');

const app = express();

// view engine setup
app.set('views', path.join(__dirname, 'views'));
app.set('view engine', 'ejs');

app.use(logger('dev'));
app.use(express.json());
app.use(express.urlencoded({ extended: false }));
app.use(cookieParser());
app.use(express.static(path.join(__dirname, 'public')));

app.use('/', indexRouter);
app.use('/users', usersRouter);

// catch 404 and forward to error handler
app.use((req, res, next) => {
  next(createError(404));
});

// error handler (four-arg signature marks it as error middleware for Express)
app.use((err, req, res, next) => {
  // set locals, only providing error in development
  res.locals.message = err.message;
  res.locals.error = req.app.get('env') === 'development' ? err : {};

  // render the error page
  res.status(err.status || 500);
  res.render('error');
});

module.exports = app;
|
|
@ -0,0 +1,90 @@
|
|||
#!/usr/bin/env node

/**
 * HTTP entry point: boots the Express app on the configured port and wires
 * handlers for server lifecycle events.
 */

const app = require('../app');
const debug = require('debug')('myexpressapp:server');
const http = require('http');

// Get port from environment and store in Express.
const port = normalizePort(process.env.PORT || '3000');
app.set('port', port);

// Create the HTTP server and listen on the provided port, on all interfaces.
const server = http.createServer(app);
server.listen(port);
server.on('error', onError);
server.on('listening', onListening);

/**
 * Normalize a port value into a number, a string (named pipe), or false.
 */
function normalizePort(val) {
  const parsed = parseInt(val, 10);

  if (isNaN(parsed)) {
    // named pipe
    return val;
  }

  if (parsed >= 0) {
    // port number
    return parsed;
  }

  return false;
}

/**
 * Event listener for HTTP server "error" event.
 */
function onError(error) {
  if (error.syscall !== 'listen') {
    throw error;
  }

  const bind = typeof port === 'string' ? 'Pipe ' + port : 'Port ' + port;

  // handle specific listen errors with friendly messages
  switch (error.code) {
    case 'EACCES':
      console.error(bind + ' requires elevated privileges');
      process.exit(1);
      break;
    case 'EADDRINUSE':
      console.error(bind + ' is already in use');
      process.exit(1);
      break;
    default:
      throw error;
  }
}

/**
 * Event listener for HTTP server "listening" event.
 */
function onListening() {
  const addr = server.address();
  const bind = typeof addr === 'string' ? 'pipe ' + addr : 'port ' + addr.port;
  debug('Listening on ' + bind);
}
|
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"name": "myexpressapp",
|
||||
"version": "0.0.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"start": "node ./bin/www"
|
||||
},
|
||||
"dependencies": {
|
||||
"cookie-parser": "~1.4.4",
|
||||
"debug": "~2.6.9",
|
||||
"ejs": "~2.6.1",
|
||||
"express": "~4.16.1",
|
||||
"http-errors": "~1.6.3",
|
||||
"morgan": "~1.9.1"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
body {
|
||||
padding: 50px;
|
||||
font: 14px "Lucida Grande", Helvetica, Arial, sans-serif;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #00B7FF;
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
const express = require('express');
const router = express.Router();

/* GET home page. */
router.get('/', (req, res, next) => {
  res.render('index', { title: 'Express' });
});

module.exports = router;
|
|
@ -0,0 +1,9 @@
|
|||
const express = require('express');
const router = express.Router();

/* GET users listing. */
router.get('/', (req, res, next) => {
  res.send('respond with a resource');
});

module.exports = router;
|
|
@ -0,0 +1,3 @@
|
|||
<h1><%= message %></h1>
|
||||
<h2><%= error.status %></h2>
|
||||
<pre><%= error.stack %></pre>
|
|
@ -0,0 +1,11 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title><%= title %></title>
|
||||
<link rel='stylesheet' href='/stylesheets/style.css' />
|
||||
</head>
|
||||
<body>
|
||||
<h1><%= title %></h1>
|
||||
<p>Welcome to <%= title %></p>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,13 @@
|
|||
<Project Sdk="Microsoft.NET.Sdk.Web">
|
||||
|
||||
<PropertyGroup>
|
||||
<TargetFramework>net7.0</TargetFramework>
|
||||
<Nullable>enable</Nullable>
|
||||
<ImplicitUsings>enable</ImplicitUsings>
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<PackageReference Include="Microsoft.Extensions.Logging" Version="7.0.0" />
|
||||
</ItemGroup>
|
||||
|
||||
</Project>
|
|
@ -0,0 +1,26 @@
|
|||
@page
|
||||
@model ErrorModel
|
||||
@{
|
||||
ViewData["Title"] = "Error";
|
||||
}
|
||||
|
||||
<h1 class="text-danger">Error.</h1>
|
||||
<h2 class="text-danger">An error occurred while processing your request.</h2>
|
||||
|
||||
@if (Model.ShowRequestId)
|
||||
{
|
||||
<p>
|
||||
<strong>Request ID:</strong> <code>@Model.RequestId</code>
|
||||
</p>
|
||||
}
|
||||
|
||||
<h3>Development Mode</h3>
|
||||
<p>
|
||||
Swapping to the <strong>Development</strong> environment displays detailed information about the error that occurred.
|
||||
</p>
|
||||
<p>
|
||||
<strong>The Development environment shouldn't be enabled for deployed applications.</strong>
|
||||
It can result in displaying sensitive information from exceptions to end users.
|
||||
For local debugging, enable the <strong>Development</strong> environment by setting the <strong>ASPNETCORE_ENVIRONMENT</strong> environment variable to <strong>Development</strong>
|
||||
and restarting the app.
|
||||
</p>
|
|
@ -0,0 +1,27 @@
|
|||
using System.Diagnostics;
using Microsoft.AspNetCore.Mvc;
using Microsoft.AspNetCore.Mvc.RazorPages;

namespace HelloWorldApp.Pages;

/// <summary>Backing model for the Error page; surfaces the current request id.</summary>
[ResponseCache(Duration = 0, Location = ResponseCacheLocation.None, NoStore = true)]
[IgnoreAntiforgeryToken]
public class ErrorModel : PageModel
{
    private readonly ILogger<ErrorModel> _logger;

    public ErrorModel(ILogger<ErrorModel> logger) => _logger = logger;

    /// <summary>Trace id of the failing request, shown to aid support.</summary>
    public string? RequestId { get; set; }

    public bool ShowRequestId => !string.IsNullOrEmpty(RequestId);

    public void OnGet() => RequestId = Activity.Current?.Id ?? HttpContext.TraceIdentifier;
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
@page
|
||||
@model IndexModel
|
||||
@{
|
||||
ViewData["Title"] = "Home page";
|
||||
}
|
||||
|
||||
<div class="text-center">
|
||||
<h1 class="display-4">Hello, world!</h1>
|
||||
<p>Learn about <a href="https://docs.microsoft.com/aspnet/core">building Web apps with ASP.NET Core</a>.</p>
|
||||
</div>
|
|
@ -0,0 +1,19 @@
|
|||
using Microsoft.AspNetCore.Mvc;
using Microsoft.AspNetCore.Mvc.RazorPages;

namespace HelloWorldApp.Pages;

/// <summary>Backing model for the home page; no state is prepared on GET.</summary>
public class IndexModel : PageModel
{
    private readonly ILogger<IndexModel> _logger;

    public IndexModel(ILogger<IndexModel> logger) => _logger = logger;

    public void OnGet()
    {
        // Nothing to prepare for the static landing page.
    }
}
|
|
@ -0,0 +1,8 @@
|
|||
@page
|
||||
@model PrivacyModel
|
||||
@{
|
||||
ViewData["Title"] = "Privacy Policy";
|
||||
}
|
||||
<h1>@ViewData["Title"]</h1>
|
||||
|
||||
<p>Use this page to detail your site's privacy policy.</p>
|
|
@ -0,0 +1,19 @@
|
|||
using Microsoft.AspNetCore.Mvc;
|
||||
using Microsoft.AspNetCore.Mvc.RazorPages;
|
||||
|
||||
namespace HelloWorldApp.Pages;
|
||||
|
||||
public class PrivacyModel : PageModel
|
||||
{
|
||||
private readonly ILogger<PrivacyModel> _logger;
|
||||
|
||||
public PrivacyModel(ILogger<PrivacyModel> logger)
|
||||
{
|
||||
_logger = logger;
|
||||
}
|
||||
|
||||
public void OnGet()
|
||||
{
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>@ViewData["Title"] - HelloWorldApp</title>
|
||||
<link rel="stylesheet" href="~/lib/bootstrap/dist/css/bootstrap.min.css" />
|
||||
<link rel="stylesheet" href="~/css/site.css" asp-append-version="true" />
|
||||
<link rel="stylesheet" href="~/HelloWorldApp.styles.css" asp-append-version="true" />
|
||||
</head>
|
||||
<body>
|
||||
<header>
|
||||
<nav class="navbar navbar-expand-sm navbar-toggleable-sm navbar-light bg-white border-bottom box-shadow mb-3">
|
||||
<div class="container">
|
||||
<a class="navbar-brand" asp-area="" asp-page="/Index">HelloWorldApp</a>
|
||||
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target=".navbar-collapse" aria-controls="navbarSupportedContent"
|
||||
aria-expanded="false" aria-label="Toggle navigation">
|
||||
<span class="navbar-toggler-icon"></span>
|
||||
</button>
|
||||
<div class="navbar-collapse collapse d-sm-inline-flex justify-content-between">
|
||||
<ul class="navbar-nav flex-grow-1">
|
||||
<li class="nav-item">
|
||||
<a class="nav-link text-dark" asp-area="" asp-page="/Index">Home</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link text-dark" asp-area="" asp-page="/Privacy">Privacy</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
</header>
|
||||
<div class="container">
|
||||
<main role="main" class="pb-3">
|
||||
@RenderBody()
|
||||
</main>
|
||||
</div>
|
||||
|
||||
<footer class="border-top footer text-muted">
|
||||
<div class="container">
|
||||
© 2023 - HelloWorldApp - <a asp-area="" asp-page="/Privacy">Privacy</a>
|
||||
</div>
|
||||
</footer>
|
||||
|
||||
<script src="~/lib/jquery/dist/jquery.min.js"></script>
|
||||
<script src="~/lib/bootstrap/dist/js/bootstrap.bundle.min.js"></script>
|
||||
<script src="~/js/site.js" asp-append-version="true"></script>
|
||||
|
||||
@await RenderSectionAsync("Scripts", required: false)
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,48 @@
|
|||
/* Please see documentation at https://docs.microsoft.com/aspnet/core/client-side/bundling-and-minification
|
||||
for details on configuring this project to bundle and minify static web assets. */
|
||||
|
||||
a.navbar-brand {
|
||||
white-space: normal;
|
||||
text-align: center;
|
||||
word-break: break-all;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #0077cc;
|
||||
}
|
||||
|
||||
.btn-primary {
|
||||
color: #fff;
|
||||
background-color: #1b6ec2;
|
||||
border-color: #1861ac;
|
||||
}
|
||||
|
||||
.nav-pills .nav-link.active, .nav-pills .show > .nav-link {
|
||||
color: #fff;
|
||||
background-color: #1b6ec2;
|
||||
border-color: #1861ac;
|
||||
}
|
||||
|
||||
.border-top {
|
||||
border-top: 1px solid #e5e5e5;
|
||||
}
|
||||
.border-bottom {
|
||||
border-bottom: 1px solid #e5e5e5;
|
||||
}
|
||||
|
||||
.box-shadow {
|
||||
box-shadow: 0 .25rem .75rem rgba(0, 0, 0, .05);
|
||||
}
|
||||
|
||||
button.accept-policy {
|
||||
font-size: 1rem;
|
||||
line-height: inherit;
|
||||
}
|
||||
|
||||
.footer {
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
width: 100%;
|
||||
white-space: nowrap;
|
||||
line-height: 60px;
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
<script src="~/lib/jquery-validation/dist/jquery.validate.min.js"></script>
|
||||
<script src="~/lib/jquery-validation-unobtrusive/jquery.validate.unobtrusive.min.js"></script>
|
|
@ -0,0 +1,3 @@
|
|||
@using HelloWorldApp
|
||||
@namespace HelloWorldApp.Pages
|
||||
@addTagHelper *, Microsoft.AspNetCore.Mvc.TagHelpers
|
|
@ -0,0 +1,3 @@
|
|||
@{
|
||||
Layout = "_Layout";
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
var builder = WebApplication.CreateBuilder(args);
|
||||
|
||||
// Add services to the container.
|
||||
builder.Services.AddRazorPages();
|
||||
|
||||
var app = builder.Build();
|
||||
|
||||
// Configure the HTTP request pipeline.
|
||||
if (!app.Environment.IsDevelopment())
|
||||
{
|
||||
app.UseExceptionHandler("/Error");
|
||||
// The default HSTS value is 30 days. You may want to change this for production scenarios, see https://aka.ms/aspnetcore-hsts.
|
||||
app.UseHsts();
|
||||
}
|
||||
|
||||
app.UseHttpsRedirection();
|
||||
app.UseStaticFiles();
|
||||
|
||||
app.UseRouting();
|
||||
|
||||
app.UseAuthorization();
|
||||
|
||||
app.MapRazorPages();
|
||||
|
||||
app.Run();
|
|
@ -0,0 +1,37 @@
|
|||
{
|
||||
"iisSettings": {
|
||||
"windowsAuthentication": false,
|
||||
"anonymousAuthentication": true,
|
||||
"iisExpress": {
|
||||
"applicationUrl": "http://localhost:28306",
|
||||
"sslPort": 44381
|
||||
}
|
||||
},
|
||||
"profiles": {
|
||||
"http": {
|
||||
"commandName": "Project",
|
||||
"dotnetRunMessages": true,
|
||||
"launchBrowser": true,
|
||||
"applicationUrl": "http://localhost:5086",
|
||||
"environmentVariables": {
|
||||
"ASPNETCORE_ENVIRONMENT": "Development"
|
||||
}
|
||||
},
|
||||
"https": {
|
||||
"commandName": "Project",
|
||||
"dotnetRunMessages": true,
|
||||
"launchBrowser": true,
|
||||
"applicationUrl": "https://localhost:7118;http://localhost:5086",
|
||||
"environmentVariables": {
|
||||
"ASPNETCORE_ENVIRONMENT": "Development"
|
||||
}
|
||||
},
|
||||
"IIS Express": {
|
||||
"commandName": "IISExpress",
|
||||
"launchBrowser": true,
|
||||
"environmentVariables": {
|
||||
"ASPNETCORE_ENVIRONMENT": "Development"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"DetailedErrors": true,
|
||||
"Logging": {
|
||||
"LogLevel": {
|
||||
"Default": "Information",
|
||||
"Microsoft.AspNetCore": "Warning"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"Logging": {
|
||||
"LogLevel": {
|
||||
"Default": "Information",
|
||||
"Microsoft.AspNetCore": "Warning"
|
||||
}
|
||||
},
|
||||
"AllowedHosts": "*"
|
||||
}
|
|
@ -0,0 +1,22 @@
|
|||
html {
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
@media (min-width: 768px) {
|
||||
html {
|
||||
font-size: 16px;
|
||||
}
|
||||
}
|
||||
|
||||
.btn:focus, .btn:active:focus, .btn-link.nav-link:focus, .form-control:focus, .form-check-input:focus {
|
||||
box-shadow: 0 0 0 0.1rem white, 0 0 0 0.25rem #258cfb;
|
||||
}
|
||||
|
||||
html {
|
||||
position: relative;
|
||||
min-height: 100%;
|
||||
}
|
||||
|
||||
body {
|
||||
margin-bottom: 60px;
|
||||
}
|
Двоичный файл не отображается.
После Ширина: | Высота: | Размер: 5.3 KiB |
|
@ -0,0 +1,4 @@
|
|||
// Please see documentation at https://docs.microsoft.com/aspnet/core/client-side/bundling-and-minification
|
||||
// for details on configuring this project to bundle and minify static web assets.
|
||||
|
||||
// Write your JavaScript code.
|
|
@ -0,0 +1,25 @@
|
|||
**/.classpath
|
||||
**/.dockerignore
|
||||
**/.env
|
||||
**/.git
|
||||
**/.gitignore
|
||||
**/.project
|
||||
**/.settings
|
||||
**/.toolstarget
|
||||
**/.vs
|
||||
**/.vscode
|
||||
**/*.*proj.user
|
||||
**/*.dbmdl
|
||||
**/*.jfm
|
||||
**/azds.yaml
|
||||
**/bin
|
||||
**/charts
|
||||
**/docker-compose*
|
||||
**/Dockerfile*
|
||||
**/node_modules
|
||||
**/npm-debug.log
|
||||
**/obj
|
||||
**/secrets.dev.yaml
|
||||
**/values.dev.yaml
|
||||
LICENSE
|
||||
README.md
|
|
@ -0,0 +1,22 @@
|
|||
#See https://aka.ms/customizecontainer to learn how to customize your debug container and how Visual Studio uses this Dockerfile to build your images for faster debugging.
|
||||
|
||||
FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base
|
||||
WORKDIR /app
|
||||
EXPOSE 80
|
||||
EXPOSE 443
|
||||
|
||||
FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build
|
||||
WORKDIR /src
|
||||
COPY ["TestWebApp202305.csproj", "."]
|
||||
RUN dotnet restore "./TestWebApp202305.csproj"
|
||||
COPY . .
|
||||
WORKDIR "/src/."
|
||||
RUN dotnet build "TestWebApp202305.csproj" -c Release -o /app/build
|
||||
|
||||
FROM build AS publish
|
||||
RUN dotnet publish "TestWebApp202305.csproj" -c Release -o /app/publish /p:UseAppHost=false
|
||||
|
||||
FROM base AS final
|
||||
WORKDIR /app
|
||||
COPY --from=publish /app/publish .
|
||||
ENTRYPOINT ["dotnet", "TestWebApp202305.dll"]
|
|
@ -0,0 +1,26 @@
|
|||
@page
|
||||
@model ErrorModel
|
||||
@{
|
||||
ViewData["Title"] = "Error";
|
||||
}
|
||||
|
||||
<h1 class="text-danger">Error.</h1>
|
||||
<h2 class="text-danger">An error occurred while processing your request.</h2>
|
||||
|
||||
@if (Model.ShowRequestId)
|
||||
{
|
||||
<p>
|
||||
<strong>Request ID:</strong> <code>@Model.RequestId</code>
|
||||
</p>
|
||||
}
|
||||
|
||||
<h3>Development Mode</h3>
|
||||
<p>
|
||||
Swapping to the <strong>Development</strong> environment displays detailed information about the error that occurred.
|
||||
</p>
|
||||
<p>
|
||||
<strong>The Development environment shouldn't be enabled for deployed applications.</strong>
|
||||
It can result in displaying sensitive information from exceptions to end users.
|
||||
For local debugging, enable the <strong>Development</strong> environment by setting the <strong>ASPNETCORE_ENVIRONMENT</strong> environment variable to <strong>Development</strong>
|
||||
and restarting the app.
|
||||
</p>
|
|
@ -0,0 +1,27 @@
|
|||
using Microsoft.AspNetCore.Mvc;
|
||||
using Microsoft.AspNetCore.Mvc.RazorPages;
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace TestWebApp202305.Pages
|
||||
{
|
||||
[ResponseCache(Duration = 0, Location = ResponseCacheLocation.None, NoStore = true)]
|
||||
[IgnoreAntiforgeryToken]
|
||||
public class ErrorModel : PageModel
|
||||
{
|
||||
public string? RequestId { get; set; }
|
||||
|
||||
public bool ShowRequestId => !string.IsNullOrEmpty(RequestId);
|
||||
|
||||
private readonly ILogger<ErrorModel> _logger;
|
||||
|
||||
public ErrorModel(ILogger<ErrorModel> logger)
|
||||
{
|
||||
_logger = logger;
|
||||
}
|
||||
|
||||
public void OnGet()
|
||||
{
|
||||
RequestId = Activity.Current?.Id ?? HttpContext.TraceIdentifier;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,10 @@
|
|||
@page
|
||||
@model IndexModel
|
||||
@{
|
||||
ViewData["Title"] = "Home page";
|
||||
}
|
||||
|
||||
<div class="text-center">
|
||||
<h1 class="display-4">Welcome</h1>
|
||||
<p>Learn about <a href="https://docs.microsoft.com/aspnet/core">building Web apps with ASP.NET Core</a>.</p>
|
||||
</div>
|
|
@ -0,0 +1,20 @@
|
|||
using Microsoft.AspNetCore.Mvc;
|
||||
using Microsoft.AspNetCore.Mvc.RazorPages;
|
||||
|
||||
namespace TestWebApp202305.Pages
|
||||
{
|
||||
public class IndexModel : PageModel
|
||||
{
|
||||
private readonly ILogger<IndexModel> _logger;
|
||||
|
||||
public IndexModel(ILogger<IndexModel> logger)
|
||||
{
|
||||
_logger = logger;
|
||||
}
|
||||
|
||||
public void OnGet()
|
||||
{
|
||||
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
@page
|
||||
@model PrivacyModel
|
||||
@{
|
||||
ViewData["Title"] = "Privacy Policy";
|
||||
}
|
||||
<h1>@ViewData["Title"]</h1>
|
||||
|
||||
<p>Use this page to detail your site's privacy policy.</p>
|
|
@ -0,0 +1,19 @@
|
|||
using Microsoft.AspNetCore.Mvc;
|
||||
using Microsoft.AspNetCore.Mvc.RazorPages;
|
||||
|
||||
namespace TestWebApp202305.Pages
|
||||
{
|
||||
public class PrivacyModel : PageModel
|
||||
{
|
||||
private readonly ILogger<PrivacyModel> _logger;
|
||||
|
||||
public PrivacyModel(ILogger<PrivacyModel> logger)
|
||||
{
|
||||
_logger = logger;
|
||||
}
|
||||
|
||||
public void OnGet()
|
||||
{
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>@ViewData["Title"] - TestWebApp202305</title>
|
||||
<link rel="stylesheet" href="~/lib/bootstrap/dist/css/bootstrap.min.css" />
|
||||
<link rel="stylesheet" href="~/css/site.css" asp-append-version="true" />
|
||||
<link rel="stylesheet" href="~/TestWebApp202305.styles.css" asp-append-version="true" />
|
||||
</head>
|
||||
<body>
|
||||
<header>
|
||||
<nav class="navbar navbar-expand-sm navbar-toggleable-sm navbar-light bg-white border-bottom box-shadow mb-3">
|
||||
<div class="container">
|
||||
<a class="navbar-brand" asp-area="" asp-page="/Index">TestWebApp202305</a>
|
||||
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target=".navbar-collapse" aria-controls="navbarSupportedContent"
|
||||
aria-expanded="false" aria-label="Toggle navigation">
|
||||
<span class="navbar-toggler-icon"></span>
|
||||
</button>
|
||||
<div class="navbar-collapse collapse d-sm-inline-flex justify-content-between">
|
||||
<ul class="navbar-nav flex-grow-1">
|
||||
<li class="nav-item">
|
||||
<a class="nav-link text-dark" asp-area="" asp-page="/Index">Home</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link text-dark" asp-area="" asp-page="/Privacy">Privacy</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
</header>
|
||||
<div class="container">
|
||||
<main role="main" class="pb-3">
|
||||
@RenderBody()
|
||||
</main>
|
||||
</div>
|
||||
|
||||
<footer class="border-top footer text-muted">
|
||||
<div class="container">
|
||||
© 2023 - TestWebApp202305 - <a asp-area="" asp-page="/Privacy">Privacy</a>
|
||||
</div>
|
||||
</footer>
|
||||
|
||||
<script src="~/lib/jquery/dist/jquery.min.js"></script>
|
||||
<script src="~/lib/bootstrap/dist/js/bootstrap.bundle.min.js"></script>
|
||||
<script src="~/js/site.js" asp-append-version="true"></script>
|
||||
|
||||
@await RenderSectionAsync("Scripts", required: false)
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,48 @@
|
|||
/* Please see documentation at https://docs.microsoft.com/aspnet/core/client-side/bundling-and-minification
|
||||
for details on configuring this project to bundle and minify static web assets. */
|
||||
|
||||
a.navbar-brand {
|
||||
white-space: normal;
|
||||
text-align: center;
|
||||
word-break: break-all;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #0077cc;
|
||||
}
|
||||
|
||||
.btn-primary {
|
||||
color: #fff;
|
||||
background-color: #1b6ec2;
|
||||
border-color: #1861ac;
|
||||
}
|
||||
|
||||
.nav-pills .nav-link.active, .nav-pills .show > .nav-link {
|
||||
color: #fff;
|
||||
background-color: #1b6ec2;
|
||||
border-color: #1861ac;
|
||||
}
|
||||
|
||||
.border-top {
|
||||
border-top: 1px solid #e5e5e5;
|
||||
}
|
||||
.border-bottom {
|
||||
border-bottom: 1px solid #e5e5e5;
|
||||
}
|
||||
|
||||
.box-shadow {
|
||||
box-shadow: 0 .25rem .75rem rgba(0, 0, 0, .05);
|
||||
}
|
||||
|
||||
button.accept-policy {
|
||||
font-size: 1rem;
|
||||
line-height: inherit;
|
||||
}
|
||||
|
||||
.footer {
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
width: 100%;
|
||||
white-space: nowrap;
|
||||
line-height: 60px;
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
<script src="~/lib/jquery-validation/dist/jquery.validate.min.js"></script>
|
||||
<script src="~/lib/jquery-validation-unobtrusive/jquery.validate.unobtrusive.min.js"></script>
|
|
@ -0,0 +1,3 @@
|
|||
@using TestWebApp202305
|
||||
@namespace TestWebApp202305.Pages
|
||||
@addTagHelper *, Microsoft.AspNetCore.Mvc.TagHelpers
|
|
@ -0,0 +1,3 @@
|
|||
@{
|
||||
Layout = "_Layout";
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
var builder = WebApplication.CreateBuilder(args);
|
||||
|
||||
// Add services to the container.
|
||||
builder.Services.AddRazorPages();
|
||||
|
||||
var app = builder.Build();
|
||||
|
||||
// Configure the HTTP request pipeline.
|
||||
if (!app.Environment.IsDevelopment())
|
||||
{
|
||||
app.UseExceptionHandler("/Error");
|
||||
// The default HSTS value is 30 days. You may want to change this for production scenarios, see https://aka.ms/aspnetcore-hsts.
|
||||
app.UseHsts();
|
||||
}
|
||||
|
||||
app.UseHttpsRedirection();
|
||||
app.UseStaticFiles();
|
||||
|
||||
app.UseRouting();
|
||||
|
||||
app.UseAuthorization();
|
||||
|
||||
app.MapRazorPages();
|
||||
|
||||
app.Run();
|
|
@ -0,0 +1,35 @@
|
|||
{
|
||||
"profiles": {
|
||||
"TestWebApp202305": {
|
||||
"commandName": "Project",
|
||||
"launchBrowser": true,
|
||||
"environmentVariables": {
|
||||
"ASPNETCORE_ENVIRONMENT": "Development"
|
||||
},
|
||||
"dotnetRunMessages": true,
|
||||
"applicationUrl": "https://localhost:7095;http://localhost:5114"
|
||||
},
|
||||
"IIS Express": {
|
||||
"commandName": "IISExpress",
|
||||
"launchBrowser": true,
|
||||
"environmentVariables": {
|
||||
"ASPNETCORE_ENVIRONMENT": "Development"
|
||||
}
|
||||
},
|
||||
"Docker": {
|
||||
"commandName": "Docker",
|
||||
"launchBrowser": true,
|
||||
"launchUrl": "{Scheme}://{ServiceHost}:{ServicePort}",
|
||||
"publishAllPorts": true,
|
||||
"useSSL": true
|
||||
}
|
||||
},
|
||||
"iisSettings": {
|
||||
"windowsAuthentication": false,
|
||||
"anonymousAuthentication": true,
|
||||
"iisExpress": {
|
||||
"applicationUrl": "http://localhost:40936",
|
||||
"sslPort": 44350
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
<Project Sdk="Microsoft.NET.Sdk.Web">
|
||||
|
||||
<PropertyGroup>
|
||||
<TargetFramework>net6.0</TargetFramework>
|
||||
<Nullable>enable</Nullable>
|
||||
<ImplicitUsings>enable</ImplicitUsings>
|
||||
<UserSecretsId>546d15d2-b889-4cb2-8a29-5b0cea7e26a9</UserSecretsId>
|
||||
<DockerDefaultTargetOS>Linux</DockerDefaultTargetOS>
|
||||
<DockerfileContext>.</DockerfileContext>
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<PackageReference Include="Microsoft.VisualStudio.Azure.Containers.Tools.Targets" Version="1.17.2" />
|
||||
</ItemGroup>
|
||||
|
||||
</Project>
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"DetailedErrors": true,
|
||||
"Logging": {
|
||||
"LogLevel": {
|
||||
"Default": "Information",
|
||||
"Microsoft.AspNetCore": "Warning"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"Logging": {
|
||||
"LogLevel": {
|
||||
"Default": "Information",
|
||||
"Microsoft.AspNetCore": "Warning"
|
||||
}
|
||||
},
|
||||
"AllowedHosts": "*"
|
||||
}
|
|
@ -0,0 +1,18 @@
|
|||
html {
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
@media (min-width: 768px) {
|
||||
html {
|
||||
font-size: 16px;
|
||||
}
|
||||
}
|
||||
|
||||
html {
|
||||
position: relative;
|
||||
min-height: 100%;
|
||||
}
|
||||
|
||||
body {
|
||||
margin-bottom: 60px;
|
||||
}
|
Двоичный файл не отображается.
После Ширина: | Высота: | Размер: 5.3 KiB |
|
@ -0,0 +1,4 @@
|
|||
// Please see documentation at https://docs.microsoft.com/aspnet/core/client-side/bundling-and-minification
|
||||
// for details on configuring this project to bundle and minify static web assets.
|
||||
|
||||
// Write your JavaScript code.
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше
Загрузка…
Ссылка в новой задаче