From 647319f27fef964f08a108135ae7d605ccebccfa Mon Sep 17 00:00:00 2001
From: harrli
Date: Fri, 12 May 2023 13:12:02 -0700
Subject: [PATCH 1/3] Temp fix using run tag as image tag

---
 src/containerapp/azext_containerapp/_up_utils.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/src/containerapp/azext_containerapp/_up_utils.py b/src/containerapp/azext_containerapp/_up_utils.py
index f149dc7a7d0..8db18cd04bb 100644
--- a/src/containerapp/azext_containerapp/_up_utils.py
+++ b/src/containerapp/azext_containerapp/_up_utils.py
@@ -423,6 +423,21 @@ def build_container_from_source_with_buildpack(self, image_name, source):
         except Exception as ex:
             raise CLIError(f"Unable to run 'pack build' command to produce runnable application image: {ex}")
 
+        # Temporary fix: using run time tag as customer image tag
+        # Waiting for buildpacks side to fix this issue: https://github.com/buildpacks/pack/issues/1753
+        retag_command = ['docker', 'tag', image_name, f"{image_name}:{buildpack_run_tag}"]
+        logger.debug(f"Calling '{' '.join(retag_command)}'")
+        logger.warning(f"Tagging image {image_name} with tag {buildpack_run_tag}...")
+        try:
+            process = subprocess.Popen(retag_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            stdout, stderr = process.communicate()
+            if process.returncode != 0:
+                raise CLIError(f"Error thrown when running 'docker tag': {stderr.decode('utf-8')}")
+            logger.debug(f"Successfully tagged image {image_name} with tag {buildpack_run_tag}.")
+        except Exception as ex:
+            raise CLIError(f"Unable to run 'docker tag' command to tag image: {ex}")
+        image_name = f"{image_name}:{buildpack_run_tag}"
+
         # Run 'docker push' to push the image to the ACR
         command = ['docker', 'push', image_name]
         logger.debug(f"Calling '{' '.join(command)}'")
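The patch above works around the buildpacks issue by re-tagging the locally built image with the Oryx run-image tag before 'docker push' runs. The following is a minimal standalone sketch of that retag-then-push flow, assuming a hypothetical helper named retag_and_push and a plain RuntimeError in place of the extension's CLIError:

# Minimal sketch of the retag-and-push flow from PATCH 1/3, outside the CLI extension.
# The helper name and the RuntimeError are illustrative assumptions.
import subprocess


def retag_and_push(image_name: str, run_tag: str) -> str:
    """Tag a locally built image with the buildpack run tag, then push it."""
    tagged_name = f"{image_name}:{run_tag}"
    for cmd in (['docker', 'tag', image_name, tagged_name],
                ['docker', 'push', tagged_name]):
        # Capture stderr so a non-zero exit code can be reported with context.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, stderr = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError(f"'{' '.join(cmd)}' failed: {stderr.decode('utf-8')}")
    return tagged_name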
From 092d245739bd9e8c7b2fd0177bdc145de0d1f679 Mon Sep 17 00:00:00 2001
From: daniv-msft
Date: Thu, 11 May 2023 23:52:47 -0700
Subject: [PATCH 2/3] Various updates to clean up the code and slightly smooth
 the UX/help text

---
 src/containerapp/azext_containerapp/_help.py  | 37 ++++---
 .../azext_containerapp/_params.py             | 15 +--
 .../azext_containerapp/_up_utils.py           |  4 +-
 src/containerapp/azext_containerapp/_utils.py | 54 +++++-----
 .../azext_containerapp/commands.py            |  1 -
 src/containerapp/azext_containerapp/custom.py | 98 +++++++++----------
 6 files changed, 106 insertions(+), 103 deletions(-)

diff --git a/src/containerapp/azext_containerapp/_help.py b/src/containerapp/azext_containerapp/_help.py
index 21b6ac08e4a..a40f692600c 100644
--- a/src/containerapp/azext_containerapp/_help.py
+++ b/src/containerapp/azext_containerapp/_help.py
@@ -1275,41 +1275,38 @@
 # Patch commands
 helps['containerapp patch'] = """
     type: group
-    short-summary: Patch Azure Container Apps.
+    short-summary: Patch Azure Container Apps. Patching is only available for the apps built using the source to cloud feature. See https://aka.ms/aca-local-source-to-cloud
 """
 
 helps['containerapp patch list'] = """
     type: command
-    short-summary: List Container Apps to be patched. Patching is only available for the apps built using the source to cloud feature.
-    exmaples:
-      - name: List Container Apps that can be patched.
+    short-summary: List container apps that can be patched. Patching is only available for the apps built using the source to cloud feature. See https://aka.ms/aca-local-source-to-cloud
+    examples:
+      - name: List container apps that can be patched.
         text: |
-            az containerapp patch -g MyResourceGroup --environment MyContainerappEnv
-      - name: List patchable and unpatchable Container Apps.
+            az containerapp patch list -g MyResourceGroup --environment MyContainerAppEnv
+      - name: List patchable and unpatchable container apps.
         text: |
-            az containerapp patch -g MyResourceGroup --environment MyContainerappEnv --show-all
+            az containerapp patch list -g MyResourceGroup --environment MyContainerAppEnv --show-all
 """
 
 helps['containerapp patch run'] = """
     type: command
-    short-summary: List and apply Container Apps to be patched. Patching is only available for the apps built using the source to cloud feature.
-    exmaples:
-      - name: List Container Apps that can be patched and apply patch.
-        text: |
-            az containerapp patch -g MyResourceGroup --environment MyContainerappEnv
-      - name: List patchable and unpatchable Container Apps and apply patch.
+    short-summary: List and apply container apps to be patched. Patching is only available for the apps built using the source to cloud feature. See https://aka.ms/aca-local-source-to-cloud
+    examples:
+      - name: List container apps that can be patched and apply patch.
         text: |
-            az containerapp patch -g MyResourceGroup --environment MyContainerappEnv --show-all
+            az containerapp patch run -g MyResourceGroup --environment MyContainerAppEnv
 """
 
 helps['containerapp patch interactive-run'] = """
     type: command
-    short-summary: List and select Container Apps to be patched in an interactive way. Patching is only available for the apps built using the source to cloud feature.
-    exmaples:
-      - name: List Container Apps that can be patched and apply patch.
+    short-summary: List and select container apps to be patched in an interactive way. Patching is only available for the apps built using the source to cloud feature. See https://aka.ms/aca-local-source-to-cloud
+    examples:
+      - name: List container apps that can be patched and apply patch.
         text: |
-            az containerapp patch -g MyResourceGroup --environment MyContainerappEnv
-      - name: List patchable and unpatchable Container Apps and apply patch.
+            az containerapp patch interactive-run -g MyResourceGroup --environment MyContainerAppEnv
+      - name: List patchable and unpatchable container apps and apply patch to the patchable ones.
         text: |
-            az containerapp patch -g MyResourceGroup --environment MyContainerappEnv --show-all
+            az containerapp patch interactive-run -g MyResourceGroup --environment MyContainerAppEnv --show-all
 """
diff --git a/src/containerapp/azext_containerapp/_params.py b/src/containerapp/azext_containerapp/_params.py
index 83fc31eaa69..9a8b5695fe2 100644
--- a/src/containerapp/azext_containerapp/_params.py
+++ b/src/containerapp/azext_containerapp/_params.py
@@ -415,17 +415,18 @@ def load_arguments(self, _):
         c.argument('min_nodes', help="The minimum node count for the workload profile")
         c.argument('max_nodes', help="The maximum node count for the workload profile")
 
-    with self.argument_context('containerapp patch') as c:
-        c.argument('resource_group_name', arg_type=resource_group_name_type)
-        c.argument('managed_env', options_list=['--environment', '-e'], help='Name or resource id of the Container App environment.')
-        c.argument('show_all', options_list=['--show-all'], help='Show all patchable and unpatchable Container Apps')
-
+    # Patch
     with self.argument_context('containerapp patch list') as c:
         c.argument('resource_group_name', arg_type=resource_group_name_type)
         c.argument('managed_env', options_list=['--environment', '-e'], help='Name or resource id of the Container App environment.')
-        c.argument('show_all', options_list=['--show-all'], help='Show all patchable and unpatchable Container Apps')
+        c.argument('show_all', options_list=['--show-all'], help='Show all patchable and unpatchable container apps')
 
     with self.argument_context('containerapp patch run') as c:
         c.argument('resource_group_name', arg_type=resource_group_name_type)
         c.argument('managed_env', validator=validate_managed_env_name_or_id, options_list=['--environment', '-e'], help='Name or resource id of the Container App environment.')
-        c.argument('show_all', options_list=['--show-all'], help='Show all patchable and unpatchable Container Apps')
+        c.argument('show_all', options_list=['--show-all'], help='Show all patchable and unpatchable container apps')
+
+    with self.argument_context('containerapp patch interactive-run') as c:
+        c.argument('resource_group_name', arg_type=resource_group_name_type)
+        c.argument('managed_env', options_list=['--environment', '-e'], help='Name or resource id of the Container App environment.')
+        c.argument('show_all', options_list=['--show-all'], help='Show all patchable and unpatchable container apps')
diff --git a/src/containerapp/azext_containerapp/_up_utils.py b/src/containerapp/azext_containerapp/_up_utils.py
index 8db18cd04bb..7d4ce95d295 100644
--- a/src/containerapp/azext_containerapp/_up_utils.py
+++ b/src/containerapp/azext_containerapp/_up_utils.py
@@ -391,7 +391,9 @@ def build_container_from_source_with_buildpack(self, image_name, source):
         command = [pack_exec_path, 'build', image_name, '--builder', builder_image_name, '--path', source]
         buildpack_run_tag = get_latest_buildpack_run_tag("aspnet", "7.0")
         if buildpack_run_tag is not None:
-            command.extend(['--run-image', f"mcr.microsoft.com/oryx/builder:{buildpack_run_tag}"])
+            buildpack_run_image = f"mcr.microsoft.com/oryx/builder:{buildpack_run_tag}"
+            logger.debug(f"Determined the run image to use as {buildpack_run_image}.")
+            command.extend(['--run-image', buildpack_run_image])
 
         logger.debug(f"Calling '{' '.join(command)}'")
         try:
diff --git a/src/containerapp/azext_containerapp/_utils.py b/src/containerapp/azext_containerapp/_utils.py
index c0c4c504971..57a4e87d69b 100644
--- a/src/containerapp/azext_containerapp/_utils.py
+++ b/src/containerapp/azext_containerapp/_utils.py
@@ -1795,15 +1795,15 @@ def get_pack_exec_path():
     return None
 
 
-def patchable_check(repo_tag_split: str, oryx_builder_run_img_tags, bom):
+def patchable_check(repo_tag_split: str, oryx_builder_run_img_tags, inspect_result):
     tag_prop = parse_oryx_mariner_tag(repo_tag_split)
     if tag_prop is None:
         result = {
-            "targetContainerAppName": bom["targetContainerAppName"],
-            "targetContainerName": bom["targetContainerName"],
-            "targetContainerAppEnvironmentName": bom["targetContainerAppEnvironmentName"],
-            "targetResourceGroup": bom["targetResourceGroup"],
-            "targetImageName": bom["image_name"],
+            "targetContainerAppName": inspect_result["targetContainerAppName"],
+            "targetContainerName": inspect_result["targetContainerName"],
+            "targetContainerAppEnvironmentName": inspect_result["targetContainerAppEnvironmentName"],
+            "targetResourceGroup": inspect_result["targetResourceGroup"],
+            "targetImageName": inspect_result["image_name"],
             "oldRunImage": repo_tag_split,
             "newRunImage": None,
             "id": None,
@@ -1812,11 +1812,11 @@ def patchable_check(repo_tag_split: str, oryx_builder_run_img_tags, bom):
         return result
     # elif len(str(tag_prop["version"]).split(".")) == 2:
    #     result = {
-    #         "targetContainerAppName": bom["targetContainerAppName"],
-    #         "targetContainerName": bom["targetContainerName"],
-    #         "targetContainerAppEnvironmentName": bom["targetContainerAppEnvironmentName"],
-    #         "targetResourceGroup": bom["targetResourceGroup"],
-    #         "targetImageName": bom["image_name"],
+    #         "targetContainerAppName": inspect_result["targetContainerAppName"],
+    #         "targetContainerName": inspect_result["targetContainerName"],
+    #         "targetContainerAppEnvironmentName": inspect_result["targetContainerAppEnvironmentName"],
+    #         "targetResourceGroup": inspect_result["targetResourceGroup"],
+    #         "targetImageName": inspect_result["image_name"],
     #         "oldRunImage": repo_tag_split,
     #         "newRunImage": None,
     #         "id": None,
@@ -1826,14 +1826,14 @@ def patchable_check(repo_tag_split: str, oryx_builder_run_img_tags, bom):
     repo_tag_split = repo_tag_split.split("-")
     if repo_tag_split[1] == "dotnet":
         matching_version_info = oryx_builder_run_img_tags[repo_tag_split[2]][str(tag_prop["version"].major) + "." + str(tag_prop["version"].minor)][tag_prop["support"]][tag_prop["marinerVersion"]]
-    # Check if the image minor version is four less than the latest minor version
+    # Check if the image minor version is less than the latest minor version
    if tag_prop["version"] < matching_version_info[0]["version"]:
         result = {
-            "targetContainerAppName": bom["targetContainerAppName"],
-            "targetContainerName": bom["targetContainerName"],
-            "targetContainerAppEnvironmentName": bom["targetContainerAppEnvironmentName"],
-            "targetResourceGroup": bom["targetResourceGroup"],
-            "targetImageName": bom["image_name"],
+            "targetContainerAppName": inspect_result["targetContainerAppName"],
+            "targetContainerName": inspect_result["targetContainerName"],
+            "targetContainerAppEnvironmentName": inspect_result["targetContainerAppEnvironmentName"],
+            "targetResourceGroup": inspect_result["targetResourceGroup"],
+            "targetImageName": inspect_result["image_name"],
             "oldRunImage": tag_prop["fullTag"],
         }
         if (tag_prop["version"].minor == matching_version_info[0]["version"].minor) and (tag_prop["version"].micro < matching_version_info[0]["version"].micro):
@@ -1848,15 +1848,15 @@ def patchable_check(repo_tag_split: str, oryx_builder_run_img_tags, bom):
             result["reason"] = "The image is not pachable Please check for major or minor version upgrade."
     else:
         result = {
-            "targetContainerAppName": bom["targetContainerAppName"],
-            "targetContainerName": bom["targetContainerName"],
-            "targetContainerAppEnvironmentName": bom["targetContainerAppEnvironmentName"],
-            "targetResourceGroup": bom["targetResourceGroup"],
-            "targetImageName": bom["image_name"],
+            "targetContainerAppName": inspect_result["targetContainerAppName"],
+            "targetContainerName": inspect_result["targetContainerName"],
+            "targetContainerAppEnvironmentName": inspect_result["targetContainerAppEnvironmentName"],
+            "targetResourceGroup": inspect_result["targetResourceGroup"],
+            "targetImageName": inspect_result["image_name"],
             "oldRunImage": tag_prop["fullTag"],
             "newRunImage": None,
             "id": None,
-            "reason": "You're already up to date!"
+            "reason": "The image is already up to date."
         }
 
     return result
@@ -1892,6 +1892,14 @@ def get_current_mariner_tags() -> list(OryxMarinerRunImgTagProperty):
     return tag_list
 
 
+def get_latest_buildpack_run_tag(framework, version, support = "lts", mariner_version = "cbl-mariner2.0"):
+    tags = get_current_mariner_tags()
+    try:
+        return tags[framework][version][support][mariner_version][0]["fullTag"]
+    except KeyError:
+        return None
+
+
 def parse_oryx_mariner_tag(tag: str) -> OryxMarinerRunImgTagProperty:
     tag_split = tag.split("-")
     if tag_split[0] == "run" and tag_split[1] == "dotnet":
diff --git a/src/containerapp/azext_containerapp/commands.py b/src/containerapp/azext_containerapp/commands.py
index 48a43f5d492..9a68d3d9d6e 100644
--- a/src/containerapp/azext_containerapp/commands.py
+++ b/src/containerapp/azext_containerapp/commands.py
@@ -53,7 +53,6 @@ def load_command_table(self, _):
         g.custom_command('exec', 'containerapp_ssh', validator=validate_ssh)
         g.custom_command('up', 'containerapp_up', supports_no_wait=False, exception_handler=ex_handler_factory())
         g.custom_command('browse', 'open_containerapp_in_browser')
-        # g.custom_command('patch', 'patch_run_interactive', is_preview=True)
 
     with self.command_group('containerapp replica') as g:
         g.custom_show_command('show', 'get_replica')  # TODO implement the table transformer
diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py
index 8cbac644793..d6bcfa2ce53 100644
--- a/src/containerapp/azext_containerapp/custom.py
+++ b/src/containerapp/azext_containerapp/custom.py
@@ -4313,10 +4313,9 @@ def patch_list(cmd, resource_group_name=None, managed_env=None, show_all=False):
         logger.warning("Please install or start Docker and try again.")
         return
     pack_exec_path = get_pack_exec_path()
-    print("\rStarting process 1/5...", end="", flush=True)
+    print("\rListing container apps...", end="", flush=True)
     ca_list = list_containerapp(cmd, resource_group_name, managed_env)
     imgs = []
-    print("\rStarting process 2/5...", end="", flush=True)
     if ca_list:
         for ca in ca_list:
             resource_group_name = re.search('/subscriptions/[^/]+/resourceGroups/([^/]+)/', ca["id"]).group(1)
@@ -4325,69 +4324,66 @@ def patch_list(cmd, resource_group_name=None, managed_env=None, show_all=False):
             for container in containers:
                 result = dict(imageName=container["image"], targetContainerName=container["name"], targetContainerAppName=ca["name"], targetContainerAppEnvironmentName=managed_env_name, targetResourceGroup=resource_group_name)
                 imgs.append(result)
-    print("\rStarting process 3/5...", end="", flush=True)
-    # Get the BOM of the images
+    # Inspect the images
     results = []
-    boms = []
+    inspect_results = []
     # Multi-worker
-    print("\rStarting process 4/5...", end="", flush=True)
+    print("\rInspecting container apps images...", end="", flush=True)
     with ThreadPoolExecutor(max_workers=10) as executor:
-        [executor.submit(patch_get_image_inspection, pack_exec_path, img, boms) for img in imgs]
+        [executor.submit(patch_get_image_inspection, pack_exec_path, img, inspect_results) for img in imgs]
     # Get the current tags of Dotnet Mariners
     oryx_run_img_tags = get_current_mariner_tags()
-    print("\rStarting process 5/5...", end="", flush=True)
-    failed_reason = "Failed to get BOM of the image. Please check if the image exists or you have the permission to access the image."
+    failed_reason = "Failed to inspect the image. Please make sure that you are authenticated to the container registry and that the image exists."
     not_based_mariner_reason = "Image not based on Mariner"
     mcr_check_reason = "Image not from mcr.microsoft.com/oryx/builder"
     results = []
     # Start checking if the images are based on Mariner
     print("\rChecking for patches...", end="", flush=True)
-    for bom in boms:
-        if bom["remote_info"] == 401:
-            results.append(dict(targetContainerName=bom["targetContainerName"], targetContainerAppName=bom["targetContainerAppName"], targetContainerAppEnvironmentName=bom["targetContainerAppEnvironmentName"], targetResourceGroup=bom["targetResourceGroup"], targetImageName=bom["image_name"], oldRunImage=None, newRunImage=None, id=None, reason=failed_reason))
+    for inspect_result in inspect_results:
+        if inspect_result["remote_info"] == 401:
+            results.append(dict(targetContainerName=inspect_result["targetContainerName"], targetContainerAppName=inspect_result["targetContainerAppName"], targetContainerAppEnvironmentName=inspect_result["targetContainerAppEnvironmentName"], targetResourceGroup=inspect_result["targetResourceGroup"], targetImageName=inspect_result["image_name"], oldRunImage=None, newRunImage=None, id=None, reason=failed_reason))
         else:
-            # devide run-images into different parts by "/"
-            run_images_props = bom["remote_info"]["run_images"]
+            # Divide run-images into different parts by "/"
+            run_images_props = inspect_result["remote_info"]["run_images"]
             if run_images_props is None:
-                results.append(dict(targetContainerName=bom["targetContainerName"], targetContainerAppName=bom["targetContainerAppName"], targetContainerAppEnvironmentName=bom["targetContainerAppEnvironmentName"], targetResourceGroup=bom["targetResourceGroup"], targetImageName=bom["image_name"], oldRunImage=None, newRunImage=None, id=None, reason=not_based_mariner_reason))
+                results.append(dict(targetContainerName=inspect_result["targetContainerName"], targetContainerAppName=inspect_result["targetContainerAppName"], targetContainerAppEnvironmentName=inspect_result["targetContainerAppEnvironmentName"], targetResourceGroup=inspect_result["targetResourceGroup"], targetImageName=inspect_result["image_name"], oldRunImage=None, newRunImage=None, id=None, reason=not_based_mariner_reason))
             else:
                 for run_images_prop in run_images_props:
-                    # result = None
                     if run_images_prop["name"].find("mcr.microsoft.com/oryx/builder") != -1:
                         run_images_prop = run_images_prop["name"].split(":")
                         run_images_tag = run_images_prop[1]
                         # Based on Mariners
                         if run_images_tag.find('mariner') != -1:
-                            check_result = patchable_check(run_images_tag, oryx_run_img_tags, bom=bom)
+                            check_result = patchable_check(run_images_tag, oryx_run_img_tags, inspect_result=inspect_result)
                             results.append(check_result)
                         else:
-                            results.append(dict(targetContainerName=bom["targetContainerName"], targetContainerAppName=bom["targetContainerAppName"], targetContainerAppEnvironmentName=bom["targetContainerAppEnvironmentName"], targetResourceGroup=bom["targetResourceGroup"], targetImageName=bom["image_name"], oldRunImage=bom["remote_info"]["run_images"]["name"], newRunImage=None, id=None, reason=failed_reason))
+                            results.append(dict(targetContainerName=inspect_result["targetContainerName"], targetContainerAppName=inspect_result["targetContainerAppName"], targetContainerAppEnvironmentName=inspect_result["targetContainerAppEnvironmentName"], targetResourceGroup=inspect_result["targetResourceGroup"], targetImageName=inspect_result["image_name"], oldRunImage=inspect_result["remote_info"]["run_images"]["name"], newRunImage=None, id=None, reason=failed_reason))
                     else:
                         # Not based on image from mcr.microsoft.com/dotnet
-                        results.append(dict(targetContainerAppName=bom["targetContainerAppName"], targetContainerAppEnvironmentName=bom["targetContainerAppEnvironmentName"], targetResourceGroup=bom["targetResourceGroup"], oldRunImage=bom["remote_info"]["run_images"], newRunImage=None, id=None, reason=mcr_check_reason))
-    print("\r \r", end="", flush=True)
+                        results.append(dict(targetContainerAppName=inspect_result["targetContainerAppName"], targetContainerAppEnvironmentName=inspect_result["targetContainerAppEnvironmentName"], targetResourceGroup=inspect_result["targetResourceGroup"], oldRunImage=inspect_result["remote_info"]["run_images"], newRunImage=None, id=None, reason=mcr_check_reason))
+    # Make sure that we clear the first line before showing the new output
+    print("\r \r", end="", flush=True)
     if show_all is False:
-        print("Use --show-all to show all the patchable and unpatchable images.")
         results = [result for result in results if result["id"] is not None]
     if not results:
-        print("No Container App available to patch at this time.")
+        print("No container apps available to patch at this time. Use --show-all to show the container apps that cannot be patched.")
         return
 
     return results
 
 
 def patch_get_image_inspection(pack_exec_path, img, info_list):
     if (img["imageName"].find("run-dotnet") != -1) and (img["imageName"].find("cbl-mariner") != -1):
-        bom = {"remote_info": {"run_images": [{"name": "mcr.microsoft.com/oryx/builder:" + img["imageName"].split(":")[-1]}]}, "image_name": img["imageName"], "targetContainerName": img["targetContainerName"], "targetContainerAppName": img["targetContainerAppName"], "targetContainerAppEnvironmentName": img["targetContainerAppEnvironmentName"], "targetResourceGroup": img["targetResourceGroup"]}
+        inspect_result = {"remote_info": {"run_images": [{"name": "mcr.microsoft.com/oryx/builder:" + img["imageName"].split(":")[-1]}]}, "image_name": img["imageName"], "targetContainerName": img["targetContainerName"], "targetContainerAppName": img["targetContainerAppName"], "targetContainerAppEnvironmentName": img["targetContainerAppEnvironmentName"], "targetResourceGroup": img["targetResourceGroup"]}
     else:
         img_info = subprocess.Popen(pack_exec_path + " inspect-image " + img["imageName"] + " --output json", shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
         img_info_out, img_info_err = img_info.communicate()
         if img_info_err.find(b"status code 401 Unauthorized") != -1 or img_info_err.find(b"unable to find image") != -1:
-            bom = dict(remote_info=401, image_name=img["imageName"])
+            inspect_result = dict(remote_info=401, image_name=img["imageName"])
         else:
-            bom = json.loads(img_info_out)
-    bom.update({"targetContainerName": img["targetContainerName"], "targetContainerAppName": img["targetContainerAppName"], "targetContainerAppEnvironmentName": img["targetContainerAppEnvironmentName"], "targetResourceGroup": img["targetResourceGroup"]})
-    info_list.append(bom)
+            inspect_result = json.loads(img_info_out)
+    inspect_result.update({"targetContainerName": img["targetContainerName"], "targetContainerAppName": img["targetContainerAppName"], "targetContainerAppEnvironmentName": img["targetContainerAppEnvironmentName"], "targetResourceGroup": img["targetResourceGroup"]})
+    info_list.append(inspect_result)
 
 
 def patch_run_interactive(cmd, resource_group_name=None, managed_env=None, show_all=False):
@@ -4398,7 +4394,7 @@ def patch_run_interactive(cmd, resource_group_name=None, managed_env=None, show_
     pack_exec_path = get_pack_exec_path()
     if patchable_check_results is None:
         return
-    patchable_check_results_json = json.dumps(patchable_check_results, indent=4)
+    patchable_check_results_json = json.dumps(patchable_check_results, indent=2)
     without_unpatchable_results = []
     without_unpatchable_results = [result for result in patchable_check_results if result["id"] is not None]
     if without_unpatchable_results == [] and (patchable_check_results is None or show_all is False):
@@ -4406,8 +4402,8 @@ def patch_run_interactive(cmd, resource_group_name=None, managed_env=None, show_
     print(patchable_check_results_json)
     if without_unpatchable_results == []:
         return
-    user_input = input("Do you want to apply all the patch or specify by id? (y/n/id)\n")
-    return patch_apply(cmd, patchable_check_results, user_input, pack_exec_path)
+    user_input = input("Do you want to apply all the patches or specify by id? (y/n/id)\n")
+    patch_apply(cmd, patchable_check_results, user_input, pack_exec_path)
 
 
 def patch_run(cmd, resource_group_name=None, managed_env=None, show_all=False):
@@ -4418,7 +4414,7 @@ def patch_run(cmd, resource_group_name=None, managed_env=None, show_all=False):
     pack_exec_path = get_pack_exec_path()
     if patchable_check_results is None:
         return
-    patchable_check_results_json = json.dumps(patchable_check_results, indent=4)
+    patchable_check_results_json = json.dumps(patchable_check_results, indent=2)
     without_unpatchable_results = []
     without_unpatchable_results = [result for result in patchable_check_results if result["id"] is not None]
     if without_unpatchable_results == [] and (patchable_check_results is None or show_all is False):
@@ -4426,11 +4422,10 @@ def patch_run(cmd, resource_group_name=None, managed_env=None, show_all=False):
     print(patchable_check_results_json)
     if without_unpatchable_results == []:
         return
-    return patch_apply(cmd, patchable_check_results, "y", pack_exec_path)
+    patch_apply(cmd, patchable_check_results, "y", pack_exec_path)
 
 
 def patch_apply(cmd, patch_check_list, method, pack_exec_path):
-    results = []
     m = method.strip().lower()
     # Track number of times patches were applied successfully.
     patch_run_count = 0
@@ -4438,13 +4433,13 @@ def patch_apply(cmd, patch_check_list, method, pack_exec_path):
         for patch_check in patch_check_list:
             if patch_check["id"]:
                 if patch_check["newRunImage"]:
-                    results.append(patch_cli_call(cmd,
-                                                  patch_check["targetResourceGroup"],
-                                                  patch_check["targetContainerAppName"],
-                                                  patch_check["targetContainerName"],
-                                                  patch_check["targetImageName"],
-                                                  patch_check["newRunImage"],
-                                                  pack_exec_path))
+                    patch_cli_call(cmd,
+                                   patch_check["targetResourceGroup"],
+                                   patch_check["targetContainerAppName"],
+                                   patch_check["targetContainerName"],
+                                   patch_check["targetImageName"],
+                                   patch_check["newRunImage"],
+                                   pack_exec_path)
                     # Increment patch_run_count with every successful patch.
                     patch_run_count+=1
         elif m == "n":
@@ -4454,13 +4449,13 @@ def patch_apply(cmd, patch_check_list, method, pack_exec_path):
         # Check if method is an existing id in the list
         for patch_check in patch_check_list:
             if patch_check["id"] == method:
-                results.append(patch_cli_call(cmd,
-                                              patch_check["targetResourceGroup"],
-                                              patch_check["targetContainerAppName"],
-                                              patch_check["targetContainerName"],
-                                              patch_check["targetImageName"],
-                                              patch_check["newRunImage"],
-                                              pack_exec_path))
+                patch_cli_call(cmd,
+                               patch_check["targetResourceGroup"],
+                               patch_check["targetContainerAppName"],
+                               patch_check["targetContainerName"],
+                               patch_check["targetImageName"],
+                               patch_check["newRunImage"],
+                               pack_exec_path)
                 patch_run_properties = {
                     'Context.Default.AzureCLI.PatchRunUserResponse':method,
                     'Context.Default.AzureCLI.PatchRunCount':1
@@ -4474,7 +4469,7 @@ def patch_apply(cmd, patch_check_list, method, pack_exec_path):
         'Context.Default.AzureCLI.PatchRunCount':patch_run_count
     }
     telemetry_core.add_extension_event('containerapp', patch_run_properties)
-    return results
+    return
 
 
 def patch_cli_call(cmd, resource_group, container_app_name, container_name, target_image_name, new_run_image, pack_exec_path):
@@ -4491,14 +4486,15 @@ def patch_cli_call(cmd, resource_group, container_app_name, container_name, targ
         raise
     try:
         print("Patching container app: " + container_app_name + " container: " + container_name)
-        print("Applying new image: " + new_target_image_name)
+        print("Creating new revision with image: " + new_target_image_name)
         update_info_json = update_containerapp(cmd,
                                                name=container_app_name,
                                                resource_group_name=resource_group,
                                                container_name=container_name,
                                                image=new_target_image_name)
-        print("Container app revision created successfully.")
-        return update_info_json
+        print(json.dumps(update_info_json, indent=2))
+        print("Container app revision created successfully from the patched image.")
+        return
     except Exception:
         print("Error: Failed to create new revision with the container app.")
         raise
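Patch 2/3 above adds get_latest_buildpack_run_tag, which walks the nested tag map returned by get_current_mariner_tags (framework -> version -> support level -> Mariner version) and returns None when any level is missing. The following is a minimal sketch of that lookup against hand-built data; the latest_run_tag name and the sample tag values are invented for illustration:

# Sketch of the nested-dictionary lookup used by get_latest_buildpack_run_tag.
# The sample data below is made up; real tags come from the published MCR tag list.
def latest_run_tag(tags, framework, version, support="lts", mariner_version="cbl-mariner2.0"):
    try:
        # Entries are assumed newest-first; return the full tag of the newest one.
        return tags[framework][version][support][mariner_version][0]["fullTag"]
    except KeyError:
        # Any missing level means no suitable run image is known.
        return None


sample_tags = {
    "aspnet": {
        "7.0": {
            "lts": {
                "cbl-mariner2.0": [{"fullTag": "run-dotnet-aspnet-7.0.5-cbl-mariner2.0"}]
            }
        }
    }
}

print(latest_run_tag(sample_tags, "aspnet", "7.0"))      # run-dotnet-aspnet-7.0.5-cbl-mariner2.0
print(latest_run_tag(sample_tags, "dotnetcore", "6.0"))  # None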
From c7f63ce1bfda96fc1c2b0495eaa4c48401ae540a Mon Sep 17 00:00:00 2001
From: harrli
Date: Fri, 12 May 2023 14:09:37 -0700
Subject: [PATCH 3/3] Fixed image name matching bug

---
 .../azext_containerapp/_up_utils.py | 46 +++++++++++--------
 1 file changed, 26 insertions(+), 20 deletions(-)

diff --git a/src/containerapp/azext_containerapp/_up_utils.py b/src/containerapp/azext_containerapp/_up_utils.py
index 7d4ce95d295..8228d458076 100644
--- a/src/containerapp/azext_containerapp/_up_utils.py
+++ b/src/containerapp/azext_containerapp/_up_utils.py
@@ -425,21 +425,6 @@ def build_container_from_source_with_buildpack(self, image_name, source):
         except Exception as ex:
             raise CLIError(f"Unable to run 'pack build' command to produce runnable application image: {ex}")
 
-        # Temporary fix: using run time tag as customer image tag
-        # Waiting for buildpacks side to fix this issue: https://github.com/buildpacks/pack/issues/1753
-        retag_command = ['docker', 'tag', image_name, f"{image_name}:{buildpack_run_tag}"]
-        logger.debug(f"Calling '{' '.join(retag_command)}'")
-        logger.warning(f"Tagging image {image_name} with tag {buildpack_run_tag}...")
-        try:
-            process = subprocess.Popen(retag_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-            stdout, stderr = process.communicate()
-            if process.returncode != 0:
-                raise CLIError(f"Error thrown when running 'docker tag': {stderr.decode('utf-8')}")
-            logger.debug(f"Successfully tagged image {image_name} with tag {buildpack_run_tag}.")
-        except Exception as ex:
-            raise CLIError(f"Unable to run 'docker tag' command to tag image: {ex}")
-        image_name = f"{image_name}:{buildpack_run_tag}"
-
         # Run 'docker push' to push the image to the ACR
         command = ['docker', 'push', image_name]
         logger.debug(f"Calling '{' '.join(command)}'")
@@ -502,11 +487,12 @@ def run_acr_build(self, dockerfile, source, quiet=False, build_from_source=False
         image_name = self.image if self.image is not None else self.name
 
         from datetime import datetime
-        now = datetime.now()
-        # Add version tag for acr image
-        image_name += ":{}".format(
-            str(now).replace(" ", "").replace("-", "").replace(".", "").replace(":", "")
-        )
+        # Moving this to skip the buildpacks scenario for now due to issues with buildpacks
+        # now = datetime.now()
+        # # Add version tag for acr image
+        # image_name += ":{}".format(
+        #     str(now).replace(" ", "").replace("-", "").replace(".", "").replace(":", "")
+        # )
 
         self.image = self.registry_server + "/" + image_name
@@ -516,8 +502,14 @@ def run_acr_build(self, dockerfile, source, quiet=False, build_from_source=False
             try:
                 # First try to build source using buildpacks
+                # Temporary fix: using run time tag as customer image tag
+                # Waiting for buildpacks side to fix this issue: https://github.com/buildpacks/pack/issues/1753
                 logger.warning("Attempting to build image using buildpacks...")
+                run_image_tag = get_latest_buildpack_run_tag("aspnet", "7.0")
+                if run_image_tag is not None:
+                    image_name = f"{image_name}:{run_image_tag}"
                 self.build_container_from_source_with_buildpack(image_name, source)
+                self.image = self.registry_server + "/" + image_name
                 return
             except ValidationError as e:
                 logger.warning(f"Unable to use buildpacks to build image from source: {e}\nFalling back to ACR Task...")
@@ -526,9 +518,23 @@ def run_acr_build(self, dockerfile, source, quiet=False, build_from_source=False
                 raise e
 
             # If we're unable to use the buildpack, build source using an ACR Task
+            # Moving tagging img to here
+            # Skipping the buildpacks scenario for now due to issues with buildpacks
+            now = datetime.now()
+            # Add version tag for acr image
+            image_name += ":{}".format(
+                str(now).replace(" ", "").replace("-", "").replace(".", "").replace(":", "")
+            )
             logger.warning("Attempting to build image using ACR Task...")
             self.build_container_from_source_with_acr_task(image_name, source)
         else:
+            # Moving tagging img to here
+            # Skipping the buildpacks scenario for now due to issues with buildpacks
+            now = datetime.now()
+            # Add version tag for acr image
+            image_name += ":{}".format(
+                str(now).replace(" ", "").replace("-", "").replace(".", "").replace(":", "")
+            )
             queue_acr_build(
                 self.cmd,
                 self.acr.resource_group.name,
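Patch 3/3 above moves image tagging out of build_container_from_source_with_buildpack and into run_acr_build: the buildpacks path now appends the Oryx run-image tag to the image name before building, while the ACR Task and queue_acr_build fallbacks keep the timestamp-based tag. Below is a condensed sketch of that tag selection; choose_image_tag and its arguments are assumptions made for clarity rather than code from the extension:

# Sketch of the image-tag selection that PATCH 3/3 spreads across run_acr_build.
# choose_image_tag is a hypothetical helper; the extension inlines this logic.
from datetime import datetime


def choose_image_tag(image_name, run_image_tag, use_buildpacks):
    """Pick the tag the pushed image will carry."""
    if use_buildpacks and run_image_tag is not None:
        # Buildpacks path: reuse the run-image tag, so the retag workaround from
        # PATCH 1/3 is no longer needed inside build_container_from_source_with_buildpack.
        return f"{image_name}:{run_image_tag}"
    # ACR Task / queued-build path: fall back to a timestamp-based version tag.
    timestamp = str(datetime.now())
    for ch in (" ", "-", ".", ":"):
        timestamp = timestamp.replace(ch, "")
    return f"{image_name}:{timestamp}"


print(choose_image_tag("myapp", "run-dotnet-aspnet-7.0.5-cbl-mariner2.0", True))
print(choose_image_tag("myapp", None, False))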