From d2819c17a5727a5c31adaad952d34cfade6d98e4 Mon Sep 17 00:00:00 2001 From: jtbates Date: Wed, 26 Jan 2022 22:56:34 +0000 Subject: [PATCH 1/7] Updated copy.bara.sky instructions to reference dry-run flag. PiperOrigin-RevId: 424450284 --- dsub/_dsub_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsub/_dsub_version.py b/dsub/_dsub_version.py index aa82174..da7bf0f 100644 --- a/dsub/_dsub_version.py +++ b/dsub/_dsub_version.py @@ -26,4 +26,4 @@ 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ -DSUB_VERSION = '0.4.6' \ No newline at end of file +DSUB_VERSION = '0.4.6' From 03b2b9859e235cea190842a3bf982e26ee9200cf Mon Sep 17 00:00:00 2001 From: jtbates Date: Thu, 27 Jan 2022 00:58:57 +0000 Subject: [PATCH 2/7] Updated version to 0.4.7.dev0. PiperOrigin-RevId: 424475008 --- dsub/_dsub_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsub/_dsub_version.py b/dsub/_dsub_version.py index da7bf0f..41fd12f 100644 --- a/dsub/_dsub_version.py +++ b/dsub/_dsub_version.py @@ -26,4 +26,4 @@ 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ -DSUB_VERSION = '0.4.6' +DSUB_VERSION = '0.4.7.dev0' From 2c6402e220e1bd0cc559fdbc37fd562cb73ae696 Mon Sep 17 00:00:00 2001 From: Googler Date: Thu, 14 Apr 2022 23:29:09 +0000 Subject: [PATCH 3/7] Remove Lint as gpylint comments. 
PiperOrigin-RevId: 441883338 --- dsub/commands/ddel.py | 1 - dsub/commands/dstat.py | 1 - dsub/commands/dsub.py | 1 - dsub/lib/dsub_errors.py | 1 - dsub/lib/dsub_util.py | 1 - dsub/lib/job_model.py | 1 - dsub/lib/output_formatter.py | 1 - dsub/lib/param_util.py | 1 - dsub/lib/providers_util.py | 1 - dsub/lib/resources.py | 1 - dsub/lib/retry_util.py | 1 - dsub/providers/base.py | 1 - dsub/providers/google_base.py | 1 - dsub/providers/google_cls_v2.py | 1 - dsub/providers/google_v2.py | 1 - dsub/providers/google_v2_base.py | 1 - dsub/providers/google_v2_operations.py | 1 - dsub/providers/google_v2_pipelines.py | 1 - dsub/providers/google_v2_versions.py | 1 - dsub/providers/local.py | 1 - dsub/providers/provider_base.py | 1 - dsub/providers/stub.py | 1 - dsub/providers/test_fails.py | 1 - 23 files changed, 23 deletions(-) diff --git a/dsub/commands/ddel.py b/dsub/commands/ddel.py index 77bc2df..1429c97 100755 --- a/dsub/commands/ddel.py +++ b/dsub/commands/ddel.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/commands/dstat.py b/dsub/commands/dstat.py index 4a24821..f3b2dc0 100755 --- a/dsub/commands/dstat.py +++ b/dsub/commands/dstat.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/commands/dsub.py b/dsub/commands/dsub.py index 005054b..e6a8c19 100644 --- a/dsub/commands/dsub.py +++ b/dsub/commands/dsub.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/lib/dsub_errors.py b/dsub/lib/dsub_errors.py index db95a08..2060942 100644 --- a/dsub/lib/dsub_errors.py +++ b/dsub/lib/dsub_errors.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2017 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/lib/dsub_util.py b/dsub/lib/dsub_util.py index e724c5d..e107a2f 100644 --- a/dsub/lib/dsub_util.py +++ b/dsub/lib/dsub_util.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/lib/job_model.py b/dsub/lib/job_model.py index 1438e51..e4033fa 100644 --- a/dsub/lib/job_model.py +++ b/dsub/lib/job_model.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/lib/output_formatter.py b/dsub/lib/output_formatter.py index 80522a6..45805ae 100644 --- a/dsub/lib/output_formatter.py +++ b/dsub/lib/output_formatter.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2019 Verily Life Sciences Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/lib/param_util.py b/dsub/lib/param_util.py index 8f74561..35b7091 100644 --- a/dsub/lib/param_util.py +++ b/dsub/lib/param_util.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/lib/providers_util.py b/dsub/lib/providers_util.py index a678d1b..320043b 100644 --- a/dsub/lib/providers_util.py +++ b/dsub/lib/providers_util.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/lib/resources.py b/dsub/lib/resources.py index 4882b14..8198500 100644 --- a/dsub/lib/resources.py +++ b/dsub/lib/resources.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2017 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/lib/retry_util.py b/dsub/lib/retry_util.py index 70baaa7..2c70b7b 100644 --- a/dsub/lib/retry_util.py +++ b/dsub/lib/retry_util.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 Verily Life Sciences Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/base.py b/dsub/providers/base.py index e4872ea..1cf783e 100644 --- a/dsub/providers/base.py +++ b/dsub/providers/base.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/google_base.py b/dsub/providers/google_base.py index f2543da..8c9aba9 100644 --- a/dsub/providers/google_base.py +++ b/dsub/providers/google_base.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2018 Verily Life Sciences Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/google_cls_v2.py b/dsub/providers/google_cls_v2.py index 25e321d..07805ff 100644 --- a/dsub/providers/google_cls_v2.py +++ b/dsub/providers/google_cls_v2.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2019 Verily Life Sciences Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/google_v2.py b/dsub/providers/google_v2.py index bda2984..2a20f7e 100644 --- a/dsub/providers/google_v2.py +++ b/dsub/providers/google_v2.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2019 Verily Life Sciences Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/google_v2_base.py b/dsub/providers/google_v2_base.py index b56fb86..7441a35 100644 --- a/dsub/providers/google_v2_base.py +++ b/dsub/providers/google_v2_base.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2018 Verily Life Sciences Inc. 
All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/google_v2_operations.py b/dsub/providers/google_v2_operations.py index a001e22..2fdacc1 100644 --- a/dsub/providers/google_v2_operations.py +++ b/dsub/providers/google_v2_operations.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2018 Verily Life Sciences Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/google_v2_pipelines.py b/dsub/providers/google_v2_pipelines.py index c34bbcf..d594a0b 100644 --- a/dsub/providers/google_v2_pipelines.py +++ b/dsub/providers/google_v2_pipelines.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2018 Verily Life Sciences Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/google_v2_versions.py b/dsub/providers/google_v2_versions.py index d0da42e..3f08626 100644 --- a/dsub/providers/google_v2_versions.py +++ b/dsub/providers/google_v2_versions.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 Verily Life Sciences Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/local.py b/dsub/providers/local.py index 724b402..a393a30 100644 --- a/dsub/providers/local.py +++ b/dsub/providers/local.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/provider_base.py b/dsub/providers/provider_base.py index 7d791c8..e62ef1d 100644 --- a/dsub/providers/provider_base.py +++ b/dsub/providers/provider_base.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2016 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/stub.py b/dsub/providers/stub.py index e8eb7d4..3e7e680 100644 --- a/dsub/providers/stub.py +++ b/dsub/providers/stub.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/dsub/providers/test_fails.py b/dsub/providers/test_fails.py index 30b1c5a..88c31c5 100644 --- a/dsub/providers/test_fails.py +++ b/dsub/providers/test_fails.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); From 2d0b808def65bc6100e4da81d9f82e241bbfb8c9 Mon Sep 17 00:00:00 2001 From: mbookman Date: Thu, 12 May 2022 17:18:58 +0000 Subject: [PATCH 4/7] google-v2 and google-cls-v2 providers: - Add support for mounting an existing disk read-only to a pipeline VM - Migrate to using "volumes" in pipelines requests instead of "disks" PiperOrigin-RevId: 448280287 --- README.md | 67 ++++++--- dsub/lib/job_model.py | 8 ++ dsub/lib/param_util.py | 48 ++++++- dsub/providers/google_v2_base.py | 86 ++++++++--- dsub/providers/google_v2_pipelines.py | 23 ++- .../e2e_io_mount_existing_disk.google-v2.sh | 46 ++++++ test/integration/io_setup.sh | 74 +++++++++- test/integration/unit_flags.google-v2.sh | 135 +++++++++++++++++- 8 files changed, 436 insertions(+), 51 deletions(-) create mode 100755 test/integration/e2e_io_mount_existing_disk.google-v2.sh diff --git a/README.md b/README.md index 21584ce..0d5c290 100644 --- a/README.md +++ b/README.md @@ -423,57 +423,88 @@ specified and they can be specified in any order. #### Mounting "resource data" -If you have one of the following: +While explicitly specifying inputs improves tracking provenance of your data, +there are cases where you might not want to expliclty localize all inputs +from Cloud Storage to your job VM. -1. 
A large set of resource files, your code only reads a subset of those files, -and the decision of which files to read is determined at runtime, or -2. A large input file over which your code makes a single read pass or only -needs to read a small range of bytes, +For example, if you have: -then you may find it more efficient at runtime to access this resource data via -mounting a Google Cloud Storage bucket read-only or mounting a persistent disk -created from a -[Compute Engine Image](https://cloud.google.com/compute/docs/images) read-only. +- a large set of resource files +- your code only reads a subset of those files +- runtime decisions of which files to read -The `google-v2` and `google-cls-v2` providers support these two methods of providing access to -resource data. The `local` provider supports mounting a local directory in a -similar fashion to support your local development. +OR + +- a large input file over which your code makes a single read pass + +OR + +- a large input file that your code does not read in its entirety + +then you may find it more efficient or convenient to access this data by +mounting read-only: + +- a Google Cloud Storage bucket +- a persistent disk that you pre-create and populate +- a persistent disk that gets created from a +[Compute Engine Image](https://cloud.google.com/compute/docs/images) that you +pre-create. + +The `google-v2` and `google-cls-v2` providers support these methods of +providing access to resource data. + +The `local` provider supports mounting a +local directory in a similar fashion to support your local development. 
+ +##### Mounting a Google Cloud Storage bucket To have the `google-v2` or `google-cls-v2` provider mount a Cloud Storage bucket using Cloud Storage FUSE, use the `--mount` command line flag: - --mount MYBUCKET=gs://mybucket + --mount RESOURCES=gs://mybucket The bucket will be mounted into the Docker container running your `--script` or `--command` and the location made available via the environment variable -`${MYBUCKET}`. Inside your script, you can reference the mounted path using the +`${RESOURCES}`. Inside your script, you can reference the mounted path using the environment variable. Please read [Key differences from a POSIX file system](https://cloud.google.com/storage/docs/gcs-fuse#notes) and [Semantics](https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/docs/semantics.md) before using Cloud Storage FUSE. +##### Mounting an existing peristent disk + +To have the `google-v2` or `google-cls-v2` provider mount a persistent disk that +you have pre-created and populated, use the `--mount` command line flag and the +url of the source disk: + + --mount RESOURCES="https://www.googleapis.com/compute/v1/projects/your-project/global/images/your-image 50" + +##### Mounting a persistent disk, created from an image + To have the `google-v2` or `google-cls-v2` provider mount a persistent disk created from an image, use the `--mount` command line flag and the url of the source image and the size (in GB) of the disk: - --mount MYDISK="https://www.googleapis.com/compute/v1/projects/your-project/global/images/your-image 50" + --mount RESOURCES="https://www.googleapis.com/compute/v1/projects/your-project/global/images/your-image 50" The image will be used to create a new persistent disk, which will be attached to a Compute Engine VM. The disk will mounted into the Docker container running your `--script` or `--command` and the location made available by the -environment variable `${MYDISK}`. 
Inside your script, you can reference the +environment variable `${RESOURCES}`. Inside your script, you can reference the mounted path using the environment variable. To create an image, see [Creating a custom image](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images). +##### Mounting a local directory (`local` provider) + To have the `local` provider mount a directory read-only, use the `--mount` command line flag and a `file://` prefix: - --mount LOCAL_MOUNT=file://path/to/my/dir + --mount RESOURCES=file://path/to/my/dir The local directory will be mounted into the Docker container running your `--script`or `--command` and the location made available via the environment -variable `${LOCAL_MOUNT}`. Inside your script, you can reference the mounted +variable `${RESOURCES}`. Inside your script, you can reference the mounted path using the environment variable. ### Setting resource requirements diff --git a/dsub/lib/job_model.py b/dsub/lib/job_model.py index e4033fa..5e3cc0e 100644 --- a/dsub/lib/job_model.py +++ b/dsub/lib/job_model.py @@ -398,6 +398,14 @@ def __new__(cls, name, value, docker_path, disk_size, disk_type): cls, name, value, docker_path, disk_size=disk_size, disk_type=disk_type) +class ExistingDiskMountParam(MountParam): + """A MountParam representing an existing Google Persistent Disk.""" + + def __new__(cls, name, value, docker_path): + return super(ExistingDiskMountParam, cls).__new__(cls, name, value, + docker_path) + + class LocalMountParam(MountParam): """A MountParam representing a path on the local machine.""" diff --git a/dsub/lib/param_util.py b/dsub/lib/param_util.py index 35b7091..c0728f1 100644 --- a/dsub/lib/param_util.py +++ b/dsub/lib/param_util.py @@ -236,8 +236,32 @@ class MountParamUtil(object): def __init__(self, docker_path): self._relative_path = docker_path - def _parse_image_uri(self, raw_uri): - """Return a valid docker_path from a Google Persistent Disk url.""" + def _is_gce_disk_uri(self, 
raw_uri): + """Returns true if we can parse the URI as a GCE disk path.""" + + # Full disk URI should look something like: + # https://www.googleapis.com/compute//projects//regions//disks/ + # https://www.googleapis.com/compute//projects//zones//disks/ + # + # This function only returns True if we were able to recognize the path + # as clearly for a GCE disk. This is different than the Image path parsing + # in "make_param" below, which was made very forgiving. + + if raw_uri.startswith('https://www.googleapis.com/compute'): + parts = raw_uri.split('/') + + # Parts will look something like + # ['https:', '', 'www.googleapis.com', 'compute', '', 'projects', + # '', '[regions/zones]', '', 'disks', ''] + # + return ((parts[0] == 'https:') and (not parts[1]) and + (parts[2] == 'www.googleapis.com') and (parts[3] == 'compute') and + (parts[5] == 'projects') and (parts[9] == 'disks')) + + return False + + def _gce_uri_to_docker_uri(self, raw_uri): + """Return a valid docker_path from a GCE disk or image url.""" # The string replace is so we don't have colons and double slashes in the # mount path. The idea is the resulting mount path would look like: # /mnt/data/mount/http/www.googleapis.com/compute/v1/projects/... @@ -262,13 +286,22 @@ def _parse_gcs_uri(self, raw_uri): return docker_uri def make_param(self, name, raw_uri, disk_size): - """Return a MountParam given a GCS bucket, disk image or local path.""" - if raw_uri.startswith('https://www.googleapis.com/compute'): + """Return a MountParam given a GCS bucket, disk uri, image uri or local path.""" + + if self._is_gce_disk_uri(raw_uri): + docker_path = self._gce_uri_to_docker_uri(raw_uri) + return job_model.ExistingDiskMountParam(name, raw_uri, docker_path) + elif raw_uri.startswith('https://www.googleapis.com/compute'): + # In retrospect, this function should have been more precise to only + # treat a raw_uri as being for an "Image" if the path followed a known + # format. 
Just checking for the googleapis.com/compute prefix is too + # forgiving. + # Full Image URI should look something like: # https://www.googleapis.com/compute/v1/projects//global/images/ # But don't validate further, should the form of a valid image URI # change (v1->v2, for example) - docker_path = self._parse_image_uri(raw_uri) + docker_path = self._gce_uri_to_docker_uri(raw_uri) return job_model.PersistentDiskMountParam( name, raw_uri, docker_path, disk_size, disk_type=None) elif raw_uri.startswith('file://'): @@ -373,6 +406,11 @@ def get_persistent_disk_mounts(mounts): return _get_filtered_mounts(mounts, job_model.PersistentDiskMountParam) +def get_existing_disk_mounts(mounts): + """Returns the existing disk mounts from mounts.""" + return _get_filtered_mounts(mounts, job_model.ExistingDiskMountParam) + + def get_local_mounts(mounts): """Returns the local mounts from mounts.""" return _get_filtered_mounts(mounts, job_model.LocalMountParam) diff --git a/dsub/providers/google_v2_base.py b/dsub/providers/google_v2_base.py index 7441a35..01f5a47 100644 --- a/dsub/providers/google_v2_base.py +++ b/dsub/providers/google_v2_base.py @@ -683,19 +683,21 @@ def _build_pipeline_request(self, task_view): gcs_mounts = param_util.get_gcs_mounts(mounts) persistent_disk_mount_params = param_util.get_persistent_disk_mounts(mounts) + existing_disk_mount_params = param_util.get_existing_disk_mounts(mounts) # pylint: disable=g-complex-comprehension persistent_disks = [ - google_v2_pipelines.build_disk( - name=disk.name.replace('_', '-'), # Underscores not allowed - size_gb=disk.disk_size or job_model.DEFAULT_MOUNTED_DISK_SIZE, - source_image=disk.value, - disk_type=disk.disk_type or job_model.DEFAULT_DISK_TYPE) + google_v2_pipelines.build_volume_persistent_disk( + volume=disk.name.replace('_', '-'), # Underscores not allowed + disk=google_v2_pipelines.build_persistent_disk( + size_gb=disk.disk_size or job_model.DEFAULT_MOUNTED_DISK_SIZE, + source_image=disk.value, + 
disk_type=disk.disk_type or job_model.DEFAULT_DISK_TYPE)) for disk in persistent_disk_mount_params ] persistent_disk_mounts = [ google_v2_pipelines.build_mount( - disk=persistent_disk.get('name'), + disk=persistent_disk.get('volume'), path=os.path.join(providers_util.DATA_MOUNT_POINT, persistent_disk_mount_param.docker_path), read_only=True) @@ -704,6 +706,23 @@ def _build_pipeline_request(self, task_view): ] # pylint: enable=g-complex-comprehension + # pylint: disable=g-complex-comprehension + existing_disks = [ + google_v2_pipelines.build_volume_existing_disk( + volume=disk.name.replace('_', '-'), # Underscores not allowed + disk=google_v2_pipelines.build_existing_disk(disk=disk.value)) + for disk in existing_disk_mount_params + ] + existing_disk_mounts = [ + google_v2_pipelines.build_mount( + disk=existing_disk.get('volume'), + path=os.path.join(providers_util.DATA_MOUNT_POINT, + existing_disk_mount_param.docker_path), + read_only=True) for existing_disk, existing_disk_mount_param in zip( + existing_disks, existing_disk_mount_params) + ] + # pylint: enable=g-complex-comprehension + # The list of "actions" (1-based) will be: # 1- continuous copy of log files off to Cloud Storage # 2- prepare the shared mount point (write the user script) @@ -825,7 +844,8 @@ def _build_pipeline_request(self, task_view): pid_namespace=pid_namespace, block_external_network=job_resources.block_external_network, image_uri=job_resources.image, - mounts=[mnt_datadisk] + persistent_disk_mounts, + mounts=[mnt_datadisk] + persistent_disk_mounts + + existing_disk_mounts, environment=user_environment, entrypoint='/usr/bin/env', commands=[ @@ -864,14 +884,18 @@ def _build_pipeline_request(self, task_view): assert len(actions) == final_logging_action # Prepare the VM (resources) configuration - disks = [ - google_v2_pipelines.build_disk( - _DATA_DISK_NAME, - job_resources.disk_size, - source_image=None, - disk_type=job_resources.disk_type or job_model.DEFAULT_DISK_TYPE) + volumes = [ + 
google_v2_pipelines.build_volume_persistent_disk( + volume=_DATA_DISK_NAME, + disk=google_v2_pipelines.build_persistent_disk( + job_resources.disk_size, + source_image=None, + disk_type=job_resources.disk_type or + job_model.DEFAULT_DISK_TYPE)) ] - disks.extend(persistent_disks) + volumes.extend(persistent_disks) + volumes.extend(existing_disks) + network = google_v2_pipelines.build_network( job_resources.network, job_resources.subnetwork, job_resources.use_private_address) @@ -903,7 +927,7 @@ def _build_pipeline_request(self, task_view): preemptible=task_resources.preemptible, service_account=service_account, boot_disk_size_gb=job_resources.boot_disk_size, - disks=disks, + volumes=volumes, accelerators=accelerators, nvidia_driver_version=job_resources.nvidia_driver_version, labels=labels, @@ -1547,12 +1571,42 @@ def get_field(self, field, default=None): value['enable-stackdriver-monitoring'] = vm.get( 'enableStackdriverMonitoring', False) value['service-account'] = vm.get('serviceAccount', {}).get('email') - if 'disks' in vm: + + # dsub now use "volumes" instead of "disks" (following the lead of the + # Life Sciences API). This block is included for compatibility with + # jobs in the operations list run by older versions of dsub. 
+ if vm.get('disks'): datadisk = next( (d for d in vm['disks'] if d['name'] == _DATA_DISK_NAME)) if datadisk: value['disk-size'] = datadisk.get('sizeGb') value['disk-type'] = datadisk.get('type') + if vm.get('volumes'): + volumes = [] + for v in vm['volumes']: + if v['volume'] == _DATA_DISK_NAME: + d = v.get('persistentDisk', {}) + value['disk-size'] = d.get('sizeGb') + value['disk-type'] = d.get('type') + else: + d = None + if v.get('persistentDisk'): + d = v.get('persistentDisk') + elif v.get('existingDisk'): + d = v.get('existingDisk') + + if d: + volume = {'name': v['volume']} + if d.get('disk'): + volume['disk-name'] = d.get('disk') + if d.get('type'): + volume['disk-type'] = d.get('type') + if d.get('sizeGb'): + volume['disk-size'] = d.get('sizeGb') + volumes.append(volume) + + value['volumes'] = volumes + elif field == 'events': value = GoogleV2EventMap(self._op).get_filtered_normalized_events() elif field == 'script-name': diff --git a/dsub/providers/google_v2_pipelines.py b/dsub/providers/google_v2_pipelines.py index d594a0b..c45c093 100644 --- a/dsub/providers/google_v2_pipelines.py +++ b/dsub/providers/google_v2_pipelines.py @@ -14,6 +14,8 @@ """Utility routines for constructing a Google Genomics Pipelines v2 API request. """ +from typing import Dict + from . 
import google_v2_versions _API_VERSION = None @@ -41,15 +43,26 @@ def build_network(name, subnetwork, use_private_address): } -def build_disk(name, size_gb, source_image, disk_type): +def build_persistent_disk(size_gb, source_image, disk_type): return { - 'name': name, 'sizeGb': size_gb, 'type': disk_type, 'sourceImage': source_image, } +def build_existing_disk(disk: str) -> Dict[str, str]: + return {'disk': disk} + + +def build_volume_persistent_disk(volume, disk): + return {'volume': volume, 'persistentDisk': disk} + + +def build_volume_existing_disk(volume, disk): + return {'volume': volume, 'existingDisk': disk} + + def build_accelerator(accelerator_type, accelerator_count): return {'type': accelerator_type, 'count': accelerator_count} @@ -66,7 +79,7 @@ def build_machine(network=None, preemptible=None, service_account=None, boot_disk_size_gb=None, - disks=None, + volumes=None, accelerators=None, labels=None, cpu_platform=None, @@ -80,7 +93,7 @@ def build_machine(network=None, preemptible (bool): Use a preemptible VM for the job. service_account (dict): Service account configuration for the VM. boot_disk_size_gb (int): Boot disk size in GB. - disks (list[dict]): List of disks to mount. + volumes (list[dict]): List of volumes to create or mount on the VM. accelerators (list[dict]): List of accelerators to attach to the VM. labels (dict[string, string]): Labels for the VM. cpu_platform (str): The CPU platform to request. 
@@ -98,7 +111,7 @@ def build_machine(network=None,
       'preemptible': preemptible,
       'serviceAccount': service_account,
       'bootDiskSizeGb': boot_disk_size_gb,
-      'disks': disks,
+      'volumes': volumes,
       'accelerators': accelerators,
       'labels': labels,
       'cpuPlatform': cpu_platform,
diff --git a/test/integration/e2e_io_mount_existing_disk.google-v2.sh b/test/integration/e2e_io_mount_existing_disk.google-v2.sh
new file mode 100755
index 0000000..9d77d4b
--- /dev/null
+++ b/test/integration/e2e_io_mount_existing_disk.google-v2.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright 2022 Verily Life Sciences Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

set -o errexit
set -o nounset

# Test existing persistent disk abilities.
#
# This test is designed to verify that mounting an existing Google Persistent
# Disk works.
# Input files have been placed inside the persistent disk ahead of time.
#
# The actual operation performed here is to download a BAM from the mounted
# persistent disk and compute its md5, writing it to <filename>.bam.md5.
# The output and the dstat mount details are then validated.

readonly SCRIPT_DIR="$(dirname "${0}")"

# Do standard test setup
source "${SCRIPT_DIR}/test_setup_e2e.sh"

# Do io setup
source "${SCRIPT_DIR}/io_setup.sh"

readonly ZONES="${TEST_EXISTING_DISK_ZONE}"

io_setup::existing_disk_setup
echo "Launching pipeline..."
+JOB_ID="$(io_setup::run_dsub_with_mount "${TEST_EXISTING_DISK_URL}")" + +# Do validation +io_setup::check_output +io_setup::check_dstat "${JOB_ID}" false "${TEST_EXISTING_DISK_URL}" diff --git a/test/integration/io_setup.sh b/test/integration/io_setup.sh index 2d883f4..aaab1f0 100644 --- a/test/integration/io_setup.sh +++ b/test/integration/io_setup.sh @@ -35,6 +35,16 @@ readonly TEST_IMAGE_NAME="dsub-e2e-test-image-$(echo ${TEST_TOKEN} | tr '_' '-') readonly TEST_IMAGE_GCS_LOCATION="gs://dsub-test-e2e-bucket/dsub-test-image.tar.gz" readonly TEST_IMAGE_URL="https://www.googleapis.com/compute/v1/projects/${PROJECT_ID}/global/images/${TEST_IMAGE_NAME}" +# This is the name and URL of a disk that we create in order to test the PD +# "mount existing disk" feature. Note that GCP supports regional disks, +# but we test zonal, as it is most likely to be used in practice. +# For the mount test, we create a disk from an image, so that the disk is +# already formatted. Note that the mount fails if the disk is not formatted. +readonly TEST_EXISTING_DISK_IMAGE_NAME="dsub-e2e-test-disk-image-$(echo ${TEST_TOKEN} | tr '_' '-')-$$" +readonly TEST_EXISTING_DISK_NAME="dsub-e2e-test-disk-$(echo ${TEST_TOKEN} | tr '_' '-')-$$" +readonly TEST_EXISTING_DISK_ZONE="us-central1-a" +readonly TEST_EXISTING_DISK_URL="https://www.googleapis.com/compute/v1/projects/${PROJECT_ID}/zones/${TEST_EXISTING_DISK_ZONE}/disks/${TEST_EXISTING_DISK_NAME}" + # This is the path we use to test local file:// mounts readonly TEST_TMP_PATH="/tmp/dsub_test_files" readonly TEST_LOCAL_MOUNT_PARAMETER="file://${TEST_TMP_PATH}" @@ -50,21 +60,75 @@ function io_setup::mount_local_path_setup() { } readonly -f io_setup::mount_local_path_setup -function io_setup::exit_handler() { +function io_setup::exit_handler_image() { local code="${?}" + echo "Deleting image ${TEST_IMAGE_NAME}..." gcloud --quiet compute images delete "${TEST_IMAGE_NAME}" echo "Image successfully deleted." 
+ return "${code}" } -readonly -f io_setup::exit_handler +readonly -f io_setup::exit_handler_image function io_setup::image_setup() { - echo "Creating image from ${TEST_IMAGE_GCS_LOCATION}..." + trap "io_setup::exit_handler_image" EXIT + + echo "Creating image ${TEST_IMAGE_NAME} from ${TEST_IMAGE_GCS_LOCATION}..." gcloud compute images create "${TEST_IMAGE_NAME}" \ --source-uri "${TEST_IMAGE_GCS_LOCATION}" echo "Image successfully created." - trap "io_setup::exit_handler" EXIT +} +readonly -f io_setup::image_setup + + +function io_setup::exit_handler_disk() { + local code="${?}" + + echo "Deleting image ${TEST_EXISTING_DISK_IMAGE_NAME}..." + gcloud --quiet compute images delete "${TEST_EXISTING_DISK_IMAGE_NAME}" + echo "Image successfully deleted." + + # Delete the disk, but in a retry loop - if the VM has not yet gone away, + # it'll be marked as "in use" and the delete fails + echo "Deleting disk ${TEST_EXISTING_DISK_NAME}..." + local TOTAL_WAIT_SECONDS="$((60 * 2))" + local WAIT_INTERVAL=5 + + for ((waited = 0; waited <= TOTAL_WAIT_SECONDS; waited += WAIT_INTERVAL)); do + if gcloud --quiet compute disks delete "${TEST_EXISTING_DISK_NAME}" \ + --zone="${TEST_EXISTING_DISK_ZONE}"; then + break + fi + + if ((waited >= TOTAL_WAIT_SECONDS)); then + 1>&2 echo "Failed to delete disk after ${waited} seconds" + exit 1 + fi + + echo "Sleeping ${WAIT_INTERVAL}s" + sleep "${WAIT_INTERVAL}s" + done + echo "Disk successfully deleted." + + return "${code}" +} +readonly -f io_setup::exit_handler_disk + + +function io_setup::existing_disk_setup() { + trap "io_setup::exit_handler_disk" EXIT + + echo "Creating image ${TEST_EXISTING_DISK_IMAGE_NAME} from ${TEST_IMAGE_GCS_LOCATION}..." + gcloud compute images create "${TEST_EXISTING_DISK_IMAGE_NAME}" \ + --source-uri "${TEST_IMAGE_GCS_LOCATION}" + echo "Image successfully created." + + echo "Creating disk from ${TEST_IMAGE_GCS_LOCATION} from ${TEST_EXISTING_DISK_IMAGE_NAME}..." 
+ gcloud compute disks create "${TEST_EXISTING_DISK_NAME}" \ + --image="${TEST_EXISTING_DISK_IMAGE_NAME}" \ + --zone="${TEST_EXISTING_DISK_ZONE}" + echo "Disk successfully created from image." } readonly -f io_setup::image_setup @@ -159,7 +223,7 @@ function io_setup::check_dstat() { local check_requester_pays_inputs="${4:-}" echo - echo "Checking dstat output..." + echo "Checking dstat output for job-id: ${job_id}..." local dstat_output=$(run_dstat --status '*' --jobs "${job_id}" --full) diff --git a/test/integration/unit_flags.google-v2.sh b/test/integration/unit_flags.google-v2.sh index 48dd4f2..8f7ba69 100755 --- a/test/integration/unit_flags.google-v2.sh +++ b/test/integration/unit_flags.google-v2.sh @@ -44,8 +44,10 @@ readonly -f call_dsub if [[ "${DSUB_PROVIDER}" == "google-cls-v2" ]]; then readonly NETWORK_NAME_KEY="network" + readonly CONTAINER_NAME_KEY="containerName" elif [[ "${DSUB_PROVIDER}" == "google-v2" ]]; then readonly NETWORK_NAME_KEY="name" + readonly CONTAINER_NAME_KEY="name" fi # Define tests @@ -694,7 +696,13 @@ function test_disk_type() { # Check that the output contains expected values assert_err_value_equals \ - "[0].pipeline.resources.virtualMachine.disks.[0].type" "pd-ssd" + "[0].pipeline.resources.virtualMachine.volumes.[0].volume" "datadisk" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.sizeGb" "200" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.type" "pd-ssd" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.sourceImage" "None" test_passed "${subtest}" else @@ -712,7 +720,13 @@ function test_no_disk_type() { # Check that the output contains expected values assert_err_value_equals \ - "[0].pipeline.resources.virtualMachine.disks.[0].type" "pd-standard" + "[0].pipeline.resources.virtualMachine.volumes.[0].volume" "datadisk" + assert_err_value_equals \ + 
"[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.sizeGb" "200" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.type" "pd-standard" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.sourceImage" "None" test_passed "${subtest}" else @@ -721,6 +735,119 @@ function test_no_disk_type() { } readonly -f test_no_disk_type +function test_mount_image() { + local subtest="${FUNCNAME[0]}" + + if call_dsub \ + --command 'echo "${TEST_NAME}"' \ + --regions us-central1 \ + --mount 'MOUNT_POINT=https://www.googleapis.com/compute/v1/projects/my-project/global/images/my-image 250'; then + + # Check that the output contains expected values + + # The volumes aren't order dependent, but we know the code adds the + # data disk first + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].volume" "datadisk" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.sizeGb" "200" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.type" "pd-standard" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.sourceImage" "None" + + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[1].volume" "MOUNT-POINT" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[1].persistentDisk.sizeGb" "250" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[1].persistentDisk.type" "pd-standard" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[1].persistentDisk.sourceImage" "https://www.googleapis.com/compute/v1/projects/my-project/global/images/my-image" + + # Check the mount points and environment variables for the user-command + assert_err_value_equals \ + "[0].pipeline.actions.[3].${CONTAINER_NAME_KEY}" "user-command" + assert_err_value_equals \ + 
"[0].pipeline.actions.[3].environment.MOUNT_POINT" "/mnt/data/mount/https/www.googleapis.com/compute/v1/projects/my-project/global/images/my-image" + + # The mounts aren't order dependent, but we know the code adds the + # data disk first + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[0].disk" "datadisk" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[0].path" "/mnt/data" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[0].readOnly" "False" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[1].disk" "MOUNT-POINT" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[1].path" "/mnt/data/mount/https/www.googleapis.com/compute/v1/projects/my-project/global/images/my-image" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[1].readOnly" "True" + + test_passed "${subtest}" + else + test_failed "${subtest}" + fi +} +readonly -f test_mount_image + + +function test_mount_existing_disk() { + local subtest="${FUNCNAME[0]}" + + if call_dsub \ + --command 'echo "${TEST_NAME}"' \ + --regions us-central1 \ + --mount 'MOUNT_POINT=https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/disks/my-existing-disk'; then + + # Check that the output contains expected values + + # The volumes aren't order dependent, but we know the code adds the + # data disk first + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].volume" "datadisk" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.sizeGb" "200" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.type" "pd-standard" + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[0].persistentDisk.sourceImage" "None" + + assert_err_value_equals \ + "[0].pipeline.resources.virtualMachine.volumes.[1].volume" "MOUNT-POINT" + assert_err_value_equals \ + 
"[0].pipeline.resources.virtualMachine.volumes.[1].existingDisk.disk" "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/disks/my-existing-disk" + + # Check the mount points and environment variables for the user-command + assert_err_value_equals \ + "[0].pipeline.actions.[3].${CONTAINER_NAME_KEY}" "user-command" + assert_err_value_equals \ + "[0].pipeline.actions.[3].environment.MOUNT_POINT" "/mnt/data/mount/https/www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/disks/my-existing-disk" + + # The mounts aren't order dependent, but we know the code adds the + # data disk first + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[0].disk" "datadisk" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[0].path" "/mnt/data" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[0].readOnly" "False" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[1].disk" "MOUNT-POINT" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[1].path" "/mnt/data/mount/https/www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/disks/my-existing-disk" + assert_err_value_equals \ + "[0].pipeline.actions.[3].mounts.[1].readOnly" "True" + + test_passed "${subtest}" + else + test_failed "${subtest}" + fi +} +readonly -f test_mount_existing_disk + function test_stackdriver() { local subtest="${FUNCNAME[0]}" @@ -871,6 +998,10 @@ echo test_disk_type test_no_disk_type +echo +test_mount_image +test_mount_existing_disk + echo test_stackdriver test_no_stackdriver From 915d02d54f424ea789f3d83bdf1546d1767e532c Mon Sep 17 00:00:00 2001 From: willyn Date: Mon, 16 May 2022 19:13:19 +0000 Subject: [PATCH 5/7] setup.py: Update dsub dependent libraries to pick up newer versions. 
PiperOrigin-RevId: 449022889 --- setup.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/setup.py b/setup.py index 39926fa..d1b2061 100644 --- a/setup.py +++ b/setup.py @@ -14,22 +14,22 @@ # dependencies for dsub, ddel, dstat # Pin to known working versions to prevent episodic breakage from library # version mismatches. - # This version list generated: 01/20/2022 + # This version list generated: 05/16/2022 # direct dependencies - 'google-api-python-client<=2.36.0', - 'google-auth<=2.3.0', + 'google-api-python-client<=2.47.0', + 'google-auth<=2.6.6', 'python-dateutil<=2.8.2', - 'pytz<=2021.3', + 'pytz<=2022.1', 'pyyaml<=6.0', 'tenacity<=7.0.0', 'tabulate<=0.8.9', # downstream dependencies 'funcsigs<=1.0.2', - 'google-api-core<=2.4.0', + 'google-api-core<=2.7.3', 'google-auth-httplib2<=0.1.0', - 'httplib2<=0.20.2', + 'httplib2<=0.20.4', 'pyasn1<=0.4.8', 'pyasn1-modules<=0.2.8', 'rsa<=4.8', From 4259dedec856423797946f7485407ca452a69c29 Mon Sep 17 00:00:00 2001 From: mbookman Date: Mon, 16 May 2022 19:29:53 +0000 Subject: [PATCH 6/7] README updates including documenting the Cloud SDK as a requirement for the local provider. PiperOrigin-RevId: 449026468 --- README.md | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 0d5c290..a15cd77 100644 --- a/README.md +++ b/README.md @@ -17,24 +17,17 @@ and Azure Batch. ## Getting started -You can install `dsub` from [PyPI](https://pypi.org/project/dsub/), or you can clone and -install from [github](https://github.com/DataBiosphere/dsub). +`dsub` is written in Python and requires Python 3.6 or higher. -### Sunsetting Python 2 support - -Python 2 support ended in January 2020. -See Python's official [Sunsetting Python 2 announcement](https://www.python.org/doc/sunset-python-2/) for details. - -Automated `dsub` tests running on Python 2 have been disabled. 
-[Release 0.3.10](https://github.com/DataBiosphere/dsub/releases/tag/v0.3.10) is
-the last version of `dsub` that supports Python 2.
-
-Use Python 3.6 or greater. For earlier versions of Python 3, use `dsub` 0.4.1.
+* For earlier versions of Python 3, use `dsub` [0.4.1](https://github.com/DataBiosphere/dsub/releases/tag/v0.4.1).
+* For Python 2, use `dsub` [0.3.10](https://github.com/DataBiosphere/dsub/releases/tag/v0.3.10).
 
 ### Pre-installation steps
 
+#### Create a Python virtual environment
+
 This is optional, but whether installing from PyPI or from github,
-you are encouraged to use a
+you are strongly encouraged to use a
 [Python virtual environment](https://docs.python.org/3/library/venv.html).
 
 You can do this in a directory of your choosing.
@@ -56,9 +49,27 @@ virutalenv before calling `dsub`, `dstat`, and `ddel`. They are in the
 use these scripts if you don't want to activate the virtualenv explicitly in
 your shell.
 
+#### Install the Google Cloud SDK
+
+While not used directly by `dsub` for the `google-v2` or `google-cls-v2` providers, you are likely to want to install the command line tools found in the [Google
+Cloud SDK](https://cloud.google.com/sdk/).
+
+If you will be using the `local` provider for faster job development,
+you *will* need to install the Google Cloud SDK, which uses `gsutil` to ensure
+file operation semantics consistent with the Google `dsub` providers.
+
+1. [Install the Google Cloud SDK](https://cloud.google.com/sdk/)
+2. Run
+
+       gcloud init
+
+
+   `gcloud` will prompt you to set your default project and to grant
+   credentials to the Google Cloud SDK.
+ ### Install `dsub` -Choose one of the following: +Choose **one** of the following: #### Install from PyPI @@ -167,12 +178,7 @@ The steps for getting started differ slightly as indicated in the steps below: [Enable the Cloud Life Sciences, Storage, and Compute APIs](https://console.cloud.google.com/flows/enableapi?apiid=lifesciences.googleapis.com,storage_component,compute_component&redirect=https://console.cloud.google.com) -1. [Install the Google Cloud SDK](https://cloud.google.com/sdk/) and run - - gcloud init - - This will set up your default project and grant credentials to the Google - Cloud SDK. Now provide [credentials](https://developers.google.com/identity/protocols/application-default-credentials) +1. Provide [credentials](https://developers.google.com/identity/protocols/application-default-credentials) so `dsub` can call Google APIs: gcloud auth application-default login From b7bdd73fdfe0036ceb0a423e3d2619a8a4a35a1f Mon Sep 17 00:00:00 2001 From: jtbates Date: Wed, 18 May 2022 16:31:28 +0000 Subject: [PATCH 7/7] Update dsub version to 0.4.7 PiperOrigin-RevId: 449501976 --- dsub/_dsub_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsub/_dsub_version.py b/dsub/_dsub_version.py index 41fd12f..a8f9d40 100644 --- a/dsub/_dsub_version.py +++ b/dsub/_dsub_version.py @@ -26,4 +26,4 @@ 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ -DSUB_VERSION = '0.4.7.dev0' +DSUB_VERSION = '0.4.7'