From 650b4a1140fbd5c42e08e5987b92111fa4b8be42 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 19:59:54 +0000 Subject: [PATCH 01/10] Bump cfn-lint from 1.20.1 to 1.21.0 Bumps [cfn-lint](https://github.com/aws-cloudformation/cfn-lint) from 1.20.1 to 1.21.0. - [Release notes](https://github.com/aws-cloudformation/cfn-lint/releases) - [Changelog](https://github.com/aws-cloudformation/cfn-lint/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws-cloudformation/cfn-lint/compare/v1.20.1...v1.21.0) --- updated-dependencies: - dependency-name: cfn-lint dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements-all.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-all.txt b/requirements-all.txt index 6169911a0..fac75a374 100644 --- a/requirements-all.txt +++ b/requirements-all.txt @@ -17,4 +17,4 @@ flake8-blind-except==0.2.1 flake8-builtins==2.5.0 setuptools==75.6.0 openapi-spec-validator==0.7.1 -cfn-lint==1.20.1 +cfn-lint==1.21.0 From 65b793b9511bdd8b97a367c3b9ea73f56ec38c09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 20:00:04 +0000 Subject: [PATCH 02/10] Bump boto3 from 1.35.72 to 1.35.76 Bumps [boto3](https://github.com/boto/boto3) from 1.35.72 to 1.35.76. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.72...1.35.76) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-all.txt | 2 +- requirements-apps-disable-private-dns.txt | 2 +- requirements-apps-start-execution-manager.txt | 2 +- requirements-apps-start-execution-worker.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements-all.txt b/requirements-all.txt index 6169911a0..5b61291ae 100644 --- a/requirements-all.txt +++ b/requirements-all.txt @@ -5,7 +5,7 @@ -r requirements-apps-start-execution-worker.txt -r requirements-apps-disable-private-dns.txt -r requirements-apps-update-db.txt -boto3==1.35.72 +boto3==1.35.76 jinja2==3.1.4 moto[dynamodb]==5.0.22 pytest==8.3.4 diff --git a/requirements-apps-disable-private-dns.txt b/requirements-apps-disable-private-dns.txt index e1e9d53d0..9fedc171b 100644 --- a/requirements-apps-disable-private-dns.txt +++ b/requirements-apps-disable-private-dns.txt @@ -1 +1 @@ -boto3==1.35.72 +boto3==1.35.76 diff --git a/requirements-apps-start-execution-manager.txt b/requirements-apps-start-execution-manager.txt index bdf18d6a5..70d2d45fe 100644 --- a/requirements-apps-start-execution-manager.txt +++ b/requirements-apps-start-execution-manager.txt @@ -1,3 +1,3 @@ -boto3==1.35.72 +boto3==1.35.76 ./lib/dynamo/ ./lib/lambda_logging/ diff --git a/requirements-apps-start-execution-worker.txt b/requirements-apps-start-execution-worker.txt index 7f387d327..8013cc37e 100644 --- a/requirements-apps-start-execution-worker.txt +++ b/requirements-apps-start-execution-worker.txt @@ -1,2 +1,2 @@ -boto3==1.35.72 +boto3==1.35.76 ./lib/lambda_logging/ From 061f35fdfadde12b07431c7d5aa9c8103610c275 Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Mon, 16 Dec 2024 14:21:29 -0500 Subject: [PATCH 03/10] switch to ruff from flake8 --- .github/workflows/static-analysis.yml | 15 +++------------ Makefile | 6 +++--- requirements-all.txt | 5 +---- ruff.toml | 4 ++++ 4 
files changed, 11 insertions(+), 19 deletions(-) create mode 100644 ruff.toml diff --git a/.github/workflows/static-analysis.yml b/.github/workflows/static-analysis.yml index aa5180bca..2ed4887b4 100644 --- a/.github/workflows/static-analysis.yml +++ b/.github/workflows/static-analysis.yml @@ -3,18 +3,9 @@ name: Static code analysis on: push jobs: - flake8: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.2.2 - - uses: actions/setup-python@v5 - with: - python-version: 3.9 - - run: | - python -m pip install --upgrade pip - make install - - - run: make flake8 + call-ruff-workflow: + # Docs: https://github.com/ASFHyP3/actions + uses: ASFHyP3/actions/.github/workflows/reusable-ruff.yml@v0.12.0 cfn-lint: runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 81d462c22..0a5a5c605 100644 --- a/Makefile +++ b/Makefile @@ -44,10 +44,10 @@ cost_profile ?= DEFAULT render: @echo rendering $(files) for API $(api_name) and security environment $(security_environment); python apps/render_cf.py -j $(files) -e $(compute_env_file) -s $(security_environment) -n $(api_name) -c $(cost_profile) -static: flake8 openapi-validate cfn-lint +static: ruff openapi-validate cfn-lint -flake8: - flake8 --ignore=E731 --max-line-length=120 --import-order-style=pycharm --statistics --application-import-names hyp3_api,get_files,handle_batch_event,set_batch_overrides,check_processing_time,start_execution_manager,start_execution_worker,disable_private_dns,update_db,upload_log,dynamo,lambda_logging,scale_cluster apps tests lib +ruff: + ruff check openapi-validate: render openapi-spec-validator apps/api/src/hyp3_api/api-spec/openapi-spec.yml diff --git a/requirements-all.txt b/requirements-all.txt index 36b01eb44..285c0a70c 100644 --- a/requirements-all.txt +++ b/requirements-all.txt @@ -11,10 +11,7 @@ moto[dynamodb]==5.0.22 pytest==8.3.4 PyYAML==6.0.2 responses==0.25.3 -flake8==7.1.1 -flake8-import-order==0.18.2 -flake8-blind-except==0.2.1 -flake8-builtins==2.5.0 +ruff==0.8.3 setuptools==75.6.0 openapi-spec-validator==0.7.1 cfn-lint==1.21.0 diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 000000000..61f91605f --- /dev/null +++ b/ruff.toml @@ -0,0 +1,4 @@ +line-length = 120 + +[format] +quote-style = "single" From 3d3af862d924957d5c951eec7a4f3e8c032acfc2 Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Mon, 16 Dec 2024 14:21:55 -0500 Subject: [PATCH 04/10] ruff formatting --- apps/api/src/hyp3_api/__main__.py | 2 +- apps/api/src/hyp3_api/handlers.py | 12 +- apps/api/src/hyp3_api/routes.py | 16 +- apps/api/src/hyp3_api/validation.py | 13 +- .../src/disable_private_dns.py | 2 +- apps/get-files/src/get_files.py | 40 ++-- apps/render_cf.py | 40 ++-- apps/scale-cluster/src/scale_cluster.py | 30 +-- .../src/start_execution_manager.py | 6 +- apps/upload-log/src/upload_log.py | 5 +- lib/dynamo/dynamo/exceptions.py | 5 +- lib/dynamo/dynamo/jobs.py | 15 +- lib/dynamo/dynamo/user.py | 8 +- lib/dynamo/dynamo/util.py | 4 +- lib/dynamo/setup.py | 3 - lib/lambda_logging/lambda_logging/__init__.py | 1 - tests/conftest.py | 2 + tests/test_api/conftest.py | 51 +++-- tests/test_api/test_api_spec.py | 8 +- tests/test_api/test_get_user.py | 2 +- tests/test_api/test_list_jobs.py | 28 ++- tests/test_api/test_patch_user.py | 35 +--- tests/test_api/test_submit_job.py | 30 ++- tests/test_api/test_validation.py | 177 ++++++++---------- tests/test_check_processing_time.py | 2 +- tests/test_dynamo/test_jobs.py | 139 +++++--------- tests/test_dynamo/test_user.py | 163 ++++++++-------- tests/test_dynamo/test_util.py | 8 +- 
tests/test_get_files.py | 35 +--- tests/test_handle_batch_event.py | 37 +--- tests/test_scale_cluster.py | 118 +++++++----- tests/test_set_batch_overrides.py | 19 +- tests/test_start_execution_manager.py | 48 ++--- tests/test_start_execution_worker.py | 8 +- tests/test_upload_log.py | 91 +++++---- 35 files changed, 540 insertions(+), 663 deletions(-) diff --git a/apps/api/src/hyp3_api/__main__.py b/apps/api/src/hyp3_api/__main__.py index 5ce6736e7..7edd23a02 100644 --- a/apps/api/src/hyp3_api/__main__.py +++ b/apps/api/src/hyp3_api/__main__.py @@ -1,4 +1,4 @@ from hyp3_api import app -if __name__ == "__main__": +if __name__ == '__main__': app.run(port=8080) diff --git a/apps/api/src/hyp3_api/handlers.py b/apps/api/src/hyp3_api/handlers.py index fbe92de96..b26f13591 100644 --- a/apps/api/src/hyp3_api/handlers.py +++ b/apps/api/src/hyp3_api/handlers.py @@ -10,12 +10,7 @@ def problem_format(status, message): - response = jsonify({ - 'status': status, - 'detail': message, - 'title': responses[status], - 'type': 'about:blank' - }) + response = jsonify({'status': status, 'detail': message, 'title': responses[status], 'type': 'about:blank'}) response.headers['Content-Type'] = 'application/problem+json' response.status_code = status return response @@ -49,8 +44,9 @@ def get_jobs(user, start=None, end=None, status_code=None, name=None, job_type=N payload = {'jobs': jobs} if last_evaluated_key is not None: next_token = util.serialize(last_evaluated_key) - payload['next'] = util.build_next_url(request.url, next_token, request.headers.get('X-Forwarded-Host'), - request.root_path) + payload['next'] = util.build_next_url( + request.url, next_token, request.headers.get('X-Forwarded-Host'), request.root_path + ) return payload diff --git a/apps/api/src/hyp3_api/routes.py b/apps/api/src/hyp3_api/routes.py index e0d69998f..04483000e 100644 --- a/apps/api/src/hyp3_api/routes.py +++ b/apps/api/src/hyp3_api/routes.py @@ -26,14 +26,9 @@ @app.before_request def check_system_available(): - if environ['SYSTEM_AVAILABLE'] != "true": + if environ['SYSTEM_AVAILABLE'] != 'true': message = 'HyP3 is currently unavailable. Please try again later.' - error = { - 'detail': message, - 'status': 503, - 'title': 'Service Unavailable', - 'type': 'about:blank' - } + error = {'detail': message, 'status': 503, 'title': 'Service Unavailable', 'type': 'about:blank'} return make_response(jsonify(error), 503) @@ -71,8 +66,11 @@ def render_ui(): @app.errorhandler(404) def error404(_): - return handlers.problem_format(404, 'The requested URL was not found on the server.' - ' If you entered the URL manually please check your spelling and try again.') + return handlers.problem_format( + 404, + 'The requested URL was not found on the server.' 
+ ' If you entered the URL manually please check your spelling and try again.', + ) class CustomEncoder(json.JSONEncoder): diff --git a/apps/api/src/hyp3_api/validation.py b/apps/api/src/hyp3_api/validation.py index 4355f71f6..fa555761b 100644 --- a/apps/api/src/hyp3_api/validation.py +++ b/apps/api/src/hyp3_api/validation.py @@ -57,8 +57,9 @@ def get_cmr_metadata(granules): granules = [ { 'name': entry.get('producer_granule_id', entry.get('title')), - 'polygon': Polygon(format_points(entry['polygons'][0][0])) - } for entry in response.json()['feed']['entry'] + 'polygon': Polygon(format_points(entry['polygons'][0][0])), + } + for entry in response.json()['feed']['entry'] ] return granules @@ -93,9 +94,7 @@ def check_same_burst_ids(job, _): ) for i in range(len(ref_ids)): if ref_ids[i] != sec_ids[i]: - raise GranuleValidationError( - f'Burst IDs do not match for {refs[i]} and {secs[i]}.' - ) + raise GranuleValidationError(f'Burst IDs do not match for {refs[i]} and {secs[i]}.') if len(set(ref_ids)) != len(ref_ids): duplicate_pair_id = next(ref_id for ref_id in ref_ids if ref_ids.count(ref_id) > 1) raise GranuleValidationError( @@ -174,9 +173,7 @@ def check_granules_intersecting_bounds(job, granule_metadata): if not bbox.intersection(bounds): bad_granules.append(granule['name']) if bad_granules: - raise GranuleValidationError( - f'The following granules do not intersect the provided bounds: {bad_granules}.' - ) + raise GranuleValidationError(f'The following granules do not intersect the provided bounds: {bad_granules}.') def check_same_relative_orbits(job, granule_metadata): diff --git a/apps/disable-private-dns/src/disable_private_dns.py b/apps/disable-private-dns/src/disable_private_dns.py index 8ded8f71b..53c205744 100644 --- a/apps/disable-private-dns/src/disable_private_dns.py +++ b/apps/disable-private-dns/src/disable_private_dns.py @@ -28,7 +28,7 @@ def set_private_dns_disabled(endpoint_id): response = CLIENT.modify_vpc_endpoint(VpcEndpointId=endpoint_id, PrivateDnsEnabled=False) # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2/client/modify_vpc_endpoint.html assert response['Return'] is True, response - print(f"Private DNS disabled for VPC Endpoint: {endpoint_id}.") + print(f'Private DNS disabled for VPC Endpoint: {endpoint_id}.') def disable_private_dns(vpc_id, endpoint_name): diff --git a/apps/get-files/src/get_files.py b/apps/get-files/src/get_files.py index fc72a464f..d37c6944e 100644 --- a/apps/get-files/src/get_files.py +++ b/apps/get-files/src/get_files.py @@ -40,12 +40,16 @@ def visible_product(product_path: Union[str, Path]) -> bool: def get_products(files): - return [{ - 'url': item['download_url'], - 'size': item['size'], - 'filename': item['filename'], - 's3': item['s3'], - } for item in files if item['file_type'] == 'product' and visible_product(item['filename'])] + return [ + { + 'url': item['download_url'], + 'size': item['size'], + 'filename': item['filename'], + 's3': item['s3'], + } + for item in files + if item['file_type'] == 'product' and visible_product(item['filename']) + ] def get_file_urls_by_type(file_list, file_type): @@ -61,16 +65,18 @@ def organize_files(files_dict, bucket): for item in files_dict: download_url = get_download_url(bucket, item['Key']) file_type = get_object_file_type(bucket, item['Key']) - all_files.append({ - 'download_url': download_url, - 'file_type': file_type, - 'size': item['Size'], - 'filename': basename(item['Key']), - 's3': { - 'bucket': bucket, - 'key': item['Key'], - }, - }) + all_files.append( + { 
+ 'download_url': download_url, + 'file_type': file_type, + 'size': item['Size'], + 'filename': basename(item['Key']), + 's3': { + 'bucket': bucket, + 'key': item['Key'], + }, + } + ) if expiration is None and file_type in ['product', 'log']: expiration = get_expiration_time(bucket, item['Key']) @@ -79,7 +85,7 @@ def organize_files(files_dict, bucket): 'browse_images': get_file_urls_by_type(all_files, 'browse'), 'thumbnail_images': get_file_urls_by_type(all_files, 'thumbnail'), 'logs': get_file_urls_by_type(all_files, 'log'), - 'expiration_time': expiration + 'expiration_time': expiration, } diff --git a/apps/render_cf.py b/apps/render_cf.py index 9688782ee..3eaf302ca 100644 --- a/apps/render_cf.py +++ b/apps/render_cf.py @@ -37,9 +37,7 @@ def get_state_for_job_step(step: dict, index: int, next_state_name: str, job_spe { 'Catch': [ { - 'ErrorEquals': [ - 'States.ALL' - ], + 'ErrorEquals': ['States.ALL'], 'ResultPath': f'$.results.processing_results.step_{index}', 'Next': 'PROCESSING_FAILED', }, @@ -72,8 +70,8 @@ def get_map_state(job_spec: dict, step: dict) -> dict: 'StartAt': submit_job_state_name, 'States': { submit_job_state_name: submit_job_state, - } - } + }, + }, } @@ -98,29 +96,16 @@ def get_batch_submit_job_state(job_spec: dict, step: dict, filter_batch_params=F 'SchedulingPriorityOverride.$': '$.priority', parameters_key: batch_job_parameters, 'ContainerOverrides.$': '$.container_overrides', - 'RetryStrategy': { - 'Attempts': 3 - }, + 'RetryStrategy': {'Attempts': 3}, }, 'ResultSelector': { 'StartedAt.$': '$.StartedAt', 'StoppedAt.$': '$.StoppedAt', }, 'Retry': [ - { - 'ErrorEquals': [ - 'Batch.ServerException', - 'Batch.AWSBatchException' - ], - 'MaxAttempts': 2 - }, - { - 'ErrorEquals': [ - 'States.ALL' - ], - 'MaxAttempts': 0 - } - ] + {'ErrorEquals': ['Batch.ServerException', 'Batch.AWSBatchException'], 'MaxAttempts': 2}, + {'ErrorEquals': ['States.ALL'], 'MaxAttempts': 0}, + ], } @@ -151,11 +136,7 @@ def get_batch_job_parameters(job_spec: dict, step: dict, map_item: str = None) - def get_batch_param_names_for_job_step(step: dict) -> set[str]: ref_prefix = 'Ref::' - return { - arg.removeprefix(ref_prefix) - for arg in step['command'] - if arg.startswith(ref_prefix) - } + return {arg.removeprefix(ref_prefix) for arg in step['command'] if arg.startswith(ref_prefix)} def render_templates(job_types: dict, compute_envs: dict, security_environment: str, api_name: str): @@ -217,7 +198,8 @@ def render_batch_params_by_job_type(job_types: dict) -> None: def render_default_params_by_job_type(job_types: dict) -> None: default_params_by_job_type = { job_type: { - key: value['api_schema']['default'] for key, value in job_spec['parameters'].items() + key: value['api_schema']['default'] + for key, value in job_spec['parameters'].items() if key not in job_spec['required_parameters'] } for job_type, job_spec in job_types.items() @@ -255,7 +237,7 @@ def validate_job_spec(job_type: str, job_spec: dict) -> None: if actual_param_fields != expected_param_fields: raise ValueError( f"parameter '{param_name}' for {job_type} has fields {actual_param_fields} " - f"but should have {expected_param_fields}" + f'but should have {expected_param_fields}' ) diff --git a/apps/scale-cluster/src/scale_cluster.py b/apps/scale-cluster/src/scale_cluster.py index 1ea0fec1f..d37a2aa4c 100644 --- a/apps/scale-cluster/src/scale_cluster.py +++ b/apps/scale-cluster/src/scale_cluster.py @@ -12,10 +12,7 @@ def get_time_period(today: date): start = today.replace(day=1) end = start + 
dateutil.relativedelta.relativedelta(months=1) - return { - 'Start': str(start), - 'End': str(end) - } + return {'Start': str(start), 'End': str(end)} def get_month_to_date_spending(today: date): @@ -41,16 +38,19 @@ def set_max_vcpus(compute_environment_arn: str, target_max_vcpus: int, current_d state='ENABLED', ) else: - print(f'Disabling {compute_environment_arn}. Current desiredvCpus {current_desired_vcpus} is larger than ' - f'target maxvCpus {target_max_vcpus}') + print( + f'Disabling {compute_environment_arn}. Current desiredvCpus {current_desired_vcpus} is larger than ' + f'target maxvCpus {target_max_vcpus}' + ) BATCH.update_compute_environment( computeEnvironment=compute_environment_arn, state='DISABLED', ) -def get_target_max_vcpus(today, monthly_budget, month_to_date_spending, default_max_vcpus, expanded_max_vcpus, - required_surplus): +def get_target_max_vcpus( + today, monthly_budget, month_to_date_spending, default_max_vcpus, expanded_max_vcpus, required_surplus +): days_in_month = calendar.monthrange(today.year, today.month)[1] month_to_date_budget = monthly_budget * today.day / days_in_month available_surplus = month_to_date_budget - month_to_date_spending @@ -68,12 +68,14 @@ def get_target_max_vcpus(today, monthly_budget, month_to_date_spending, default_ def lambda_handler(event, context): - target_max_vcpus = get_target_max_vcpus(today=date.today(), - monthly_budget=int(environ['MONTHLY_BUDGET']), - month_to_date_spending=get_month_to_date_spending(date.today()), - default_max_vcpus=int(environ['DEFAULT_MAX_VCPUS']), - expanded_max_vcpus=int(environ['EXPANDED_MAX_VCPUS']), - required_surplus=int(environ['REQUIRED_SURPLUS'])) + target_max_vcpus = get_target_max_vcpus( + today=date.today(), + monthly_budget=int(environ['MONTHLY_BUDGET']), + month_to_date_spending=get_month_to_date_spending(date.today()), + default_max_vcpus=int(environ['DEFAULT_MAX_VCPUS']), + expanded_max_vcpus=int(environ['EXPANDED_MAX_VCPUS']), + required_surplus=int(environ['REQUIRED_SURPLUS']), + ) current_desired_vcpus = get_current_desired_vcpus(environ['COMPUTE_ENVIRONMENT_ARN']) set_max_vcpus( compute_environment_arn=environ['COMPUTE_ENVIRONMENT_ARN'], diff --git a/apps/start-execution-manager/src/start_execution_manager.py b/apps/start-execution-manager/src/start_execution_manager.py index 0babf94ba..5a5ecbfef 100644 --- a/apps/start-execution-manager/src/start_execution_manager.py +++ b/apps/start-execution-manager/src/start_execution_manager.py @@ -10,9 +10,7 @@ def invoke_worker(worker_function_arn: str, jobs: list[dict]) -> dict: - payload = json.dumps( - {'jobs': dynamo.util.convert_decimals_to_numbers(jobs)} - ) + payload = json.dumps({'jobs': dynamo.util.convert_decimals_to_numbers(jobs)}) return LAMBDA_CLIENT.invoke( FunctionName=worker_function_arn, InvocationType='Event', @@ -30,7 +28,7 @@ def lambda_handler(event, context) -> None: batch_size = 250 for i in range(0, len(pending_jobs), batch_size): - jobs = pending_jobs[i:i + batch_size] + jobs = pending_jobs[i : i + batch_size] logger.info(f'Invoking worker for {len(jobs)} jobs') response = invoke_worker(worker_function_arn, jobs) logger.info(f'Got response status code {response["StatusCode"]}') diff --git a/apps/upload-log/src/upload_log.py b/apps/upload-log/src/upload_log.py index aecab3043..87fce2555 100644 --- a/apps/upload-log/src/upload_log.py +++ b/apps/upload-log/src/upload_log.py @@ -23,8 +23,9 @@ def get_log_content(log_group, log_stream): next_token = None while response['nextForwardToken'] != next_token: next_token = 
response['nextForwardToken'] - response = CLOUDWATCH.get_log_events(logGroupName=log_group, logStreamName=log_stream, startFromHead=True, - nextToken=next_token) + response = CLOUDWATCH.get_log_events( + logGroupName=log_group, logStreamName=log_stream, startFromHead=True, nextToken=next_token + ) messages.extend([event['message'] for event in response['events']]) return '\n'.join(messages) diff --git a/lib/dynamo/dynamo/exceptions.py b/lib/dynamo/dynamo/exceptions.py index 6460d21f8..ece102c76 100644 --- a/lib/dynamo/dynamo/exceptions.py +++ b/lib/dynamo/dynamo/exceptions.py @@ -22,14 +22,13 @@ def __init__(self, user_id: str, application_status: str): class UnexpectedApplicationStatusError(Exception): """Raised for an unexpected user application status.""" + help_url = 'https://hyp3-docs.asf.alaska.edu/using/requesting_access' class NotStartedApplicationError(UnexpectedApplicationStatusError): def __init__(self, user_id: str): - super().__init__( - f'{user_id} must request access before submitting jobs. Visit {self.help_url}' - ) + super().__init__(f'{user_id} must request access before submitting jobs. Visit {self.help_url}') class PendingApplicationError(UnexpectedApplicationStatusError): diff --git a/lib/dynamo/dynamo/jobs.py b/lib/dynamo/dynamo/jobs.py index ddeb579dd..234df602d 100644 --- a/lib/dynamo/dynamo/jobs.py +++ b/lib/dynamo/dynamo/jobs.py @@ -82,12 +82,12 @@ def _raise_for_application_status(application_status: str, user_id: str) -> None def _prepare_job_for_database( - job: dict, - user_id: str, - request_time: str, - remaining_credits: Optional[Decimal], - priority_override: Optional[int], - running_cost: Decimal, + job: dict, + user_id: str, + request_time: str, + remaining_credits: Optional[Decimal], + priority_override: Optional[int], + running_cost: Decimal, ) -> dict: if priority_override: priority = priority_override @@ -107,7 +107,7 @@ def _prepare_job_for_database( if 'job_type' in prepared_job: prepared_job['job_parameters'] = { **DEFAULT_PARAMS_BY_JOB_TYPE[prepared_job['job_type']], - **prepared_job.get('job_parameters', {}) + **prepared_job.get('job_parameters', {}), } prepared_job['credit_cost'] = _get_credit_cost(prepared_job, COSTS) else: @@ -119,7 +119,6 @@ def _get_credit_cost(job: dict, costs: list[dict]) -> Decimal: job_type = job['job_type'] for cost_definition in costs: if cost_definition['job_type'] == job_type: - if cost_definition.keys() not in ({'job_type', 'cost_parameter', 'cost_table'}, {'job_type', 'cost'}): raise ValueError(f'Cost definition for job type {job_type} has invalid keys: {cost_definition.keys()}') diff --git a/lib/dynamo/dynamo/user.py b/lib/dynamo/dynamo/user.py index df25c70d6..fa3a9aa27 100644 --- a/lib/dynamo/dynamo/user.py +++ b/lib/dynamo/dynamo/user.py @@ -55,7 +55,7 @@ def update_user(user_id: str, edl_access_token: str, body: dict) -> dict: ':not_started': APPLICATION_NOT_STARTED, ':pending': APPLICATION_PENDING, ':updated_application_status': updated_application_status, - **access_code_value + **access_code_value, }, ReturnValues='ALL_NEW', )['Attributes'] @@ -125,9 +125,9 @@ def _create_user(user_id: str, users_table) -> dict: def _reset_credits_if_needed(user: dict, current_month: str, users_table) -> dict: if ( - user['application_status'] == APPLICATION_APPROVED - and user.get('_month_of_last_credit_reset', '0') < current_month # noqa: W503 - and user['remaining_credits'] is not None # noqa: W503 + user['application_status'] == APPLICATION_APPROVED + and user.get('_month_of_last_credit_reset', '0') < current_month # 
noqa: W503 + and user['remaining_credits'] is not None # noqa: W503 ): try: user = users_table.update_item( diff --git a/lib/dynamo/dynamo/util.py b/lib/dynamo/dynamo/util.py index 37a2f7f0e..6caf32c75 100644 --- a/lib/dynamo/dynamo/util.py +++ b/lib/dynamo/dynamo/util.py @@ -10,8 +10,8 @@ def get_request_time_expression(start, end): key = Key('request_time') - formatted_start = (format_time(parse(start)) if start else None) - formatted_end = (format_time(parse(end)) if end else None) + formatted_start = format_time(parse(start)) if start else None + formatted_end = format_time(parse(end)) if end else None if formatted_start and formatted_end: return key.between(formatted_start, formatted_end) diff --git a/lib/dynamo/setup.py b/lib/dynamo/setup.py index a099e6928..bbf12896e 100644 --- a/lib/dynamo/setup.py +++ b/lib/dynamo/setup.py @@ -4,15 +4,12 @@ name='dynamo', license='BSD', include_package_data=True, - install_requires=[ 'boto3', 'python-dateutil', 'requests', ], python_requires='~=3.9', - packages=find_packages(), - package_data={'dynamo': ['*.json']}, ) diff --git a/lib/lambda_logging/lambda_logging/__init__.py b/lib/lambda_logging/lambda_logging/__init__.py index 694c59fcb..64c65da08 100644 --- a/lib/lambda_logging/lambda_logging/__init__.py +++ b/lib/lambda_logging/lambda_logging/__init__.py @@ -11,7 +11,6 @@ class UnhandledException(Exception): def log_exceptions(lambda_handler): - @wraps(lambda_handler) def wrapper(event, context): try: diff --git a/tests/conftest.py b/tests/conftest.py index be67e315b..1d80a4b8d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,6 +14,7 @@ class TableProperties: jobs_table = get_table_properties_from_template('JobsTable') users_table = get_table_properties_from_template('UsersTable') access_codes_table = get_table_properties_from_template('AccessCodesTable') + return TableProperties() @@ -45,6 +46,7 @@ class Tables: TableName=environ['ACCESS_CODES_TABLE_NAME'], **table_properties.access_codes_table, ) + tables = Tables() yield tables diff --git a/tests/test_api/conftest.py b/tests/test_api/conftest.py index a70e0b332..89ce603c8 100644 --- a/tests/test_api/conftest.py +++ b/tests/test_api/conftest.py @@ -29,7 +29,7 @@ def login(client, username=DEFAULT_USERNAME, access_token=DEFAULT_ACCESS_TOKEN): client.set_cookie( domain='localhost', key=AUTH_COOKIE, - value=auth.get_mock_jwt_cookie(username, lifetime_in_seconds=10_000, access_token=access_token) + value=auth.get_mock_jwt_cookie(username, lifetime_in_seconds=10_000, access_token=access_token), ) @@ -38,13 +38,7 @@ def make_job(granules=None, name='someName', job_type='RTC_GAMMA', parameters=No granules = ['S1B_IW_SLC__1SDV_20200604T082207_20200604T082234_021881_029874_5E38'] if parameters is None: parameters = {} - job = { - 'job_type': job_type, - 'job_parameters': { - 'granules': granules, - **parameters - } - } + job = {'job_type': job_type, 'job_parameters': {'granules': granules, **parameters}} if name is not None: job['name'] = name @@ -62,17 +56,19 @@ def submit_batch(client, batch=None, validate_only=None): return client.post(JOBS_URI, json=payload) -def make_db_record(job_id, - granules=None, - job_type='RTC_GAMMA', - user_id=DEFAULT_USERNAME, - request_time='2019-12-31T15:00:00+00:00', - status_code='RUNNING', - expiration_time='2019-12-31T15:00:00+00:00', - name=None, - files=None, - browse_images=None, - thumbnail_images=None): +def make_db_record( + job_id, + granules=None, + job_type='RTC_GAMMA', + user_id=DEFAULT_USERNAME, + request_time='2019-12-31T15:00:00+00:00', + 
status_code='RUNNING', + expiration_time='2019-12-31T15:00:00+00:00', + name=None, + files=None, + browse_images=None, + thumbnail_images=None, +): if granules is None: granules = ['S1A_IW_SLC__1SDV_20200610T173646_20200610T173704_032958_03D14C_5F2B'] record = { @@ -102,10 +98,7 @@ def setup_requests_mock_with_given_polygons(granule_polygon_pairs): cmr_response = { 'feed': { 'entry': [ - { - 'producer_granule_id': granule, - 'polygons': polygons - } for granule, polygons in granule_polygon_pairs + {'producer_granule_id': granule, 'polygons': polygons} for granule, polygons in granule_polygon_pairs ] } } @@ -115,9 +108,15 @@ def setup_requests_mock_with_given_polygons(granule_polygon_pairs): def setup_requests_mock(batch): granule_polygon_pairs = [ - (granule, - [['3.871941 -157.47052 62.278873 -156.62677 62.712959 -151.784653 ' - '64.318275 -152.353271 63.871941 -157.47052']]) + ( + granule, + [ + [ + '3.871941 -157.47052 62.278873 -156.62677 62.712959 -151.784653 ' + '64.318275 -152.353271 63.871941 -157.47052' + ] + ], + ) for granule in get_granules(batch) ] setup_requests_mock_with_given_polygons(granule_polygon_pairs) diff --git a/tests/test_api/test_api_spec.py b/tests/test_api/test_api_spec.py index b9937f28a..eb4b3bc2a 100644 --- a/tests/test_api/test_api_spec.py +++ b/tests/test_api/test_api_spec.py @@ -38,11 +38,7 @@ def test_not_logged_in(client): def test_invalid_cookie(client): for uri in ENDPOINTS: - client.set_cookie( - domain='localhost', - key=AUTH_COOKIE, - value='garbage I say!!! GARGBAGE!!!' - ) + client.set_cookie(domain='localhost', key=AUTH_COOKIE, value='garbage I say!!! GARGBAGE!!!') response = client.get(uri) assert response.status_code == HTTPStatus.UNAUTHORIZED @@ -52,7 +48,7 @@ def test_expired_cookie(client): client.set_cookie( domain='localhost', key=AUTH_COOKIE, - value=auth.get_mock_jwt_cookie('user', lifetime_in_seconds=-1, access_token='token') + value=auth.get_mock_jwt_cookie('user', lifetime_in_seconds=-1, access_token='token'), ) response = client.get(uri) assert response.status_code == HTTPStatus.UNAUTHORIZED diff --git a/tests/test_api/test_get_user.py b/tests/test_api/test_get_user.py index 23acef7be..5e2eb0dd6 100644 --- a/tests/test_api/test_get_user.py +++ b/tests/test_api/test_get_user.py @@ -50,7 +50,7 @@ def test_get_user_with_jobs(client, tables): make_db_record('job1', user_id=user_id, request_time=request_time, status_code='PENDING', name='job1'), make_db_record('job2', user_id=user_id, request_time=request_time, status_code='RUNNING', name='job1'), make_db_record('job3', user_id=user_id, request_time=request_time, status_code='FAILED', name='job2'), - make_db_record('job4', user_id=user_id, request_time=request_time, status_code='SUCCEEDED', name=None) + make_db_record('job4', user_id=user_id, request_time=request_time, status_code='SUCCEEDED', name=None), ] for item in items: tables.jobs_table.put_item(Item=item) diff --git a/tests/test_api/test_list_jobs.py b/tests/test_api/test_list_jobs.py index 06623cc74..13c99862d 100644 --- a/tests/test_api/test_list_jobs.py +++ b/tests/test_api/test_list_jobs.py @@ -12,27 +12,26 @@ def test_list_jobs(client, tables): 'filename': 'foo.txt', 'size': 123, 'url': 'https://mybucket.s3.us-west-2.amazonaws.com/prefix/foo.txt', - 's3': { - 'bucket': 'mybucket', - 'key': 'prefix/foo.txt' - }, + 's3': {'bucket': 'mybucket', 'key': 'prefix/foo.txt'}, }, { 'filename': 'bar.png', 'size': 0, 'url': 'https://mybucket.s3.us-west-2.amazonaws.com/prefix/bar.png', - 's3': { - 'bucket': 'mybucket', - 'key': 
'prefix/bar.png' - }, + 's3': {'bucket': 'mybucket', 'key': 'prefix/bar.png'}, }, ] browse_images = ['https://mybucket.s3.us-west-2.amazonaws.com/prefix/browse/foo.png'] thumbnail_images = [] items = [ make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', user_id='user_with_jobs'), - make_db_record(job_id='27836b79-e5b2-4d8f-932f-659724ea02c3', user_id='user_with_jobs', files=files, - browse_images=browse_images, thumbnail_images=thumbnail_images) + make_db_record( + job_id='27836b79-e5b2-4d8f-932f-659724ea02c3', + user_id='user_with_jobs', + files=files, + browse_images=browse_images, + thumbnail_images=thumbnail_images, + ), ] for item in items: tables.jobs_table.put_item(Item=item) @@ -52,7 +51,7 @@ def test_list_jobs(client, tables): def test_list_jobs_by_user_id(client, tables): items = [ make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', user_id='user_with_jobs'), - make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', user_id='user_with_jobs') + make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', user_id='user_with_jobs'), ] for item in items: tables.jobs_table.put_item(Item=item) @@ -75,7 +74,7 @@ def test_list_jobs_by_name(client, tables): items = [ make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', name='item1'), - make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', name=long_name) + make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', name=long_name), ] for item in items: tables.jobs_table.put_item(Item=item) @@ -122,7 +121,7 @@ def test_list_jobs_by_type(client, tables): def test_list_jobs_by_status(client, tables): items = [ make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', status_code='RUNNING'), - make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', status_code='SUCCEEDED') + make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', status_code='SUCCEEDED'), ] for item in items: tables.jobs_table.put_item(Item=item) @@ -189,8 +188,7 @@ def test_bad_date_formats(client): '2020-13-01T00:00:00Z', '01-JAN-2020', '01/01/2020', - '2020-01-01' - '2020-01-01T00:00Z', + '2020-01-01' '2020-01-01T00:00Z', '2020-01-01T00:00:00', '2020-01-01T00:00:00+01', '2020-01-01T00:00:00+0100', diff --git a/tests/test_api/test_patch_user.py b/tests/test_api/test_patch_user.py index 7185d77e8..da2de54e5 100644 --- a/tests/test_api/test_patch_user.py +++ b/tests/test_api/test_patch_user.py @@ -74,16 +74,14 @@ def test_patch_user_access_code(client, tables): ) login(client, 'foo') - with unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time, \ - unittest.mock.patch('dynamo.user._get_edl_profile') as mock_get_edl_profile: - + with ( + unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time, + unittest.mock.patch('dynamo.user._get_edl_profile') as mock_get_edl_profile, + ): mock_current_utc_time.return_value = '2024-05-21T20:01:03+00:00' mock_get_edl_profile.return_value = {} - response = client.patch( - USER_URI, - json={'use_case': 'I want data.', 'access_code': '123'} - ) + response = client.patch(USER_URI, json={'use_case': 'I want data.', 'access_code': '123'}) mock_current_utc_time.assert_called_once_with() mock_get_edl_profile.assert_called_once_with('foo', DEFAULT_ACCESS_TOKEN) @@ -100,17 +98,12 @@ def test_patch_user_access_code(client, tables): def test_patch_user_access_code_start_date(client, tables): - tables.access_codes_table.put_item( - Item={'access_code': '123', 'start_date': '2024-05-21T20:01:03+00:00'} - ) + tables.access_codes_table.put_item(Item={'access_code': '123', 'start_date': '2024-05-21T20:01:03+00:00'}) 
login(client, 'foo') with unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time: mock_current_utc_time.return_value = '2024-05-21T20:01:02+00:00' - response = client.patch( - USER_URI, - json={'use_case': 'I want data.', 'access_code': '123'} - ) + response = client.patch(USER_URI, json={'use_case': 'I want data.', 'access_code': '123'}) mock_current_utc_time.assert_called_once_with() assert response.status_code == HTTPStatus.FORBIDDEN @@ -125,10 +118,7 @@ def test_patch_user_access_code_end_date(client, tables): with unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time: mock_current_utc_time.return_value = '2024-05-21T20:01:04+00:00' - response = client.patch( - USER_URI, - json={'use_case': 'I want data.', 'access_code': '123'} - ) + response = client.patch(USER_URI, json={'use_case': 'I want data.', 'access_code': '123'}) mock_current_utc_time.assert_called_once_with() assert response.status_code == HTTPStatus.FORBIDDEN @@ -136,15 +126,10 @@ def test_patch_user_access_code_end_date(client, tables): def test_patch_user_access_code_invalid(client, tables): - tables.access_codes_table.put_item( - Item={'access_code': '123'} - ) + tables.access_codes_table.put_item(Item={'access_code': '123'}) login(client, 'foo') - response = client.patch( - USER_URI, - json={'use_case': 'I want data.', 'access_code': '456'} - ) + response = client.patch(USER_URI, json={'use_case': 'I want data.', 'access_code': '456'}) assert response.status_code == HTTPStatus.FORBIDDEN assert 'not a valid access code' in response.json['detail'] diff --git a/tests/test_api/test_submit_job.py b/tests/test_api/test_submit_job.py index 292b7e26e..ca8dbde29 100644 --- a/tests/test_api/test_submit_job.py +++ b/tests/test_api/test_submit_job.py @@ -77,16 +77,16 @@ def test_submit_multiple_job_types(client, approved_user): insar_gamma_job = make_job( [ 'S1A_IW_SLC__1SDV_20200720T172109_20200720T172128_033541_03E2FB_341F', - 'S1A_IW_SLC__1SDV_20200813T172110_20200813T172129_033891_03EE3F_2C3E' + 'S1A_IW_SLC__1SDV_20200813T172110_20200813T172129_033891_03EE3F_2C3E', ], - job_type='INSAR_GAMMA' + job_type='INSAR_GAMMA', ) autorift_job = make_job( [ 'S1A_IW_SLC__1SDV_20200720T172109_20200720T172128_033541_03E2FB_341F', - 'S1A_IW_SLC__1SDV_20200813T172110_20200813T172129_033891_03EE3F_2C3E' + 'S1A_IW_SLC__1SDV_20200813T172110_20200813T172129_033891_03EE3F_2C3E', ], - job_type='AUTORIFT' + job_type='AUTORIFT', ) batch = [rtc_gamma_job, insar_gamma_job, autorift_job] setup_requests_mock(batch) @@ -158,9 +158,7 @@ def test_submit_without_jobs(client): def test_submit_job_without_name(client, approved_user): login(client, username=approved_user) - batch = [ - make_job(name=None) - ] + batch = [make_job(name=None)] setup_requests_mock(batch) response = submit_batch(client, batch) @@ -169,9 +167,7 @@ def test_submit_job_without_name(client, approved_user): def test_submit_job_with_empty_name(client): login(client) - batch = [ - make_job(name='') - ] + batch = [make_job(name='')] setup_requests_mock(batch) response = submit_batch(client, batch) assert response.status_code == HTTPStatus.BAD_REQUEST @@ -179,9 +175,7 @@ def test_submit_job_with_empty_name(client): def test_submit_job_with_long_name(client): login(client) - batch = [ - make_job(name='X' * 101) - ] + batch = [make_job(name='X' * 101)] setup_requests_mock(batch) response = submit_batch(client, batch) assert response.status_code == HTTPStatus.BAD_REQUEST @@ -204,7 +198,7 @@ def test_submit_job_without_granules(client): def 
test_submit_job_granule_does_not_exist(client, tables): batch = [ make_job(['S1B_IW_SLC__1SDV_20200604T082207_20200604T082234_021881_029874_5E38']), - make_job(['S1A_IW_SLC__1SDV_20200610T173646_20200610T173704_032958_03D14C_5F2B']) + make_job(['S1A_IW_SLC__1SDV_20200610T173646_20200610T173704_032958_03D14C_5F2B']), ] setup_requests_mock(batch) batch.append(make_job(['S1A_IW_SLC__1SDV_20200610T173646_20200610T173704_032958_03D14C_5F2A'])) @@ -213,8 +207,10 @@ def test_submit_job_granule_does_not_exist(client, tables): response = submit_batch(client, batch) assert response.status_code == HTTPStatus.BAD_REQUEST assert response.json['title'] == 'Bad Request' - assert response.json['detail'] == 'Some requested scenes could not be found: ' \ - 'S1A_IW_SLC__1SDV_20200610T173646_20200610T173704_032958_03D14C_5F2A' + assert ( + response.json['detail'] == 'Some requested scenes could not be found: ' + 'S1A_IW_SLC__1SDV_20200610T173646_20200610T173704_032958_03D14C_5F2A' + ) def test_submit_good_rtc_granule_names(client, approved_user): @@ -348,7 +344,7 @@ def test_submit_mixed_job_parameters(client, approved_user): } granule_pair = [ 'S1A_IW_SLC__1SDV_20200527T195012_20200527T195028_032755_03CB56_3D96', - 'S1A_IW_SLC__1SDV_20200515T195012_20200515T195027_032580_03C609_4EBA' + 'S1A_IW_SLC__1SDV_20200515T195012_20200515T195027_032580_03C609_4EBA', ] job = make_job(job_type='RTC_GAMMA', parameters=rtc_parameters) diff --git a/tests/test_api/test_validation.py b/tests/test_api/test_validation.py index 01912dda3..daee9c1f6 100644 --- a/tests/test_api/test_validation.py +++ b/tests/test_api/test_validation.py @@ -75,7 +75,7 @@ def test_format_points(): [25.54, -29.76], [24.66, -29.56], [24.15, -31.23], - [25.04, -31.43] + [25.04, -31.43], ] @@ -104,86 +104,83 @@ def test_check_same_burst_ids(): { 'job_parameters': { 'reference': ['S1_136231_IW2_20200604T022312_VV_7C85-BURST'], - 'secondary': ['S1_136231_IW2_20200616T022313_VV_5D11-BURST'] + 'secondary': ['S1_136231_IW2_20200616T022313_VV_5D11-BURST'], } }, { 'job_parameters': { 'reference': [ 'S1_136231_IW2_20200604T022312_VV_7C85-BURST', - 'S1_136232_IW2_20200616T022315_VV_5D11-BURST' + 'S1_136232_IW2_20200616T022315_VV_5D11-BURST', ], 'secondary': [ 'S1_136231_IW2_20200616T022313_VV_5411-BURST', - 'S1_136232_IW2_20200616T022345_VV_5D13-BURST' - ] + 'S1_136232_IW2_20200616T022345_VV_5D13-BURST', + ], } }, { 'job_parameters': { 'reference': [ 'S1_136231_IW2_20200604T022312_VV_7C85-BURST', - 'S1_136231_IW3_20200616T022315_VV_5D11-BURST' + 'S1_136231_IW3_20200616T022315_VV_5D11-BURST', ], 'secondary': [ 'S1_136231_IW2_20200616T022313_VV_5411-BURST', - 'S1_136231_IW3_20200616T022345_VV_5D13-BURST' - ] + 'S1_136231_IW3_20200616T022345_VV_5D13-BURST', + ], } - } + }, ] invalid_job_different_lengths = { 'job_parameters': { 'reference': ['S1_136231_IW2_20200604T022312_VV_7C85-BURST'], - 'secondary': [ - 'S1_136232_IW2_20200616T022313_VV_5D11-BURST', - 'S1_136233_IW2_20200616T022313_VV_5D11-BURST' - ] + 'secondary': ['S1_136232_IW2_20200616T022313_VV_5D11-BURST', 'S1_136233_IW2_20200616T022313_VV_5D11-BURST'], } } invalid_jobs_not_matching = [ { 'job_parameters': { 'reference': ['S1_136231_IW2_20200604T022312_VV_7C85-BURST'], - 'secondary': ['S1_136232_IW2_20200616T022313_VV_5D11-BURST'] + 'secondary': ['S1_136232_IW2_20200616T022313_VV_5D11-BURST'], } }, { 'job_parameters': { 'reference': [ 'S1_136231_IW2_20200604T022312_VV_7C85-BURST', - 'S1_136232_IW2_20200604T123455_VV_ABC5-BURST' + 'S1_136232_IW2_20200604T123455_VV_ABC5-BURST', ], 'secondary': [ 
'S1_136231_IW2_20200617T022313_VV_5D11-BURST', - 'S1_136233_IW2_20200617T123213_VV_5E13-BURST' - ] + 'S1_136233_IW2_20200617T123213_VV_5E13-BURST', + ], } }, { 'job_parameters': { 'reference': [ 'S1_136232_IW2_20200604T022312_VV_7C85-BURST', - 'S1_136231_IW2_20200604T123455_VV_ABC5-BURST' + 'S1_136231_IW2_20200604T123455_VV_ABC5-BURST', ], 'secondary': [ 'S1_136231_IW2_20200617T022313_VV_5D11-BURST', - 'S1_136233_IW2_20200617T123213_VV_5E13-BURST' - ] + 'S1_136233_IW2_20200617T123213_VV_5E13-BURST', + ], } - } + }, ] invalid_jobs_duplicate = [ { 'job_parameters': { 'reference': [ 'S1_136231_IW2_20200604T022312_VV_7C85-BURST', - 'S1_136231_IW2_20200604T123455_VV_ABC5-BURST' + 'S1_136231_IW2_20200604T123455_VV_ABC5-BURST', ], 'secondary': [ 'S1_136231_IW2_20200617T022313_VV_5D11-BURST', - 'S1_136231_IW2_20200617T123213_VV_5E13-BURST' - ] + 'S1_136231_IW2_20200617T123213_VV_5E13-BURST', + ], } }, { @@ -197,9 +194,9 @@ def test_check_same_burst_ids(): 'S1_136231_IW2_20200617T022313_VV_5D11-BURST', 'S1_136231_IW2_20200617T123213_VV_5E13-BURST', 'S1_136232_IW2_20200604T123475_VV_ABC7-BURST', - ] + ], } - } + }, ] for valid_job in valid_jobs: validation.check_same_burst_ids(valid_job, {}) @@ -218,59 +215,59 @@ def test_check_valid_polarizations(): { 'job_parameters': { 'reference': ['S1_136231_IW2_20200604T022312_VV_7C85-BURST'], - 'secondary': ['S1_136231_IW2_20200616T022313_VV_5D11-BURST'] + 'secondary': ['S1_136231_IW2_20200616T022313_VV_5D11-BURST'], } }, { 'job_parameters': { 'reference': [ 'S1_136231_IW2_20200604T022312_HH_7C85-BURST', - 'S1_136232_IW2_20200616T022315_HH_5D11-BURST' + 'S1_136232_IW2_20200616T022315_HH_5D11-BURST', ], 'secondary': [ 'S1_136231_IW2_20200616T022313_HH_5411-BURST', - 'S1_136232_IW2_20200616T022345_HH_5D13-BURST' - ] + 'S1_136232_IW2_20200616T022345_HH_5D13-BURST', + ], } - } + }, ] invalid_jobs_not_matching = [ { 'job_parameters': { 'reference': ['S1_136231_IW2_20200604T022312_VV_7C85-BURST'], - 'secondary': ['S1_136232_IW2_20200616T022313_HH_5D11-BURST'] + 'secondary': ['S1_136232_IW2_20200616T022313_HH_5D11-BURST'], } }, { 'job_parameters': { 'reference': [ 'S1_136231_IW2_20200604T022312_VV_7C85-BURST', - 'S1_136232_IW2_20200604T123455_VV_ABC5-BURST' + 'S1_136232_IW2_20200604T123455_VV_ABC5-BURST', ], 'secondary': [ 'S1_136231_IW2_20200617T022313_VV_5D11-BURST', - 'S1_136233_IW2_20200617T123213_HH_5E13-BURST' - ] + 'S1_136233_IW2_20200617T123213_HH_5E13-BURST', + ], } }, { 'job_parameters': { 'reference': [ 'S1_136232_IW2_20200604T022312_VV_7C85-BURST', - 'S1_136231_IW2_20200604T123455_HH_ABC5-BURST' + 'S1_136231_IW2_20200604T123455_HH_ABC5-BURST', ], 'secondary': [ 'S1_136231_IW2_20200617T022313_VV_5D11-BURST', - 'S1_136233_IW2_20200617T123213_HH_5E13-BURST' - ] + 'S1_136233_IW2_20200617T123213_HH_5E13-BURST', + ], } - } + }, ] invalid_jobs_unsupported = [ { 'job_parameters': { 'reference': ['S1_136231_IW2_20200604T022312_VH_7C85-BURST'], - 'secondary': ['S1_136231_IW2_20200617T022313_VH_5D11-BURST'] + 'secondary': ['S1_136231_IW2_20200617T022313_VH_5D11-BURST'], } }, { @@ -284,9 +281,9 @@ def test_check_valid_polarizations(): 'S1_136231_IW2_20200617T022313_HV_5D11-BURST', 'S1_136231_IW2_20200617T123213_HV_5E13-BURST', 'S1_136232_IW2_20200604T123475_HV_ABC7-BURST', - ] + ], } - } + }, ] for valid_job in valid_jobs: validation.check_valid_polarizations(valid_job, {}) @@ -313,8 +310,9 @@ def test_check_granules_exist(): validation.check_granules_exist(['scene1', 'scene2'], granule_metadata) with raises(validation.GranuleValidationError) as e: - 
validation.check_granules_exist(['scene1', 'scene2', 'scene3', 'scene4', 'S2_foo', 'LC08_bar', 'LC09_bar'], - granule_metadata) + validation.check_granules_exist( + ['scene1', 'scene2', 'scene3', 'scene4', 'S2_foo', 'LC08_bar', 'LC09_bar'], granule_metadata + ) assert 'S2_foo' not in str(e) assert 'LC08_bar' not in str(e) assert 'LC09_bar' not in str(e) @@ -345,12 +343,12 @@ def test_get_cmr_metadata(): 'entry': [ { 'producer_granule_id': 'foo', - 'polygons': [["-31.4 25.0 -29.7 25.5 -29.5 24.6 -31.2 24.1 -31.4 25.0"]], + 'polygons': [['-31.4 25.0 -29.7 25.5 -29.5 24.6 -31.2 24.1 -31.4 25.0']], }, { 'title': 'bar', - 'polygons': [["0 1 2 3 4 5 6 7 0 1"]], - } + 'polygons': [['0 1 2 3 4 5 6 7 0 1']], + }, ], }, } @@ -373,23 +371,29 @@ def test_validate_jobs(): granule_with_dem_coverage = 'S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8' granule_without_dem_coverage = 'S1A_IW_GRDH_1SDV_20201219T222530_20201219T222555_035761_042F72_8378' - valid_burst_pair = ( - 'S1_136231_IW2_20200604T022312_VV_7C85-BURST', - 'S1_136231_IW2_20200616T022313_VV_5D11-BURST' - ) + valid_burst_pair = ('S1_136231_IW2_20200604T022312_VV_7C85-BURST', 'S1_136231_IW2_20200616T022313_VV_5D11-BURST') - invalid_burst_pair = ( - 'S1_136231_IW2_20200616T022313_VV_5D11-BURST', - 'S1_136232_IW2_20200604T022315_VV_7C85-BURST' - ) + invalid_burst_pair = ('S1_136231_IW2_20200616T022313_VV_5D11-BURST', 'S1_136232_IW2_20200604T022315_VV_7C85-BURST') granule_polygon_pairs = [ - (granule_with_dem_coverage, - [['13.705972 -91.927132 14.452647 -91.773392 14.888498 -94.065727 ' - '14.143632 -94.211563 13.705972 -91.927132']]), - (granule_without_dem_coverage, - [['37.796551 -68.331245 36.293144 -67.966415 36.69714 -65.129745 ' - '38.198883 -65.437325 37.796551 -68.331245']]) + ( + granule_with_dem_coverage, + [ + [ + '13.705972 -91.927132 14.452647 -91.773392 14.888498 -94.065727 ' + '14.143632 -94.211563 13.705972 -91.927132' + ] + ], + ), + ( + granule_without_dem_coverage, + [ + [ + '37.796551 -68.331245 36.293144 -67.966415 36.69714 -65.129745 ' + '38.198883 -65.437325 37.796551 -68.331245' + ] + ], + ), ] setup_requests_mock_with_given_polygons(granule_polygon_pairs) @@ -398,44 +402,33 @@ def test_validate_jobs(): 'job_type': 'RTC_GAMMA', 'job_parameters': { 'granules': [granule_with_dem_coverage], - } + }, }, { 'job_type': 'RTC_GAMMA', 'job_parameters': { 'granules': [granule_with_dem_coverage], 'dem_name': 'copernicus', - } + }, }, { 'job_type': 'INSAR_GAMMA', 'job_parameters': { 'granules': [granule_with_dem_coverage, granule_with_dem_coverage], - } + }, }, { 'job_type': 'AUTORIFT', 'job_parameters': { 'granules': [granule_with_dem_coverage, granule_without_dem_coverage], - } - }, - { - 'job_type': 'ARIA_RAIDER', - 'job_parameters': {} + }, }, + {'job_type': 'ARIA_RAIDER', 'job_parameters': {}}, { 'job_type': 'INSAR_ISCE_MULTI_BURST', - 'job_parameters': { - 'reference': [valid_burst_pair[0]], - 'secondary': [valid_burst_pair[1]] - } + 'job_parameters': {'reference': [valid_burst_pair[0]], 'secondary': [valid_burst_pair[1]]}, }, - { - 'job_type': 'INSAR_ISCE_BURST', - 'job_parameters': { - 'granules': [valid_burst_pair[0], valid_burst_pair[1]] - } - } + {'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {'granules': [valid_burst_pair[0], valid_burst_pair[1]]}}, ] validation.validate_jobs(jobs) @@ -444,7 +437,7 @@ def test_validate_jobs(): 'job_type': 'RTC_GAMMA', 'job_parameters': { 'granules': [unknown_granule], - } + }, } ] with raises(validation.GranuleValidationError): @@ -455,7 +448,7 @@ def 
test_validate_jobs(): 'job_type': 'RTC_GAMMA', 'job_parameters': { 'granules': [granule_without_dem_coverage], - } + }, } ] with raises(validation.GranuleValidationError): @@ -464,22 +457,14 @@ def test_validate_jobs(): jobs = [ { 'job_type': 'INSAR_ISCE_MULTI_BURST', - 'job_parameters': { - 'reference': [invalid_burst_pair[0]], - 'secondary': [invalid_burst_pair[1]] - } + 'job_parameters': {'reference': [invalid_burst_pair[0]], 'secondary': [invalid_burst_pair[1]]}, } ] with raises(validation.GranuleValidationError): validation.validate_jobs(jobs) jobs = [ - { - 'job_type': 'INSAR_ISCE_BURST', - 'job_parameters': { - 'granules': [invalid_burst_pair[0], invalid_burst_pair[1]] - } - } + {'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {'granules': [invalid_burst_pair[0], invalid_burst_pair[1]]}} ] with raises(validation.GranuleValidationError): validation.validate_jobs(jobs) @@ -490,7 +475,7 @@ def test_check_bounds_formatting(): {'job_parameters': {'bounds': [-10, 0, 10, 10]}}, {'job_parameters': {'bounds': [-180, -90, -170, -80]}}, {'job_parameters': {'bounds': [170, 75, 180, 90]}}, - {'job_parameters': {'bounds': [0, 0, 0, 0]}} + {'job_parameters': {'bounds': [0, 0, 0, 0]}}, ] invalid_jobs_bad_order = [ {'job_parameters': {'bounds': [10, 0, -10, 10]}}, @@ -520,7 +505,7 @@ def test_check_granules_intersecting_bounds(): valid_granule_metadata = [ {'name': 'intersects1', 'polygon': Polygon.from_bounds(-10.0, 0.0, 10.0, 10.0)}, {'name': 'intersects2', 'polygon': Polygon.from_bounds(-9.0, -1.0, 20.0, 11.0)}, - {'name': 'intersects3', 'polygon': Polygon.from_bounds(0.0, 5.0, 15.0, 15.0)} + {'name': 'intersects3', 'polygon': Polygon.from_bounds(0.0, 5.0, 15.0, 15.0)}, ] invalid_granule_metadata = [ {'name': 'intersects1', 'polygon': Polygon.from_bounds(-10.0, 0.0, 10.0, 10.0)}, @@ -543,12 +528,10 @@ def test_check_same_relative_orbits(): {'name': 'S1A_IW_RAW__0SDV_20201015T161622_20201015T161654_034809_040E95_AF3C'}, {'name': 'S1A_IW_RAW__0SDV_20200816T161620_20200816T161652_033934_03EFCE_5730'}, {'name': 'S1B_IW_RAW__0SDV_20200810T161537_20200810T161610_022863_02B66A_F7D7'}, - {'name': 'S1B_IW_RAW__0SDV_20200623T161535_20200623T161607_022163_02A10F_7FD6'} + {'name': 'S1B_IW_RAW__0SDV_20200623T161535_20200623T161607_022163_02A10F_7FD6'}, ] invalid_granule_metadata = valid_granule_metadata.copy() - invalid_granule_metadata.append( - {'name': 'S1B_IW_RAW__0SDV_20200623T161535_20200623T161607_012345_02A10F_7FD6'} - ) + invalid_granule_metadata.append({'name': 'S1B_IW_RAW__0SDV_20200623T161535_20200623T161607_012345_02A10F_7FD6'}) validation.check_same_relative_orbits({}, valid_granule_metadata) error_pattern = r'.*69 is not 87.*' with raises(validation.GranuleValidationError, match=error_pattern): diff --git a/tests/test_check_processing_time.py b/tests/test_check_processing_time.py index 8cc262e78..d10737b1b 100644 --- a/tests/test_check_processing_time.py +++ b/tests/test_check_processing_time.py @@ -21,7 +21,7 @@ def test_lambda_handler(): 'StartedAt': 4000, 'StoppedAt': 4200, }, - ] + ], } } assert check_processing_time.lambda_handler(event, None) == [5.7, 6.4, [5.9, 0.2]] diff --git a/tests/test_dynamo/test_jobs.py b/tests/test_dynamo/test_jobs.py index b4bafa4d5..c021feb7d 100644 --- a/tests/test_dynamo/test_jobs.py +++ b/tests/test_dynamo/test_jobs.py @@ -213,33 +213,21 @@ def test_get_credit_cost(): { 'job_type': 'INSAR_ISCE_BURST', 'cost': 1.0, - } + }, ] - assert dynamo.jobs._get_credit_cost( - {'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 10.0}}, - costs - ) == 60.0 - 
assert dynamo.jobs._get_credit_cost( - {'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 20.0}}, - costs - ) == 15.0 - assert dynamo.jobs._get_credit_cost( - {'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 30.0}}, - costs - ) == 5.0 + assert ( + dynamo.jobs._get_credit_cost({'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 10.0}}, costs) == 60.0 + ) + assert ( + dynamo.jobs._get_credit_cost({'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 20.0}}, costs) == 15.0 + ) + assert dynamo.jobs._get_credit_cost({'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 30.0}}, costs) == 5.0 with pytest.raises(ValueError): - dynamo.jobs._get_credit_cost( - {'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 13.0}}, - costs - ) - assert dynamo.jobs._get_credit_cost( - {'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {'foo': 'bar'}}, - costs - ) == 1.0 - assert dynamo.jobs._get_credit_cost( - {'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {}}, - costs - ) == 1.0 + dynamo.jobs._get_credit_cost({'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 13.0}}, costs) + assert ( + dynamo.jobs._get_credit_cost({'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {'foo': 'bar'}}, costs) == 1.0 + ) + assert dynamo.jobs._get_credit_cost({'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {}}, costs) == 1.0 def test_get_credit_cost_validate_keys(): @@ -279,7 +267,14 @@ def test_put_jobs(tables, monkeypatch, approved_user): assert len(jobs) == 3 for job in jobs: assert set(job.keys()) == { - 'name', 'job_id', 'user_id', 'status_code', 'execution_started', 'request_time', 'priority', 'credit_cost' + 'name', + 'job_id', + 'user_id', + 'status_code', + 'execution_started', + 'request_time', + 'priority', + 'credit_cost', } assert job['request_time'] <= current_utc_time() assert job['user_id'] == approved_user @@ -289,12 +284,14 @@ def test_put_jobs(tables, monkeypatch, approved_user): assert tables.jobs_table.scan()['Items'] == sorted(jobs, key=lambda item: item['job_id']) - assert tables.users_table.scan()['Items'] == [{ - 'user_id': approved_user, - 'remaining_credits': Decimal(7), - '_month_of_last_credit_reset': '2024-02', - 'application_status': APPLICATION_APPROVED, - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': approved_user, + 'remaining_credits': Decimal(7), + '_month_of_last_credit_reset': '2024-02', + 'application_status': APPLICATION_APPROVED, + } + ] def test_put_jobs_application_status(tables): @@ -370,8 +367,10 @@ def test_put_jobs_default_params(tables, approved_user): {'job_type': 'JOB_TYPE_C', 'job_parameters': {'c1': 'foo'}}, {'job_parameters': {'n1': 'foo'}}, ] - with unittest.mock.patch('dynamo.jobs.DEFAULT_PARAMS_BY_JOB_TYPE', default_params), \ - unittest.mock.patch('dynamo.jobs.COSTS', costs): + with ( + unittest.mock.patch('dynamo.jobs.DEFAULT_PARAMS_BY_JOB_TYPE', default_params), + unittest.mock.patch('dynamo.jobs.COSTS', costs), + ): jobs = dynamo.jobs.put_jobs(approved_user, payload) assert 'job_parameters' not in jobs[0] @@ -439,16 +438,16 @@ def test_put_jobs_costs(tables, monkeypatch, approved_user): {'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 30}}, {'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 20}}, {'job_type': 'RTC_GAMMA', 'job_parameters': {'resolution': 10}}, - {'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {'looks': '20x4'}}, {'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {'looks': '10x2'}}, {'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {'looks': '5x1'}}, - 
{'job_type': 'RTC_GAMMA', 'job_parameters': {}}, {'job_type': 'INSAR_ISCE_BURST', 'job_parameters': {}}, ] - with unittest.mock.patch('dynamo.jobs.COSTS', costs), \ - unittest.mock.patch('dynamo.jobs.DEFAULT_PARAMS_BY_JOB_TYPE', default_params): + with ( + unittest.mock.patch('dynamo.jobs.COSTS', costs), + unittest.mock.patch('dynamo.jobs.DEFAULT_PARAMS_BY_JOB_TYPE', default_params), + ): jobs = dynamo.jobs.put_jobs(approved_user, payload) assert len(jobs) == 8 @@ -504,7 +503,10 @@ def test_put_jobs_infinite_credits(tables, monkeypatch): def test_put_jobs_priority_override(tables): payload = [{'name': 'name1'}, {'name': 'name2'}] user = { - 'user_id': 'user1', 'priority_override': 100, 'remaining_credits': 3, 'application_status': APPLICATION_APPROVED + 'user_id': 'user1', + 'priority_override': 100, + 'remaining_credits': 3, + 'application_status': APPLICATION_APPROVED, } tables.users_table.put_item(Item=user) @@ -518,7 +520,7 @@ def test_put_jobs_priority_override(tables): 'user_id': 'user1', 'priority_override': 550, 'remaining_credits': None, - 'application_status': APPLICATION_APPROVED + 'application_status': APPLICATION_APPROVED, } tables.users_table.put_item(Item=user) @@ -670,53 +672,16 @@ def test_update_job(tables): def test_get_jobs_waiting_for_execution(tables): items = [ - { - 'job_id': 'job0', - 'status_code': 'PENDING', - 'execution_started': False - }, - { - 'job_id': 'job1', - 'status_code': 'PENDING' - }, - { - 'job_id': 'job2', - 'status_code': 'RUNNING', - 'execution_started': True - }, - { - 'job_id': 'job3', - 'status_code': 'PENDING', - 'execution_started': True - }, - { - 'job_id': 'job4', - 'status_code': 'PENDING', - 'execution_started': False - }, - { - 'job_id': 'job5', - 'status_code': 'PENDING', - 'execution_started': True - }, - { - 'job_id': 'job6', - 'status_code': 'PENDING', - 'execution_started': False - }, - { - 'job_id': 'job7', - 'status_code': 'PENDING', - 'execution_started': True - }, - { - 'job_id': 'job8', - 'status_code': 'RUNNING' - }, - { - 'job_id': 'job9', - 'status_code': 'PENDING' - }, + {'job_id': 'job0', 'status_code': 'PENDING', 'execution_started': False}, + {'job_id': 'job1', 'status_code': 'PENDING'}, + {'job_id': 'job2', 'status_code': 'RUNNING', 'execution_started': True}, + {'job_id': 'job3', 'status_code': 'PENDING', 'execution_started': True}, + {'job_id': 'job4', 'status_code': 'PENDING', 'execution_started': False}, + {'job_id': 'job5', 'status_code': 'PENDING', 'execution_started': True}, + {'job_id': 'job6', 'status_code': 'PENDING', 'execution_started': False}, + {'job_id': 'job7', 'status_code': 'PENDING', 'execution_started': True}, + {'job_id': 'job8', 'status_code': 'RUNNING'}, + {'job_id': 'job9', 'status_code': 'PENDING'}, ] for item in items: tables.jobs_table.put_item(Item=item) diff --git a/tests/test_dynamo/test_user.py b/tests/test_dynamo/test_user.py index 6b1fed030..9b4779c50 100644 --- a/tests/test_dynamo/test_user.py +++ b/tests/test_dynamo/test_user.py @@ -105,11 +105,13 @@ def test_update_user_rejected(tables): 'test-edl-access-token', {'use_case': 'I want data.'}, ) - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'remaining_credits': Decimal(0), - 'application_status': APPLICATION_REJECTED, - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'remaining_credits': Decimal(0), + 'application_status': APPLICATION_REJECTED, + } + ] def test_update_user_approved(tables, monkeypatch): @@ -131,12 +133,14 @@ def test_update_user_approved(tables, monkeypatch): ) 
mock_get_current_month.assert_called_once_with() - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'remaining_credits': Decimal(25), - '_month_of_last_credit_reset': '2024-02', - 'application_status': APPLICATION_APPROVED, - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'remaining_credits': Decimal(25), + '_month_of_last_credit_reset': '2024-02', + 'application_status': APPLICATION_APPROVED, + } + ] def test_update_user_invalid_status(tables): @@ -153,11 +157,13 @@ def test_update_user_invalid_status(tables): 'test-edl-access-token', {'use_case': 'I want data.'}, ) - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'remaining_credits': Decimal(0), - 'application_status': 'bar', - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'remaining_credits': Decimal(0), + 'application_status': 'bar', + } + ] def test_update_user_failed_application_status(tables): @@ -167,8 +173,10 @@ def test_update_user_failed_application_status(tables): 'application_status': 'bar', } ) - with unittest.mock.patch('dynamo.user.get_or_create_user') as mock_get_or_create_user, \ - unittest.mock.patch('dynamo.user._get_edl_profile') as mock_get_edl_profile: + with ( + unittest.mock.patch('dynamo.user.get_or_create_user') as mock_get_or_create_user, + unittest.mock.patch('dynamo.user._get_edl_profile') as mock_get_edl_profile, + ): mock_get_or_create_user.return_value = { 'user_id': 'foo', 'application_status': APPLICATION_NOT_STARTED, @@ -183,10 +191,12 @@ def test_update_user_failed_application_status(tables): mock_get_or_create_user.assert_called_once_with('foo') mock_get_edl_profile.assert_called_once_with('foo', 'test-edl-access-token') - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'application_status': 'bar', - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'application_status': 'bar', + } + ] def test_update_user_access_code(tables): @@ -194,18 +204,17 @@ def test_update_user_access_code(tables): Item={'access_code': '123', 'start_date': '2024-05-21T20:01:03+00:00', 'end_date': '2024-05-21T20:01:04+00:00'} ) - with unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time, \ - unittest.mock.patch('dynamo.user._get_current_month') as mock_get_current_month, \ - unittest.mock.patch('dynamo.user._get_edl_profile') as mock_get_edl_profile: - + with ( + unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time, + unittest.mock.patch('dynamo.user._get_current_month') as mock_get_current_month, + unittest.mock.patch('dynamo.user._get_edl_profile') as mock_get_edl_profile, + ): mock_current_utc_time.return_value = '2024-05-21T20:01:03+00:00' mock_get_current_month.return_value = '2024-05' mock_get_edl_profile.return_value = {'key': 'value'} user = dynamo.user.update_user( - 'foo', - 'test-edl-access-token', - {'use_case': 'I want data.', 'access_code': '123'} + 'foo', 'test-edl-access-token', {'use_case': 'I want data.', 'access_code': '123'} ) mock_current_utc_time.assert_called_once_with() @@ -230,11 +239,7 @@ def test_update_user_access_code_start_date(tables): with unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time: mock_current_utc_time.return_value = '2024-05-21T20:01:02+00:00' with pytest.raises(AccessCodeError, match=r'.*will become active.*'): - dynamo.user.update_user( - 'foo', - 'test-edl-access-token', - {'use_case': 'I want data.', 'access_code': '123'} - ) + 
dynamo.user.update_user('foo', 'test-edl-access-token', {'use_case': 'I want data.', 'access_code': '123'}) mock_current_utc_time.assert_called_once_with() assert tables.users_table.scan()['Items'] == [ @@ -250,11 +255,7 @@ def test_update_user_access_code_end_date(tables): with unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time: mock_current_utc_time.return_value = '2024-05-21T20:01:05+00:00' with pytest.raises(AccessCodeError, match=r'.*expired.*'): - dynamo.user.update_user( - 'foo', - 'test-edl-access-token', - {'use_case': 'I want data.', 'access_code': '123'} - ) + dynamo.user.update_user('foo', 'test-edl-access-token', {'use_case': 'I want data.', 'access_code': '123'}) mock_current_utc_time.assert_called_once_with() assert tables.users_table.scan()['Items'] == [ @@ -264,11 +265,7 @@ def test_update_user_access_code_end_date(tables): with unittest.mock.patch('dynamo.util.current_utc_time') as mock_current_utc_time: mock_current_utc_time.return_value = '2024-05-21T20:01:04+00:00' with pytest.raises(AccessCodeError, match=r'.*expired.*'): - dynamo.user.update_user( - 'foo', - 'test-edl-access-token', - {'use_case': 'I want data.', 'access_code': '123'} - ) + dynamo.user.update_user('foo', 'test-edl-access-token', {'use_case': 'I want data.', 'access_code': '123'}) mock_current_utc_time.assert_called_once_with() assert tables.users_table.scan()['Items'] == [ @@ -280,11 +277,7 @@ def test_update_user_access_code_invalid(tables): tables.access_codes_table.put_item(Item={'access_code': '123'}) with pytest.raises(AccessCodeError, match=r'.*not a valid access code.*'): - dynamo.user.update_user( - 'foo', - 'test-edl-access-token', - {'use_case': 'I want data.', 'access_code': '456'} - ) + dynamo.user.update_user('foo', 'test-edl-access-token', {'use_case': 'I want data.', 'access_code': '456'}) assert tables.users_table.scan()['Items'] == [ {'user_id': 'foo', 'remaining_credits': Decimal(0), 'application_status': APPLICATION_NOT_STARTED} @@ -537,11 +530,13 @@ def test_reset_credits_failed_not_approved(tables): users_table=tables.users_table, ) - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'remaining_credits': Decimal(10), - 'application_status': 'bar', - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'remaining_credits': Decimal(10), + 'application_status': 'bar', + } + ] def test_reset_credits_failed_same_month(tables): @@ -566,12 +561,14 @@ def test_reset_credits_failed_same_month(tables): users_table=tables.users_table, ) - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'remaining_credits': Decimal(10), - '_month_of_last_credit_reset': '2024-02', - 'application_status': APPLICATION_APPROVED, - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'remaining_credits': Decimal(10), + '_month_of_last_credit_reset': '2024-02', + 'application_status': APPLICATION_APPROVED, + } + ] def test_reset_credits_failed_infinite_credits(tables): @@ -594,11 +591,13 @@ def test_reset_credits_failed_infinite_credits(tables): users_table=tables.users_table, ) - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'remaining_credits': None, - 'application_status': APPLICATION_APPROVED, - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'remaining_credits': None, + 'application_status': APPLICATION_APPROVED, + } + ] def test_reset_credits_failed_approved(tables): @@ -623,12 +622,14 @@ def test_reset_credits_failed_approved(tables): 
users_table=tables.users_table, ) - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'remaining_credits': Decimal(10), - '_month_of_last_credit_reset': '2024-02', - 'application_status': APPLICATION_APPROVED, - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'remaining_credits': Decimal(10), + '_month_of_last_credit_reset': '2024-02', + 'application_status': APPLICATION_APPROVED, + } + ] def test_reset_credits_failed_zero_credits(tables): @@ -653,12 +654,14 @@ def test_reset_credits_failed_zero_credits(tables): users_table=tables.users_table, ) - assert tables.users_table.scan()['Items'] == [{ - 'user_id': 'foo', - 'remaining_credits': Decimal(0), - '_month_of_last_credit_reset': '2024-02', - 'application_status': 'bar', - }] + assert tables.users_table.scan()['Items'] == [ + { + 'user_id': 'foo', + 'remaining_credits': Decimal(0), + '_month_of_last_credit_reset': '2024-02', + 'application_status': 'bar', + } + ] def test_decrement_credits(tables): @@ -708,9 +711,9 @@ def test_decrement_credits_infinite_credits(tables): tables.users_table.put_item(Item={'user_id': 'foo', 'remaining_credits': None}) with pytest.raises( - botocore.exceptions.ClientError, - match=r'^An error occurred \(ValidationException\) when calling the UpdateItem operation:' - r' An operand in the update expression has an incorrect data type$' + botocore.exceptions.ClientError, + match=r'^An error occurred \(ValidationException\) when calling the UpdateItem operation:' + r' An operand in the update expression has an incorrect data type$', ): dynamo.user.decrement_credits('foo', 1) diff --git a/tests/test_dynamo/test_util.py b/tests/test_dynamo/test_util.py index b02392d3a..875034f05 100644 --- a/tests/test_dynamo/test_util.py +++ b/tests/test_dynamo/test_util.py @@ -45,7 +45,7 @@ def test_convert_floats_to_decimals(): }, '123.45', 123, - 123.45 + 123.45, ] response = dynamo.util.convert_floats_to_decimals(payload) @@ -59,7 +59,7 @@ def test_convert_floats_to_decimals(): }, '123.45', 123, - decimal.Decimal('123.45') + decimal.Decimal('123.45'), ] @@ -74,7 +74,7 @@ def test_convert_decimals_to_numbers(): }, '123.45', decimal.Decimal('123'), - 123.45 + 123.45, ] response = dynamo.util.convert_decimals_to_numbers(payload) @@ -88,5 +88,5 @@ def test_convert_decimals_to_numbers(): }, '123.45', 123, - 123.45 + 123.45, ] diff --git a/tests/test_get_files.py b/tests/test_get_files.py index 876d9d1e1..2e35a4da2 100644 --- a/tests/test_get_files.py +++ b/tests/test_get_files.py @@ -35,13 +35,10 @@ def test_get_download_url(monkeypatch): def stub_expiration(s3_stubber: Stubber, bucket, key): - params = { - 'Bucket': bucket, - 'Key': key - } + params = {'Bucket': bucket, 'Key': key} s3_response = { 'Expiration': 'expiry-date="Wed, 01 Jan 2020 00:00:00 UTC", ' - 'rule-id="MDQxMzRmZTgtNDFlMi00Y2UwLWIyZjEtMTEzYTllNDNjYjJk"' + 'rule-id="MDQxMzRmZTgtNDFlMi00Y2UwLWIyZjEtMTEzYTllNDNjYjJk"' } s3_stubber.add_response(method='get_object', expected_params=params, service_response=s3_response) @@ -53,18 +50,8 @@ def test_get_expiration(s3_stubber: Stubber): def stub_get_object_tagging(s3_stubber: Stubber, bucket, key, file_type): - params = { - 'Bucket': bucket, - 'Key': key - } - s3_response = { - 'TagSet': [ - { - 'Key': 'file_type', - 'Value': file_type - } - ] - } + params = {'Bucket': bucket, 'Key': key} + s3_response = {'TagSet': [{'Key': 'file_type', 'Value': file_type}]} s3_stubber.add_response(method='get_object_tagging', expected_params=params, service_response=s3_response) @@ -130,9 
+117,7 @@ def test_get_files_zipped_product(s3_stubber: Stubber): stub_get_object_tagging(s3_stubber, 'myBucket', 'myJobId/myBrowse.png', 'amp_browse') stub_get_object_tagging(s3_stubber, 'myBucket', 'myJobId/myBrowse_rgb.png', 'rgb_browse') - event = { - 'job_id': 'myJobId' - } + event = {'job_id': 'myJobId'} response = get_files.lambda_handler(event, None) assert response == { 'expiration_time': '2020-01-01T00:00:00+00:00', @@ -149,7 +134,7 @@ def test_get_files_zipped_product(s3_stubber: Stubber): ], 'browse_images': [ 'https://myBucket.s3.myRegion.amazonaws.com/myJobId/myBrowse.png', - 'https://myBucket.s3.myRegion.amazonaws.com/myJobId/myBrowse_rgb.png' + 'https://myBucket.s3.myRegion.amazonaws.com/myJobId/myBrowse_rgb.png', ], 'thumbnail_images': ['https://myBucket.s3.myRegion.amazonaws.com/myJobId/myThumbnail.png'], 'logs': [], @@ -177,9 +162,7 @@ def test_get_files_netcdf_product(s3_stubber: Stubber): stub_get_object_tagging(s3_stubber, 'myBucket', 'myJobId/myThumbnail.png', 'amp_thumbnail') stub_get_object_tagging(s3_stubber, 'myBucket', 'myJobId/myBrowse.png', 'amp_browse') - event = { - 'job_id': 'myJobId' - } + event = {'job_id': 'myJobId'} response = get_files.lambda_handler(event, None) assert response == { 'expiration_time': '2020-01-01T00:00:00+00:00', @@ -213,9 +196,7 @@ def test_get_files_failed_job(s3_stubber: Stubber): stub_get_object_tagging(s3_stubber, 'myBucket', 'myJobId/myJobId.log', 'log') stub_expiration(s3_stubber, 'myBucket', 'myJobId/myJobId.log') - event = { - 'job_id': 'myJobId' - } + event = {'job_id': 'myJobId'} response = get_files.lambda_handler(event, None) assert response == { 'expiration_time': '2020-01-01T00:00:00+00:00', diff --git a/tests/test_handle_batch_event.py b/tests/test_handle_batch_event.py index bcdf65ae5..1a05cd306 100644 --- a/tests/test_handle_batch_event.py +++ b/tests/test_handle_batch_event.py @@ -11,10 +11,7 @@ def test_lambda_handler(mock_get_job: MagicMock, mock_update_job: MagicMock): event = { 'source': 'aws.batch', 'detail-type': 'Batch Job State Change', - 'detail': { - 'status': 'RUNNING', - 'jobName': 'fooJob' - } + 'detail': {'status': 'RUNNING', 'jobName': 'fooJob'}, } mock_get_job.return_value = {'job_id': 'fooJob', 'status_code': 'PENDING'} @@ -30,10 +27,7 @@ def test_lambda_handler_job_not_pending(mock_get_job: MagicMock, mock_update_job event = { 'source': 'aws.batch', 'detail-type': 'Batch Job State Change', - 'detail': { - 'status': 'RUNNING', - 'jobName': 'fooJob' - } + 'detail': {'status': 'RUNNING', 'jobName': 'fooJob'}, } mock_get_job.return_value = {'job_id': 'fooJob', 'status_code': 'SUCCEEDED'} @@ -47,24 +41,14 @@ def test_lambda_handler_invalid_source(): event = { 'source': 'dummy', 'detail-type': 'Batch Job State Change', - 'detail': { - 'status': 'RUNNING', - 'jobName': 'fooJob' - } + 'detail': {'status': 'RUNNING', 'jobName': 'fooJob'}, } with pytest.raises(ValueError, match=r'.* source .*'): handle_batch_event.lambda_handler(event, None) def test_lambda_handler_invalid_detail_type(): - event = { - 'source': 'aws.batch', - 'detail-type': 'dummy', - 'detail': { - 'status': 'RUNNING', - 'jobName': 'fooJob' - } - } + event = {'source': 'aws.batch', 'detail-type': 'dummy', 'detail': {'status': 'RUNNING', 'jobName': 'fooJob'}} with pytest.raises(ValueError, match=r'.* detail-type .*'): handle_batch_event.lambda_handler(event, None) @@ -73,22 +57,13 @@ def test_lambda_handler_invalid_status(): event = { 'source': 'aws.batch', 'detail-type': 'Batch Job State Change', - 'detail': { - 'status': 'dummy', - 
'jobName': 'fooJob' - } + 'detail': {'status': 'dummy', 'jobName': 'fooJob'}, } with pytest.raises(ValueError, match=r'.* status .*'): handle_batch_event.lambda_handler(event, None) def test_lambda_handler_missing_key(): - event = { - 'detail-type': 'Batch Job State Change', - 'detail': { - 'status': 'RUNNING', - 'jobName': 'fooJob' - } - } + event = {'detail-type': 'Batch Job State Change', 'detail': {'status': 'RUNNING', 'jobName': 'fooJob'}} with pytest.raises(KeyError, match=r"'source'"): handle_batch_event.lambda_handler(event, None) diff --git a/tests/test_scale_cluster.py b/tests/test_scale_cluster.py index e25a6c1ae..82c1c0a81 100644 --- a/tests/test_scale_cluster.py +++ b/tests/test_scale_cluster.py @@ -35,60 +35,74 @@ def test_get_time_period(): def test_get_target_max_vcpus(): - vcpus = scale_cluster.get_target_max_vcpus(today=date(2020, 1, 1), - monthly_budget=1000, - month_to_date_spending=0, - default_max_vcpus=1, - expanded_max_vcpus=2, - required_surplus=1000) + vcpus = scale_cluster.get_target_max_vcpus( + today=date(2020, 1, 1), + monthly_budget=1000, + month_to_date_spending=0, + default_max_vcpus=1, + expanded_max_vcpus=2, + required_surplus=1000, + ) assert vcpus == 1 - vcpus = scale_cluster.get_target_max_vcpus(today=date(2020, 1, 31), - monthly_budget=1000, - month_to_date_spending=0, - default_max_vcpus=3, - expanded_max_vcpus=4, - required_surplus=1000) + vcpus = scale_cluster.get_target_max_vcpus( + today=date(2020, 1, 31), + monthly_budget=1000, + month_to_date_spending=0, + default_max_vcpus=3, + expanded_max_vcpus=4, + required_surplus=1000, + ) assert vcpus == 4 - vcpus = scale_cluster.get_target_max_vcpus(today=date(2020, 1, 31), - monthly_budget=1000, - month_to_date_spending=999.99, - default_max_vcpus=1, - expanded_max_vcpus=2, - required_surplus=1000) + vcpus = scale_cluster.get_target_max_vcpus( + today=date(2020, 1, 31), + monthly_budget=1000, + month_to_date_spending=999.99, + default_max_vcpus=1, + expanded_max_vcpus=2, + required_surplus=1000, + ) assert vcpus == 1 - vcpus = scale_cluster.get_target_max_vcpus(today=date(2020, 1, 31), - monthly_budget=0, - month_to_date_spending=0, - default_max_vcpus=1, - expanded_max_vcpus=2, - required_surplus=1) + vcpus = scale_cluster.get_target_max_vcpus( + today=date(2020, 1, 31), + monthly_budget=0, + month_to_date_spending=0, + default_max_vcpus=1, + expanded_max_vcpus=2, + required_surplus=1, + ) assert vcpus == 1 - vcpus = scale_cluster.get_target_max_vcpus(today=date(2020, 2, 20), - monthly_budget=29000, - month_to_date_spending=19001, - default_max_vcpus=1, - expanded_max_vcpus=2, - required_surplus=1000) + vcpus = scale_cluster.get_target_max_vcpus( + today=date(2020, 2, 20), + monthly_budget=29000, + month_to_date_spending=19001, + default_max_vcpus=1, + expanded_max_vcpus=2, + required_surplus=1000, + ) assert vcpus == 1 - vcpus = scale_cluster.get_target_max_vcpus(today=date(2020, 2, 20), - monthly_budget=29000, - month_to_date_spending=19000, - default_max_vcpus=1, - expanded_max_vcpus=2, - required_surplus=1000) + vcpus = scale_cluster.get_target_max_vcpus( + today=date(2020, 2, 20), + monthly_budget=29000, + month_to_date_spending=19000, + default_max_vcpus=1, + expanded_max_vcpus=2, + required_surplus=1000, + ) assert vcpus == 2 - vcpus = scale_cluster.get_target_max_vcpus(today=date(2020, 2, 20), - monthly_budget=29000, - month_to_date_spending=19000, - default_max_vcpus=1, - expanded_max_vcpus=2, - required_surplus=1001) + vcpus = scale_cluster.get_target_max_vcpus( + today=date(2020, 2, 20), + 
monthly_budget=29000, + month_to_date_spending=19000, + default_max_vcpus=1, + expanded_max_vcpus=2, + required_surplus=1001, + ) assert vcpus == 1 @@ -109,21 +123,24 @@ def test_get_current_desired_vcpus(batch_stubber): }, ] } - batch_stubber.add_response(method='describe_compute_environments', expected_params=expected_params, - service_response=service_response) + batch_stubber.add_response( + method='describe_compute_environments', expected_params=expected_params, service_response=service_response + ) assert scale_cluster.get_current_desired_vcpus('foo') == 5 def test_set_max_vcpus(batch_stubber): expected_params = {'computeEnvironment': 'foo', 'computeResources': {'maxvCpus': 10}, 'state': 'ENABLED'} - batch_stubber.add_response(method='update_compute_environment', expected_params=expected_params, - service_response={}) + batch_stubber.add_response( + method='update_compute_environment', expected_params=expected_params, service_response={} + ) scale_cluster.set_max_vcpus(compute_environment_arn='foo', target_max_vcpus=10, current_desired_vcpus=10) expected_params = {'computeEnvironment': 'foo', 'state': 'DISABLED'} - batch_stubber.add_response(method='update_compute_environment', expected_params=expected_params, - service_response={}) + batch_stubber.add_response( + method='update_compute_environment', expected_params=expected_params, service_response={} + ) scale_cluster.set_max_vcpus(compute_environment_arn='foo', target_max_vcpus=10, current_desired_vcpus=11) @@ -147,7 +164,8 @@ def test_get_month_to_date_spending(cost_explorer_stubber): }, ], } - cost_explorer_stubber.add_response(method='get_cost_and_usage', expected_params=expected_params, - service_response=mock_service_response) + cost_explorer_stubber.add_response( + method='get_cost_and_usage', expected_params=expected_params, service_response=mock_service_response + ) assert scale_cluster.get_month_to_date_spending(date(2022, 7, 15)) == 100.2 diff --git a/tests/test_set_batch_overrides.py b/tests/test_set_batch_overrides.py index 3b9bafefe..f3b22be7e 100644 --- a/tests/test_set_batch_overrides.py +++ b/tests/test_set_batch_overrides.py @@ -20,18 +20,21 @@ def mock_insar_isce_burst_job(looks: str, bursts: int) -> dict: 'job_parameters': { 'looks': looks, 'reference': ['foo' for _ in range(bursts)], - } + }, } def test_set_batch_overrides_default(): - assert lambda_handler( - { - 'job_type': 'foo', - 'job_parameters': {}, - }, - None, - ) == {} + assert ( + lambda_handler( + { + 'job_type': 'foo', + 'job_parameters': {}, + }, + None, + ) + == {} + ) def test_set_batch_overrides_insar_isce_burst_5x1(): diff --git a/tests/test_start_execution_manager.py b/tests/test_start_execution_manager.py index fb6768005..6d8731611 100644 --- a/tests/test_start_execution_manager.py +++ b/tests/test_start_execution_manager.py @@ -15,12 +15,10 @@ def test_invoke_worker(): 'job_parameters': { 'decimal_float_field': Decimal('10.1'), 'integer_float_field': Decimal('10'), - 'decimal_list_field': [Decimal('10.1'), Decimal('10')] - } + 'decimal_list_field': [Decimal('10.1'), Decimal('10')], + }, }, - { - 'job_id': 'job1' - } + {'job_id': 'job1'}, ] expected_payload = json.dumps( { @@ -32,12 +30,10 @@ def test_invoke_worker(): 'job_parameters': { 'decimal_float_field': 10.1, 'integer_float_field': 10, - 'decimal_list_field': [10.1, 10] - } + 'decimal_list_field': [10.1, 10], + }, }, - { - 'job_id': 'job1' - } + {'job_id': 'job1'}, ] } ) @@ -54,9 +50,11 @@ def test_invoke_worker(): def test_lambda_handler_500_jobs(): - with 
patch('dynamo.jobs.get_jobs_waiting_for_execution') as mock_get_jobs_waiting_for_execution, \ - patch('start_execution_manager.invoke_worker') as mock_invoke_worker, \ - patch.dict(os.environ, {'START_EXECUTION_WORKER_ARN': 'test-worker-function-arn'}, clear=True): + with ( + patch('dynamo.jobs.get_jobs_waiting_for_execution') as mock_get_jobs_waiting_for_execution, + patch('start_execution_manager.invoke_worker') as mock_invoke_worker, + patch.dict(os.environ, {'START_EXECUTION_WORKER_ARN': 'test-worker-function-arn'}, clear=True), + ): mock_jobs = list(range(500)) mock_get_jobs_waiting_for_execution.return_value = mock_jobs @@ -73,9 +71,11 @@ def test_lambda_handler_500_jobs(): def test_lambda_handler_400_jobs(): - with patch('dynamo.jobs.get_jobs_waiting_for_execution') as mock_get_jobs_waiting_for_execution, \ - patch('start_execution_manager.invoke_worker') as mock_invoke_worker, \ - patch.dict(os.environ, {'START_EXECUTION_WORKER_ARN': 'test-worker-function-arn'}, clear=True): + with ( + patch('dynamo.jobs.get_jobs_waiting_for_execution') as mock_get_jobs_waiting_for_execution, + patch('start_execution_manager.invoke_worker') as mock_invoke_worker, + patch.dict(os.environ, {'START_EXECUTION_WORKER_ARN': 'test-worker-function-arn'}, clear=True), + ): mock_jobs = list(range(400)) mock_get_jobs_waiting_for_execution.return_value = mock_jobs @@ -92,9 +92,11 @@ def test_lambda_handler_400_jobs(): def test_lambda_handler_50_jobs(): - with patch('dynamo.jobs.get_jobs_waiting_for_execution') as mock_get_jobs_waiting_for_execution, \ - patch('start_execution_manager.invoke_worker') as mock_invoke_worker, \ - patch.dict(os.environ, {'START_EXECUTION_WORKER_ARN': 'test-worker-function-arn'}, clear=True): + with ( + patch('dynamo.jobs.get_jobs_waiting_for_execution') as mock_get_jobs_waiting_for_execution, + patch('start_execution_manager.invoke_worker') as mock_invoke_worker, + patch.dict(os.environ, {'START_EXECUTION_WORKER_ARN': 'test-worker-function-arn'}, clear=True), + ): mock_jobs = list(range(50)) mock_get_jobs_waiting_for_execution.return_value = mock_jobs @@ -110,9 +112,11 @@ def test_lambda_handler_50_jobs(): def test_lambda_handler_no_jobs(): - with patch('dynamo.jobs.get_jobs_waiting_for_execution') as mock_get_jobs_waiting_for_execution, \ - patch('start_execution_manager.invoke_worker') as mock_invoke_worker, \ - patch.dict(os.environ, {'START_EXECUTION_WORKER_ARN': 'test-worker-function-arn'}, clear=True): + with ( + patch('dynamo.jobs.get_jobs_waiting_for_execution') as mock_get_jobs_waiting_for_execution, + patch('start_execution_manager.invoke_worker') as mock_invoke_worker, + patch.dict(os.environ, {'START_EXECUTION_WORKER_ARN': 'test-worker-function-arn'}, clear=True), + ): mock_get_jobs_waiting_for_execution.return_value = [] start_execution_manager.lambda_handler(None, None) diff --git a/tests/test_start_execution_worker.py b/tests/test_start_execution_worker.py index 1a26d469b..3b1bd7863 100644 --- a/tests/test_start_execution_worker.py +++ b/tests/test_start_execution_worker.py @@ -155,9 +155,11 @@ def test_submit_jobs(): sort_keys=True, ) - with patch('start_execution_worker.STEP_FUNCTION.start_execution') as mock_start_execution, \ - patch.dict(os.environ, {'STEP_FUNCTION_ARN': 'test-state-machine-arn'}, clear=True), \ - patch('start_execution_worker.BATCH_PARAMS_BY_JOB_TYPE', batch_params_by_job_type): + with ( + patch('start_execution_worker.STEP_FUNCTION.start_execution') as mock_start_execution, + patch.dict(os.environ, {'STEP_FUNCTION_ARN': 
'test-state-machine-arn'}, clear=True), + patch('start_execution_worker.BATCH_PARAMS_BY_JOB_TYPE', batch_params_by_job_type), + ): start_execution_worker.submit_jobs(jobs) assert mock_start_execution.mock_calls == [ diff --git a/tests/test_upload_log.py b/tests/test_upload_log.py index 33eb41165..bd34bf5ec 100644 --- a/tests/test_upload_log.py +++ b/tests/test_upload_log.py @@ -51,14 +51,20 @@ def test_get_log_content(cloudwatch_stubber): ], 'nextForwardToken': 'myNextToken', } - cloudwatch_stubber.add_response(method='get_log_events', expected_params=expected_params, - service_response=service_response) + cloudwatch_stubber.add_response( + method='get_log_events', expected_params=expected_params, service_response=service_response + ) - expected_params = {'logGroupName': 'myLogGroup', 'logStreamName': 'myLogStream', 'startFromHead': True, - 'nextToken': 'myNextToken'} + expected_params = { + 'logGroupName': 'myLogGroup', + 'logStreamName': 'myLogStream', + 'startFromHead': True, + 'nextToken': 'myNextToken', + } service_response = {'events': [], 'nextForwardToken': 'myNextToken'} - cloudwatch_stubber.add_response(method='get_log_events', expected_params=expected_params, - service_response=service_response) + cloudwatch_stubber.add_response( + method='get_log_events', expected_params=expected_params, service_response=service_response + ) assert upload_log.get_log_content('myLogGroup', 'myLogStream') == 'foo\nbar' @@ -70,19 +76,15 @@ def test_get_log_content_from_failed_attempts(): 'Attempts': [ {'Container': {'Reason': 'error message 1'}}, {'Container': {'Reason': 'error message 2'}}, - {'Container': {'Reason': 'error message 3'}} - ] + {'Container': {'Reason': 'error message 3'}}, + ], } expected = 'error message 1\nerror message 2\nerror message 3' assert upload_log.get_log_content_from_failed_attempts(cause) == expected def test_get_log_content_from_failed_attempts_no_attempts(): - cause = { - 'Status': 'FAILED', - 'StatusReason': 'foo reason', - 'Attempts': [] - } + cause = {'Status': 'FAILED', 'StatusReason': 'foo reason', 'Attempts': []} expected = 'foo reason' assert upload_log.get_log_content_from_failed_attempts(cause) == expected @@ -97,11 +99,7 @@ def test_upload_log_to_s3(s3_stubber): tag_params = { 'Bucket': 'myBucket', 'Key': 'myJobId/myJobId.log', - 'Tagging': { - 'TagSet': [ - {'Key': 'file_type', 'Value': 'log'} - ] - } + 'Tagging': {'TagSet': [{'Key': 'file_type', 'Value': 'log'}]}, } s3_stubber.add_response(method='put_object', expected_params=expected_params, service_response={}) s3_stubber.add_response(method='put_object_tagging', expected_params=tag_params, service_response={}) @@ -117,7 +115,7 @@ def test_lambda_handler(mock_get_log_content: MagicMock, mock_write_log_to_s3: M event = { 'prefix': 'test-prefix', 'log_group': 'test-log-group', - 'processing_results': {'step_0': {'Container': {'LogStreamName': 'test-log-stream'}}} + 'processing_results': {'step_0': {'Container': {'LogStreamName': 'test-log-stream'}}}, } upload_log.lambda_handler(event, None) @@ -133,15 +131,11 @@ def test_lambda_handler_no_log_stream(mock_write_log_to_s3: MagicMock): 'prefix': 'test-prefix', 'log_group': 'test-log-group', 'processing_results': { - 'step_0': - { - 'Error': '', - 'Cause': '{"Container": {},' - '"Status": "FAILED",' - '"StatusReason": "foo reason",' - '"Attempts": []}' - } - } + 'step_0': { + 'Error': '', + 'Cause': '{"Container": {},' '"Status": "FAILED",' '"StatusReason": "foo reason",' '"Attempts": []}', + } + }, } upload_log.lambda_handler(event, None) @@ -150,7 +144,6 
@@ def test_lambda_handler_no_log_stream(mock_write_log_to_s3: MagicMock):
 
 
 def test_lambda_handler_log_stream_does_not_exist():
-
     def mock_get_log_events(**kwargs):
         assert kwargs['logGroupName'] == 'test-log-group'
         assert kwargs['logStreamName'] == 'test-log-stream'
@@ -165,20 +158,21 @@ def mock_get_log_events(**kwargs):
             'step_0': {
                 'Error': '',
                 'Cause': '{"Container": {"LogStreamName": "test-log-stream"},'
-                         '"Status": "FAILED",'
-                         '"StatusReason": "Task failed to start",'
-                         '"Attempts": ['
-                         '{"Container": {"Reason": "error message 1"}},'
-                         '{"Container": {"Reason": "error message 2"}},'
-                         '{"Container": {"Reason": "error message 3"}}]}'
+                '"Status": "FAILED",'
+                '"StatusReason": "Task failed to start",'
+                '"Attempts": ['
+                '{"Container": {"Reason": "error message 1"}},'
+                '{"Container": {"Reason": "error message 2"}},'
+                '{"Container": {"Reason": "error message 3"}}]}',
             }
-        }
+        },
     }
 
-    with patch('upload_log.CLOUDWATCH.get_log_events', mock_get_log_events), \
-            patch('upload_log.write_log_to_s3') as mock_write_log_to_s3, \
-            patch.dict(os.environ, {'BUCKET': 'test-bucket'}, clear=True):
-
+    with (
+        patch('upload_log.CLOUDWATCH.get_log_events', mock_get_log_events),
+        patch('upload_log.write_log_to_s3') as mock_write_log_to_s3,
+        patch.dict(os.environ, {'BUCKET': 'test-bucket'}, clear=True),
+    ):
         upload_log.lambda_handler(event, None)
 
     mock_write_log_to_s3.assert_called_once_with(
@@ -187,7 +181,6 @@ def mock_get_log_events(**kwargs):
 
 
 def test_lambda_handler_resource_not_found():
-
     def mock_get_log_events(**kwargs):
         assert kwargs['logGroupName'] == 'test-log-group'
         assert kwargs['logStreamName'] == 'test-log-stream'
@@ -202,16 +195,16 @@ def mock_get_log_events(**kwargs):
             'step_0': {
                 'Error': '',
                 'Cause': '{"Container": {"LogStreamName": "test-log-stream"},'
-                         '"Status": "FAILED",'
-                         '"StatusReason": "Task failed to start",'
-                         '"Attempts": ['
-                         '{"Container": {"Reason": "error message 1"}},'
-                         '{"Container": {"Reason": "error message 2"}},'
-                         '{"Container": {"Reason": "error message 3"}}]}'
+                '"Status": "FAILED",'
+                '"StatusReason": "Task failed to start",'
+                '"Attempts": ['
+                '{"Container": {"Reason": "error message 1"}},'
+                '{"Container": {"Reason": "error message 2"}},'
+                '{"Container": {"Reason": "error message 3"}}]}',
             }
-        }
+        },
     }
 
     with patch('upload_log.CLOUDWATCH.get_log_events', mock_get_log_events):
-        with pytest.raises(upload_log.CLOUDWATCH.exceptions.ResourceNotFoundException, match=r".*foo message.*"):
+        with pytest.raises(upload_log.CLOUDWATCH.exceptions.ResourceNotFoundException, match=r'.*foo message.*'):
             upload_log.lambda_handler(event, None)

From 12887ebc5a7192be2c01e0c1a7748b916f0a8278 Mon Sep 17 00:00:00 2001
From: Andrew Player
Date: Mon, 16 Dec 2024 14:22:30 -0500
Subject: [PATCH 05/10] updated changelog

---
 CHANGELOG.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b7cd5615d..90b50dc9b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [9.1.1]
+
+### Changed
+- The [`static-analysis`](.github/workflows/static-analysis.yml) GitHub Actions workflow now uses `ruff` rather than `flake8` for linting.
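Nearly all of the hunks in the formatting commit above are mechanical consequences of a few `ruff format` conventions rather than hand edits. The standalone snippet below illustrates the three that account for most of the diff; none of these lines come from the repository.

import contextlib

# 1. Single quotes, per the repository's quote-style = "single" setting.
event = {'source': 'aws.batch', 'detail-type': 'Batch Job State Change'}

# 2. The "magic trailing comma": a trailing comma after the last element keeps
# a collection exploded one item per line, while omitting it lets the formatter
# collapse the collection onto a single line if it fits within line-length = 120.
exploded = {
    'job_id': 'job0',
    'status_code': 'PENDING',
    'execution_started': False,
}
collapsed = {'job_id': 'job0', 'status_code': 'PENDING', 'execution_started': False}

# 3. Multiple context managers: backslash continuations are rewritten as a
# single parenthesized `with`, which CPython's parser has accepted since 3.9
# (the syntax was officially documented in 3.10).
with (
    contextlib.suppress(KeyError),
    contextlib.suppress(ValueError),
):
    {}['missing']  # raises KeyError, suppressed by the first context manager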
+ ## [9.1.0] ### Added From 85102c7b04be8a5902bc46692fc0eefaf54a5f58 Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Mon, 16 Dec 2024 15:39:22 -0500 Subject: [PATCH 06/10] Update Makefile Co-authored-by: Jake Herrmann --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0a5a5c605..ea68987db 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ render: static: ruff openapi-validate cfn-lint ruff: - ruff check + ruff check . && ruff format --diff . openapi-validate: render openapi-spec-validator apps/api/src/hyp3_api/api-spec/openapi-spec.yml From 56cbb05ec7b4956ba9d7db02e2ef14850554c8cb Mon Sep 17 00:00:00 2001 From: Andrew Player Date: Mon, 16 Dec 2024 16:15:43 -0500 Subject: [PATCH 07/10] Update requirements-all.txt Co-authored-by: Jake Herrmann --- requirements-all.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-all.txt b/requirements-all.txt index 285c0a70c..b7ae4e6a8 100644 --- a/requirements-all.txt +++ b/requirements-all.txt @@ -11,7 +11,7 @@ moto[dynamodb]==5.0.22 pytest==8.3.4 PyYAML==6.0.2 responses==0.25.3 -ruff==0.8.3 +ruff setuptools==75.6.0 openapi-spec-validator==0.7.1 cfn-lint==1.21.0 From 6c66d8f3d9cf45023d9c2bc3daf0b672a704d357 Mon Sep 17 00:00:00 2001 From: Jake Herrmann Date: Mon, 16 Dec 2024 15:16:48 -0900 Subject: [PATCH 08/10] update ruff config --- .gitignore | 3 ++ apps/api/src/hyp3_api/__init__.py | 1 + apps/api/src/hyp3_api/__main__.py | 1 + apps/api/src/hyp3_api/routes.py | 1 + apps/api/src/hyp3_api/validation.py | 1 + apps/get-files/src/get_files.py | 1 + apps/render_cf.py | 2 +- apps/scale-cluster/src/scale_cluster.py | 1 + .../src/start_execution_manager.py | 3 +- .../src/start_execution_worker.py | 2 +- apps/upload-log/src/upload_log.py | 1 + lib/dynamo/dynamo/jobs.py | 5 ++- lib/dynamo/dynamo/user.py | 1 + lib/dynamo/dynamo/util.py | 1 + lib/dynamo/setup.py | 1 + lib/lambda_logging/lambda_logging/__init__.py | 1 + lib/lambda_logging/setup.py | 1 + pyproject.toml | 40 +++++++++++++++++++ ruff.toml | 4 -- tests/conftest.py | 2 +- tests/test_api/conftest.py | 1 + tests/test_api/test_api_spec.py | 2 +- tests/test_api/test_get_user.py | 3 +- tests/test_api/test_patch_user.py | 3 +- tests/test_api/test_submit_job.py | 3 +- tests/test_api/test_validation.py | 2 +- tests/test_dynamo/test_jobs.py | 2 +- tests/test_lambda_logging.py | 3 +- tests/test_render_cf.py | 3 +- tests/test_set_batch_overrides.py | 4 +- 30 files changed, 75 insertions(+), 24 deletions(-) create mode 100644 pyproject.toml delete mode 100644 ruff.toml diff --git a/.gitignore b/.gitignore index d37283ec2..8796a6956 100644 --- a/.gitignore +++ b/.gitignore @@ -138,3 +138,6 @@ dmypy.json # Pyre type checker .pyre/ + +# vim +*.swp diff --git a/apps/api/src/hyp3_api/__init__.py b/apps/api/src/hyp3_api/__init__.py index 355354738..10b44d6f5 100644 --- a/apps/api/src/hyp3_api/__init__.py +++ b/apps/api/src/hyp3_api/__init__.py @@ -1,5 +1,6 @@ from flask import Flask + app = Flask(__name__, template_folder='ui/swagger/', static_folder='ui/swagger/', static_url_path='/ui/') CMR_URL = 'https://cmr.earthdata.nasa.gov/search/granules.json' diff --git a/apps/api/src/hyp3_api/__main__.py b/apps/api/src/hyp3_api/__main__.py index 7edd23a02..b23ee6733 100644 --- a/apps/api/src/hyp3_api/__main__.py +++ b/apps/api/src/hyp3_api/__main__.py @@ -1,4 +1,5 @@ from hyp3_api import app + if __name__ == '__main__': app.run(port=8080) diff --git a/apps/api/src/hyp3_api/routes.py b/apps/api/src/hyp3_api/routes.py index 
04483000e..f7e53314a 100644 --- a/apps/api/src/hyp3_api/routes.py +++ b/apps/api/src/hyp3_api/routes.py @@ -16,6 +16,7 @@ from hyp3_api import app, auth, handlers from hyp3_api.openapi import get_spec_yaml + api_spec_file = Path(__file__).parent / 'api-spec' / 'openapi-spec.yml' api_spec_dict = get_spec_yaml(api_spec_file) api_spec = OpenAPI.from_dict(api_spec_dict) diff --git a/apps/api/src/hyp3_api/validation.py b/apps/api/src/hyp3_api/validation.py index fa555761b..1358b1feb 100644 --- a/apps/api/src/hyp3_api/validation.py +++ b/apps/api/src/hyp3_api/validation.py @@ -11,6 +11,7 @@ from hyp3_api import CMR_URL from hyp3_api.util import get_granules + DEM_COVERAGE = None diff --git a/apps/get-files/src/get_files.py b/apps/get-files/src/get_files.py index d37c6944e..585b3beb1 100644 --- a/apps/get-files/src/get_files.py +++ b/apps/get-files/src/get_files.py @@ -8,6 +8,7 @@ import boto3 + S3_CLIENT = boto3.client('s3') diff --git a/apps/render_cf.py b/apps/render_cf.py index 3eaf302ca..eeb265479 100644 --- a/apps/render_cf.py +++ b/apps/render_cf.py @@ -151,7 +151,7 @@ def render_templates(job_types: dict, compute_envs: dict, security_environment: keep_trailing_newline=True, ) - for template_file in Path('.').glob('**/*.j2'): + for template_file in Path().glob('**/*.j2'): template = env.get_template(str(template_file)) output = template.render( diff --git a/apps/scale-cluster/src/scale_cluster.py b/apps/scale-cluster/src/scale_cluster.py index d37a2aa4c..9f50aec01 100644 --- a/apps/scale-cluster/src/scale_cluster.py +++ b/apps/scale-cluster/src/scale_cluster.py @@ -5,6 +5,7 @@ import boto3 import dateutil.relativedelta + BATCH = boto3.client('batch') COST_EXPLORER = boto3.client('ce') diff --git a/apps/start-execution-manager/src/start_execution_manager.py b/apps/start-execution-manager/src/start_execution_manager.py index 5a5ecbfef..f46507093 100644 --- a/apps/start-execution-manager/src/start_execution_manager.py +++ b/apps/start-execution-manager/src/start_execution_manager.py @@ -2,9 +2,10 @@ import os import boto3 +from lambda_logging import log_exceptions, logger import dynamo -from lambda_logging import log_exceptions, logger + LAMBDA_CLIENT = boto3.client('lambda') diff --git a/apps/start-execution-worker/src/start_execution_worker.py b/apps/start-execution-worker/src/start_execution_worker.py index 63b3b2a72..f603d1b84 100644 --- a/apps/start-execution-worker/src/start_execution_worker.py +++ b/apps/start-execution-worker/src/start_execution_worker.py @@ -4,9 +4,9 @@ from typing import Any import boto3 - from lambda_logging import log_exceptions, logger + STEP_FUNCTION = boto3.client('stepfunctions') batch_params_file = Path(__file__).parent / 'batch_params_by_job_type.json' diff --git a/apps/upload-log/src/upload_log.py b/apps/upload-log/src/upload_log.py index 87fce2555..38c04520b 100644 --- a/apps/upload-log/src/upload_log.py +++ b/apps/upload-log/src/upload_log.py @@ -5,6 +5,7 @@ import boto3 from botocore.config import Config + config = Config(retries={'max_attempts': 2, 'mode': 'standard'}) CLOUDWATCH = boto3.client('logs', config=config) S3 = boto3.client('s3') diff --git a/lib/dynamo/dynamo/jobs.py b/lib/dynamo/dynamo/jobs.py index 234df602d..a9638db0b 100644 --- a/lib/dynamo/dynamo/jobs.py +++ b/lib/dynamo/dynamo/jobs.py @@ -2,7 +2,7 @@ from decimal import Decimal from os import environ from pathlib import Path -from typing import List, Optional +from typing import Optional from uuid import uuid4 from boto3.dynamodb.conditions import Attr, Key @@ -18,6 +18,7 @@ from 
dynamo.user import APPLICATION_APPROVED, APPLICATION_NOT_STARTED, APPLICATION_PENDING, APPLICATION_REJECTED from dynamo.util import DYNAMODB_RESOURCE, convert_floats_to_decimals, current_utc_time, get_request_time_expression + costs_file = Path(__file__).parent / 'costs.json' COSTS = convert_floats_to_decimals(json.loads(costs_file.read_text())) @@ -29,7 +30,7 @@ DEFAULT_PARAMS_BY_JOB_TYPE = {} -def put_jobs(user_id: str, jobs: List[dict], dry_run=False) -> List[dict]: +def put_jobs(user_id: str, jobs: list[dict], dry_run=False) -> list[dict]: table = DYNAMODB_RESOURCE.Table(environ['JOBS_TABLE_NAME']) request_time = current_utc_time() diff --git a/lib/dynamo/dynamo/user.py b/lib/dynamo/dynamo/user.py index fa3a9aa27..6198032c1 100644 --- a/lib/dynamo/dynamo/user.py +++ b/lib/dynamo/dynamo/user.py @@ -16,6 +16,7 @@ ) from dynamo.util import DYNAMODB_RESOURCE + APPLICATION_NOT_STARTED = 'NOT_STARTED' APPLICATION_PENDING = 'PENDING' APPLICATION_APPROVED = 'APPROVED' diff --git a/lib/dynamo/dynamo/util.py b/lib/dynamo/dynamo/util.py index 6caf32c75..7dab2f394 100644 --- a/lib/dynamo/dynamo/util.py +++ b/lib/dynamo/dynamo/util.py @@ -5,6 +5,7 @@ from boto3.dynamodb.conditions import Key from dateutil.parser import parse + DYNAMODB_RESOURCE = boto3.resource('dynamodb') diff --git a/lib/dynamo/setup.py b/lib/dynamo/setup.py index bbf12896e..676e0abb6 100644 --- a/lib/dynamo/setup.py +++ b/lib/dynamo/setup.py @@ -1,5 +1,6 @@ from setuptools import find_packages, setup + setup( name='dynamo', license='BSD', diff --git a/lib/lambda_logging/lambda_logging/__init__.py b/lib/lambda_logging/lambda_logging/__init__.py index 64c65da08..a8da1498b 100644 --- a/lib/lambda_logging/lambda_logging/__init__.py +++ b/lib/lambda_logging/lambda_logging/__init__.py @@ -1,6 +1,7 @@ import logging from functools import wraps + logging.basicConfig() logger = logging.getLogger() logger.setLevel(logging.INFO) diff --git a/lib/lambda_logging/setup.py b/lib/lambda_logging/setup.py index 0a26182af..017984faf 100644 --- a/lib/lambda_logging/setup.py +++ b/lib/lambda_logging/setup.py @@ -1,5 +1,6 @@ from setuptools import find_packages, setup + setup( name='lambda_logging', license='BSD', diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..d29cd32ab --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,40 @@ +[tool.ruff] +line-length = 120 +src = [ + "apps", + "apps/api/src", + "apps/check-processing-time/src", + "apps/get-files/src", + "apps/handle-batch-event/src", + "apps/set-batch-overrides/src", + "apps/scale-cluster/src", + "apps/start-execution-manager/src", + "apps/start-execution-worker/src", + "apps/disable-private-dns/src", + "apps/update-db/src", + "apps/upload-log/src", + "lib/dynamo", + "tests", +] + +[tool.ruff.format] +indent-style = "space" +quote-style = "single" + +[tool.ruff.lint] +extend-select = [ + "I", # isort: https://docs.astral.sh/ruff/rules/#isort-i + "UP", # pyupgrade: https://docs.astral.sh/ruff/rules/#pyupgrade-up + + # TODO: uncomment the following extensions and address their warnings: + #"D", # pydocstyle: https://docs.astral.sh/ruff/rules/#pydocstyle-d + #"ANN", # annotations: https://docs.astral.sh/ruff/rules/#flake8-annotations-ann + #"PTH", # use-pathlib-pth: https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth +] + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.lint.isort] +case-sensitive = true +lines-after-imports = 2 diff --git a/ruff.toml b/ruff.toml deleted file mode 100644 index 61f91605f..000000000 --- a/ruff.toml +++ /dev/null @@ 
-1,4 +0,0 @@ -line-length = 120 - -[format] -quote-style = "single" diff --git a/tests/conftest.py b/tests/conftest.py index 1d80a4b8d..f0de52b5c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -21,7 +21,7 @@ class TableProperties: def get_table_properties_from_template(resource_name): yaml.SafeLoader.add_multi_constructor('!', lambda loader, suffix, node: None) template_file = path.join(path.dirname(__file__), '../apps/main-cf.yml') - with open(template_file, 'r') as f: + with open(template_file) as f: template = yaml.safe_load(f) table_properties = template['Resources'][resource_name]['Properties'] return table_properties diff --git a/tests/test_api/conftest.py b/tests/test_api/conftest.py index 89ce603c8..d2f800fd8 100644 --- a/tests/test_api/conftest.py +++ b/tests/test_api/conftest.py @@ -7,6 +7,7 @@ from hyp3_api import CMR_URL, app, auth from hyp3_api.util import get_granules + AUTH_COOKIE = 'asf-urs' COSTS_URI = '/costs' JOBS_URI = '/jobs' diff --git a/tests/test_api/test_api_spec.py b/tests/test_api/test_api_spec.py index eb4b3bc2a..066b82e45 100644 --- a/tests/test_api/test_api_spec.py +++ b/tests/test_api/test_api_spec.py @@ -1,8 +1,8 @@ from http import HTTPStatus +from hyp3_api import auth from test_api.conftest import AUTH_COOKIE, JOBS_URI, USER_URI, login -from hyp3_api import auth ENDPOINTS = { JOBS_URI: {'GET', 'HEAD', 'OPTIONS', 'POST'}, diff --git a/tests/test_api/test_get_user.py b/tests/test_api/test_get_user.py index 5e2eb0dd6..554552f6b 100644 --- a/tests/test_api/test_get_user.py +++ b/tests/test_api/test_get_user.py @@ -1,9 +1,8 @@ from http import HTTPStatus -from test_api.conftest import USER_URI, login, make_db_record - from dynamo.user import APPLICATION_APPROVED, APPLICATION_NOT_STARTED, APPLICATION_REJECTED from dynamo.util import current_utc_time +from test_api.conftest import USER_URI, login, make_db_record def test_get_new_user(client, tables, monkeypatch): diff --git a/tests/test_api/test_patch_user.py b/tests/test_api/test_patch_user.py index da2de54e5..e49a79b16 100644 --- a/tests/test_api/test_patch_user.py +++ b/tests/test_api/test_patch_user.py @@ -2,9 +2,8 @@ from decimal import Decimal from http import HTTPStatus -from test_api.conftest import DEFAULT_ACCESS_TOKEN, USER_URI, login - from dynamo.user import APPLICATION_APPROVED, APPLICATION_PENDING, APPLICATION_REJECTED +from test_api.conftest import DEFAULT_ACCESS_TOKEN, USER_URI, login def test_patch_new_user(client, tables): diff --git a/tests/test_api/test_submit_job.py b/tests/test_api/test_submit_job.py index ca8dbde29..239b94ed0 100644 --- a/tests/test_api/test_submit_job.py +++ b/tests/test_api/test_submit_job.py @@ -1,10 +1,9 @@ from decimal import Decimal from http import HTTPStatus -from test_api.conftest import login, make_job, setup_requests_mock, submit_batch - from dynamo.user import APPLICATION_PENDING from dynamo.util import current_utc_time +from test_api.conftest import login, make_job, setup_requests_mock, submit_batch def test_submit_one_job(client, approved_user): diff --git a/tests/test_api/test_validation.py b/tests/test_api/test_validation.py index daee9c1f6..556c4d707 100644 --- a/tests/test_api/test_validation.py +++ b/tests/test_api/test_validation.py @@ -1,9 +1,9 @@ import responses from pytest import raises from shapely.geometry import Polygon -from test_api.conftest import setup_requests_mock_with_given_polygons from hyp3_api import CMR_URL, validation +from test_api.conftest import setup_requests_mock_with_given_polygons def rectangle(north, south, east, 
west): diff --git a/tests/test_dynamo/test_jobs.py b/tests/test_dynamo/test_jobs.py index c021feb7d..add1092e0 100644 --- a/tests/test_dynamo/test_jobs.py +++ b/tests/test_dynamo/test_jobs.py @@ -2,9 +2,9 @@ from decimal import Decimal import pytest -from conftest import list_have_same_elements import dynamo +from conftest import list_have_same_elements from dynamo.exceptions import ( InsufficientCreditsError, InvalidApplicationStatusError, diff --git a/tests/test_lambda_logging.py b/tests/test_lambda_logging.py index f6f8ce093..9760a7dfa 100644 --- a/tests/test_lambda_logging.py +++ b/tests/test_lambda_logging.py @@ -1,6 +1,5 @@ -import pytest - import lambda_logging +import pytest def test_log_exceptions(): diff --git a/tests/test_render_cf.py b/tests/test_render_cf.py index ecb27bacd..50b3e49a5 100644 --- a/tests/test_render_cf.py +++ b/tests/test_render_cf.py @@ -1,7 +1,8 @@ import pytest -import render_cf import yaml +import render_cf + def test_parse_map_statement(): assert render_cf.parse_map_statement('for item in items') == ('item', 'items') diff --git a/tests/test_set_batch_overrides.py b/tests/test_set_batch_overrides.py index f3b22be7e..041477878 100644 --- a/tests/test_set_batch_overrides.py +++ b/tests/test_set_batch_overrides.py @@ -3,11 +3,11 @@ from set_batch_overrides import ( AUTORIFT_LANDSAT_MEMORY, AUTORIFT_S2_MEMORY, - INSAR_ISCE_BURST_MEMORY_128G, + INSAR_ISCE_BURST_MEMORY_8G, INSAR_ISCE_BURST_MEMORY_16G, INSAR_ISCE_BURST_MEMORY_32G, INSAR_ISCE_BURST_MEMORY_64G, - INSAR_ISCE_BURST_MEMORY_8G, + INSAR_ISCE_BURST_MEMORY_128G, RTC_GAMMA_10M_MEMORY, WATER_MAP_10M_MEMORY, lambda_handler, From 837eed448a08fd70bc909203e2bab8e092a45c2c Mon Sep 17 00:00:00 2001 From: Jake Herrmann Date: Mon, 16 Dec 2024 15:20:52 -0900 Subject: [PATCH 09/10] add lambda_logging to ruff src --- apps/start-execution-manager/src/start_execution_manager.py | 2 +- apps/start-execution-worker/src/start_execution_worker.py | 1 + pyproject.toml | 1 + tests/test_lambda_logging.py | 3 ++- 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/apps/start-execution-manager/src/start_execution_manager.py b/apps/start-execution-manager/src/start_execution_manager.py index f46507093..841c63577 100644 --- a/apps/start-execution-manager/src/start_execution_manager.py +++ b/apps/start-execution-manager/src/start_execution_manager.py @@ -2,9 +2,9 @@ import os import boto3 -from lambda_logging import log_exceptions, logger import dynamo +from lambda_logging import log_exceptions, logger LAMBDA_CLIENT = boto3.client('lambda') diff --git a/apps/start-execution-worker/src/start_execution_worker.py b/apps/start-execution-worker/src/start_execution_worker.py index f603d1b84..ac8b47b18 100644 --- a/apps/start-execution-worker/src/start_execution_worker.py +++ b/apps/start-execution-worker/src/start_execution_worker.py @@ -4,6 +4,7 @@ from typing import Any import boto3 + from lambda_logging import log_exceptions, logger diff --git a/pyproject.toml b/pyproject.toml index d29cd32ab..beb35ef4c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,6 +14,7 @@ src = [ "apps/update-db/src", "apps/upload-log/src", "lib/dynamo", + "lib/lambda_logging", "tests", ] diff --git a/tests/test_lambda_logging.py b/tests/test_lambda_logging.py index 9760a7dfa..f6f8ce093 100644 --- a/tests/test_lambda_logging.py +++ b/tests/test_lambda_logging.py @@ -1,6 +1,7 @@ -import lambda_logging import pytest +import lambda_logging + def test_log_exceptions(): @lambda_logging.log_exceptions From 270ca8b5a9b0beef4c45aef3a03e823338990e64 Mon 
Sep 17 00:00:00 2001 From: Jake Herrmann Date: Tue, 17 Dec 2024 09:55:22 -0900 Subject: [PATCH 10/10] update pyproject.toml --- pyproject.toml | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index beb35ef4c..5b7617bf7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,20 +1,12 @@ +[project] +requires-python = "==3.9" + [tool.ruff] line-length = 120 src = [ "apps", - "apps/api/src", - "apps/check-processing-time/src", - "apps/get-files/src", - "apps/handle-batch-event/src", - "apps/set-batch-overrides/src", - "apps/scale-cluster/src", - "apps/start-execution-manager/src", - "apps/start-execution-worker/src", - "apps/disable-private-dns/src", - "apps/update-db/src", - "apps/upload-log/src", - "lib/dynamo", - "lib/lambda_logging", + "**/src", + "lib/*", "tests", ]
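The final `src` list leans on ruff's glob support: `"**/src"` picks up every per-app `src` directory and `"lib/*"` picks up both `lib/dynamo` and `lib/lambda_logging`, replacing the explicit enumeration. The `[project]` table's `requires-python` value also gives ruff a Python version to target. As an illustrative sanity check (run from the repository root; output depends on the checkout), the patterns can be expanded with `pathlib`:

# Illustrative check that the new src globs cover the directories the old
# explicit list named; run from the repository root.
from pathlib import Path

src_roots = {Path('apps'), Path('tests')}
src_roots.update(p for p in Path().glob('**/src') if p.is_dir())
src_roots.update(p for p in Path('lib').glob('*') if p.is_dir())
print('\n'.join(sorted(str(p) for p in src_roots)))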