Lock file maintenance Python dependencies (#644)
* Lock file maintenance Python dependencies

* Fix linting

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Dragomir Penev <dragomir.penev@canonical.com>
renovate[bot] and dragomirp authored Oct 27, 2024
Parent: 4286b57 · Commit: c5c916d
Showing 19 changed files with 452 additions and 459 deletions.
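
A note on reading the hunks below: the first commit is Renovate's lock file maintenance, which refreshes poetry.lock (and bumps the pinned floors in pyproject.toml); the "Fix linting" follow-up reflows the Python sources to satisfy the formatter that ships with the ruff bump (0.6.9 → 0.7.1) made in the same change. Nearly every Python hunk is the same mechanical quote flip: f-strings move from single outer quotes (needed when the replacement fields contain double quotes) to double outer quotes with single-quoted nested literals. A minimal illustration of the pattern, with sample data that is not from the repo:

```python
# Illustrative only: the quote-flip pattern applied across the hunks below.
endpoint_data = {"hostname": "s3.us-east-1.amazonaws.com"}  # sample value
endpoint = "https://s3.amazonaws.com"

# Before: outer single quotes, because the nested lookups use double quotes.
old = f'{endpoint.split("://")[0]}://{endpoint_data["hostname"]}'

# After: outer double quotes, nested lookups switched to single quotes.
new = f"{endpoint.split('://')[0]}://{endpoint_data['hostname']}"

assert old == new == "https://s3.us-east-1.amazonaws.com"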
561 changes: 276 additions & 285 deletions poetry.lock

Large diffs are not rendered by default.

16 changes: 8 additions & 8 deletions pyproject.toml
@@ -7,15 +7,15 @@ package-mode = false
[tool.poetry.dependencies]
python = "^3.10"
ops = "^2.17.0"
-cryptography = "^43.0.1"
-boto3 = "^1.35.38"
+cryptography = "^43.0.3"
+boto3 = "^1.35.49"
pgconnstr = "^1.0.1"
requests = "^2.32.3"
tenacity = "^9.0.0"
-psycopg2 = "^2.9.9"
-cosl = "^0.0.40"
+psycopg2 = "^2.9.10"
+cosl = "^0.0.42"
pydantic = "^1.10.18"
-poetry-core = "^1.9.0"
+poetry-core = "^1.9.1"
pyOpenSSL = "^24.2.1"
jinja2 = "^3.1.4"

@@ -38,7 +38,7 @@ opentelemetry-exporter-otlp-proto-http = "1.21.0"
optional = true

[tool.poetry.group.format.dependencies]
-ruff = "^0.6.9"
+ruff = "^0.7.1"

[tool.poetry.group.lint]
optional = true
@@ -50,7 +50,7 @@ codespell = "^2.3.0"
optional = true

[tool.poetry.group.unit.dependencies]
-coverage = {extras = ["toml"], version = "^7.6.2"}
+coverage = {extras = ["toml"], version = "^7.6.4"}
pytest = "^8.3.3"
pytest-asyncio = "*"
parameterized = "^0.9.0"
@@ -71,7 +71,7 @@ boto3 = "*"
tenacity = "*"
landscape-api-py3 = "^0.9.0"
mailmanclient = "^3.3.5"
-psycopg2-binary = "^2.9.9"
+psycopg2-binary = "^2.9.10"
allure-pytest = "^2.13.5"
allure-pytest-collection-report = {git = "https://github.com/canonical/data-platform-workflows", tag = "v23.0.4", subdirectory = "python/pytest_plugins/allure_pytest_collection_report"}

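One detail worth noting in the bumps above: Poetry's caret constraints widen differently depending on the leftmost non-zero digit. For a zero-based version like cosl's, `^0.0.40` pins the exact patch level, so a lock-only refresh could never pick up 0.0.42 and pyproject.toml itself has to change; the other edits (e.g. psycopg2, already covered by `^2.9.9`) look like Renovate's "bump" range strategy keeping the floor current. A sketch of the equivalent ranges, my reading of Poetry's documented caret rules expressed via the packaging library (not code from this repo):

```python
from packaging.specifiers import SpecifierSet

# "^2.9.9" is roughly >=2.9.9,<3.0.0 — the 2.9.10 bump was already allowed.
assert "2.9.10" in SpecifierSet(">=2.9.9,<3.0.0")

# "^0.0.40" is roughly >=0.0.40,<0.0.41 — so cosl 0.0.42 requires
# editing the constraint in pyproject.toml, not just the lock file.
assert "0.0.42" not in SpecifierSet(">=0.0.40,<0.0.41")
```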
16 changes: 8 additions & 8 deletions src/backups.py
@@ -202,7 +202,7 @@ def can_use_s3_repository(self) -> tuple[bool, str | None]:
return False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE

return_code, system_identifier_from_instance, error = self._execute_command([
-f'/snap/charmed-postgresql/current/usr/lib/postgresql/{self.charm._patroni.get_postgresql_version().split(".")[0]}/bin/pg_controldata',
+f"/snap/charmed-postgresql/current/usr/lib/postgresql/{self.charm._patroni.get_postgresql_version().split('.')[0]}/bin/pg_controldata",
POSTGRESQL_DATA_PATH,
])
if return_code != 0:
@@ -244,7 +244,7 @@ def _construct_endpoint(self, s3_parameters: dict) -> str:

# Use the built endpoint if it is an AWS endpoint.
if endpoint_data and endpoint.endswith(endpoint_data["dnsSuffix"]):
-endpoint = f'{endpoint.split("://")[0]}://{endpoint_data["hostname"]}'
+endpoint = f"{endpoint.split('://')[0]}://{endpoint_data['hostname']}"

return endpoint

@@ -392,7 +392,7 @@ def _generate_backup_list_output(self) -> str:
backup_reference = "None"
if backup["reference"]:
backup_reference, _ = self._parse_backup_id(backup["reference"][-1])
-lsn_start_stop = f'{backup["lsn"]["start"]} / {backup["lsn"]["stop"]}'
+lsn_start_stop = f"{backup['lsn']['start']} / {backup['lsn']['stop']}"
time_start, time_stop = (
datetime.strftime(
datetime.fromtimestamp(stamp, timezone.utc), "%Y-%m-%dT%H:%M:%SZ"
@@ -404,7 +404,7 @@ def _generate_backup_list_output(self) -> str:
if backup["archive"] and backup["archive"]["start"]
else ""
)
-backup_path = f'/{self.stanza_name}/{backup["label"]}'
+backup_path = f"/{self.stanza_name}/{backup['label']}"
error = backup["error"]
backup_status = "finished"
if error:
@@ -1121,16 +1121,16 @@ def _generate_fake_backup_id(self, backup_type: str) -> str:

if last_full_backup is None:
raise TypeError("Differential backup requested but no previous full backup")
-return f'{last_full_backup}_{datetime.strftime(datetime.now(), "%Y%m%d-%H%M%SD")}'
+return f"{last_full_backup}_{datetime.strftime(datetime.now(), '%Y%m%d-%H%M%SD')}"
if backup_type == "incremental":
backups = self._list_backups(show_failed=False, parse=False).keys()
if not backups:
raise TypeError("Incremental backup requested but no previous successful backup")
-return f'{backups[-1]}_{datetime.strftime(datetime.now(), "%Y%m%d-%H%M%SI")}'
+return f"{backups[-1]}_{datetime.strftime(datetime.now(), '%Y%m%d-%H%M%SI')}"

def _fetch_backup_from_id(self, backup_id: str) -> str:
"""Fetches backup's pgbackrest label from backup id."""
-timestamp = f'{datetime.strftime(datetime.strptime(backup_id, "%Y-%m-%dT%H:%M:%SZ"), "%Y%m%d-%H%M%S")}'
+timestamp = f"{datetime.strftime(datetime.strptime(backup_id, '%Y-%m-%dT%H:%M:%SZ'), '%Y%m%d-%H%M%S')}"
backups = self._list_backups(show_failed=False, parse=False).keys()
for label in backups:
if timestamp in label:
@@ -1285,7 +1285,7 @@ def _retrieve_s3_parameters(self) -> tuple[dict, list[str]]:
# like Ceph Object Gateway (radosgw).
s3_parameters["endpoint"] = s3_parameters["endpoint"].rstrip("/")
s3_parameters["path"] = (
-f'/{s3_parameters["path"].strip("/")}' # The slash in the beginning is required by pgBackRest.
+f"/{s3_parameters['path'].strip('/')}" # The slash in the beginning is required by pgBackRest.
)
s3_parameters["bucket"] = s3_parameters["bucket"].strip("/")

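The `_fetch_backup_from_id` hunk above only reflows quotes, but the conversion it performs is easy to miss: a charm-facing backup id in ISO-8601 form is reduced to the timestamp fragment embedded in pgBackRest labels, then matched by substring. A minimal sketch of that round-trip; the sample id and label are mine, not from the repo:

```python
from datetime import datetime

backup_id = "2024-10-27T12:34:56Z"  # hypothetical id in the charm's format
timestamp = datetime.strftime(
    datetime.strptime(backup_id, "%Y-%m-%dT%H:%M:%SZ"), "%Y%m%d-%H%M%S"
)
assert timestamp == "20241027-123456"

# _fetch_backup_from_id then scans pgBackRest labels for this fragment;
# a full-backup label such as "20241027-123456F" would match (the F/D/I
# suffixes mirror the ones _generate_fake_backup_id appends above).
label = "20241027-123456F"  # hypothetical pgBackRest label
assert timestamp in label
```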
4 changes: 2 additions & 2 deletions src/relations/async_replication.py
@@ -339,7 +339,7 @@ def result():
# Input is hardcoded
process = run( # noqa: S603
[
-f'/snap/charmed-postgresql/current/usr/lib/postgresql/{self.charm._patroni.get_postgresql_version().split(".")[0]}/bin/pg_controldata',
+f"/snap/charmed-postgresql/current/usr/lib/postgresql/{self.charm._patroni.get_postgresql_version().split('.')[0]}/bin/pg_controldata",
POSTGRESQL_DATA_PATH,
],
capture_output=True,
@@ -645,7 +645,7 @@ def _primary_cluster_endpoint(self) -> str:
def _re_emit_async_relation_changed_event(self) -> None:
"""Re-emit the async relation changed event."""
relation = self._relation
-getattr(self.charm.on, f'{relation.name.replace("-", "_")}_relation_changed').emit(
+getattr(self.charm.on, f"{relation.name.replace('-', '_')}_relation_changed").emit(
relation,
app=relation.app,
unit=next(unit for unit in relation.units if unit.app == relation.app),
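The `_re_emit_async_relation_changed_event` hunk is another quote-only change, but the line it touches is doing something subtle: ops exposes relation events as attributes of `charm.on`, and attribute names cannot contain hyphens, so the relation name is rewritten before the `getattr` lookup. A toy illustration with a hypothetical relation name:

```python
relation_name = "replication-offer"  # hypothetical relation name
event_attr = f"{relation_name.replace('-', '_')}_relation_changed"
assert event_attr == "replication_offer_relation_changed"
# The charm then re-emits via getattr(self.charm.on, event_attr).emit(...)
```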
34 changes: 17 additions & 17 deletions tests/integration/ha_tests/helpers.py
@@ -76,7 +76,7 @@ async def are_all_db_processes_down(ops_test: OpsTest, process: str, signal: str

# If something was returned, there is a running process.
if len(processes) > 0:
logger.info("Unit {unit.name} not yet down")
logger.info(f"Unit {unit.name} not yet down")
# Try to rekill the unit
await send_signal_to_process(ops_test, unit.name, process, signal)
raise ProcessRunningError
@@ -108,9 +108,9 @@ async def are_writes_increasing(
use_ip_from_inside=use_ip_from_inside,
extra_model=extra_model,
)
-assert (
-more_writes[member] > count
-), f"{member}: writes not continuing to DB (current writes: {more_writes[member]} - previous writes: {count})"
+assert more_writes[member] > count, (
+f"{member}: writes not continuing to DB (current writes: {more_writes[member]} - previous writes: {count})"
+)


async def app_name(
@@ -214,9 +214,9 @@ async def is_cluster_updated(
) -> None:
# Verify that the old primary is now a replica.
logger.info("checking that the former primary is now a replica")
-assert await is_replica(
-ops_test, primary_name, use_ip_from_inside
-), "there are more than one primary in the cluster."
+assert await is_replica(ops_test, primary_name, use_ip_from_inside), (
+"there are more than one primary in the cluster."
+)

# Verify that all units are part of the same cluster.
logger.info("checking that all units are part of the same cluster")
@@ -255,9 +255,9 @@ async def check_writes(
print(
f"member: {member}, count: {count}, max_number_written: {max_number_written[member]}, total_expected_writes: {total_expected_writes}"
)
-assert (
-count == max_number_written[member]
-), f"{member}: writes to the db were missed: count of actual writes different from the max number written."
+assert count == max_number_written[member], (
+f"{member}: writes to the db were missed: count of actual writes different from the max number written."
+)
assert total_expected_writes == count, f"{member}: writes to the db were missed."
return total_expected_writes

@@ -309,7 +309,7 @@ def count_writes_on_members(members, password, down_ips) -> tuple[dict[str, int]
f" host='{host}' password='{password}' connect_timeout=10"
)

-member_name = f'{member["model"]}.{member["name"]}'
+member_name = f"{member['model']}.{member['name']}"
connection = None
try:
with (
@@ -378,9 +378,9 @@ async def fetch_cluster_members(ops_test: OpsTest, use_ip_from_inside: bool = Fa
if len(member_ips) > 0:
# If the list of members IPs was already fetched, also compare the
# list provided by other members.
-assert member_ips == {
-member["host"] for member in cluster_info.json()["members"]
-}, "members report different lists of cluster members."
+assert member_ips == {member["host"] for member in cluster_info.json()["members"]}, (
+"members report different lists of cluster members."
+)
else:
member_ips = {member["host"] for member in cluster_info.json()["members"]}
return member_ips
@@ -929,9 +929,9 @@ async def add_unit_with_storage(ops_test, app, storage):
assert return_code == 0, "Failed to add unit with storage"
async with ops_test.fast_forward():
await ops_test.model.wait_for_idle(apps=[app], status="active", timeout=2000)
-assert (
-len(ops_test.model.applications[app].units) == expected_units
-), "New unit not added to model"
+assert len(ops_test.model.applications[app].units) == expected_units, (
+"New unit not added to model"
+)

# verify storage attached
curr_units = [unit.name for unit in ops_test.model.applications[app].units]
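Two things stand out in the helpers.py hunks. The first hunk is a genuine fix rather than a style change: the `logger.info` call was missing its `f` prefix, so the literal text `{unit.name}` was being logged. Every other hunk applies one mechanical transformation: the formatter now keeps the asserted condition on the `assert` line and parenthesizes the message instead of the condition. Shown side by side on a simplified case (illustrative code, not from the repo):

```python
count, more = 3, 5

# Before: the condition is wrapped, pushing the message onto the closing line.
assert (
    more > count
), f"writes not continuing (current: {more} - previous: {count})"

# After: the condition stays inline and the long message is parenthesized.
assert more > count, (
    f"writes not continuing (current: {more} - previous: {count})"
)
```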
2 changes: 1 addition & 1 deletion tests/integration/ha_tests/test_async_replication.py
@@ -371,7 +371,7 @@ async def test_promote_standby(
primary = await get_primary(ops_test, any_unit)
address = get_unit_address(ops_test, primary)
password = await get_password(ops_test, primary)
-database_name = f'{APPLICATION_NAME.replace("-", "_")}_database'
+database_name = f"{APPLICATION_NAME.replace('-', '_')}_database"
connection = None
try:
connection = psycopg2.connect(
6 changes: 3 additions & 3 deletions tests/integration/ha_tests/test_replication.py
@@ -149,8 +149,8 @@ async def test_no_data_replicated_between_clusters(ops_test: OpsTest, continuous
"SELECT EXISTS (SELECT FROM information_schema.tables"
" WHERE table_schema = 'public' AND table_name = 'continuous_writes');"
)
-assert not cursor.fetchone()[
-0
-], "table 'continuous_writes' was replicated to the second cluster"
+assert not cursor.fetchone()[0], (
+"table 'continuous_writes' was replicated to the second cluster"
+)
finally:
connection.close()
12 changes: 6 additions & 6 deletions tests/integration/ha_tests/test_restore_cluster.py
@@ -97,9 +97,9 @@ async def test_cluster_restore(ops_test):
logger.info("Upscaling the second cluster with the old data")
for storage in storages:
unit = await add_unit_with_storage(ops_test, SECOND_APPLICATION, storage)
-assert await reused_full_cluster_recovery_storage(
-ops_test, unit.name
-), "attached storage not properly re-used by Postgresql."
+assert await reused_full_cluster_recovery_storage(ops_test, unit.name), (
+"attached storage not properly re-used by Postgresql."
+)

primary = await get_primary(
ops_test, ops_test.model.applications[SECOND_APPLICATION].units[0].name
@@ -111,9 +111,9 @@
"SELECT EXISTS (SELECT FROM information_schema.tables"
" WHERE table_schema = 'public' AND table_name = 'restore_table_1');"
)
-assert cursor.fetchone()[
-0
-], "data wasn't correctly restored: table 'restore_table_1' doesn't exist"
+assert cursor.fetchone()[0], (
+"data wasn't correctly restored: table 'restore_table_1' doesn't exist"
+)
connection.close()

# check that there is only one primary