From 44103fbb8956effa6c90f6561cb79a1d788c91a8 Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Mon, 16 Jan 2023 06:08:50 -0500 Subject: [PATCH 1/7] Provide the "Initial tables" alembic migration Since we already have been using the database for quite some time, we create an initial migration which will take an empty database to the database base schema version as of commit 6a764f154. That commit was the most recent version deployed in Red Hat's staging environment. In order to make it convenient to work with migrations, we also create a `tox` environment, `alembic`, which simply invokes the new script `alembic.run` which one can modify to execute whatever Alembic commands are needed. The committed copy takes no actions, but has the initial commands used to create the first migration: alembic stamp head alembic revision --autogenerate -m "Initial tables" We also update the `alembic.ini` file to assume a `localhost` PostgreSQL database, and provide a Pbench Server configuration file that works in the Alembic environment. --- lib/pbench/server/database/alembic.ini | 2 +- lib/pbench/server/database/alembic.run | 13 ++ .../versions/274bab07e3f8_initial_tables.py | 169 ++++++++++++++++++ lib/pbench/server/database/pbench-server.cfg | 53 ++++++ tox.ini | 7 + 5 files changed, 243 insertions(+), 1 deletion(-) create mode 100755 lib/pbench/server/database/alembic.run create mode 100644 lib/pbench/server/database/alembic/versions/274bab07e3f8_initial_tables.py create mode 100644 lib/pbench/server/database/pbench-server.cfg diff --git a/lib/pbench/server/database/alembic.ini b/lib/pbench/server/database/alembic.ini index 1c45b50df2..ad22be111d 100644 --- a/lib/pbench/server/database/alembic.ini +++ b/lib/pbench/server/database/alembic.ini @@ -35,7 +35,7 @@ script_location = alembic # are written from script.py.mako # output_encoding = utf-8 -sqlalchemy.url = driver://user:pass@localhost/dbname +sqlalchemy.url = postgresql://pbench:pbench@localhost:5432/pbench [post_write_hooks] diff --git a/lib/pbench/server/database/alembic.run b/lib/pbench/server/database/alembic.run new file mode 100755 index 0000000000..846ccdc864 --- /dev/null +++ b/lib/pbench/server/database/alembic.run @@ -0,0 +1,13 @@ +#!/bin/bash -e + +# Alembic command expects to run in our database directory. +cd lib/pbench/server/database + +# Our env requires a pbench-server config file. +export _PBENCH_SERVER_CONFIG=$(pwd)/pbench-server.cfg + +# Had to run the command below before I could create the initial tables. +#alembic stamp head + +# Then I could create the initial migration against an empty database. +#alembic revision --autogenerate -m "Initial tables" diff --git a/lib/pbench/server/database/alembic/versions/274bab07e3f8_initial_tables.py b/lib/pbench/server/database/alembic/versions/274bab07e3f8_initial_tables.py new file mode 100644 index 0000000000..465b513782 --- /dev/null +++ b/lib/pbench/server/database/alembic/versions/274bab07e3f8_initial_tables.py @@ -0,0 +1,169 @@ +"""Initial tables + +Revision ID: 274bab07e3f8 +Revises: 62eddcec4817 +Create Date: 2023-01-16 05:54:33.496244 + +Since we are adding Alembic migrations after we have already been using our +database in various contexts, this "Initial tables" migration describes how to +bring an empty database up to the state of the database as of commit 6a764f154. +That commit was the latest working version of the Pbench Server deployed in Red +Hat's staging environment. 
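+
+As a minimal usage sketch (assuming the `sqlalchemy.url` configured in
+`alembic.ini` points at the target database), the standard Alembic commands
+apply and revert this revision:
+
+    alembic upgrade head      # create the initial tables
+    alembic downgrade base    # drop them again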
+""" +from alembic import op +import sqlalchemy as sa + +from pbench.server.database.models import TZDateTime + +# revision identifiers, used by Alembic. +revision = "274bab07e3f8" +down_revision = "62eddcec4817" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "audit", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("root_id", sa.Integer(), nullable=True), + sa.Column("name", sa.String(length=128), nullable=True), + sa.Column( + "operation", + sa.Enum("CREATE", "READ", "UPDATE", "DELETE", name="operationcode"), + nullable=False, + ), + sa.Column( + "object_type", + sa.Enum("DATASET", "CONFIG", "NONE", "TEMPLATE", "TOKEN", name="audittype"), + nullable=True, + ), + sa.Column("object_id", sa.String(length=128), nullable=True), + sa.Column("object_name", sa.String(length=256), nullable=True), + sa.Column("user_id", sa.String(length=128), nullable=True), + sa.Column("user_name", sa.String(length=256), nullable=True), + sa.Column( + "status", + sa.Enum("BEGIN", "SUCCESS", "FAILURE", "WARNING", name="auditstatus"), + nullable=False, + ), + sa.Column( + "reason", + sa.Enum("PERMISSION", "INTERNAL", "CONSISTENCY", name="auditreason"), + nullable=True, + ), + sa.Column("attributes", sa.JSON(), nullable=True), + sa.Column("timestamp", TZDateTime(), nullable=False), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "datasets", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("name", sa.String(length=255), nullable=False), + sa.Column("owner_id", sa.String(length=255), nullable=False), + sa.Column("access", sa.String(length=255), nullable=False), + sa.Column("resource_id", sa.String(length=255), nullable=False), + sa.Column("uploaded", TZDateTime(), nullable=False), + sa.Column("created", TZDateTime(), nullable=True), + sa.Column( + "state", + sa.Enum( + "UPLOADING", + "UPLOADED", + "INDEXING", + "INDEXED", + "DELETING", + "DELETED", + name="states", + ), + nullable=False, + ), + sa.Column("transition", TZDateTime(), nullable=False), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("resource_id"), + ) + op.create_table( + "serverconfig", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("key", sa.String(length=255), nullable=False), + sa.Column("value", sa.JSON(), nullable=True), + sa.PrimaryKeyConstraint("id"), + ) + op.create_index(op.f("ix_serverconfig_key"), "serverconfig", ["key"], unique=True) + op.create_table( + "templates", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("name", sa.String(length=255), nullable=False), + sa.Column("idxname", sa.String(length=255), nullable=False), + sa.Column("template_name", sa.String(length=255), nullable=False), + sa.Column("file", sa.String(length=255), nullable=False), + sa.Column("mtime", sa.DateTime(), nullable=False), + sa.Column("template_pattern", sa.String(length=255), nullable=False), + sa.Column("index_template", sa.String(length=225), nullable=False), + sa.Column("settings", sa.JSON(), nullable=False), + sa.Column("mappings", sa.JSON(), nullable=False), + sa.Column("version", sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("idxname"), + sa.UniqueConstraint("name"), + sa.UniqueConstraint("template_name"), + ) + op.create_table( + "users", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("username", sa.String(length=255), 
nullable=False), + sa.Column("first_name", sa.String(length=255), nullable=False), + sa.Column("last_name", sa.String(length=255), nullable=False), + sa.Column("password", sa.LargeBinary(length=128), nullable=False), + sa.Column("registered_on", sa.DateTime(), nullable=False), + sa.Column("email", sa.String(length=255), nullable=False), + sa.Column("role", sa.Enum("ADMIN", name="roles"), nullable=True), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("email"), + sa.UniqueConstraint("username"), + ) + op.create_table( + "active_tokens", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("token", sa.String(length=500), nullable=False), + sa.Column("created", sa.DateTime(), nullable=False), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"), + sa.PrimaryKeyConstraint("id"), + ) + op.create_index( + op.f("ix_active_tokens_token"), "active_tokens", ["token"], unique=True + ) + op.create_table( + "dataset_metadata", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("key", sa.String(length=255), nullable=False), + sa.Column("value", sa.JSON(), nullable=True), + sa.Column("dataset_ref", sa.Integer(), nullable=False), + sa.Column("user_id", sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint( + ["dataset_ref"], + ["datasets.id"], + ), + sa.PrimaryKeyConstraint("id"), + ) + op.create_index( + op.f("ix_dataset_metadata_key"), "dataset_metadata", ["key"], unique=False + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index(op.f("ix_dataset_metadata_key"), table_name="dataset_metadata") + op.drop_table("dataset_metadata") + op.drop_index(op.f("ix_active_tokens_token"), table_name="active_tokens") + op.drop_table("active_tokens") + op.drop_table("users") + op.drop_table("templates") + op.drop_index(op.f("ix_serverconfig_key"), table_name="serverconfig") + op.drop_table("serverconfig") + op.drop_table("datasets") + op.drop_table("audit") + # ### end Alembic commands ### diff --git a/lib/pbench/server/database/pbench-server.cfg b/lib/pbench/server/database/pbench-server.cfg new file mode 100644 index 0000000000..9a040b9f79 --- /dev/null +++ b/lib/pbench/server/database/pbench-server.cfg @@ -0,0 +1,53 @@ +[DEFAULT] +# The values here override those in pbench-server-default.cfg. +install-dir = ../../../../server + +########################################################################### +## Deployment section +########################################################################### +[pbench-server] +pbench-top-dir = /var/tmp/pbench +pbench-backup-dir = %(pbench-top-dir)s/pbench.archive.backup +environment = alembic +realhost = localhost +max-unpacked-age = 36500 +maximum-dataset-retention-days = 36500 +default-dataset-retention-days = 730 +# Override the roles this pbench server takes on -- omit pbench-prep. +roles = pbench-maintenance, pbench-results, pbench-backup + +[Indexing] +index_prefix = alembic-pbench +bulk_action_count = 2000 + +[elasticsearch] +host = localhost +port = 9200 + +[database] +uri = postgresql://pbench:pbench@localhost:5432/pbench + +# User authentication section to use when authorizing the user with an OIDC +# identity provider. 
+[authentication] + +# URL of the OIDC auth server +server_url = http://localhost:8090 + +# Realm name that is used for the authentication with OIDC +realm = pbench + +# Client entity name requesting OIDC to authenticate a user +client = pbench-client + +# Client secret if the above client is not public +secret = + +########################################################################### +# crontab roles + +########################################################################### +# The rest will come from the default config file. +[config] +path = %(install-dir)s/lib/config +files = pbench-server-default.cfg diff --git a/tox.ini b/tox.ini index f29b1d6977..1347d202ec 100644 --- a/tox.ini +++ b/tox.ini @@ -40,3 +40,10 @@ basepython = python3.6 deps = -r{toxinidir}/agent/requirements.txt -r{toxinidir}/agent/test-requirements.txt + +[testenv:alembic] +description = Runs the alembic commands in `alembic.run` +deps = + -r{toxinidir}/server/requirements.txt +commands = + {toxinidir}/lib/pbench/server/database/alembic.run From 9ca434f9b4a6937ff69ddff067d9b7756c9f475d Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Mon, 16 Jan 2023 17:02:52 -0500 Subject: [PATCH 2/7] Integrate `alembic check` into basic server tests Starting with Alembic v1.9.0 the command, `alembic check` is available which will succeed if no migrations are required, and fail if it detects that a migration is necessary. We also remove the unused `alembic/alembic.ini`, drop the need for a separate `tox` environment, remove the need for a `pbench-server.cfg` file entirely by reverting to the original env.py as generated by `alembic init alembic`. We also collapse the existing migrations to one since we just need one to move from an empty database to our current schema. --- exec-tests | 5 ++ lib/pbench/server/database/alembic.ini | 35 +++++--- lib/pbench/server/database/alembic.run | 13 --- .../server/database/alembic/alembic.ini | 89 ------------------- lib/pbench/server/database/alembic/env.py | 76 +++++----------- .../62eddcec4817_add_dataset_access.py | 44 --------- ...> fa12f45a2a5a_initial_database_schema.py} | 22 ++--- lib/pbench/server/database/pbench-server.cfg | 53 ----------- server/requirements.txt | 2 +- tox.ini | 7 -- 10 files changed, 64 insertions(+), 282 deletions(-) delete mode 100755 lib/pbench/server/database/alembic.run delete mode 100644 lib/pbench/server/database/alembic/alembic.ini delete mode 100644 lib/pbench/server/database/alembic/versions/62eddcec4817_add_dataset_access.py rename lib/pbench/server/database/alembic/versions/{274bab07e3f8_initial_tables.py => fa12f45a2a5a_initial_database_schema.py} (92%) delete mode 100644 lib/pbench/server/database/pbench-server.cfg diff --git a/exec-tests b/exec-tests index 135980fd93..37a06ede26 100755 --- a/exec-tests +++ b/exec-tests @@ -228,6 +228,11 @@ if [[ "${major}" == "all" || "${major}" == "server" ]]; then ${_ECHO} verify_make_source_tree server || rc=1 fi + # Verify no database migrations would be generated. 
+ if [[ "${subtst:-alembic}" == "alembic" && ${rc} -eq 0 ]]; then + (cd lib/pbench/server/database && ${ECHO} alembic check) || rc=1 + fi + # Run the server functional tests if [[ "${subtst}" == "functional" && ${rc} -eq 0 ]]; then server_arg=${1} diff --git a/lib/pbench/server/database/alembic.ini b/lib/pbench/server/database/alembic.ini index ad22be111d..b139e421e4 100644 --- a/lib/pbench/server/database/alembic.ini +++ b/lib/pbench/server/database/alembic.ini @@ -7,8 +7,14 @@ script_location = alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s -# timezone to use when rendering the date -# within the migration file as well as the filename. +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python-dateutil library that can be +# installed by adding `alembic[tz]` to the pip requirements # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = @@ -26,10 +32,19 @@ script_location = alembic # versions/ directory # sourceless = false -# version location specification; this defaults +# version location specification; This defaults # to alembic/versions. When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat alembic/versions +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "version_path_separator" +# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions + +# version path separator; As mentioned above, this is the character used to split +# version_locations. Valid values are: +# +# version_path_separator = : +# version_path_separator = ; +# version_path_separator = space +version_path_separator = os # default: use os.pathsep # the output encoding used when revision files # are written from script.py.mako @@ -44,17 +59,17 @@ sqlalchemy.url = postgresql://pbench:pbench@localhost:5432/pbench # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks=black -# black.type=console_scripts -# black.entrypoint=black -# black.options=-l 79 +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] -keys = console, fileHandler +keys = console [formatters] keys = generic diff --git a/lib/pbench/server/database/alembic.run b/lib/pbench/server/database/alembic.run deleted file mode 100755 index 846ccdc864..0000000000 --- a/lib/pbench/server/database/alembic.run +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -e - -# Alembic command expects to run in our database directory. -cd lib/pbench/server/database - -# Our env requires a pbench-server config file. -export _PBENCH_SERVER_CONFIG=$(pwd)/pbench-server.cfg - -# Had to run the command below before I could create the initial tables. -#alembic stamp head - -# Then I could create the initial migration against an empty database. 
-#alembic revision --autogenerate -m "Initial tables" diff --git a/lib/pbench/server/database/alembic/alembic.ini b/lib/pbench/server/database/alembic/alembic.ini deleted file mode 100644 index 9ccf79ea8b..0000000000 --- a/lib/pbench/server/database/alembic/alembic.ini +++ /dev/null @@ -1,89 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = alembic - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# sys.path path, will be prepended to sys.path if present. -# defaults to the current working directory. -prepend_sys_path = . - -# timezone to use when rendering the date -# within the migration file as well as the filename. -# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to alembic/versions. When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat alembic/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -sqlalchemy.url = driver://user:pass@localhost/dbname - - -[post_write_hooks] -# post_write_hooks defines scripts or Python functions that are run -# on newly generated revision scripts. See the documentation for further -# detail and examples - -# format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks = black -# black.type = console_scripts -# black.entrypoint = black -# black.options = -l 79 REVISION_SCRIPT_FILENAME - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/lib/pbench/server/database/alembic/env.py b/lib/pbench/server/database/alembic/env.py index bba332da4c..78f78408dd 100644 --- a/lib/pbench/server/database/alembic/env.py +++ b/lib/pbench/server/database/alembic/env.py @@ -1,62 +1,28 @@ -"""Alembic Migration Driver - -This file was auto generated by `alembic init alembic` but was manually altered -to suit the needs of the Pbench server. Re-running `alembic init alembic` will -overwrite these changes! - -This Python script runs whenever the alembic migration tool is invoked in -/opt/pbench-server/lib/server/database; it contains instructions to configure -and generate a SQLAlchemy engine, procure a connection from that engine along -with a transaction, and then invoke the migration engine, using the connection -as a source of database connectivity. 
- -This requires access to Pbench library modules and the Pbench server config file -and thus must be run with - - export PYTHONPATH=/opt/pbench-server/lib:${PYTHONPATH} - export _PBENCH_SERVER_CONFIG=/opt/pbench-server/lib/config/pbench-server.cfg - -Examples: - - alembic upgrade head # upgrade database to the latest - alembic downgrade base # downgrade to the original tracked state -""" -import logging -import sys +from logging.config import fileConfig from alembic import context -from sqlalchemy import create_engine +from sqlalchemy import engine_from_config, pool -from pbench.server.api import get_server_config from pbench.server.database.database import Database -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. +# This is the Alembic Config object, which provides access to the values within +# the .ini file in use. config = context.config -# Add syslog handler to send logs to journald -log = logging.getLogger("alembic") -handler = logging.handlers.SysLogHandler("/dev/log") -log.addHandler(handler) - -# add your model's MetaData object here for 'autogenerate' support: +# Interpret the config file for Python logging and setup the loggers. +fileConfig(config.config_file_name) +# Add your model's MetaData object here for 'autogenerate' support: +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata target_metadata = Database.Base.metadata -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - -try: - server_config = get_server_config() - url = Database.get_engine_uri(server_config) -except Exception as e: - print(e) - sys.exit(1) +# Other values from the config, defined by the needs of env.py, can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. -def run_migrations_offline(url: str): +def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an @@ -65,6 +31,7 @@ def run_migrations_offline(url: str): Calls to context.execute() here emit the given string to the script output. """ + url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, @@ -76,14 +43,17 @@ def run_migrations_offline(url: str): context.run_migrations() -def run_migrations_online(url: str): +def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
""" - - connectable = create_engine(url) + connectable = engine_from_config( + config.get_section(config.config_ini_section), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) with connectable.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) @@ -93,8 +63,6 @@ def run_migrations_online(url: str): if context.is_offline_mode(): - print("running migration offline") - run_migrations_offline(url) + run_migrations_offline() else: - print("running migration online") - run_migrations_online(url) + run_migrations_online() diff --git a/lib/pbench/server/database/alembic/versions/62eddcec4817_add_dataset_access.py b/lib/pbench/server/database/alembic/versions/62eddcec4817_add_dataset_access.py deleted file mode 100644 index 776ce30bd1..0000000000 --- a/lib/pbench/server/database/alembic/versions/62eddcec4817_add_dataset_access.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Add Dataset.access - -Revision ID: 62eddcec4817 -Revises: base -Create Date: 2021-05-27 16:22:13.714761 - -""" -from alembic import op -import sqlalchemy as sa - -# revision identifiers, used by Alembic. -revision = "62eddcec4817" -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade(): - """ - Upgrade the "base" revision with changes necessary to support publishing - datasets. - - 1. Add the "access" column, and set all existing rows to "private". This - can't be done in a single step, apparently. Instead, we add the column, - set the value in all existing rows, and then mark it non-nullable. - 2. The document map can be extremely large, so change the dataset metadata - "value" column from 2048 character String to unbounded Text. - """ - op.add_column("datasets", sa.Column("access", sa.String(255), default="private")) - op.execute("UPDATE datasets SET access = 'private'") - op.alter_column("datasets", "access", nullable=False) - op.alter_column( - "dataset_metadata", "value", type_=sa.JSON, existing_type=sa.String(2048) - ) - - -def downgrade(): - """ - Reverse the upgrade if we're downgrading to "base" revision - """ - op.drop_column("datasets", "access") - op.alter_column( - "dataset_metadata", "value", type_=sa.String(2048), existing_type=sa.JSON - ) diff --git a/lib/pbench/server/database/alembic/versions/274bab07e3f8_initial_tables.py b/lib/pbench/server/database/alembic/versions/fa12f45a2a5a_initial_database_schema.py similarity index 92% rename from lib/pbench/server/database/alembic/versions/274bab07e3f8_initial_tables.py rename to lib/pbench/server/database/alembic/versions/fa12f45a2a5a_initial_database_schema.py index 465b513782..b0b66926d2 100644 --- a/lib/pbench/server/database/alembic/versions/274bab07e3f8_initial_tables.py +++ b/lib/pbench/server/database/alembic/versions/fa12f45a2a5a_initial_database_schema.py @@ -1,14 +1,14 @@ -"""Initial tables +"""Initial database schema -Revision ID: 274bab07e3f8 -Revises: 62eddcec4817 -Create Date: 2023-01-16 05:54:33.496244 +Revision ID: fa12f45a2a5a +Revises: +Create Date: 2023-01-16 18:33:29.144835 Since we are adding Alembic migrations after we have already been using our -database in various contexts, this "Initial tables" migration describes how to -bring an empty database up to the state of the database as of commit 6a764f154. -That commit was the latest working version of the Pbench Server deployed in Red -Hat's staging environment. +database in various contexts, this "Initial database schema" migration describes +how to bring an empty database up to the state of the database as of commit +6a764f154. 
That commit was the latest working version of the Pbench Server +deployed in Red Hat's staging environment. """ from alembic import op import sqlalchemy as sa @@ -16,8 +16,8 @@ from pbench.server.database.models import TZDateTime # revision identifiers, used by Alembic. -revision = "274bab07e3f8" -down_revision = "62eddcec4817" +revision = "fa12f45a2a5a" +down_revision = None branch_labels = None depends_on = None @@ -60,7 +60,7 @@ def upgrade(): op.create_table( "datasets", sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), - sa.Column("name", sa.String(length=255), nullable=False), + sa.Column("name", sa.String(length=1024), nullable=False), sa.Column("owner_id", sa.String(length=255), nullable=False), sa.Column("access", sa.String(length=255), nullable=False), sa.Column("resource_id", sa.String(length=255), nullable=False), diff --git a/lib/pbench/server/database/pbench-server.cfg b/lib/pbench/server/database/pbench-server.cfg deleted file mode 100644 index 9a040b9f79..0000000000 --- a/lib/pbench/server/database/pbench-server.cfg +++ /dev/null @@ -1,53 +0,0 @@ -[DEFAULT] -# The values here override those in pbench-server-default.cfg. -install-dir = ../../../../server - -########################################################################### -## Deployment section -########################################################################### -[pbench-server] -pbench-top-dir = /var/tmp/pbench -pbench-backup-dir = %(pbench-top-dir)s/pbench.archive.backup -environment = alembic -realhost = localhost -max-unpacked-age = 36500 -maximum-dataset-retention-days = 36500 -default-dataset-retention-days = 730 -# Override the roles this pbench server takes on -- omit pbench-prep. -roles = pbench-maintenance, pbench-results, pbench-backup - -[Indexing] -index_prefix = alembic-pbench -bulk_action_count = 2000 - -[elasticsearch] -host = localhost -port = 9200 - -[database] -uri = postgresql://pbench:pbench@localhost:5432/pbench - -# User authentication section to use when authorizing the user with an OIDC -# identity provider. -[authentication] - -# URL of the OIDC auth server -server_url = http://localhost:8090 - -# Realm name that is used for the authentication with OIDC -realm = pbench - -# Client entity name requesting OIDC to authenticate a user -client = pbench-client - -# Client secret if the above client is not public -secret = - -########################################################################### -# crontab roles - -########################################################################### -# The rest will come from the default config file. 
-[config] -path = %(install-dir)s/lib/config -files = pbench-server-default.cfg diff --git a/server/requirements.txt b/server/requirements.txt index dedd1f875d..2638a2eee3 100644 --- a/server/requirements.txt +++ b/server/requirements.txt @@ -1,4 +1,4 @@ -alembic>=1.6.4 +alembic>=1.9.0 aniso8601>=9.0.1 Bcrypt-Flask boto3 diff --git a/tox.ini b/tox.ini index 1347d202ec..f29b1d6977 100644 --- a/tox.ini +++ b/tox.ini @@ -40,10 +40,3 @@ basepython = python3.6 deps = -r{toxinidir}/agent/requirements.txt -r{toxinidir}/agent/test-requirements.txt - -[testenv:alembic] -description = Runs the alembic commands in `alembic.run` -deps = - -r{toxinidir}/server/requirements.txt -commands = - {toxinidir}/lib/pbench/server/database/alembic.run From ccd45be83f1f7f00aeeb1d0de4f6665bccebeafe Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Mon, 16 Jan 2023 21:42:08 -0500 Subject: [PATCH 3/7] Restore changes lost messing around with alembic versions --- lib/pbench/server/database/alembic.ini | 15 ++++++++++----- lib/pbench/server/database/alembic/env.py | 4 ++-- lib/pbench/server/database/alembic/script.py.mako | 4 ++-- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/lib/pbench/server/database/alembic.ini b/lib/pbench/server/database/alembic.ini index b139e421e4..6edd2fd8f3 100644 --- a/lib/pbench/server/database/alembic.ini +++ b/lib/pbench/server/database/alembic.ini @@ -4,8 +4,11 @@ # path to migration scripts script_location = alembic -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. @@ -35,16 +38,18 @@ prepend_sys_path = . # version location specification; This defaults # to alembic/versions. When using multiple version # directories, initial revisions must be specified with --version-path. -# The path separator used here should be the separator specified by "version_path_separator" +# The path separator used here should be the separator specified by "version_path_separator" below. # version_locations = %(here)s/bar:%(here)s/bat:alembic/versions # version path separator; As mentioned above, this is the character used to split -# version_locations. Valid values are: +# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. +# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. +# Valid values for version_path_separator are: # # version_path_separator = : # version_path_separator = ; # version_path_separator = space -version_path_separator = os # default: use os.pathsep +version_path_separator = os # Use os.pathsep. Default configuration used for new projects. # the output encoding used when revision files # are written from script.py.mako diff --git a/lib/pbench/server/database/alembic/env.py b/lib/pbench/server/database/alembic/env.py index 78f78408dd..0de65205a2 100644 --- a/lib/pbench/server/database/alembic/env.py +++ b/lib/pbench/server/database/alembic/env.py @@ -22,7 +22,7 @@ # ... etc. 
-def run_migrations_offline(): +def run_migrations_offline() -> None: """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an @@ -43,7 +43,7 @@ def run_migrations_offline(): context.run_migrations() -def run_migrations_online(): +def run_migrations_online() -> None: """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with diff --git a/lib/pbench/server/database/alembic/script.py.mako b/lib/pbench/server/database/alembic/script.py.mako index 2c0156303a..55df2863d2 100644 --- a/lib/pbench/server/database/alembic/script.py.mako +++ b/lib/pbench/server/database/alembic/script.py.mako @@ -16,9 +16,9 @@ branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} -def upgrade(): +def upgrade() -> None: ${upgrades if upgrades else "pass"} -def downgrade(): +def downgrade() -> None: ${downgrades if downgrades else "pass"} From 615c92530ce1d07873422dd75abde85f432fe0a1 Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Mon, 16 Jan 2023 22:58:02 -0500 Subject: [PATCH 4/7] Enhance the migrations check to use a container We now run a very simple PostgreSQL container against which we run all our known migrations and then ask Alembic to check that we don't have any additional model changes which need a migration. The CI build will fail when any migrations are missing, and a local `tox -- server alembic` will also fail (really, any `tox` run that ends up running the server side, e.g. `tox -- server`, `tox -- all`, or just `tox`). --- exec-tests | 2 +- lib/pbench/server/database/alembic.check | 38 ++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100755 lib/pbench/server/database/alembic.check diff --git a/exec-tests b/exec-tests index 37a06ede26..65e7a07951 100755 --- a/exec-tests +++ b/exec-tests @@ -230,7 +230,7 @@ if [[ "${major}" == "all" || "${major}" == "server" ]]; then # Verify no database migrations would be generated. 
if [[ "${subtst:-alembic}" == "alembic" && ${rc} -eq 0 ]]; then - (cd lib/pbench/server/database && ${ECHO} alembic check) || rc=1 + ${_ECHO} lib/pbench/server/database/alembic.check || rc=1 fi # Run the server functional tests diff --git a/lib/pbench/server/database/alembic.check b/lib/pbench/server/database/alembic.check new file mode 100755 index 0000000000..42fcaba3c1 --- /dev/null +++ b/lib/pbench/server/database/alembic.check @@ -0,0 +1,38 @@ +#!/bin/bash -e + +cd lib/pbench/server/database + +podman run --name postgresql-alembic \ + --detach \ + --rm \ + --network host \ + --workdir /opt/app-root/src \ + --env 'PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + --env 'TERM=xterm' \ + --env 'container=oci' \ + --env 'STI_SCRIPTS_URL=image:///usr/libexec/s2i' \ + --env 'PGUSER=postgres' \ + --env 'PLATFORM=el8' \ + --env 'APP_DATA=/opt/app-root' \ + --env 'CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql' \ + --env 'ENABLED_COLLECTIONS' \ + --env 'POSTGRESQL_VERSION=13' \ + --env 'APP_ROOT=/opt/app-root' \ + --env 'STI_SCRIPTS_PATH=/usr/libexec/s2i' \ + --env 'HOME=/var/lib/pgsql' \ + --env 'POSTGRESQL_USER=pbench' \ + --env 'POSTGRESQL_PASSWORD=pbench' \ + --env 'POSTGRESQL_DATABASE=pbench' \ + images.paas.redhat.com/pbench/postgresql-13:latest container-entrypoint run-postgresql + +trap "podman stop postgresql-alembic" INT ABRT QUIT EXIT + +until pg_isready --host=localhost; do + sleep 1 +done + +# First we run all our migrations to bring the blank database up to speed. +alembic upgrade head +# Then we check to see if we have any model changes not captured in existing +# migrations. +alembic check From 6732ab09cd0488c79b1a211dbe6a8967dd5f4380 Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Mon, 16 Jan 2023 23:32:38 -0500 Subject: [PATCH 5/7] Bugger all; can't run from tox invoked by build.sh The build.sh is invoked inside a container, so we can't use podman from there to launch an empty live database. So move to a CI only pipeline check. One can run it locally by just invoking: jenkins/run-alembic-migrations-check --- exec-tests | 5 ---- jenkins/Pipeline.gy | 6 +++++ jenkins/run-alembic-migrations-check | 32 ++++++++++++++++++++++++ lib/pbench/server/database/alembic.check | 30 +--------------------- tox.ini | 7 ++++++ 5 files changed, 46 insertions(+), 34 deletions(-) create mode 100755 jenkins/run-alembic-migrations-check diff --git a/exec-tests b/exec-tests index 65e7a07951..135980fd93 100755 --- a/exec-tests +++ b/exec-tests @@ -228,11 +228,6 @@ if [[ "${major}" == "all" || "${major}" == "server" ]]; then ${_ECHO} verify_make_source_tree server || rc=1 fi - # Verify no database migrations would be generated. 
- if [[ "${subtst:-alembic}" == "alembic" && ${rc} -eq 0 ]]; then - ${_ECHO} lib/pbench/server/database/alembic.check || rc=1 - fi - # Run the server functional tests if [[ "${subtst}" == "functional" && ${rc} -eq 0 ]]; then server_arg=${1} diff --git a/jenkins/Pipeline.gy b/jenkins/Pipeline.gy index d7ff185463..a301ac6d5f 100644 --- a/jenkins/Pipeline.gy +++ b/jenkins/Pipeline.gy @@ -28,6 +28,12 @@ pipeline { sh 'jenkins/run tox -e agent-py36 -- agent' } } + stage('Server Alembic Migrations Check') { + steps { + echo 'Verify alembic migrations cover latest database schema' + sh 'jenkins/run-alembic-migrations-check' + } + } stage('Linting, Unit Tests, RPM builds') { steps { // If we don't have a sequence number file left over from a diff --git a/jenkins/run-alembic-migrations-check b/jenkins/run-alembic-migrations-check new file mode 100755 index 0000000000..dfb8f4b534 --- /dev/null +++ b/jenkins/run-alembic-migrations-check @@ -0,0 +1,32 @@ +#!/bin/bash -e + +podman run --name postgresql-alembic \ + --detach \ + --rm \ + --network host \ + --workdir /opt/app-root/src \ + --env 'PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + --env 'TERM=xterm' \ + --env 'container=oci' \ + --env 'STI_SCRIPTS_URL=image:///usr/libexec/s2i' \ + --env 'PGUSER=postgres' \ + --env 'PLATFORM=el8' \ + --env 'APP_DATA=/opt/app-root' \ + --env 'CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql' \ + --env 'ENABLED_COLLECTIONS' \ + --env 'POSTGRESQL_VERSION=13' \ + --env 'APP_ROOT=/opt/app-root' \ + --env 'STI_SCRIPTS_PATH=/usr/libexec/s2i' \ + --env 'HOME=/var/lib/pgsql' \ + --env 'POSTGRESQL_USER=pbench' \ + --env 'POSTGRESQL_PASSWORD=pbench' \ + --env 'POSTGRESQL_DATABASE=pbench' \ + images.paas.redhat.com/pbench/postgresql-13:latest container-entrypoint run-postgresql + +trap "podman stop postgresql-alembic" INT ABRT QUIT EXIT + +until nc -z localhost 5432; do + sleep 1 +done + +tox -e alembic-check diff --git a/lib/pbench/server/database/alembic.check b/lib/pbench/server/database/alembic.check index 42fcaba3c1..f59f6455dd 100755 --- a/lib/pbench/server/database/alembic.check +++ b/lib/pbench/server/database/alembic.check @@ -2,37 +2,9 @@ cd lib/pbench/server/database -podman run --name postgresql-alembic \ - --detach \ - --rm \ - --network host \ - --workdir /opt/app-root/src \ - --env 'PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ - --env 'TERM=xterm' \ - --env 'container=oci' \ - --env 'STI_SCRIPTS_URL=image:///usr/libexec/s2i' \ - --env 'PGUSER=postgres' \ - --env 'PLATFORM=el8' \ - --env 'APP_DATA=/opt/app-root' \ - --env 'CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql' \ - --env 'ENABLED_COLLECTIONS' \ - --env 'POSTGRESQL_VERSION=13' \ - --env 'APP_ROOT=/opt/app-root' \ - --env 'STI_SCRIPTS_PATH=/usr/libexec/s2i' \ - --env 'HOME=/var/lib/pgsql' \ - --env 'POSTGRESQL_USER=pbench' \ - --env 'POSTGRESQL_PASSWORD=pbench' \ - --env 'POSTGRESQL_DATABASE=pbench' \ - images.paas.redhat.com/pbench/postgresql-13:latest container-entrypoint run-postgresql - -trap "podman stop postgresql-alembic" INT ABRT QUIT EXIT - -until pg_isready --host=localhost; do - sleep 1 -done - # First we run all our migrations to bring the blank database up to speed. alembic upgrade head + # Then we check to see if we have any model changes not captured in existing # migrations. 
alembic check diff --git a/tox.ini b/tox.ini index f29b1d6977..3dd35b91cb 100644 --- a/tox.ini +++ b/tox.ini @@ -40,3 +40,10 @@ basepython = python3.6 deps = -r{toxinidir}/agent/requirements.txt -r{toxinidir}/agent/test-requirements.txt + +[testenv:alembic-check] +description = Verify alembic migrations cover latest database schema +deps = + -r{toxinidir}/server/requirements.txt +commands = + bash -c "{toxinidir}/lib/pbench/server/database/alembic.check" From be040888f454fc391818ce2dffa13e36ee5c4ab9 Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Mon, 16 Jan 2023 23:42:27 -0500 Subject: [PATCH 6/7] Make it work --- jenkins/run-alembic-migrations-check | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jenkins/run-alembic-migrations-check b/jenkins/run-alembic-migrations-check index dfb8f4b534..67b7137307 100755 --- a/jenkins/run-alembic-migrations-check +++ b/jenkins/run-alembic-migrations-check @@ -29,4 +29,4 @@ until nc -z localhost 5432; do sleep 1 done -tox -e alembic-check +EXTRA_PODMAN_SWITCHES="${EXTRA_PODMAN_SWITCHES} --network host" jenkins/run tox -e alembic-check From 644dad1c2c22e9e4426189fb6c1140d83bd2c945 Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Tue, 17 Jan 2023 19:09:50 -0500 Subject: [PATCH 7/7] Remove tabs --- jenkins/run-alembic-migrations-check | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jenkins/run-alembic-migrations-check b/jenkins/run-alembic-migrations-check index 67b7137307..52b93c84b3 100755 --- a/jenkins/run-alembic-migrations-check +++ b/jenkins/run-alembic-migrations-check @@ -6,8 +6,8 @@ podman run --name postgresql-alembic \ --network host \ --workdir /opt/app-root/src \ --env 'PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ - --env 'TERM=xterm' \ - --env 'container=oci' \ + --env 'TERM=xterm' \ + --env 'container=oci' \ --env 'STI_SCRIPTS_URL=image:///usr/libexec/s2i' \ --env 'PGUSER=postgres' \ --env 'PLATFORM=el8' \
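
A rough sketch of exercising the migrations check locally, assuming podman is
available and the referenced PostgreSQL image can be pulled:

    # Starts a throw-away PostgreSQL container, then runs the `alembic-check`
    # tox environment, which applies every migration and runs `alembic check`.
    jenkins/run-alembic-migrations-check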