diff --git a/.github/workflows/docs_deployment.yml b/.github/workflows/docs_deployment.yml deleted file mode 100644 index 2863503d..00000000 --- a/.github/workflows/docs_deployment.yml +++ /dev/null @@ -1,42 +0,0 @@ -name: Deploy docs website - -on: - push: - branches: [main, release] - paths: - - "docsite/**" - workflow_dispatch: - inputs: - deployment: - description: Deploy to - required: true - default: staging - type: choice - options: - - staging - - production - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-node@v3 - with: - node-version: '14' - - run: cd docsite && npm install - - uses: amondnet/vercel-action@v20 - if: ${{ github.ref == 'refs/heads/release' || inputs.deployment == 'production' }} - with: - vercel-token: ${{ secrets.VERCEL_TOKEN }} - vercel-org-id: ${{ secrets.VERCEL_ORG_ID }} - vercel-project-id: ${{ secrets.VERCEL_PROJECT_ID }} - vercel-args: '--prod' - working-directory: docsite - - uses: amondnet/vercel-action@v20 - if: ${{ github.ref == 'refs/heads/main' || inputs.deployment == 'staging' }} - with: - vercel-token: ${{ secrets.VERCEL_TOKEN }} - vercel-org-id: ${{ secrets.VERCEL_ORG_ID }} - vercel-project-id: ${{ secrets.VERCEL_PROJECT_ID }} - working-directory: docsite diff --git a/.github/workflows/generate_typing.yml b/.github/workflows/generate_typing.yml deleted file mode 100644 index 8f7a2b97..00000000 --- a/.github/workflows/generate_typing.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Generate typing module - -on: - pull_request: - types: [assigned, opened, synchronize, reopened] - paths: - - "projects/adapter/**" - - ".github/workflows/generate_typing.yml" - -jobs: - run: - runs-on: ubuntu-latest - - # Run only the latest commit pushed to PR - concurrency: - group: "${{ github.head_ref || github.run_id }}-${{ github.workflow }}-${{ matrix.profile }}-${{ matrix.dbt }}-${{ matrix.python }}" - cancel-in-progress: true - - steps: - - uses: actions/checkout@v4 - with: - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.head_ref }} - - - uses: actions/setup-python@v5 - with: - python-version: 3.9 - - - name: Setup black - run: pip install black - - - name: Generate typing - run: | - python tools/generate_typing_context.py - - - uses: stefanzweifel/git-auto-commit-action@v5 - with: - commit_message: Generate typing - commit_user_email: kudryk@me.com diff --git a/.github/workflows/poetry_lock.yml b/.github/workflows/poetry_lock.yml index e36c8ee9..66505db6 100644 --- a/.github/workflows/poetry_lock.yml +++ b/.github/workflows/poetry_lock.yml @@ -31,7 +31,7 @@ jobs: - name: Install poetry shell: bash - run: pip install poetry==1.2.* + run: pip install poetry==1.5.* - name: Check poetry lock working-directory: "projects/adapter" diff --git a/.github/workflows/python-release.yml b/.github/workflows/python-release.yml index 4a2aea0c..785bd694 100644 --- a/.github/workflows/python-release.yml +++ b/.github/workflows/python-release.yml @@ -59,7 +59,7 @@ jobs: - name: Install poetry shell: bash - run: pip install poetry=="1.4.2" + run: pip install poetry=="1.5.0" - name: Bump to publishing version working-directory: ${{ env.PACKAGE_DIR }} diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml deleted file mode 100644 index 108c72ee..00000000 --- a/.github/workflows/test_cli.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: fal pytest - -on: - pull_request: - types: [assigned, opened, synchronize, reopened] - paths: - - "projects/adapter/**" - -
".github/actions/setup-local-fal/**" - - ".github/workflows/test_cli.yml" - workflow_dispatch: - -jobs: - run: - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - python: - - "3.8" - # - "3.9" - # - "3.10" - # - "3.11" - dbt: - - "1.5.9" - - # Run only the latest commit pushed to PR - concurrency: - group: "${{ github.ref }}-${{ github.workflow }}-${{ matrix.dbt }}-${{ matrix.python }}" - cancel-in-progress: true - - steps: - - uses: actions/checkout@v4 - - - name: Setup local fal - uses: ./.github/actions/setup-local-fal - with: - python: ${{ matrix.python }} - dbt: ${{ matrix.dbt }} - adapter: postgres - - - name: Start Docker database - working-directory: projects/adapter/tests - run: docker-compose up -d - - - name: Setup pytest - run: pip install pytest pytest-mock mock black - - - name: Run dbt - working-directory: projects/adapter - run: dbt run --profiles-dir tests/mock/mockProfile/ --project-dir tests/mock - - - name: Run tests - working-directory: projects/adapter - env: - FAL_STATS_ENABLED: false - DBT_TARGET_PATH: mockTarget - run: pytest tests -s diff --git a/.github/workflows/test_integration_adapter.yml b/.github/workflows/test_integration_adapter.yml index d5c313e5..f810851d 100644 --- a/.github/workflows/test_integration_adapter.yml +++ b/.github/workflows/test_integration_adapter.yml @@ -27,7 +27,7 @@ jobs: profile: - postgres dbt_version: - - "1.5.9" + - "1.6.16" python: - "3.8" - "3.9" @@ -44,7 +44,7 @@ jobs: path: "fal" - name: Start Docker database - working-directory: fal/projects/adapter/cli_tests + working-directory: fal/projects/adapter/integration_tests if: contains(fromJSON('["postgres"]'), matrix.profile) run: docker-compose up -d @@ -70,11 +70,6 @@ jobs: EXTRAS="$EXTRAS,teleport" fi - if [[ '${{ matrix.cloud }}' == 'true' ]] - then - EXTRAS="$EXTRAS,cloud" - fi - DBT_FAL_PACKAGE=".[$EXTRAS]" echo "pip install $ADAPTER_PACKAGE -e $DBT_FAL_PACKAGE" @@ -106,11 +101,6 @@ jobs: BEHAVE_TAGS="$BEHAVE_TAGS --tags=-teleport" fi - if [[ '${{ matrix.cloud }}' != 'true' ]] - then - BEHAVE_TAGS="$BEHAVE_TAGS --tags=-cloud" - fi - if [[ -z "${GITHUB_HEAD_REF}" ]] then export FAL_GITHUB_BRANCH=${GITHUB_BASE_REF:-${GITHUB_REF#refs/heads/}} diff --git a/.github/workflows/test_integration_cli.yml b/.github/workflows/test_integration_cli.yml deleted file mode 100644 index ec055441..00000000 --- a/.github/workflows/test_integration_cli.yml +++ /dev/null @@ -1,241 +0,0 @@ -name: CLI integration tests - -on: - pull_request: - types: [assigned, opened, synchronize, reopened] - paths: - - "projects/adapter/**" - - ".github/actions/setup-local-fal/**" - - ".github/workflows/test_integration_cli.yml" - - push: - branches: [main] - paths: - - "projects/adapter/**" - - schedule: - # every monday - - cron: "0 0 * * 1" - - workflow_dispatch: - inputs: - adapter: - description: dbt Adapter to test with - required: false - default: "" - type: choice - options: - - "" - - postgres - - python: - description: Python version to test with - required: false - default: "3.8" - type: choice - options: - - "" - - "3.8" - - "3.9" - - "3.10" - - "3.11" - - dbt: - description: dbt version to test with - required: false - default: "latest" - type: choice - options: - - "" - - "latest" - - "1.5.9" - -jobs: - matrix-adapter: - runs-on: ubuntu-latest - outputs: - list: ${{ steps.matrix-step.outputs.list }} - steps: - - id: matrix-step - shell: python - run: | - OPTIONS = [ - 'postgres' - ] - EXTRA_OPTIONS = [ - ] - OUTPUT = OPTIONS - - if '${{ github.event_name }}' == 'pull_request': - 
import re - - PR_TITLE = '${{ github.event.pull_request.title }}'.lower() - PR_BRANCH = '${{ github.head_ref }}'.lower() - PR_DESCRIPTION = '''${{ github.event.pull_request.body }}'''.lower() - PR_DESCRIPTION = re.sub("<!--.*-->", "", PR_DESCRIPTION, flags=re.DOTALL) - - # Only test adapters mentioned in the pull request title or branch. - # We always test postgres and fal adapter as a sanity check. - OUTPUT = [ - a for a in OPTIONS + EXTRA_OPTIONS - if a == 'postgres' or a == 'fal' or - a in PR_TITLE or - a in PR_BRANCH or - a in PR_DESCRIPTION - ] - - elif '${{ github.event_name }}' == 'push': - OUTPUT = ['postgres'] - - elif '${{ github.event_name }}' == 'workflow_dispatch': - INPUT_CHOICE = '${{ github.event.inputs.adapter }}' - if INPUT_CHOICE == '': - OUTPUT = OPTIONS + EXTRA_OPTIONS - else: - OUTPUT = [INPUT_CHOICE] - - import json - import os - with open(os.environ['GITHUB_OUTPUT'], 'a') as output_fp: - print(f'list={json.dumps(OUTPUT)}', file=output_fp) - - matrix-python: - runs-on: ubuntu-latest - outputs: - list: ${{ steps.matrix-step.outputs.list }} - steps: - - id: matrix-step - shell: python - run: | - OPTIONS = [ - "3.8", - "3.9", - "3.10", - "3.11", - ] - OUTPUT = ["3.8"] - - if '${{ github.event_name }}' == 'pull_request': - import re - - PR_TITLE = '${{ github.event.pull_request.title }}'.lower() - PR_BRANCH = '${{ github.head_ref }}'.lower() - PR_DESCRIPTION = '''${{ github.event.pull_request.body }}'''.lower() - PR_DESCRIPTION = re.sub("<!--.*-->", "", PR_DESCRIPTION, flags=re.DOTALL) - - # Test version mentioned in the pull request title or branch. - OUTPUT = [ - v for v in OPTIONS - if v in PR_TITLE or - v in PR_BRANCH or - v in PR_DESCRIPTION - ] - - if not OUTPUT: - # If none were found in PR info - OUTPUT=["3.8"] - - elif '${{ github.event_name }}' in ('schedule', 'push'): - OUTPUT=OPTIONS - - elif '${{ github.event_name }}' == 'workflow_dispatch': - INPUT_CHOICE = '${{ github.event.inputs.python }}' - if INPUT_CHOICE == '': - OUTPUT = OPTIONS - else: - OUTPUT = [INPUT_CHOICE] - - import json - import os - with open(os.environ['GITHUB_OUTPUT'], 'a') as output_fp: - print(f'list={json.dumps(OUTPUT)}', file=output_fp) - - matrix-dbt: - runs-on: ubuntu-latest - outputs: - list: ${{ steps.matrix-step.outputs.list }} - steps: - - id: matrix-step - shell: python - run: | - OPTIONS = [ - "1.5.9", - ] - OUTPUT = OPTIONS - - if '${{ github.event_name }}' == 'workflow_dispatch': - INPUT_CHOICE = '${{ github.event.inputs.dbt }}' - if INPUT_CHOICE == '': - OUTPUT = OPTIONS - else: - OUTPUT = [INPUT_CHOICE] - - import json - import os - with open(os.environ['GITHUB_OUTPUT'], 'a') as output_fp: - print(f'list={json.dumps(OUTPUT)}', file=output_fp) - - run: - needs: - - matrix-adapter - - matrix-dbt - - matrix-python - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - profile: ${{ fromJSON(needs.matrix-adapter.outputs.list) }} - dbt: ${{ fromJSON(needs.matrix-dbt.outputs.list) }} - python: ${{ fromJSON(needs.matrix-python.outputs.list) }} - - # Run only the latest commit pushed to PR - concurrency: - group: "${{ github.head_ref || github.run_id }}-${{ github.workflow }}-${{ matrix.profile }}-${{ matrix.dbt }}-${{ matrix.python }}" - cancel-in-progress: true - - steps: - - uses: actions/checkout@v4 - - - name: Setup local fal - uses: ./.github/actions/setup-local-fal - with: - python: ${{ matrix.python }} - dbt: ${{ matrix.dbt }} - adapter: ${{ matrix.profile }} - - - name: Start Docker database - working-directory: projects/adapter/cli_tests - if: 
contains(fromJSON('["postgres", "fal"]'), matrix.profile) - run: docker-compose up -d - - - name: Install conda - uses: s-weigand/setup-conda@v1 - with: - activate-conda: false - python-version: ${{ matrix.python }} - # PyJokes is available on conda-forge - conda-channels: anaconda, conda-forge - - - name: Setup behave - working-directory: projects/adapter/cli_tests - run: pip install behave ipython - - - name: Run tests - id: test_run - working-directory: projects/adapter/cli_tests - env: - FAL_STATS_ENABLED: false - run: | - # Could not get the real job_id easily from context - UUID=$(uuidgen | head -c8) - export DB_NAMESPACE="${{ github.run_id }}_${UUID}" - - BEHAVE_TAGS="--tags=-TODO-${{ matrix.profile }}" - - if [[ '${{ matrix.profile }}' != 'postgres' ]] && [[ '${{ matrix.profile }}' != 'fal' ]] - then - # 'broken_profile' tests only work for postgres and postgres+fal right now - BEHAVE_TAGS="$BEHAVE_TAGS --tags=-broken_profile" - fi - - behave $BEHAVE_TAGS -fplain -D profile=${{ matrix.profile }} --no-capture diff --git a/docsite/.gitignore b/docsite/.gitignore deleted file mode 100644 index 4cc7d55f..00000000 --- a/docsite/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Dependencies -/node_modules - -# Production -/build - -# Generated files -.docusaurus -.cache-loader - -# Misc -.DS_Store -.env.local -.env.development.local -.env.test.local -.env.production.local - -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -.vercel diff --git a/docsite/README.md b/docsite/README.md deleted file mode 100644 index 16f9f96f..00000000 --- a/docsite/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# Website - -This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator. - -### Installation - -``` -$ npm install -``` - -### Local Development - -``` -$ npm start -``` - -This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. - -### Build - -``` -$ npm run build -``` - -This command generates static content into the `build` directory and can be served using any static contents hosting service. - -### Deployment - -Using SSH: - -``` -$ USE_SSH=true npm run deploy -``` - -Not using SSH: - -``` -$ GIT_USER=<Your GitHub username> npm run deploy -``` - -If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 
diff --git a/docsite/babel.config.js b/docsite/babel.config.js deleted file mode 100644 index e00595da..00000000 --- a/docsite/babel.config.js +++ /dev/null @@ -1,3 +0,0 @@ -module.exports = { - presets: [require.resolve('@docusaurus/core/lib/babel/preset')], -}; diff --git a/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/_category_.json b/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/_category_.json deleted file mode 100644 index 34b4dfb8..00000000 --- a/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/_category_.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "position": 4, - "label": "Access dbt metadata in Python with fal", - "collapsible": true, - "collapsed": false -} diff --git a/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/notebook-files.md b/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/notebook-files.md deleted file mode 100644 index d07bba7d..00000000 --- a/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/notebook-files.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -sidebar_position: 10 ---- - -# Jupyter (.ipynb) helpers - -## `init_fal` magic command -In order to access fal functions such as `ref` and `write_to_model` from within the notebook runtime, you can use the `init_fal` magic command. - -First, you need to import `init_fal` within a Python cell: - -```python -from faldbt.magics import init_fal -``` - -Now, you can initialize `fal` in your notebook: - -``` -%init_fal project_dir=project_dir profiles_dir=profiles_dir default_model_name=my_model -``` - -`init_fal` requires three inline arguments: - -- `project_dir`: path to the dbt project directory -- `profiles_dir`: path to the dbt profiles directory -- `default_model_name`: the model name that will be used in `write_to_model`, applies only in notebook runtime. - -Once executed, you can use fal functions in your notebook's Python cells: - -```python -my_df = ref('some_model') - -# We made some predictions and stored them in `my_predictions` - -write_to_model(my_predictions) -``` diff --git a/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/python-package.md b/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/python-package.md deleted file mode 100644 index a7d9563d..00000000 --- a/docsite/docs/dbt-fal/access-dbt-metadata-with-fal/python-package.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Overview - -You may be interested in accessing dbt models and sources easily from a Python script. -For that, just use the `fal` package and instantiate a FalDbt project: - -```py -from fal import FalDbt -faldbt = FalDbt(profiles_dir="~/.dbt", project_dir="../my_project") - -faldbt.list_sources() -# [['results', 'ticket_data_sentiment_analysis']] - -faldbt.list_models() -# { -# 'zendesk_ticket_metrics': , -# 'stg_o3values': , -# 'stg_zendesk_ticket_data': , -# 'stg_counties': -# } - -sentiments = faldbt.source('results', 'ticket_data_sentiment_analysis') -# pandas.DataFrame -tickets = faldbt.ref('stg_zendesk_ticket_data') -# pandas.DataFrame -``` - ---- - -Check out the [FalDbt class explanation](/dbt-fal/reference/faldbt-class-object) in the reference section. diff --git a/docsite/docs/dbt-fal/compatibility.md b/docsite/docs/dbt-fal/compatibility.md deleted file mode 100644 index ca9a5922..00000000 --- a/docsite/docs/dbt-fal/compatibility.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -sidebar_position: 8 ---- - -# Compatibility - -## Picking the right fal version - -If you are starting a new project, we'd recommend using the latest fal -version to gain access to all the features fal offers.
If you are adopting fal in -an existing project, you can use the tables below to pick the version that -works best for your environment. - -#### fal & dbt matrix - -| | fal version | Supported dbt version | -| --- | ----------- | ---------------------------- | -| | \>=0.4.0 | 1.0.X, 1.1.X | -| | \<=0.3.6 | 0.20.X, 0.21.X, 1.0.X, 1.1.X | - -#### fal & dbt adapter matrix - -| | fal version | Supported dbt adapters | Notes | -| --- | ----------- | ----------------------------------------------- | ------------------------------------------------------------------------------------ | -| | \>=0.3.6 | Postgres, BigQuery, Snowflake, Redshift, DuckDB | dbt-bigquery\<=1.1 support is added ([#443](https://github.com/fal-ai/fal/pull/443)) | -| | \>=0.3.1 | Postgres, BigQuery, Snowflake, Redshift, DuckDB | | -| | \<=0.3.0 | Postgres, BigQuery, Snowflake, Redshift | | - -## Migration - -### From `after` scripts to `post-hook`s - -With the [`0.4.0`](https://github.com/fal-ai/fal/releases/tag/v0.4.0) release, -fal will start showing deprecation warnings for `after` scripts when using -`fal flow run`. The easiest way forward is migrating them to `post-hook`s, and -in general the migration is seamless. But if any of your scripts use the -`write_to_source` or `write_to_model` functions, we'd recommend promoting -them to individual ["Python models"](/dbt-fal/guides/python-models-migration). diff --git a/docsite/docs/dbt-fal/examples.md b/docsite/docs/dbt-fal/examples.md deleted file mode 100644 index 70ca051e..00000000 --- a/docsite/docs/dbt-fal/examples.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -sidebar_position: 6 ---- - -# Examples - -You can find examples in - -- [fal-ai/fal](https://github.com/fal-ai/fal/tree/main/examples) repository -- [fal-ai/fal_dbt_examples](https://github.com/fal-ai/fal_dbt_examples) repository as an example project -- [How-to blog posts](https://blog.fal.ai/tag/how-to/) for several use cases - -Can't find the answer to a question you have? Reach out to us in Discord or GitHub, or send us an email to hello at fal.ai. We will do our best to help you get where you need to. diff --git a/docsite/docs/dbt-fal/guides/_category_.json b/docsite/docs/dbt-fal/guides/_category_.json deleted file mode 100644 index 26bb7d52..00000000 --- a/docsite/docs/dbt-fal/guides/_category_.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "position": 11, - "label": "Guides", - "collapsible": true, - "collapsed": true -} diff --git a/docsite/docs/dbt-fal/guides/python-models-migration.md b/docsite/docs/dbt-fal/guides/python-models-migration.md deleted file mode 100644 index f89b5716..00000000 --- a/docsite/docs/dbt-fal/guides/python-models-migration.md +++ /dev/null @@ -1,17 +0,0 @@ -# How to migrate your after scripts to Python dbt models - -Python dbt models (or Python Data Models) are a way to include a Python transformation of data inside of your dbt DAG. - -We will explore how what was previously achieved with [`write_to_source`](../reference/variables-and-functions.md#write_to_source-function) and [`write_to_model`](../reference/variables-and-functions.md#write_to_model-function) in after scripts can now be done more clearly with Python dbt models. - -## When to use a Python Data Model vs an after script - -The rule-of-thumb is that if you are writing to the data warehouse, you should be using a Python Data Model. - -We are deprecating the use of `write_to_source` and `write_to_model` outside of Python Data Models.
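As a rough sketch of what this migration looks like (the model and column names below are hypothetical), an after script that enriched a table through `write_to_model`:

```python
# after script (deprecated pattern): read the bound model, enrich it, write it back
df = ref('zendesk_ticket_data')            # hypothetical model name
df['text_length'] = df['text'].str.len()   # hypothetical enrichment
write_to_model(df)
```

becomes a standalone Python Data Model that dbt schedules like any other node:

```python
# models/zendesk_ticket_data_enriched.py (hypothetical file name)
def model(dbt, fal):
    df = dbt.ref('zendesk_ticket_data')
    df['text_length'] = df['text'].str.len()
    return df  # the returned DataFrame is materialized as the model's table
```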
- -## Example - -If you are already using `write_to_model` to enrich an existing table, you can remove said table and replace it with the model. - -Example commit: https://github.com/fal-ai/jaffle_shop_with_fal/commit/664620008679a3d18ba76b9f6421e9c908444bea diff --git a/docsite/docs/dbt-fal/model-training-inference/_category_.json b/docsite/docs/dbt-fal/model-training-inference/_category_.json deleted file mode 100644 index 4e143db0..00000000 --- a/docsite/docs/dbt-fal/model-training-inference/_category_.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "position": 10, - "label": "Model Training and Inference", - "collapsible": true, - "collapsed": true -} diff --git a/docsite/docs/dbt-fal/model-training-inference/feature-store.md b/docsite/docs/dbt-fal/model-training-inference/feature-store.md deleted file mode 100644 index fc9361e7..00000000 --- a/docsite/docs/dbt-fal/model-training-inference/feature-store.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -sidebar_position: 7 ---- - -# dbt feature store - -A feature store is a data system that facilitates managing the data transformations centrally for predictive analysis and ML models in production. - -fal-dbt feature store is a feature store implementation that consists of a dbt package and a Python library. - -## Why are we doing this? - -**Empower the analytics engineer:** ML models and analytics operate on the same data. Analytics engineers know this data inside out. They are the ones setting up metrics, ensuring data quality and freshness. Why shouldn’t they be the ones responsible for the predictive analysis? With the rise of open source modelling libraries, most of the work that goes into an ML model is done on the data processing side. - -**Leverage the Warehouse:** Warehouses are secure, scalable and relatively cheap environments to do data transformation. Doing transformations in other environments is at least an order of magnitude more complicated. The warehouse should be part of the ML engineer's toolkit, especially for batch predictions. dbt is the best tool out there to do transformations with the warehouse. dbt feature store will make ML workflows leverage all the advantages of the modern data warehouses. diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/_category_.json b/docsite/docs/dbt-fal/orchestrate-dbt-runs/_category_.json deleted file mode 100644 index 43b8735d..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/_category_.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "position": 5, - "label": "Orchestrate dbt runs with fal CLI", - "collapsible": true, - "collapsed": true -} diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/environments.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/environments.md deleted file mode 100644 index e0d9283e..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/environments.md +++ /dev/null @@ -1,293 +0,0 @@ -# Environment management - -> Note: The isolated scripts feature is still experimental, and there might be both -> internal and external (user-visible) changes on how these environments are -> defined and managed. - -If you have Fal Python models (or hooks) running in your project with -dependencies on external packages (like `scikit-learn` for machine learning -models or `slack-sdk` for alerts), you can let Fal manage such packages for you without -even having to think about them.
This provides extensive interoperability when -sharing this project with others (like your co-workers) and ensures that the -local runs you make are easily reproducible in different environments -(like your CI/CD process). - -## Defining environments - -All the different environments that your Fal project might need can be defined -inside `fal_project.yml`, a file that is located in the same directory as your -`dbt_project.yml`, under your project's root directory. - -An example `fal_project.yml` might look like this, where you have a list of -named environments: - -```yml -environments: - - name: alerts - type: venv - requirements: - - slack-sdk==1.1.0 - - - name: training - type: conda - packages: - - scikit-learn=2.0.0 -``` - -Each environment comes with a unique name (an identifier for you to reference it -later on, inside your `schema.yml`), a type (which would allow you to choose the -environment management backend that you want to use) and a list of configuration -options for the selected environment manager. - -### Using virtual environments (for `pip` packages) - -If your hook depends on packages from [PyPI](https://pypi.org/), you can create -an environment with the type `venv` and Fal will take care of creating and -managing (e.g. installing all the required packages) that virtual environment -for you. - -The only parameter for `venv` based installations is the `requirements` -section, which is just a list of PyPI packages (and their pinned versions) that -your script needs. If you already have a `requirements.txt` lying around, -you can simply copy the contents from it. - -```yml -environments: - - name: interact-cloud - type: venv - requirements: - - boto3==1.24 - - google-cloud==0.34 - - azure==5.0.0 -``` - -The named environment `interact-cloud` above will have 3 dependencies (the `boto3`, -`google-cloud` and `azure` libraries, as well as their dependencies) and each -hook/model that uses this environment can freely import any of the desired -functions that reside under these dependencies. - -### Using conda-based solutions - -If your program depends on packages from [Conda](https://conda.io/) (or if you -want to isolate the system level dependencies), you can use `conda` as the -environment management backend. It is very similar to `venv`, but this time -instead of defining a list of `requirements` you will define a list of -`packages` from the conda channels you have configured. - -```yml -environments: - - name: train - type: conda - packages: - - fbprophet=0.7.1 - - scikit-learn=1.1.2 -``` - -> Note: The environment above will be built using the `conda` executable. -> If there aren't any executables named `conda` under your shell's binary search -> path, you can customize where Fal needs to look for the `conda` binary by -> specifying the `FAL_CONDA_HOME` environment variable. E.g. if you have a -> `miniconda` installation under `~/Downloads`, you can start fal with -> `FAL_CONDA_HOME=~/Downloads/miniconda3/bin fal flow run`. 
Fal currently uses the global `~/.condarc` file when looking for channels to -install packages (or otherwise defaulting to the `conda` executable's bundled -search channels), so if you want to use packages from `conda-forge` (or any -other non-default channels), you can configure them with the following commands: - -```console -$ conda config --add channels conda-forge -$ conda config --set channel_priority strict -``` - -## Specifying environments - -You can specify environments for your hooks and models under your schema file -(`schema.yml`). There are currently two scopes for configuring the environment -option: one at the model level (which all the hooks automatically inherit) and -one at the hook level (so only the selected hook runs in the specified -environment, and nothing else is affected by it). - -### Changing a hook's environment - -If you only want to run a hook in a specific environment, you can define the -`environment` attribute for that hook and Fal will ensure that the hook will be -executed under the specified environment. - -```yml -version: 2 - -models: - - name: reorder_alphabetically - meta: - fal: - pre-hook: - - print_enter_banner.py - - post-hook: - - print_exit_banner.py - - path: send_slack_alert.py - environment: alerts -``` - -In the example above, both the Fal Python model `reorder_alphabetically` and the banner -printing scripts (`print_enter_banner.py` / `print_exit_banner.py`) will use -your computer's local environment (think of it as the Python environment Fal -itself is installed on) since they might not depend on any external Python -packages. But the `send_slack_alert.py` hook will use the `alerts` environment -that is defined under `fal_project.yml` which contains `slack-sdk` as a -dependency. So no matter where this runs (your computer, your co-worker's -computer or your project's CI environment), Fal will create a new virtual -environment and install all the necessary requirements that need to be -available for `send_slack_alert.py` to work properly and send a Slack -notification. - -### Changing a model's environment (with all the underlying hooks) - -If you have a Fal Python model and want it to run on an isolated environment, -you can change the model-level environment which would then make Fal run your -Python model (as well as all the pre-hooks/post-hooks, unless you override it) -in the specified environment. - -```yml -version: 2 - -models: - - name: load_data_from_s3 - meta: - fal: - environment: interact-cloud - pre-hook: - - path: change_s3_permissions.py - with: - my_param: "you can still pass arbitrary parameters" - - post-hook: - - path: change_s3_permissions.py -``` - -For the model above, both the Fal Python model `load_data_from_s3` -and the `change_s3_permissions.py` hooks will run on the same isolated -environment named `interact-cloud`. This is primarily useful when you want to -have a model-level environment that has everything that might be needed for the -specified model's execution (rather than fine-grained environments for each -hook/model script). - -### Overriding model-level environment on hooks - -If any of your hooks requires a different execution environment than the -model-level one, you can simply change the environment setting for that hook -(as if setting it independently of the model) and Fal will run it in its own -environment (rather than the model-level one). 
```yml - -version: 2 - -models: - - name: load_data_from_s3 - meta: - fal: - environment: interact-cloud - pre-hook: - - path: change_s3_permissions.py - - post-hook: - - path: change_s3_permissions.py - - - path: send_slack_alert.py - environment: alerts -``` - -If we add `send_slack_alert.py` to the example above, all the `s3` related -scripts will still run in the `interact-cloud` environment but the -`send_slack_alert.py` will now run under the `alerts` environment. - -#### Overriding the model-level environment with the local environment - -If you want your hooks to run in the same environment as your `fal` process -(without any sort of influence from the outside scopes, e.g. model-level -environments), you can use a reserved environment called `local`. It makes Fal -run them as if they are regular hooks without any environments, in the same -Python runtime that runs Fal itself. - - -```yml -version: 2 - -models: - - name: load_data_from_s3 - meta: - fal: - environment: interact-cloud - pre-hook: - - path: print_enter_banner.py - environment: local - - post-hook: - - path: print_exit_banner.py - environment: local -``` - -The `load_data_from_s3.py` (Fal model) will continue to run inside the -`interact-cloud` environment, but all the banner printing hooks now specify -`local` as their environment (which is a reserved name, so you can't re-define -it). Fal will run them as if it were running them outside of this model-level -environment's scope. - - -## Environment caches - -Since creating an environment is the longest part of the job, Fal will avoid -doing so as long as there weren't any changes in the environment's definition. - -```yml -environments: - - name: alerts - type: venv - requirements: - - slack-sdk==1.1.0 - - - name: training - type: conda - packages: - - scikit-learn=2.0.0 -``` - -When the `alerts` or `training` environment is used for the first time, Fal will -create it from scratch and save it under your user cache dir (this depends on -your system, but you can see the default -[value on this page](https://github.com/platformdirs/platformdirs#example-output)). -For all the subsequent runs (of either the same script, or different ones that -use the same environment) Fal will try to use the already created environment -unless the environment definition itself has been changed. If the -`fal_project.yml` above evolves into something like the following (e.g. a new -dependency has been added under the `training` environment): - -```yml -environments: - - name: alerts - type: venv - requirements: - - slack-sdk==1.1.0 - - - name: training - type: conda - packages: - - scikit-learn=2.0.0 - - xgboost=1.0.0 -``` - -Fal will still use the same `alerts` environment no matter what, since it can -see that it hasn't been changed. But when the `training` environment is -referenced for the first time (after the change in the definition), it will be -re-created as if it were a new environment, and the same caching will still be -in effect (all the subsequent runs will now use the newly created -`training` environment, not the old one). - -If two or more environments share the same set of unique dependencies -(`requirements` in `venv` based installations or `packages` in `conda` based -installations), they will point to the same environment location under the -hood (if the definition of any of them changes, it won't affect the others, -due to the immutable nature of the created environments). 
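For example, here is a sketch (with hypothetical environment names) of two definitions that would share a single underlying environment, since their dependency sets are identical:

```yml
environments:
  # Identical `requirements` lists resolve to the same cached environment
  # under the hood; editing one definition later will not affect the other.
  - name: alerts-finance
    type: venv
    requirements:
      - slack-sdk==1.1.0

  - name: alerts-hr
    type: venv
    requirements:
      - slack-sdk==1.1.0
```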
diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/fal-run.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/fal-run.md deleted file mode 100644 index bc93dc42..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/fal-run.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -sidebar_position: 8 ---- - -# `fal run` command - -> **NOTICE**: The `fal run` command was previously the only way to run scripts. We still support the `fal run` behavior but we recommend [`fal flow run`](.) for more capabilities. - -By default, the `fal run` command runs the Python scripts as a post-hook, **only** on the models that were run on the last `dbt run`; that means that if you are using model selection with `dbt`, `fal` will only run on the models `dbt` ran. To achieve this, fal needs the dbt-generated file [`run_results.json`](https://docs.getdbt.com/reference/artifacts/run-results-json) available. - -If you are running `fal` without a `run_results.json` available, or just want to specify which models you want to run the scripts for, `fal` handles [dbt's selection flags](https://docs.getdbt.com/reference/node-selection/syntax) for `dbt run` as well as offering an extra flag to ignore the run results and run _all_ models: - -``` ---all Run scripts for all models. --s SELECT [SELECT ...], --select SELECT [SELECT ...] - Specify the nodes to include. --m SELECT [SELECT ...], --models SELECT [SELECT ...] - Specify the nodes to include. ---exclude EXCLUDE [EXCLUDE ...] - Specify the nodes to exclude. ---selector SELECTOR The selector name to use, as defined in selectors.yml -``` - -You may pass more than one selection at a time: - -```bash -$ fal run --select model_alpha model_beta -... | Starting fal run for following models and scripts: -model_alpha: script.py -model_beta: script.py, other.py -``` - -## Running scripts before dbt runs - -The `--before` flag lets users run scripts before their dbt runs. - -Given the following schema.yml: - -``` -models: - - name: boston - description: Ozone levels - config: - materialized: table - meta: - owner: "@meder" - fal: - scripts: - before: - - fal_scripts/postgres.py - after: - - fal_scripts/slack.py -``` - -`fal run --before` will run the `fal_scripts/postgres.py` script regardless of whether dbt has calculated the boston model or not. `fal run` without the `--before` flag will run `fal_scripts/slack.py`, but only if the boston model is already calculated by dbt. - -A typical workflow involves running `dbt run` after invoking `fal run --before`. - -```bash -$ fal run --before --select boston -$ dbt run --select boston -``` diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/fal-with-dbt-test.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/fal-with-dbt-test.md deleted file mode 100644 index 004614ee..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/fal-with-dbt-test.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -sidebar_position: 6 ---- -# Access dbt test results - -If you run `dbt test`, you may want to run some scripts based on the results of the tests (e.g. to send notifications for failures). - -This can be achieved by manually invoking - -``` -$ dbt test -$ fal run -``` - -The `fal run` command also processes test results, which are then available in the `context` variable. 
```py -for test in context.current_model.tests: - if test.status != 'success': - notify(test.modelname, test.name, test.column, test.status) -``` - -***** - -Better integration of fal flow with dbt test should be done in the future. diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/global-scripts.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/global-scripts.md deleted file mode 100644 index b392d195..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/global-scripts.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -sidebar_position: 5 ---- - -# Global Scripts - -Typically, a [fal script is associated with a dbt model](model-scripts.md); this is how the [context variable is populated](../reference/variables-and-functions.md#context-variable). However, you may want to invoke scripts independently of a dbt model as well. We call these global scripts. This can be achieved by adding a script configuration similar to meta for models, but at the top level of the schema.yml: - -```yaml -models: -# your model declarations... - -fal: - scripts: - before: - - global/prepare_run.py - after: - - global/close_run.py -``` - -These will happen at the beginning and end of any `fal run`, no matter which models are selected, since they are pre- and post-hooks of running fal itself. - -The `before` part will run before all model scripts and the `after` will run after all model scripts. diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/index.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/index.md deleted file mode 100644 index 26797de1..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/index.md +++ /dev/null @@ -1,46 +0,0 @@ -# Run Python in a dbt project - -> ✨ NEW ✨ : If you want to create dbt models, head over to the [dbt-fal](/dbt-fal/quickstart) adapter documentation - -fal extends dbt's functionality to run Python scripts before and after dbt models. With fal you can interact with dbt models in your Python scripts seamlessly. - -This is done using the command `fal flow run`. - -Under the hood, the workflow is run in 3 parts: - -1. Runs the pre-hook Python scripts assigned as `before` scripts -2. Runs the dbt models -3. Runs the post-hook Python scripts assigned as `after` scripts - -![flow run](./../../../static/img/flow_run.png) -_Yellow border indicates which models are run with the command_ - -Consider the example DAG above. Triggering a `fal flow run` command will run all the nodes, SQL and Python, with a single command. - -```bash -$ fal flow run -## Runs the following nodes; load_data.py, transform, dataset, fact, truth, clustering.py -``` - ---- - -You can also use the familiar [dbt graph operations](https://docs.getdbt.com/reference/node-selection/graph-operators). For example, `fal flow run --select load_data.py+` would run only the node itself and the downstream dependencies of the `load_data.py` node. - -![dag](./../../../static/img/load_data_graph.png) -_Yellow border indicates which models are run with the command_ - -```bash -$ fal flow run --select load_data.py+ -## Runs the following nodes; load_data.py, transform, dataset, clustering.py -``` - -Alternatively, pieces of the graph can be invoked individually with the [`fal run`](fal-run.md) command. 
```bash -$ fal run --before -## Runs the following nodes; load_data.py -$ dbt run -## Runs the following nodes; transform, dataset, fact, truth -$ fal run -## Runs the following nodes; clustering.py -``` diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/local-imports.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/local-imports.md deleted file mode 100644 index c3091a4a..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/local-imports.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -sidebar_position: 9 ---- - -# Custom script directory -By default, fal assumes that the directory where scripts are stored is the root directory of a dbt project. You can change this. Within your `dbt_project.yml`, you can add a custom variable that will tell fal where to look for scripts: -```yaml -... -vars: - fal-scripts-path: "my_scripts_dir" -``` -Now, if a script (`after.py`) is put in the `my_scripts_dir` directory within your dbt project, you can refer to it by name in `schema.yml`: -```yaml -models: - - name: some_model - meta: - fal: - scripts: - - after.py -``` -Similarly, you can do it when using the `--scripts` flag: -``` -fal run --scripts after.py -``` - -## Local imports in your fal scripts -The directory containing fal scripts is temporarily added to `sys.path` during runtime. This means that you can import local modules in your scripts directory. Say we have the following structure in our scripts directory: - -``` -└── my_scripts_dir - ├── after.py - ├── random_script.py - └── my_utils - ├── custom_functions.py - └── message - └── slack.py -``` - -This will allow you to do local imports in `after.py`: - -```python -from my_utils.message.slack import send_slack_message -... -send_slack_message(my_message) -... -``` - -Also, within `slack.py` you can do relative imports: - -```python -from ..custom_functions import format_string -... -formatted = format_string(my_string) -... -``` - -Note that in this example, `my_scripts_dir` is not itself loaded as a module, so a `from ..` import wouldn't work in `custom_functions.py`. Instead, you can do `import random_script`, because everything in `my_scripts_dir` is available in `sys.path`. diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/model-scripts-selection.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/model-scripts-selection.md deleted file mode 100644 index dfdb97c5..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/model-scripts-selection.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Select `before` and `after` scripts - -For node selection, `fal` handles [dbt's selection flags](https://docs.getdbt.com/reference/node-selection/syntax): - -``` --s SELECT [SELECT ...], --select SELECT [SELECT ...] - Specify the nodes to include. --m SELECT [SELECT ...], --models SELECT [SELECT ...] - Specify the nodes to include. ---exclude EXCLUDE [EXCLUDE ...] - Specify the nodes to exclude. ---selector SELECTOR The selector name to use, as defined in selectors.yml -``` - -You may pass more than one selection at a time and use graph operators: - -```bash -$ fal flow run --select model_alpha+ model_beta+ -Executing command: dbt --log-format json run --project-dir fal/fal_dbt_examples --profiles-dir . --exclude lombardia_covid miami -Running with dbt=1.0.3 -... -1 of 2 START view model dbt_matteo.model_alpha......... [RUN] -2 of 2 START table model dbt_matteo.model_beta......... [RUN] -1 of 2 OK created view model dbt_matteo.model_alpha.... [OK in 2.71s] -2 of 2 OK created table model dbt_matteo.model_beta.... 
[CREATE TABLE (10.0 rows, 2.6 KB processed) in 4.32s] -Finished running 1 view model, 1 table model in 11.32s. -Completed successfully -Done. PASS=2 WARN=0 ERROR=0 SKIP=0 TOTAL=2 -... -... | Starting fal run for following models and scripts: -model_alpha: script.py -model_beta: script.py, other.py -... -``` - -> Please note that individual `post-hook`s cannot be selected; they will be run if the underlying -> model is selected. diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/model-scripts.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/model-scripts.md deleted file mode 100644 index c846b20c..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/model-scripts.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -sidebar_position: 3 ---- - -# Run Python before and after dbt models - -Python scripts can be attached to run either before or after a model. Add them in the `meta` section of the dbt model config: - -```yaml -models: - - name: modela - meta: - fal: - pre-hook: - - prepare.py - - other.py - post-hook: - - send_slack_message.py -``` - -### `after` and `before` scripts vs `post-hook`s and `pre-hook`s - -Starting with `0.4.0`, we are deprecating `after` (and `before`) scripts in favor of the newly -introduced `post-hook`s (and `pre-hook`s) for `fal flow run`. The table below is a general -comparison of these two features: - -| | `after` scripts | `post-hook`s | -| --------------------------------------------------------------- | ---------------- | ------------------------------------------------------------- | -| Runs after the bound model | ✅ | ✅ | -| Run before dependent models (Runs _as part_ of the bound model) | ✅ (after 0.4.0) | ✅ | -| Parallelization (thread-level) is enabled | ✅ | ✅ | -| Runs even if the underlying model's `dbt run` fails | ❌ | ✅ | -| Can be [parameterized](./structured-hooks.md) | ❌ | ✅ | -| Can be individually selected / executed | ✅ | ❌ | -| Can use `write_to_source`/`write_to_model` | ✅ | ❌ (use [Python models](/dbt-fal/python-models/overview) instead) | - -If you have an existing project and want to move away from `after` scripts, please see the related section in the ["Compatibility" page](../compatibility.md). diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/notebook-files.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/notebook-files.md deleted file mode 100644 index 7011c6bd..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/notebook-files.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -sidebar_position: 10 ---- - -# Use .ipynb files as fal hooks - -You can use Jupyter notebook files as `fal` scripts. - -Note that the `default_model_name` is only active during notebook runtime. When the script is run with `fal run` or `fal flow run`, fal will determine the model to write to according to the relevant `schema.yml` file. In fal runtime, the `init_fal` line is ignored. - -You can specify a .ipynb file the same way as a regular Python file: - -```yaml -models: - - name: some_model - meta: - fal: - scripts: - - my_notebook.ipynb -``` diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/refs-sources-context.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/refs-sources-context.md deleted file mode 100644 index 1d30d400..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/refs-sources-context.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -sidebar_position: 2 ---- - -# Refer to dbt models and sources from a Python context - -fal introduces variables and functions in the context of a script to make it easier to interact with your data. 
You can reference a model just like you do from dbt with a simple use of the familiar `ref` function. The same can be done for source relations with the `source` function. - -```py -model_df = ref('my_model') # pandas.DataFrame -source_df = source('schema', 'table') # pandas.DataFrame -``` - -And when a script is attached to a model, the context also includes information about the model the script is attached to: - -```py -context.current_model.name -context.current_model.meta.get("owner") -``` - -Review the [reference](../reference/variables-and-functions.md) for a more thorough view of this subject. diff --git a/docsite/docs/dbt-fal/orchestrate-dbt-runs/structured-hooks.md b/docsite/docs/dbt-fal/orchestrate-dbt-runs/structured-hooks.md deleted file mode 100644 index fc5bf4f5..00000000 --- a/docsite/docs/dbt-fal/orchestrate-dbt-runs/structured-hooks.md +++ /dev/null @@ -1,57 +0,0 @@ -# Passing parametrized data around hooks - -Aside from the context around the model that it is bound to, each Fal -hook (hooks defined under `pre-hook` and `post-hook`) can have a set -of parameters that allow it to be parametrized on a per-usage -basis (rather than having multiple duplicate scripts with statically -embedded configuration values). This customization can be applied by -using the `with:` section, as with `send_alert.py` below: - -```yml -models: - - name: normalize_balances - meta: - fal: - post-hook: - - my_regular_hook.py - - path: send_alert.py - with: - channel: "#finance-alerts" - severity: 1 - - - name: revoke_account_permissions - meta: - fal: - post-hook: - - path: send_alert.py - with: - channel: "#hr-alerts" - severity: 0 -``` - -A hook is a dictionary with `path` and `with` properties. If a string is set -instead of the dictionary format, it is assumed to be the `path` property. - -In our example, we keep the same script for sending all alerts (`send_alert.py`) but customize -which channel we want to send an alert on, depending on the model being processed, by simply -passing that extra piece of information under the `with:` section. We can also pass values of different -types, like the integer `severity` option, and they will be exposed to our hooks with their original -types (`channel` is going to be a `str`, and `severity` is going to be an `int`). 
To leverage this set of parameters in our hooks, we can use the existing `context` object, this time -accessing the `context.arguments` property: - -```py -from fal.typing import * -from my_alerts import send_slack_msg, send_sentry_alert - -channel = context.arguments["channel"] -# We can use `get` to set a default value for an argument -severity = context.arguments.get("severity", 1) - -if context.current_model.status == "fail": - message = f"{context.current_model.name} has failed" - send_slack_msg(to=channel, message=message) - if severity >= 1: - send_sentry_alert(message) -``` diff --git a/docsite/docs/dbt-fal/python-models/_category_.json b/docsite/docs/dbt-fal/python-models/_category_.json deleted file mode 100644 index 356c512d..00000000 --- a/docsite/docs/dbt-fal/python-models/_category_.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "position": 3, - "label": "Build dbt Python Models", - "collapsible": true, - "collapsed": false -} diff --git a/docsite/docs/dbt-fal/python-models/environment_management.md b/docsite/docs/dbt-fal/python-models/environment_management.md deleted file mode 100644 index 95063c84..00000000 --- a/docsite/docs/dbt-fal/python-models/environment_management.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -sidebar_position: 2 ---- - -# Environment management with dbt-fal - -Our recommended way of using dbt-fal is to use named environments. They help you define reusable Python environments that are automatically managed by dbt-fal. You can use them by creating a `fal_project.yml` file in the same folder as your dbt project, and then use these environments in any Python model. - -In your dbt project folder: - -```bash -$ touch fal_project.yml - -# Paste the config below -environments: - - name: ml - type: venv - requirements: - - prophet -``` - -and then in your dbt model: - -```bash -$ vi models/orders_forecast.py - -import pandas as pd - -def model(dbt, fal): - dbt.config(fal_environment="ml") # Add this line - - df: pd.DataFrame = dbt.ref("orders_daily") - return df -``` - -The `dbt.config(fal_environment="ml")` will give you a clean, isolated environment to run things in, so you don't pollute your package space. diff --git a/docsite/docs/dbt-fal/python-models/machine-types.md b/docsite/docs/dbt-fal/python-models/machine-types.md deleted file mode 100644 index c758cf2d..00000000 --- a/docsite/docs/dbt-fal/python-models/machine-types.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Machine Types - -With fal-serverless, you can choose different sizes of machines to run each of your models. You are charged only for what you use. 
To use different types of machines: - -```python -$ vi models/orders_forecast.py - -import pandas as pd - -def model(dbt, fal): - dbt.config(fal_environment="ml", fal_machine="GPU") # Add this line - - df: pd.DataFrame = dbt.ref("orders_daily") - return df -``` - -The following options are available for the `fal_machine` argument: - -| Value | Description | -| ----- | ------------------------------------------------ | -| `XS` | 0.25 CPU cores, 256 MB RAM (default) | -| `S` | 0.50 CPU cores, 1 GB RAM | -| `M` | 2 CPU cores, 8 GB RAM | -| `L` | 4 CPU cores, 32 GB RAM | -| `XL` | 8 CPU cores, 128 GB RAM | -| `GPU` | 8 CPU cores, 64 GB RAM, 1 GPU core (A100, 40 GB) | diff --git a/docsite/docs/dbt-fal/python-models/overview.md b/docsite/docs/dbt-fal/python-models/overview.md deleted file mode 100644 index a8f4073c..00000000 --- a/docsite/docs/dbt-fal/python-models/overview.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -sidebar_position: 1 ---- - -# dbt-fal Python adapter - -The dbt-fal adapter is the ✨easiest✨ way to run your [dbt Python models](https://docs.getdbt.com/docs/building-a-dbt-project/building-models/python-models). - -Starting with dbt v1.3, you can now build your dbt models in Python. However, the developer experience with existing data warehouse Python runtimes is not ideal. - -dbt-fal provides the best environment to run your Python models, and it works with all other data warehouses! This includes Postgres and Redshift, which do not have Python support, as well as BigQuery and Snowflake, which can be hard to work with. - -With dbt-fal, you can: - -- Build and test your models locally -- Isolate each model to run in its own environment with its own dependencies -- Run your Python models in the [☁️ cloud ☁️](/) with elastically scaling Python environments and pay for only what you use. -- Even add GPUs to your models for some heavy workloads such as training ML models. 🤖 - -## 1. Install dbt-fal - -`pip install "dbt-fal[bigquery, snowflake]"` _Add your current warehouse here_ - -## 2. Update your `profiles.yml` and add the fal adapter - -Add another entry to `outputs` in your desired profile (below we've added `dev_with_fal`) - -```yaml -jaffle_shop: - target: dev_with_fal # target points at the new output - outputs: - dev_bigquery: - type: bigquery - method: service-account - keyfile: /path/to/keyfile.json - project: my_gcp_project - dataset: my_dbt_dataset - threads: 4 - timeout_seconds: 300 - location: US - priority: interactive - dev_with_fal: # Name of your new output - type: fal - db_profile: dev_bigquery # This points to your main adapter -``` - -Don't forget to point to your main adapter with the `db_profile` attribute. This is how the fal adapter knows how to connect to your data warehouse. - -## 3. Run dbt - -```bash -dbt run -``` - -That is it! It is really that simple 😊 diff --git a/docsite/docs/dbt-fal/python-models/using-fal-serverless.md b/docsite/docs/dbt-fal/python-models/using-fal-serverless.md deleted file mode 100644 index 40134f3a..00000000 --- a/docsite/docs/dbt-fal/python-models/using-fal-serverless.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -sidebar_position: 3 ---- - -# fal-serverless Integration - -fal-serverless is our serverless compute solution that allows you to run Python models on a reliable and scalable infrastructure. Setting up dbt-fal with fal-serverless is quick and straightforward. - -## 0. Install fal-serverless - -```bash -pip install fal-serverless -``` - -## 1. Authenticate to fal-serverless - -fal-serverless uses GitHub for authentication. 
Run the following command in your shell: - -```bash -fal-serverless auth login -``` - -Follow the link that's generated and log in using GitHub. Come back to the shell when ready. - -## 2. Generate keys - -Next, generate keys that will allow dbt to connect to fal cloud: - -```bash -fal-serverless key generate -``` - -This will print a message containing values for `KEY_ID` and `KEY_SECRET`. We will need these for setting up the dbt profile. - -## 3. Update your dbt profiles.yml - -In order to run your Python models in fal-serverless, you should update the profiles.yml to include the newly generated credentials. Here's an example of how it should look: - -```yaml -fal_profile: - target: fal_serverless - outputs: - fal_serverless: - type: fal - db_profile: db - host: cloud - key_secret: MY_KEY_SECRET_VALUE - key_id: MY_KEY_ID_VALUE - db: - type: postgres - host: MY_PG_HOST - port: MY_PG_PORT - ... -``` - -That's it. Doing a dbt run against this profile will execute your Python models in fal-serverless. - -## 4. (Optional) Define separate output for fal-serverless - -You can have multiple fal outputs, e.g.: - -```yaml -fal_profile: - target: staging - outputs: - staging: - type: fal - db_profile: db - prod: - type: fal - db_profile: db - host: cloud ## ask your account exec - key_secret: MY_KEY_SECRET_VALUE - key_id: MY_KEY_ID_VALUE - db: - type: postgres - host: MY_PG_HOST - port: MY_PG_PORT - ... -``` - -In the example above, we have two fal outputs: `staging` and `prod`. Output `staging` will execute your Python models only locally, whereas `prod` will run them on fal-serverless. So now you can control where your models are run with a `-t` flag. - -This will run Python models locally: - -```bash -dbt run -``` - -And this will run Python models on fal-serverless: - -```bash -dbt run -t prod -``` diff --git a/docsite/docs/dbt-fal/quickstart.md b/docsite/docs/dbt-fal/quickstart.md deleted file mode 100644 index d0afe298..00000000 --- a/docsite/docs/dbt-fal/quickstart.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Quickstart - -fal is the easiest way to run Python with your dbt project. - -The fal ecosystem has two main components: the dbt-fal adapter and the fal CLI. - -### With the dbt-fal Python adapter, you can: - -- Enable a developer-friendly Python environment for most databases, including ones without dbt Python support such as Redshift and Postgres. -- Use Python libraries such as sklearn or prophet to build more complex dbt models including ML models. -- Easily manage your Python environments with isolate. -- Iterate on your Python models locally and then scale them out in the cloud. - -Go to the [dbt-fal](/dbt-fal/python-models/overview) documentation for more details! - -### With the fal CLI, you can: - -- Send Slack notifications upon dbt model success or failure. -- Load data from external data sources before a model starts running. -- Download dbt models into a Python context with a familiar syntax: `ref('my_dbt_model')` -- Programmatically access rich metadata about your dbt project using `FalDbt`. - -Go to the [fal CLI](/dbt-fal/orchestrate-dbt-runs/) documentation for more details! - -## 1. Install fal and dbt-fal - -```bash -$ pip install fal "dbt-fal[postgres]" -``` - -## 2. Go to your dbt directory - -```bash -$ cd ~/src/my_dbt_project -``` - -## 3. 
Create a Python script: `send_slack_message.py`

```python
import os
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError

CHANNEL_ID = os.getenv("SLACK_CHANNEL")
SLACK_TOKEN = os.getenv("SLACK_BOT_TOKEN")

client = WebClient(token=SLACK_TOKEN)

message_text = f"Model: {context.current_model.name}. Status: {context.current_model.status}."

if str(context.current_model.status) == 'success':
    # Read model as pandas.DataFrame
    df = ref(context.current_model.name)
    message_text += f" Size: {df.size}."

try:
    response = client.chat_postMessage(
        channel=CHANNEL_ID,
        text=message_text
    )
except SlackApiError as e:
    assert e.response["error"]
```

As you can see from the `context` object, fal makes certain variables (and functions) available in this script. [Check out the fal scripts section for more details.](./reference/variables-and-functions.md)

## 4. Add a `meta` section in your `schema.yml`

```yaml
models:
  - name: some_model
    meta:
      fal:
        scripts:
          - send_slack_message.py
```

## 5. Run `fal flow run`

This command manages your dbt runs for you by running scripts and models in the correct order.

```bash
$ fal flow run
# 1. dbt model `some_model` is run
# 2. slack message is sent with the run result
```
diff --git a/docsite/docs/dbt-fal/reference/_category_.json deleted file mode 100644 index 175626f6..00000000 --- a/docsite/docs/dbt-fal/reference/_category_.json +++ /dev/null @@ -1,6 +0,0 @@

{
  "position": 12,
  "label": "Reference",
  "collapsible": true,
  "collapsed": true
}
diff --git a/docsite/docs/dbt-fal/reference/fal-scripts-path.md deleted file mode 100644 index 6f09a86a..00000000 --- a/docsite/docs/dbt-fal/reference/fal-scripts-path.md +++ /dev/null @@ -1,97 +0,0 @@

---
sidebar_position: 1
---

# Variable `fal-scripts-path`

In order to find scripts in your project, fal uses the `fal-scripts-path` [dbt variable](https://docs.getdbt.com/docs/building-a-dbt-project/building-models/using-variables#defining-variables-in-dbt_projectyml).

Let's consider the following project structure:

```
.
├── dbt_project.yml
├── models
│   ├── schema.yml
│   └── some_model.sql
└── scripts
    ├── after.py
    ├── before.py
    └── utils
        ├── my_utils.py
        └── process
            └── process_df.py
```

By default, `fal-scripts-path` is the [dbt project directory](https://docs.getdbt.com/reference/dbt_project.yml) (where the `dbt_project.yml` is located).

It can be changed by setting a dbt [var](https://docs.getdbt.com/docs/building-a-dbt-project/building-models/using-variables#defining-variables-in-dbt_projectyml). An example of setting the variable would be:

```yaml
name: "fal_test"
version: "1.0.0"
config-version: 2

vars:
  fal-scripts-path: "scripts"
```

## Script reference in `schema.yml`

Use the `fal-scripts-path` dbt variable as the base directory when referencing fal scripts.
Referencing a script in your `schema.yml` with the default `fal-scripts-path` value looks like:

```yaml
version: 2

models:
  - name: some_model
    meta:
      fal:
        scripts:
          before:
            # searches in `./` because var has default value
            - scripts/before.py
          post-hook:
            - scripts/after.py
```

But if the `fal-scripts-path` value is changed to `scripts`, as specified above, the `schema.yml` would be:

```yaml
version: 2

models:
  - name: some_model
    meta:
      fal:
        scripts:
          before:
            # searches in `./scripts/` because of var
            - before.py
          post-hook:
            - after.py
```

## Script importing during runs

For larger scripts or repeated functionality, you may decide to have several Python files with functions to be imported into your fal scripts.

The `fal-scripts-path` variable refers to the base directory from which you do your imports. Changing `fal-scripts-path` also changes the base import directory.

For example, importing a script with the default `fal-scripts-path` value looks like:

```py
# Searching from the top level: include the `scripts` directory in the path
import scripts.utils.my_utils as my_utils
from scripts.utils.process.process_df import some_func
```

Changing the `fal-scripts-path` value to `scripts`, as specified above, would require `import` changes:

```py
# Searching from the `scripts` directory
import utils.my_utils as my_utils
from utils.process.process_df import some_func
```
diff --git a/docsite/docs/dbt-fal/reference/faldbt-class-object.md deleted file mode 100644 index f6684783..00000000 --- a/docsite/docs/dbt-fal/reference/faldbt-class-object.md +++ /dev/null @@ -1,20 +0,0 @@

---
sidebar_position: 2
---

# The FalDbt class

The `FalDbt` class is available from the `fal` package to import in a script. It is used to read the configurations of a dbt project.

You must provide a `profiles_dir` and a `project_dir` for a dbt project.

Some of the methods available are:

- ``: Explanation
  ```python
  faldbt.()
  #=
  ```
...

TODO: This section needs to be completed once we settle on the API to expose. Also, consider private vs public properties.
diff --git a/docsite/docs/dbt-fal/reference/variables-and-functions.md deleted file mode 100644 index 4b93f0ee..00000000 --- a/docsite/docs/dbt-fal/reference/variables-and-functions.md +++ /dev/null @@ -1,251 +0,0 @@

---
sidebar_position: 1
---

# Variables and functions

Inside a Python script, you get access to some useful variables and functions.

## `context` Variable

`context` is an object with information about the current script context.

### `context.current_model`

This property holds information relevant to the model that is associated with the running script. For the [`meta` Syntax](#meta-syntax) example, we would get the following:

```python
context.current_model.name
# str
#= historical_ozone_levels

context.current_model.status
# NodeStatus, enum: 'success' | 'error' | 'skipped'

context.current_model.columns
# Dict[str, ColumnInfo(name: str, tags: List[str], meta: Dict)]

context.current_model.tests
# List[CurrentTest(name: str, modelname: str, column: str, status: str)]

context.current_model.meta
# meta information in the schema.yml
#= {'owner': '@me'}
```

The `context.current_model` object also has access to test information related to the current model.
If the previous dbt command was either `test` or `build`, the `context.current_model.tests` property is populated with a list of tests:

```python
context.current_model.tests
#= [CurrentTest(name='not_null', modelname='historical_ozone_levels', column='ds', status='Pass')]
```

Another relevant property of the `current_model` is `adapter_response`. It contains information that was received from the dbt SQL adapter after computing the model:

```python
context.current_model.adapter_response
#= CurrentAdapterResponse(message='SELECT 10', code='SELECT', rows_affected=10)
```

## Read functions

The familiar dbt functions `ref` and `source` are available in fal scripts to read the models and sources as a pandas DataFrame.

### `ref` function

The `ref` function is used exactly like in `dbt`. You reference a model in your project:

```py
# returned as `pandas.DataFrame`
df = ref('model_name')
```

Or a package model (package first, model second):

```py
df = ref('dbt_artifacts', 'dim_dbt__exposures')
```

You can use the `context` variable to get the data of the associated model:

```py
df = ref(context.current_model.name)
```

### `source` function

The `source` function is used exactly like in `dbt`. You reference a source in your project:

```py
# returned as `pandas.DataFrame`
df = source('source_name', 'table_name')
```

### `execute_sql` function

You can execute arbitrary SQL from within your Python scripts and get results as pandas DataFrames:

```python
my_df = execute_sql('SELECT * FROM {{ ref("my_model") }}')
```

As you can see, the query strings support Jinja.

### `list_models` function

You can access model information for all models in the dbt project:

```python
my_models = list_models()

my_models[0].status
# NodeStatus

my_models[0].name
# 'zendesk_ticket_data'
```

`list_models` returns a list of `DbtModel` objects that contain model and related test information.

### `list_sources` function

You can access source information for all sources in the dbt project:

```python
my_sources = list_sources()

my_sources[0].name
# 'zendesk_ticket_data'

my_sources[0].tests
# []
```

`list_sources` returns a list of `DbtSource` objects that contain source and related test information.

## Write functions

It is also possible to send data back to your data warehouse. This makes it easy to get the data, process it, and upload it back into dbt territory.

### `write_to_source` function

You first have to define the source in your schema.
This operation appends to the existing source by default and should only be used targeting tables, not views.

```python
# Upload a `pandas.DataFrame` back to the data warehouse
write_to_source(df, 'source_name', 'table_name2')
```

`write_to_source` also accepts an optional `dtype` argument, which lets you specify the datatypes of columns. It works the same way as the `dtype` argument for the [`DataFrame.to_sql` function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html).

```python
from sqlalchemy.types import Integer
# Upload but specifically create the `value` column with type `integer`
# Can be useful if data has `None` values
write_to_source(df, 'source', 'table', dtype={'value': Integer()})
```

### `write_to_model` function

This operation overwrites the existing relation by default and should only be used targeting tables, not views.
For example, if the script is attached to the `zendesk_ticket_metrics` model,

```yaml
models:
  - name: zendesk_ticket_metrics
    meta:
      fal:
        scripts:
          after:
            - from_zendesk_ticket_data.py
```

`write_to_model` will write to the `zendesk_ticket_metrics` table:

```python
df = faldbt.ref('stg_zendesk_ticket_data')
df = add_zendesk_metrics_info(df)

# Upload a `pandas.DataFrame` back to the data warehouse
write_to_model(df) # writes to attached model: zendesk_ticket_metrics
```

> NOTE: When used with the `fal flow run` or `fal run` commands, `write_to_model` does not accept a model name; it only operates on the associated model.

But when importing `fal` as a Python module, you have to specify the model to write to:

```python
from fal import FalDbt
faldbt = FalDbt(profiles_dir="~/.dbt", project_dir="../my_project")

faldbt.list_models()
# [
#    DbtModel(name='zendesk_ticket_data' ...),
#    DbtModel(name='agent_wait_time' ...)
# ]

df = faldbt.ref('stg_zendesk_ticket_data')
df = add_zendesk_metrics_info(df)

faldbt.write_to_model(df, 'zendesk_ticket_metrics') # specify the model
```

### Specifying column types

The functions `write_to_source` and `write_to_model` also accept an optional `dtype` argument, which lets you specify the datatypes of columns.
It works the same way as the `dtype` argument for the [`DataFrame.to_sql` function.](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html)

```python
from sqlalchemy.types import Integer

# Upload but specifically create the `my_col` column with type `integer`
# Can be especially useful if data has `None` values
write_to_source(df, 'source', 'table', dtype={'my_col': Integer()})
```

### Modes of _writing_

These functions accept two modes of _writing_: `append` and `overwrite`.

They are passed with the optional `mode` argument (`append` is the default value for `write_to_source`, `overwrite` for `write_to_model`).

```python
# Overwrite the table with the dataframe data, deleting old data
write_to_source(df, 'source_name', 'table_name', mode='overwrite')
write_to_model(df, 'model_name', mode='overwrite') # default mode

# Append more data to the existing table (create it if it does not exist)
write_to_source(df2, 'source_name', 'table_name', mode='append') # default mode
write_to_model(df2, 'model_name', mode='append')
```

#### The `append` mode

1. creates the table if it does not exist yet
2. inserts data into the table

#### The `overwrite` mode

1. creates a temporary table
2. inserts data into the temporary table
3. drops the old table if it exists
4. renames the temporary table to the final table name

## `meta` syntax

```yaml
models:
  - name: historical_ozone_levels
    ...
    meta:
      owner: "@me"
      fal:
        scripts:
          - send_slack_message.py
          - another_python_script.py # will be run sequentially
```

Use the `fal` and `scripts` keys underneath the `meta` config to let the fal CLI know where to look for the Python scripts.
You can pass a list of scripts as shown above to run one or more scripts as a post-hook operation after a dbt run.
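To tie these pieces together, here is a minimal sketch of a post-hook script that records an audit row for every run. It uses only the variables and functions documented above; the `monitoring` source and its `model_runs` table are hypothetical and would need to be defined in your `schema.yml`:

```python
# Sketch of a post-hook script; `context`, `ref` and `write_to_source`
# are injected by fal at runtime, as described above.
import pandas as pd

status = str(context.current_model.status)
row = {"model": context.current_model.name, "status": status}

if status == "success":
    # Read the materialized model back as a DataFrame to record its size
    df = ref(context.current_model.name)
    row["rows"] = len(df)

# Append one audit row per run (`append` is the default mode for sources)
write_to_source(pd.DataFrame([row]), "monitoring", "model_runs")
```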
diff --git a/docsite/docs/fal-serverless/authentication/_category_.yml b/docsite/docs/fal-serverless/authentication/_category_.yml deleted file mode 100644 index 7adf268d..00000000 --- a/docsite/docs/fal-serverless/authentication/_category_.yml +++ /dev/null @@ -1,9 +0,0 @@ -label: "Authentication" -position: 3 -collapsible: false -collapsed: false -link: - type: generated-index - title: Authentication - - diff --git a/docsite/docs/fal-serverless/authentication/env_var.md b/docsite/docs/fal-serverless/authentication/env_var.md deleted file mode 100644 index 420377c0..00000000 --- a/docsite/docs/fal-serverless/authentication/env_var.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -sidebar_position: 2 ---- - -# Key-Based Authentication - -When you are not able to use GitHub based authentication (in remote environments or CI/CD), you can generate key-based credentials using the CLI or our web UI. - -## Generating API Keys - -Run the following command to generate a KEY with the scope of your choice. The ADMIN scope gives you access to use the SDK, whereas API gives you access to use the web endpoints. - -```bash -fal-serverless key generate --scope ADMIN -``` - -If successful, the following message should be printed out in your terminal: - -``` -Generated key id and key secret. -This is the only time the secret will be visible. -You will need to generate a new key pair if you lose access to this secret. -FAL_KEY_ID='your-key-id' -FAL_KEY_SECRET='your-key-secret' -``` - -You should store the values of `FAL_KEY_ID` and `FAL_KEY_SECRET` in your environment now. - -## Key Scopes - -Key scopes provide a way to control the permissions and access levels of different keys within a system. By assigning scopes to keys, you can limit the operations and resources that each key can access. Currently there are only two levels of control `ADMIN` scope and `API` scope. If you are just consuming model APIs, using `API` scope is recommended. - -### ADMIN Scope - -- Grants full acccess to the SDK. -- Grants full access to CLI operations. -- Grants access to Model API endpoints. - -### API Scope - -- Grants access to Model API endpoints. - -## Using the API credentials - -In order to use key-based credentials, you need to set two environment variables `FAL_KEY_ID` and `FAL_KEY_SECRET`: - -```bash -export FAL_KEY_ID="your-key-id" -export FAL_KEY_SECRET="your-key-secret" -``` - -fal-serverless will automatically detect the above variables. Key-based auth will take precedence if both key-based and GitHub auth are set in an environment. diff --git a/docsite/docs/fal-serverless/authentication/github.md b/docsite/docs/fal-serverless/authentication/github.md deleted file mode 100644 index 84c09af1..00000000 --- a/docsite/docs/fal-serverless/authentication/github.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Github Authentication - -`fal-serverless` uses GitHub authentication by default. This means that in order to use fal-serverless you need to have a [GitHub account](https://github.com/login). - -## Logging in - -[Installing fal-serverless](/fal-serverless/quickstart) Python library gives access to `fal-serverless CLI`, which you can use for authentication. In your terminal, you can run the following command: - -```bash -fal-serverless auth login -``` - -Follow the instructions on your terminal to confirm your credentials. Once you're done, you should get a success message in your terminal. - -Now you're ready to write your first fal-serverless function! 
**Note:** Your login credentials are persisted on your local machine and cannot be transferred to another machine. If you want to use fal-serverless on multiple machines, you either need to log in on each one or use [key-based credentials](#key_based_credentials).
diff --git a/docsite/docs/fal-serverless/authentication/model_apis.md deleted file mode 100644 index 1a6b1e1e..00000000 --- a/docsite/docs/fal-serverless/authentication/model_apis.md +++ /dev/null @@ -1,14 +0,0 @@

# Authenticate with Model APIs

If you are authenticating with a Model API, you can create a key pair by clicking the [Create a Key Pair](https://serverless.fal.ai/dashboard) button in our dashboard. Make sure to take note of your Key Secret, as it will not be shown again.

Once you have the key pair, pass it in an Authorization header to use the Model API of your choice.

### cURL

```bash
curl -X POST https://110602490-sharedsdxl.gateway.alpha.fal.ai \
  -H "Authorization: Basic $FAL_KEY_ID:$FAL_KEY_SECRET" \
  -H "Content-Type: application/json" \
  -d '{"prompt": "an astronaut in the jungle, cold color palette with butterflies in the background, highly detailed, 8k", "height": 1024, "width": 1024, "num_inference_steps": 30, "guidance_scale": 5, "negative_prompt": "blurry", "num_images_per_prompt": 1}'
```
diff --git a/docsite/docs/fal-serverless/examples/_category_.yml deleted file mode 100644 index a07e0d6f..00000000 --- a/docsite/docs/fal-serverless/examples/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@

label: "Examples"
position: 2
collapsible: false
collapsed: false
link:
  type: generated-index
  title: Examples
diff --git a/docsite/docs/fal-serverless/examples/controlnet.md deleted file mode 100644 index 710e6704..00000000 --- a/docsite/docs/fal-serverless/examples/controlnet.md +++ /dev/null @@ -1,152 +0,0 @@

---
sidebar_position: 2
---

# Restyle Room Photos with ControlNet

In this example, we will demonstrate how to use fal-serverless for deploying a ControlNet model.

## 1. 
Create a new file called controlnet.py -```python -from __future__ import annotations -from fal_serverless import isolated, cached - -from pathlib import Path -import base64 -import io - -requirements = [ - "controlnet-aux", - "diffusers", - "torch", - "mediapipe", - "transformers", - "accelerate", - "xformers" -] - - -def get_image_from_url_as_bytes(url: str) -> bytes: - import requests - - response = requests.get(url) - # This will raise an exception if the request returned an HTTP error code - response.raise_for_status() - return response.content - -def read_image_bytes(file_path): - with open(file_path, "rb") as file: - image_bytes = file.read() - return image_bytes - -@cached -def load_model(): - import torch - from diffusers import StableDiffusionControlNetPipeline, ControlNetModel - - controlnet = ControlNetModel.from_pretrained( - "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16 - ) - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "peterwilli/deliberate-2", controlnet=controlnet, torch_dtype=torch.float16 - ) - - pipe = pipe.to("cuda:0") - pipe.unet.to(memory_format=torch.channels_last) - pipe.controlnet.to(memory_format=torch.channels_last) - return pipe - - -def resize_image(input_image, resolution): - import cv2 - import numpy as np - - H, W, C = input_image.shape - H = float(H) - W = float(W) - k = float(resolution) / min(H, W) - H *= k - W *= k - H = int(np.round(H / 64.0)) * 64 - W = int(np.round(W / 64.0)) * 64 - img = cv2.resize( - input_image, - (W, H), - interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA, - ) - return img - -@isolated( - requirements=requirements, - machine_type="GPU", - keep_alive=30, - serve=True -) -def generate( - image_url: str, prompt: str, num_samples: int, num_steps: int, gcs=False -) -> list[bytes] | None: - - from controlnet_aux import CannyDetector - from PIL import Image - import numpy as np - import uuid - import os - from base64 import b64encode - - image_bytes = get_image_from_url_as_bytes(image_url) - - pipe = load_model() - image = Image.open(io.BytesIO(image_bytes)) - - canny = CannyDetector() - init_image = image.convert("RGB") - - init_image = resize_image(np.asarray(init_image), 512) - detected_map = canny(init_image, 100, 200) - image = Image.fromarray(detected_map) - - negative_prompt = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality" - results = pipe( - prompt=prompt, - image=image, - negative_prompt=negative_prompt, - num_inference_steps=num_steps, - num_images_per_prompt=num_samples - ).images - - result_id = uuid.uuid4() - out_dir = Path(f"/data/cn-results/{result_id}") - out_dir.mkdir(parents=True, exist_ok=True) - - - for i, res in enumerate(results): - res.save(out_dir / f"res_{i}.png") - - file_names = [ - f for f in os.listdir(out_dir) if os.path.isfile(os.path.join(out_dir, f)) - ] - - list_of_bytes = [read_image_bytes(out_dir / f) for f in file_names] - raw_image = list_of_bytes[0] - - return b64encode(raw_image).decode("utf-8") -``` - -## 2. Deploy the model as an endpoint -To use this fal-serverless function as an API, you can serve it with the `fal-serverless` CLI command: - -```bash -fal-serverless fn serve controlnet.py generate --alias controlnet --auth public -``` - -This will return a URL like: -``` -Registered a new revision for function 'controltest' (revision='c75db134-23f0-4863-94cd-3358d6c8d94c'). -URL: https://user_id-controlnet.gateway.alpha.fal.ai -``` - -## 3. 
Test it out -```bash -curl https://user_id-controlnet.gateway.alpha.fal.ai/ -H 'content-type: application/json' -H 'accept: application/json' -d '{"image_url":"https://restore.tchabitat.org/hubfs/blog/2019%20Blog%20Images/July/Old%20Kitchen%20Cabinets%20-%20Featured%20Image.jpg","prompt":"scandinavian kitchen","num_samples":1,"num_steps":30}' -``` - -This should return a JSON with the image encoded in base64. diff --git a/docsite/docs/fal-serverless/examples/image-restoration.md b/docsite/docs/fal-serverless/examples/image-restoration.md deleted file mode 100644 index 8507e58c..00000000 --- a/docsite/docs/fal-serverless/examples/image-restoration.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Restore Old Images with Transformers - -In this example, we will demonstrate how to use the [SwinIR](https://github.com/JingyunLiang/SwinIR) library and fal-serverless to restore images. SwinIR is an image restoration library that uses a Swin Transformer to restore images. The [Swin Transformer](https://arxiv.org/abs/2103.14030) is a type of neural network architecture that is designed for processing images. The Swin Transformer is similar to the popular Vision Transformer (ViT) architecture, but it uses a hierarchical structure that allows it to process images more efficiently. SwinIR uses a pre-trained Swin Transformer to restore images. - -### Step 1: Install fal-serverless and authenticate - -```bash -pip install fal-serverless -fal-serverless auth login -``` - -[More info on authentication](/category/authentication). - -### Step 2: Import fal_serverless and define requirements - -In a new python file, `ir.py`, import fal_serverless and define a requirements list: - -```python -from fal_serverless import isolated, cached - -requirements = [ - "timm==0.6.*", - "numpy==1.24.*", - "torch==1.13.*", - "opencv-python-headless==4.7.*", - "Pillow==9.4.*",] -``` - -### Step 3: Define cached functions - -Next, we define two functions that will be cached using the [@cached decorator](../isolated_functions/cached_function). - -```python -@cached -def download_model(): - import os - model_path = "/data/models/swinir/003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth" - if not os.path.exists(model_path): - print("Downloading SwinIR model.") - url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth" - os.system(f"mkdir -p /data/models/swinir && cd /data/models/swinir && wget {url}") - print("Done.") - -@cached -def clone_repo(): - import os - repo_path = "/data/repos/swinir" - if not os.path.exists(repo_path): - print("Cloning SwinIR repository") - os.system("git clone --depth=1 https://github.com/JingyunLiang/SwinIR /data/repos/swinir") -``` - -The `download_model` function downloads the SwinIR model if it is not already present in the `/data/models/swinir` directory. The `clone_repo` function clones the SwinIR repository from GitHub if it is not already present in the `/data/repos/swinir` directory. - -### Step 4: Define the isolated function - -Next, we define the `run` function that will be executed using fal-serverless. 
- -```python -@isolated(requirements=requirements, machine_type="GPU", keep_alive=1800) -def run(data): - import os - import sys - import io - import uuid - from PIL import Image - - # Setup - clone_repo() - download_model() - - os.chdir('/data/repos/swinir') - imagedir = str(uuid.uuid4()) - os.system(f'mkdir -p {imagedir}') - if os.path.exists("results/swinir_real_sr_x4"): - os.system('rm -rf results/swinir_real_sr_x4/*') - imagename = str(uuid.uuid4()) - img = Image.open(io.BytesIO(data)) - if img.mode in ("RGBA", "P"): - img = img.convert("RGB") - basewidth = 256 - wpercent = (basewidth/float(img.size[0])) - hsize = int((float(img.size[1])*float(wpercent))) - img = img.resize((basewidth,hsize), Image.ANTIALIAS) - img.save(f"{imagedir}/0.jpg", "JPEG") - command = ( - f"python main_test_swinir.py --task real_sr --folder_lq {imagedir} --scale 4 " - "--model_path /data/models/swinir/003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth" - ) - os.system(command) - os.system(f"rm -rf {imagedir}") - with open('results/swinir_real_sr_x4/0_SwinIR.png', "rb") as f: - result_data = f.read() - return result_data -``` - -In this function, we first call the `clone_repo` and `download_model` functions to ensure that we have the SwinIR repository and model downloaded. We then create a directory for the input image and save the image as a JPEG file. We then execute the SwinIR command to restore the image. Finally, we read the restored image and return it in bytes. - -### Step 5: Restore an image - -Finally, we try to restore an image: - -```python -with open("test_image.png", "rb") as f: - data = f.read() - result = run(data) - -with open("result.png", "wb") as f: - f.write(result) -``` - -Here, we're openning `test_image.png` and passing its bytes to the isolated `run` function. We then save the result in `result.png`. - -### Conclusion - -This example demonstrates how to use the SwinIR model for image restoration by using fal-serverless. This type of image restoration process can be performed in an isolated and scalable manner, while using minimal local resources. diff --git a/docsite/docs/fal-serverless/examples/llama.md b/docsite/docs/fal-serverless/examples/llama.md deleted file mode 100644 index 02c3fb14..00000000 --- a/docsite/docs/fal-serverless/examples/llama.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -sidebar_position: 3 ---- - -# Run Llama 2 with llama.cpp (OpenAI API Compatible Server) - -In this example, we will demonstrate how to use fal-serverless for deploying Llama 2 and serving it through a OpenAI API compatible server with SSE. - -# 1. Use already deployed example -:::tip - -You can go to [Llama 2 Playground](https://llama.fal.ai/) to see it in action. - -::: - -If you want to use an already deployed API, here is a public endpoint running on a T4: - -https://110602490-llama-server.gateway.alpha.fal.ai/docs - -To use this endpoint: - -```bash -curl -X POST -H "Content-Type: application/json" \ --H "Accept: text/event-stream" \ --H "Authorization: Access-Control-Allow-Origin: *" \ --d '{ - "messages": [ - { - "role": "user", - "content": "can you write a happy story" - } - ], - "stream": true, - "max_tokens": 2000 - }' \ -https://110602490-llama-server.gateway.alpha.fal.ai/v1/chat/completions \ -``` - -This should return a streaming response. - -# 2. To deploy your own version: - -In this example, we will use the conda backend so that we can install CUDA dependencies. 
First, create the files below:

**llama_cpp_env.yml**

```yaml
name: myenv
channels:
  - conda-forge
  - nvidia/label/cuda-12.0.1
dependencies:
  - cuda-toolkit
  - pip
  - pip:
      - pydantic==1.10.7
      - llama-cpp-python[server]
      - cmake
      - setuptools
```

**llama_cpp.py**

```python
from fal_serverless import isolated

MODEL_URL = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/resolve/main/llama-2-13b-chat.ggmlv3.q4_0.bin"
MODEL_PATH = "/data/models/llama-2-13b-chat.ggmlv3.q4_0.bin"

@isolated(
    kind="conda",
    env_yml="llama_cpp_env.yml",
    machine_type="M",
)
def download_model():
    print("---> This is download_model()")
    import os

    if not os.path.exists("/data/models"):
        os.system("mkdir /data/models")
    if not os.path.exists(MODEL_PATH):
        print("Downloading Llama 2 model.")
        os.system(f"cd /data/models && wget {MODEL_URL}")

@isolated(
    kind="conda",
    env_yml="llama_cpp_env.yml",
    machine_type="GPU-T4",
    exposed_port=8080,
    keep_alive=30
)
def llama_server():
    import uvicorn
    from llama_cpp.server import app

    settings = app.Settings(model=MODEL_PATH, n_gpu_layers=96)

    server = app.create_app(settings=settings)
    uvicorn.run(server, host="0.0.0.0", port=8080)
```

This script has two main functions: one to download the model, and a second one to start the server.

We first need to download the model. You do this by calling `download_model()` from a Python context.

We then deploy this as a public endpoint:

```bash
fal-serverless function serve llama_cpp.py llama_server --alias llama-server --auth public
```

This should return a URL, and you can use it like the one above. The first deploy might take a little while.
diff --git a/docsite/docs/fal-serverless/examples/model_apis.md deleted file mode 100644 index c12f9466..00000000 --- a/docsite/docs/fal-serverless/examples/model_apis.md +++ /dev/null @@ -1,78 +0,0 @@

---
sidebar_position: 1
---

# Model APIs

In 2 short steps, programmatically create a beautiful image using the SDXL model.

**Step 1.** Click on the [Create a Key Pair](https://serverless.fal.ai/dashboard) button in our dashboard to create a new key. Make sure to take note of your Key Secret, as it will not be shown again.

**Step 2.** Use your key pair in an Authorization header to call model APIs provided by fal.

The following is the URL for the SDXL model API:

```
https://110602490-sharedsdxl.gateway.alpha.fal.ai
```

You can call the Model API with the language or framework of your choice.
Here are some options:

### Python

```python
import requests

headers = {
    "Authorization": "Basic <KEY_ID>:<KEY_SECRET>"
}
response = requests.post(
    "https://110602490-sharedsdxl.gateway.alpha.fal.ai",
    headers=headers,
    json={
        "prompt": "an astronaut in the jungle, cold color palette with butterflies in the background, highly detailed, 8k",
        "height": 1024,
        "width": 1024,
        "num_inference_steps": 30,
        "guidance_scale": 5,
        "negative_prompt": "blurry",
        "num_images_per_prompt": 1
    },
)
```

### Javascript

```javascript
import fetch from "node-fetch";

const headers = {
  Authorization: "Basic <KEY_ID>:<KEY_SECRET>",
};

const body = {
  prompt: "an astronaut in the jungle, cold color palette with butterflies in the background, highly detailed, 8k",
  height: 1024,
  width: 1024,
  num_inference_steps: 30,
  guidance_scale: 5,
  negative_prompt: "blurry",
  num_images_per_prompt: 1,
};

fetch("https://110602490-sharedsdxl.gateway.alpha.fal.ai", {
  method: "POST",
  body: JSON.stringify(body),
  headers: headers,
})
  .then((response) => response.json())
  .then((data) => console.log(data))
  .catch((error) => console.error("Error:", error));
```

### cURL

```bash
curl -X POST https://110602490-sharedsdxl.gateway.alpha.fal.ai \
  -H "Authorization: Basic $FAL_KEY_ID:$FAL_KEY_SECRET" \
  -H "Content-Type: application/json" \
  -d '{"prompt": "an astronaut in the jungle, cold color palette with butterflies in the background, highly detailed, 8k", "height": 1024, "width": 1024, "num_inference_steps": 30, "guidance_scale": 5, "negative_prompt": "blurry", "num_images_per_prompt": 1}'
```
diff --git a/docsite/docs/fal-serverless/examples/stable-diffusion.md deleted file mode 100644 index dcb861f3..00000000 --- a/docsite/docs/fal-serverless/examples/stable-diffusion.md +++ /dev/null @@ -1,137 +0,0 @@

---
sidebar_position: 1
---

# Generate Images from Text with Stable Diffusion

In this example, we will deploy Stable Diffusion using fal-serverless. As we do that, we will learn about important fal-serverless concepts.

## Step 1: Install fal-serverless and authenticate

```bash
pip install fal-serverless
fal-serverless auth login
```

[More info on authentication](/category/authentication).
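If this example needs to run somewhere without a browser (CI, a remote box), the key-based credentials described in the authentication section work just as well; a sketch, reusing only the commands and variables documented there:

```bash
# On a machine where you can complete the GitHub login, mint a key once:
fal-serverless key generate --scope ADMIN

# In the remote environment, export the generated values; fal-serverless
# detects them automatically and they take precedence over GitHub auth:
export FAL_KEY_ID="your-key-id"
export FAL_KEY_SECRET="your-key-secret"
```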
- - -## Step 2: Import required libraries - -First, we need to define the requirements for our project: - -```python -requirements = [ - "accelerate", - "diffusers[torch]>=0.10", - "ftfy", - "torch", - "torchvision", - "transformers", - "triton", - "safetensors", - "xformers==0.0.16", -] -``` - -## Step 3: Define the generate function - -Next, we will define the `generate` function, which will be responsible for generating an image using Stable Diffusion: - -```python -from fal_serverless import isolated - -@isolated(requirements=requirements, machine_type="GPU-T4", keep_alive=30) -def generate(prompt: str): - import torch - import os - import io - import base64 - from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler - - model_id = "runwayml/stable-diffusion-v1-5" - os.environ['TRANSFORMERS_CACHE'] = '/data/hugging_face_cache' - - pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16, - cache_dir=os.environ['TRANSFORMERS_CACHE']) - pipe = pipe.to("cuda") - pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) - - image = pipe(prompt, num_inference_steps=20).images[0] - - buf = io.BytesIO() - image.save(buf, format="PNG") - return buf -``` - -The `@isolated` decorator is the most important building block in fal-serverless. It lets you run any Python function in the cloud instantly on many types of GPUs. In this example, the decorator accepts a `requirements` argument which defines the libraries needed to run the function, a `machine_type` argument that specifies the machine that we want to run this function on and a `keep_alive` argument that specifies the number of seconds to keep the underlying machine alive in case there are no other function calls. - -## Step 4: Generate the image -Now that we defined the `generate` function, we can use it to generate an image by passing a prompt. In fal-serverless, you call an `@isolated` function just as if it is a local Python function. - -```python -image_data = generate("Donkey walking on clouds") -``` - -This will generate an image based on the given prompt "Donkey walking on clouds" and store it in image_data. - -To save this image locally: -```python -with open("test.png", "wb") as f: - f.write(image_data.getvalue()) -``` - -## Step 5: Make it faster with @cached -You may notice that we are loading the model to GPU VRAM every time we call the generate function. We will now introduce another building block in fal-serverless: the `@cached` decorator. This decorator lets you keep expensive operations in memory. By caching the model, we can get improved performance. 
Our code now looks like: - -```python -from fal_serverless import isolated, cached - -requirements = [ - "accelerate", - "diffusers[torch]>=0.10", - "ftfy", - "torch", - "torchvision", - "transformers", - "triton", - "safetensors", - "xformers==0.0.16", -] - -@cached -def model(): - import torch - import os - import io - import base64 - from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler - - model_id = "runwayml/stable-diffusion-v1-5" - os.environ['TRANSFORMERS_CACHE'] = '/data/hugging_face_cache' - - pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16, - cache_dir=os.environ['TRANSFORMERS_CACHE']) - pipe = pipe.to("cuda") - pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) - return pipe - -@isolated(requirements=requirements, machine_type="GPU", keep_alive=30) -def generate(prompt: str): - import io - - pipe = model() - image = pipe(prompt, num_inference_steps=50).images[0] - - buf = io.BytesIO() - image.save(buf, format="PNG") - return buf - -image_data = generate("astronaut riding a horse") - -with open("test.png", "wb") as f: - f.write(image_data.getvalue()) -``` diff --git a/docsite/docs/fal-serverless/isolated_functions/_category_.yml b/docsite/docs/fal-serverless/isolated_functions/_category_.yml deleted file mode 100644 index d66c1ca0..00000000 --- a/docsite/docs/fal-serverless/isolated_functions/_category_.yml +++ /dev/null @@ -1,7 +0,0 @@ -label: "Isolated Functions" -position: 4 -collapsible: false -collapsed: false -link: - type: generated-index - title: Isolated Functions diff --git a/docsite/docs/fal-serverless/isolated_functions/cached_function.md b/docsite/docs/fal-serverless/isolated_functions/cached_function.md deleted file mode 100644 index 2b521af1..00000000 --- a/docsite/docs/fal-serverless/isolated_functions/cached_function.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Cached functions: Caching the Output of Functions for Improved Performance -The `@cached` decorator is a tool for improving the performance of your isolated function. When a function is decorated with `@cached`, it is referred to as a "cached function." - -A cached function can be called inside an isolated function, and the output of the cached function is cached. If the cached function is called in an isolated environment and the environment has been [kept alive](./keep_alive) since the last time the same cached function was called, then the cached function is not executed. Instead, a cached return value is returned. This can significantly improve the performance of your functions by reducing the time it takes to repeatedly execute code and minimizing the resource consumption. - -Here's an example that demonstrates how to use the `@cached` decorator: - -```python -from fal_serverless import isolated, cached - -@cached -def my_cached_function(x): - # Perform some time-consuming calculation - import time - time.sleep(10) - return x ** 2 - -@isolated(keep_alive=10) -def my_isolated_function(x): - result = my_cached_function(x) - return result - -# Call the isolated function multiple times -result1 = my_isolated_function(2) # Takes more than 10 seconds -result2 = my_isolated_function(2) # Almost instant - -print(result1) # Output: 4 -print(result2) # Output: 4 -``` - -In the above example, we have a cached function `my_cached_function` that takes one argument x and performs a time-consuming calculation to return x ** 2. 
We then have an isolated function `my_isolated_function` that calls the cached function. `my_isolated_function` has `keep_alive` set for 10 seconds. When you try this example, you'll see that the second call does not wait for 10 seconds and returns the same result right away. This is because the `@cached` decorator has cached the output of `my_cached_function` in the isolated environment. Since the environment is kept alive for 10 seconds, the second call to `my_cached_function` returns a cached result, instead of re-executing the function. diff --git a/docsite/docs/fal-serverless/isolated_functions/keep_alive.md b/docsite/docs/fal-serverless/isolated_functions/keep_alive.md deleted file mode 100644 index fc4be49e..00000000 --- a/docsite/docs/fal-serverless/isolated_functions/keep_alive.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -sidebar_position: 3 ---- - -# Keep-Alive - Reusing functions - -The `keep_alive` argument is used with the `@isolated` decorator to specify the number of seconds that a target environment should be kept alive after a function is executed. - -The purpose of the `keep_alive` argument is to optimize the performance of isolated functions by re-using environments. By keeping the environment alive for a specified number of seconds after function execution, subsequent function calls can be executed more quickly, since the environment does not need to be created from scratch each time. - -The `keep_alive` argument is specified when the `@isolated` decorator is applied to a function. For example: - -```python -from fal_serverless import isolated - -@isolated(keep_alive=300) -def get_two(): - return 1+1 - -get_two() -``` - -In this example, the `@isolated` decorator is applied to the `get_two` function with a `keep_alive` argument set to 300 seconds. This means that the target environment will be kept alive for 300 seconds after the function execution is complete. If the same function is called again within 300 seconds, the target environment will be re-used, and the function will be executed in the same environment as before. - -Here's another example: - -```python -from fal_serverless import isolated - -@isolated(keep_alive=10, requirements=["requests"]) -def get_response(url): - import requests - response = requests.get(url) - return response.text - -get_response('https://www.example.com') # First call (slower) - -get_response('https://www.anotherexample.com') # Second call (faster) -``` - -The first time `get_response` is called, the target environment is created, and the function is executed. The second time `get_response` is called, the target environment is re-used, since it was kept alive for 10 seconds after the first function call. - -Note that the `keep_alive` timer is restarted each time `get_response` is called. - -In summary, the `keep_alive` argument is a useful feature of the `@isolated` decorator, allowing you to optimize the performance of isolated functions by re-using environments that are still alive. By controlling the number of seconds that the environment will be kept alive, you can strike a balance between performance and resource utilization. 
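A quick way to convince yourself of this behavior is to time two consecutive calls; a rough sketch (absolute numbers depend entirely on provisioning and network, so treat them as illustrative):

```python
import time
from fal_serverless import isolated

@isolated(keep_alive=60, requirements=["pyjokes"])
def get_joke():
    import pyjokes
    return pyjokes.get_joke()

for attempt in (1, 2):
    start = time.perf_counter()
    get_joke()
    print(f"call {attempt}: {time.perf_counter() - start:.1f}s")

# The first call pays the environment setup cost; the second call arrives
# within the keep_alive window and reuses the warm environment.
```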
diff --git a/docsite/docs/fal-serverless/isolated_functions/managing_environments.md deleted file mode 100644 index 3d9e2dce..00000000 --- a/docsite/docs/fal-serverless/isolated_functions/managing_environments.md +++ /dev/null @@ -1,86 +0,0 @@

---
sidebar_position: 2
---

# Managing Environments

The `@isolated` decorator supports two types of target environments: `virtualenv` and `conda`. To specify the kind of environment to create, pass the `kind` argument to the decorator. For example:

```python
@isolated(kind="virtualenv")
def my_function():
    ...

@isolated(kind="conda")
def my_other_function():
    ...
```

The default `kind` is `virtualenv`, so `@isolated()` is the same as `@isolated(kind="virtualenv")`.

## `virtualenv` environments

When using a `virtualenv` environment, you can specify additional dependencies using the `requirements` argument, which takes a list of package names. For example:

```python
@isolated(kind="virtualenv", requirements=["pyjokes"])
def my_function():
    ...
```

You can also specify an exact version using the `==` operator.

```python
@isolated(kind="virtualenv", requirements=["pyjokes==0.0.6"])
def my_function():
    ...
```

## `conda` environments

`conda` environments allow you to define both system and Python packages. When using a `conda` environment, dependencies can be specified using the `packages` argument, which takes a list of package names. For example:

```python
@isolated(kind="conda", packages=["pytorch"])
def my_other_function():
    ...
```

The conda environment has the conda-forge and pytorch channels enabled by default.

If you need more control over packages and channels, you can use either the `env_yml` or `env_dict` argument instead of `packages`.

`env_yml` accepts a path to a YAML file with a [conda environment definition](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#create-env-file-manually). For example:

```python
@isolated(kind="conda", env_yml="my_env.yml")
def my_other_function():
    ...
```

where `my_env.yml` could be something like:

```yaml
name: my_env
channels:
  - pytorch
  - defaults
dependencies:
  - pytorch
```

Similarly, `env_dict` is a dictionary representation of a conda environment YAML:

```python
my_env = {
    "name": "my_env",
    "channels": ["pytorch", "defaults"],
    "dependencies": ["pytorch"]
}

@isolated(kind="conda", env_dict=my_env)
def my_other_function():
    ...
```

In the example above, `my_env` has the same structure as an environment YAML file. You can think of it as a parsed version of a conda environment YAML.
diff --git a/docsite/docs/fal-serverless/isolated_functions/overview.md deleted file mode 100644 index 86eb5d64..00000000 --- a/docsite/docs/fal-serverless/isolated_functions/overview.md +++ /dev/null @@ -1,42 +0,0 @@

---
sidebar_position: 1
---

# Overview

Isolated Functions are Python functions that use the `@isolated` decorator.

With `@isolated` you can run Python functions in a serverless manner. This is accomplished by creating a dedicated environment in the cloud when an isolated function is called. The environment is then destroyed to save resources. This allows you to run functions in an isolated and scalable manner, freeing up local resources and improving performance.
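Before the individual arguments are covered below, here is a minimal sketch of the idea end to end; the `numpy` dependency and machine type are only illustrative:

```python
from fal_serverless import isolated

@isolated(requirements=["numpy"], machine_type="S", keep_alive=30)
def column_means(rows):
    # Executed in the cloud, inside an environment that has numpy installed
    import numpy as np
    return np.asarray(rows).mean(axis=0).tolist()

print(column_means([[1, 2], [3, 4]]))  # called like a local function; prints [2.0, 3.0]
```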
The `@isolated` decorator accepts several arguments, including [`keep_alive`](./keep_alive), [`requirements`](managing_environments), [`machine_type`](../scaling/machine_types) and more. The `keep_alive` argument determines the number of seconds that the target environment should be kept alive after function execution is complete. The `requirements` argument is a list of packages to be installed in the target environment. The `machine_type` argument lets you specify the size of the machine on which the isolated function is executed.

You can also define [cached functions](./cached_function) for time-consuming operations and improve the performance of your isolated functions.

By using `@isolated`, you can simplify the process of running complex functions in the cloud, making it easier to scale and manage your applications.

## Calling an isolated function inside an isolated function

It is possible to call another isolated function inside an already running isolated function.
This may be useful if you have a function you would like to isolate that depends on another isolated function.

```py
from fal_serverless import isolated

@isolated(requirements=["pyjokes"], machine_type='GPU', keep_alive=10)
def isolated_joke() -> str:
    import pyjokes
    return pyjokes.get_joke()

@isolated(requirements=["cowsay", "fal_serverless"])
def tell_jokes():
    import cowsay
    cowsay.cow(isolated_joke())
    cowsay.fox(isolated_joke())
```

The example above uses the isolated function `isolated_joke` inside of the isolated function `tell_jokes`.
Both of the calls to `isolated_joke` will be handled by fal-serverless separately and a machine will be provisioned as necessary to run them.
Since the environment and the machine type can be different, this is a completely new machine being provisioned.

The `fal-serverless` package needs to be added to the requirements of the second function explicitly.
Notice that authentication is done automatically by the system.
diff --git a/docsite/docs/fal-serverless/isolated_functions/scheduling.md deleted file mode 100644 index e5a25d20..00000000 --- a/docsite/docs/fal-serverless/isolated_functions/scheduling.md +++ /dev/null @@ -1,82 +0,0 @@

---
sidebar_position: 6
---

# Scheduling runs

Functions can also be scheduled to run at specific times and/or frequencies using cron expressions.

The command to schedule a function expects a cron expression, a path to a Python file containing an `@isolated` function, and the function name:

```
fal-serverless function schedule "<cron-expression>" <file> <function-name>
```

## Example

Let's write a fun example. How about a Slack bot that sends the joke of the day to a Slack channel.

1. First write your `@isolated` function:

```python
from fal_serverless import isolated

@isolated(requirements=["pyjokes", "httpx"])
def joke_of_the_day():
    import httpx
    import pyjokes

    joke = pyjokes.get_joke()
    message = f"Hello! I'm the funny bot, here's the joke of the day:\n> {joke}"

    slack_token = ""
    slack_webhook_url = f"https://hooks.slack.com/services/{slack_token}"
    httpx.post(slack_webhook_url, json={"text": message})
```

2. Schedule it so it's executed once a day at 7am UTC:

```
fal-serverless function schedule "0 7 * * *" jokes.py joke_of_the_day
```

*Voilà!* That's all it takes to have your `@isolated` function scheduled.

**Tip:** if you're not familiar with cron expressions, this [cron editor](https://crontab.guru/) can help you get started.
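For quick reference, cron expressions use five fields (minute, hour, day of month, month, day of week); a few common patterns:

```
0 7 * * *     # every day at 07:00 UTC
*/15 * * * *  # every 15 minutes
0 0 * * 1     # every Monday at 00:00 UTC
0 9 1 * *     # first day of every month at 09:00 UTC
```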
## Managing scheduled functions

Once scheduled, functions can be managed with a few commands. Let's check them out.

### List all scheduled functions

Get a list of all currently scheduled functions with:

```
fal-serverless crons list
```

### Cancel scheduling

In order to cancel a scheduled function, run:

```
fal-serverless crons cancel <cron-id>
```

### List activations

You can get the activation (i.e. execution) history of a particular function with:

```
fal-serverless crons activations <cron-id>
```

You can also specify `--limit=n` to list the `n` most recent activations. It defaults to 15.

## Logs

Since scheduled functions run asynchronously, logs are stored so they can be retrieved later. This is particularly useful for debugging executions. Get the logs for a particular activation with:

```
fal-serverless crons logs <cron-id> <activation-id>
```
diff --git a/docsite/docs/fal-serverless/quickstart.md deleted file mode 100644 index 9f4cc57b..00000000 --- a/docsite/docs/fal-serverless/quickstart.md +++ /dev/null @@ -1,63 +0,0 @@

---
sidebar_position: 1
slug: /
---

# Quickstart

fal-serverless is a serverless Python runtime that lets you run and scale code in the cloud with no infra management.

With fal-serverless, you can build pipelines, serve ML models, and scale them up to many users, scaling down to zero when you are not using any resources.

fal-serverless supports custom compute resources including GPUs (A100s and T4s).

Let's discover **fal-serverless in less than 2 minutes**.

## Install fal-serverless

Get started by installing `fal-serverless`:

```bash
pip install fal-serverless
```

Login using the `auth` command:

```bash
fal-serverless auth login
```

## Create a Python script

Create a new Python file, for example `try_fal_serverless.py`.

In this file, define a decorated function that returns a joke:

```python
from fal_serverless import isolated

@isolated(requirements=["pyjokes"])
def isolated_joke():
    import pyjokes
    return pyjokes.get_joke()

print(isolated_joke())
```

## Run your script

Run your script with `python`:

```bash
python try_fal_serverless.py
```

This should print out a joke, for example `How many programmers does it take to change a lightbulb? None, that's a hardware problem.`

Congratulations! You have successfully run a function on `fal-serverless`!

## Ready for more?

See our [examples](/category/examples).
diff --git a/docsite/docs/fal-serverless/reference.md deleted file mode 100644 index 8da23f30..00000000 --- a/docsite/docs/fal-serverless/reference.md +++ /dev/null @@ -1,230 +0,0 @@

---
sidebar_position: 8
---

# Reference

This is a reference document for `fal-serverless`.

## `isolated` decorator

The `isolated` decorator lets you define a function that will be run in an isolated environment by fal-serverless. Here is a simple example:

```python
from fal_serverless import isolated

@isolated()
def my_function():
    return "Hello World"
```

`my_function` is now what we call an **isolated function**. Subsequent `my_function` calls, such as `my_function()`, are sent to fal-serverless. This means that `my_function` is not run locally; instead, its contents are sent to fal-serverless, executed there, and the result is returned to the local environment.

The `isolated` decorator accepts a number of arguments, which are described below.
- -### `requirements` - -You can provide custom requirements to your isolated functions. These requirement are installed only inside a fal-serverless environment and not in your local Python environment. - -```python -from fal_serverless import isolated - -requirements = ["pyjokes"] - -@isolated(requirements=requirements) -def my_function(): - import pyjokes - return pyjokes.get_joke() -``` - -In the above example, it is not necessary to install the `pyjokes` module locally. Providing it in a `requirements` list results in fal-serverless automatically installing it in the fal-serverless environment. Note how `pyjokes` is imported inside the `my_function` definition. This way, the Python interpreter will not look for `pyjokes` in your locally installed modules. - -### `machine_type` - -Isolated functions can run on different machine types. Here's a list of currently supported machine types: - -- `XS`: 0.25 CPU cores, 256MB RAM -- `S`: 0.50 CPU cores, 1GB RAM -- `M`: 2 CPU cores, 8GB RAM -- `L`: 4 CPU cores, 32GB RAM -- `XL`: 8 CPU cores, 128GB RAM -- `GPU-T4`: 4 CPU cores, 26GB RAM, 1 GPU core (T4, 16GB VRAM) -- `GPU`: 8 CPU cores, 64GB RAM, 1 GPU core (A100, 40GB VRAM) - -The default machine type is `XS`. Machine type can be specified when defining an isolated function: - -```python -from fal_serverless import isolated - -requirements = ["pyjokes"] - -@isolated(requirements=requirements, machine_type="M") -def my_function(): - import pyjokes - return pyjokes.get_joke() -``` - -You can also use an isolated function to define a new one with a different machine type: - -```python -my_function_L = my_function.on(machine_type="L") -``` - -In the above example, `my_function_L` is a new isolated function that has the same contents as `my_function`, but it will run on a machine type `L`. - -Both functions can be called: - -```python -my_function() # executed on machine type `M` -my_function_L() # same as my_function but executed on machine type `L` -``` - -`my_function` is executed on machine type `M`. And `my_function_L`, which has the same logic as `my_function`, is now executed on machine type `L`. - -### Local development - -Sometimes you might want to test your isolated functions locally before running them on fal-serverless. For this purpose we provided a special `local` host environment. Taking `my_function` from the previous example: - -```python -from fal_serverless import isolated, local - -my_function_local = my_function.on(local) -``` - -`my_function_local` is still an isolated function, but it will not run in fal-serverless. Instead, fal-serverless will create a dedicated isolated Python environment on your local machine and execute `my_function_local` there. - -The `.on()` method accepts the same arguments as the `isolated` decorator. So you can define a new isolated function based on `my_function_local` this way: - -```python -from fal_serverless import cloud - -my_function_cloud = my_function_local.on(cloud, machine_type="M") -``` - -The resulting `my_function_cloud` is the same as the original `my_function`. It has the same logic and it is executed on a machine type `M`. - -Local isolated functions can also be defined with a decorator by providing a `host` argument: - -```python -from fal_serverless import isolated, local - -requirements = ["pyjokes"] - -@isolated(requirements=requirements, host=local) -def my_function_local2(): - import pyjokes - return pyjokes.get_joke() -``` - -In this case, `my_function_local` and `my_function_local2` will have exactly the same behavior. 
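One practical use of the `local` host is testing: the same function object can be exercised in a local virtualenv during development or CI and pointed at the cloud afterwards. A small sketch (the test function name is arbitrary):

```python
from fal_serverless import isolated, local

@isolated(requirements=["pyjokes"])
def get_joke():
    import pyjokes
    return pyjokes.get_joke()

def test_get_joke():
    # Runs in a dedicated local environment instead of fal-serverless
    assert isinstance(get_joke.on(local)(), str)
```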
### `credentials`

The `credentials` argument lets you provide key-based credentials:

```python
from fal_serverless import isolated, CloudKeyCredentials

credentials = CloudKeyCredentials(key_id='your-key-id', key_secret='your-key-secret')

@isolated(credentials=credentials)
def my_function():
    return "hello world"
```

[More information on credentials and authentication](/category/authentication).

### `keep_alive`

The `keep_alive` argument allows for optimization of the function execution process. It lets you specify the number of seconds that the isolated environment should be kept alive after a function execution. This means that subsequent calls to the same function can reuse the existing memory and compute resources if the environment is still alive, rather than provisioning new infrastructure. This can significantly reduce the time required to start up the runtime and the overall execution time of the isolated function.

By keeping the environment alive, fal-serverless minimizes the time spent on environment setup and initialization. This is especially useful for functions that are called frequently. The `keep_alive` feature makes it easier to run isolated functions at scale, as it helps to minimize the overhead associated with function execution.

```python
from fal_serverless import isolated

@isolated(keep_alive=20, requirements=["pyjokes"])
def get_joke():
    import pyjokes
    return pyjokes.get_joke()
```

In the example above, the isolated environment of `get_joke` will be kept alive for 20 seconds. If `get_joke` is called again within 20 seconds, it will reuse the isolated environment **and restart the `keep_alive` timer**.

The default value for `keep_alive` is 10 seconds.

## `cached` decorator

Functions with the `@cached` decorator get their output cached for improved performance. If the same cached function is called in an isolated environment and the environment has been kept alive since the last time the function was called, then the function is not executed; instead, the cached return value is returned. This can significantly reduce the time it takes to execute isolated functions and minimize the resources used.

```python
import time
from fal_serverless import isolated, cached

@cached
def my_cached_function():
    # Simulate a time-consuming calculation
    time.sleep(2)
    return "Hello, World!"

@isolated(keep_alive=10)
def my_isolated_function():
    result = my_cached_function()
    return f"The result is: {result}"

# Call the isolated function multiple times
result1 = my_isolated_function() # Takes more than 2 seconds
result2 = my_isolated_function() # Almost instant

print(result1) # Output: "The result is: Hello, World!"
print(result2) # Output: "The result is: Hello, World!"
```

## `sync_dir` Function

The `sync_dir` function allows you to upload local directories to the persistent `/data` directory.

### Syntax

```python
from fal_serverless import sync_dir

sync_dir(path_to_local_dir, remote_dir)
```

### Parameters

- `path_to_local_dir` (str): The local path to the directory you want to upload.
- `remote_dir` (str): The remote directory in which to store the uploaded files. This has to be an absolute path that starts with `/data`.

### Return

It returns a string with the remote directory location, for easy use in other isolated functions.

### Usage example

Here's an example of how to use the `sync_dir` function:

```python
from fal_serverless import sync_dir, isolated

# Upload a local directory to the persistent /data directory
remote_dir = sync_dir("path/to/local/dir", "/data/remote_dir")

# An isolated function to list the contents of the uploaded directory
@isolated()
def test():
    import os
    os.system(f"ls {remote_dir}")

# Execute the test function
test() # prints contents of the uploaded directory
```

In this example, the local directory specified by `path/to/local/dir` is uploaded to `/data/remote_dir` in the fal-serverless environment. The isolated function `test` then lists the contents of the uploaded directory.

### Notes

- The `sync_dir` function uses isolated functions to efficiently upload the directory contents in the background.
- Uploaded files and directories are persisted in the `/data` directory on the fal-serverless platform. This allows you to access them across different isolated functions and invocations.
- The `sync_dir` function should be called before the execution of any isolated functions that rely on the uploaded data.
diff --git a/docsite/docs/fal-serverless/scaling/_category_.yml b/docsite/docs/fal-serverless/scaling/_category_.yml
deleted file mode 100644
index 3b207c9a..00000000
--- a/docsite/docs/fal-serverless/scaling/_category_.yml
+++ /dev/null
@@ -1,7 +0,0 @@
label: "Scaling Your Workloads"
position: 5
collapsible: false
collapsed: false
link:
  type: generated-index
  title: Scaling
diff --git a/docsite/docs/fal-serverless/scaling/machine_types.md b/docsite/docs/fal-serverless/scaling/machine_types.md
deleted file mode 100644
index 0fa628b7..00000000
--- a/docsite/docs/fal-serverless/scaling/machine_types.md
+++ /dev/null
@@ -1,47 +0,0 @@
---
sidebar_position: 1
---

# Vertical - Machine Types and GPUs

You can specify the size of the machine that runs the target environment of an isolated function. This is done using the `machine_type` argument. The following options are available for the `machine_type` argument:

| Value    | Description                                         |
|----------|-----------------------------------------------------|
| `XS`     | 0.25 CPU cores, 256MB RAM (default)                 |
| `S`      | 0.50 CPU cores, 1GB RAM                             |
| `M`      | 2 CPU cores, 8GB RAM                                |
| `L`      | 4 CPU cores, 32GB RAM                               |
| `XL`     | 8 CPU cores, 128GB RAM                              |
| `GPU-T4` | 4 CPU cores, 26GB RAM, 1 GPU core (T4, 16GB VRAM)   |
| `GPU`    | 8 CPU cores, 64GB RAM, 1 GPU core (A100, 40GB VRAM) |

For example:

```python
from fal_serverless import isolated

@isolated(machine_type="GPU")
def my_function():
    ...

@isolated(machine_type="L")
def my_other_function():
    ...
```

By default, the `machine_type` is set to `XS`.

You can also use an isolated function to define a new one with a different machine type:

```python
my_function_S = my_function.on(machine_type="S")
```

In the above example, `my_function_S` is a new isolated function that has the same contents as `my_function`, but it will run on machine type `S`.

Both functions can be called:

```python
my_function() # executed on machine type `GPU`
my_function_S() # same as my_function but executed on machine type `S`
```

`my_function` is executed on machine type `GPU`. And `my_function_S`, which has the same logic as `my_function`, is now executed on machine type `S`.
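For the GPU machine types, a quick way to confirm that a function actually landed on a GPU machine is to shell out to `nvidia-smi` from inside it. This is just a sketch; it assumes the GPU environments ship with the standard NVIDIA tooling on the PATH:

```python
from fal_serverless import isolated

@isolated(machine_type="GPU-T4")
def check_gpu():
    import subprocess
    # Capture the driver/GPU report; assumes nvidia-smi is available in the GPU environment
    return subprocess.run(["nvidia-smi"], capture_output=True, text=True).stdout

print(check_gpu())
```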
diff --git a/docsite/docs/fal-serverless/scaling/managing_concurrency.md b/docsite/docs/fal-serverless/scaling/managing_concurrency.md
deleted file mode 100644
index 4159c3eb..00000000
--- a/docsite/docs/fal-serverless/scaling/managing_concurrency.md
+++ /dev/null
@@ -1,59 +0,0 @@
---
sidebar_position: 2
---

# Horizontal - Managing Concurrency

Isolated functions in Python, decorated with the `@isolated` decorator, have a convenient `submit` method that allows you to run the function as an asynchronous task. When you call the `submit` method, the isolated function returns an instance of the [`Future` class](https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future) from the [concurrent.futures](https://docs.python.org/3/library/concurrent.futures.html#module-concurrent.futures) module.

A `Future` represents the result of an asynchronous operation. You can use the `Future` object to wait for the operation to complete and retrieve the result. This makes it easy to run isolated functions in the background, freeing up the main thread to perform other tasks.

Here's an example that demonstrates how to use the `submit` method to run an isolated function as an asynchronous task:

```python
from fal_serverless import isolated

@isolated()
def my_function(x, y):
    return x + y

# Call the submit method to run the function as an asynchronous task
future = my_function.submit(2, 3)

# Wait for the task to complete and retrieve the result
result = future.result()

print(result) # Output: 5
```

In this example, we define an isolated function `my_function` that takes two arguments `x` and `y` and returns their sum. We then call the `submit` method to run the function as an asynchronous task, passing in the arguments 2 and 3. The `submit` method returns a `Future` object that represents the result of the asynchronous operation. Finally, we use the `result` method on the `Future` object to wait for the task to complete and retrieve the result, which is 5.

One of the benefits of using the `submit` method to run isolated functions as asynchronous tasks is that it allows you to take advantage of concurrency. By running multiple instances of an isolated function in parallel, you can perform tasks more quickly and efficiently.

Here's an example that demonstrates how to use the `submit` method to take advantage of concurrency:

```python
import time
from fal_serverless import isolated
from concurrent.futures import as_completed

@isolated()
def my_function(x):
    time.sleep(x)
    return x

# Call the submit method many times to run multiple instances of the function
futures = [my_function.submit(i) for i in range(10)]

# Wait for all the tasks to complete
start_time = time.time()
for future in as_completed(futures):
    result = future.result()
    print(f"Task {result} completed in {time.time() - start_time:.2f} seconds")
```

In this example, we define an isolated function `my_function` that takes one argument `x` and simulates a long-running task by sleeping for `x` seconds. We then use a list comprehension to call the `submit` method many times, passing in a different argument each time. We use the `as_completed` function to wait for all the tasks to complete and retrieve the results.

When you run this code, you'll see that the tasks complete in much less time than if you ran them sequentially. This is because the tasks run in parallel, each taking advantage of its own isolated environment.
diff --git a/docsite/docs/fal-serverless/secrets.md b/docsite/docs/fal-serverless/secrets.md
deleted file mode 100644
index 116f77e8..00000000
--- a/docsite/docs/fal-serverless/secrets.md
+++ /dev/null
@@ -1,52 +0,0 @@
---
sidebar_position: 9
---

# Secrets

`fal-serverless` offers a convenient way to manage sensitive information, such as API keys or database credentials, within your isolated functions. The secrets functionality enables you to store and access secrets as environment variables, ensuring that your sensitive data remains protected while being readily available when needed.

## Managing Secrets

### Setting Secrets

To store a secret, use the `fal-serverless secrets set` command followed by the secret name and its corresponding value:

```bash
fal-serverless secrets set SECRET_NAME SECRET_VALUE
```

This command securely saves the secret to your fal-serverless account, making it accessible within your isolated functions.

### Listing Secrets

To view a list of all stored secrets, use the `fal-serverless secrets list` command:

```bash
fal-serverless secrets list
```

This command displays a table containing the secret names and metadata, such as the creation and modification dates. Note that the secret values are not shown for security reasons.

### Deleting Secrets

To delete a secret, use the `fal-serverless secrets delete` command followed by the secret name:

```bash
fal-serverless secrets delete SECRET_NAME
```

This command removes the secret from your fal-serverless account, making it inaccessible within your isolated functions.

## Accessing Secrets within Isolated Functions

Secrets can be accessed within isolated functions as environment variables. To do this, simply import the `os` module and use the `os.environ` dictionary to retrieve the secret value by its name:

```python
from fal_serverless import isolated

@isolated()
def my_isolated_function():
    import os
    my_secret_value = os.environ['SECRET_NAME']
```

In this example, the `my_secret_value` variable will contain the value of the secret named `SECRET_NAME`. This allows you to securely use sensitive data within your isolated functions without exposing the data in your code.
diff --git a/docsite/docs/fal-serverless/storage_and_persistence.md b/docsite/docs/fal-serverless/storage_and_persistence.md
deleted file mode 100644
index be489bd9..00000000
--- a/docsite/docs/fal-serverless/storage_and_persistence.md
+++ /dev/null
@@ -1,82 +0,0 @@
---
sidebar_position: 6
---

# Storage and Persistence

The target environments of isolated functions have access to a directory `/data`. The `/data` directory is persistent and user-specific. It allows you to store and retrieve data across function invocations, enabling you to persist data between executions.

Accessing the `/data` directory is simple, as it is automatically mounted in the function's environment. You can read from and write to the `/data` directory just as you would with any other directory in your file system.

Here are two examples to demonstrate the use of the `/data` directory:

**Example 1: Storing User Preferences**

Let's say you have a function that generates personalized recommendations for users based on their preferences. The function can store the preferences in the `/data` directory so that they persist across invocations. The code would look something like this:

```python
from fal_serverless import isolated

@isolated()
def generate_recommendations(user_id, preferences):
    preferences_file = f"/data/user_{user_id}_preferences.txt"
    with open(preferences_file, "w") as f:
        f.write(preferences)
    # Generate recommendations based on the stored preferences
    # ...
```

**Example 2: Persisting Model Weights**

In machine learning, you may want to persist the model weights so that you don't have to download them every time you need to make a prediction. The `/data` directory provides an easy way to persist the model weights between function invocations.

Here's an example of how you might use the `/data` directory to store model weights in a deep learning scenario:

```python
import os
import tensorflow as tf
from fal_serverless import isolated

@isolated(requirements=["tensorflow"])
def train_and_predict(data, model_weights_file='/data/model_weights.h5'):
    model = create_model()
    if os.path.exists(model_weights_file):
        model.load_weights(model_weights_file)
    else:
        model.fit(data)
        model.save_weights(model_weights_file)
    return model.predict(data)

def create_model():
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
```

In this example, the function `train_and_predict` first checks if the model weights file `model_weights.h5` exists in the `/data` directory. If it does, the function loads the weights into the model. If not, the function trains the model and saves the weights to the `/data` directory. This way, on subsequent invocations, the function can simply load the weights from the `/data` directory, which is much faster than retraining the model from scratch.

## `sync_dir` Function

The `sync_dir` function allows you to easily upload local directories to the persistent `/data` directory. Here's an example of how to use the `sync_dir` function:

```python
from fal_serverless import sync_dir, isolated

# Upload a local directory to the persistent /data directory
sync_dir("path/to/local/dir", "/data/sync/remote_dir")

# An isolated function to list the contents of the uploaded directory
@isolated()
def test():
    import os
    os.system("ls /data/sync/remote_dir")

# Execute the test function
test() # prints contents of the uploaded directory
```

In this example, the local directory specified by `path/to/local/dir` is uploaded to `/data/sync/remote_dir` in the fal-serverless environment.
diff --git a/docsite/docs/fal-serverless/web_endpoints/_category_.yml b/docsite/docs/fal-serverless/web_endpoints/_category_.yml
deleted file mode 100644
index 0ec94d03..00000000
--- a/docsite/docs/fal-serverless/web_endpoints/_category_.yml
+++ /dev/null
@@ -1,7 +0,0 @@
label: "Web Endpoints"
position: 7
collapsible: false
collapsed: false
link:
  type: generated-index
  title: Web Endpoints
diff --git a/docsite/docs/fal-serverless/web_endpoints/queue_and_webhooks.md b/docsite/docs/fal-serverless/web_endpoints/queue_and_webhooks.md
deleted file mode 100644
index b6d8d388..00000000
--- a/docsite/docs/fal-serverless/web_endpoints/queue_and_webhooks.md
+++ /dev/null
@@ -1,103 +0,0 @@
# Queue and Webhooks

Once you deploy an endpoint to fal-serverless as an HTTP server, such as a Flask or FastAPI app, there are several ways to interact with it.

Consider the following deployed HTTP server:

```python
requirements = ["flask"]

@isolated(
    requirements=requirements,
    machine_type="GPU-T4",
    keep_alive=300,
    exposed_port=8080,
)
def app():
    from flask import Flask

    app = Flask("test")

    @app.route("/test", methods=["POST"])
    def test():
        return "OK", 200

    app.run(host="0.0.0.0", port=8080)
```

Register the application with the following command:

```
>> fal-serverless function serve app.py app --alias app

Registered a new revision for function 'app' (revision='39c4e168-414f-49f1-8160-f9f7a958e8cb').
URL: https://123-app.gateway.alpha.fal.ai
```

The application is now ready to receive HTTP requests, exactly as you configured it. The server responds when the response is ready.

```
curl -X POST https://123-app.gateway.alpha.fal.ai/test
```

# Queue

Alternatively, you may choose to add the request to our queue system. Upon doing so, you promptly receive a `request_id`, which you can then use to poll our system periodically until the response is ready for retrieval.

Using our queue system gives you more granular control for handling unexpected surges in traffic. It also lets you cancel requests if needed and gives you the observability to monitor your current position within the queue.

To add a request to the queue, simply insert the `/fal/queue/submit/` prefix in front of your endpoint's path.

For instance, to submit a request to the endpoint above and add it to the queue using curl, the command would look as follows:

```bash
curl -X POST https://123-app.gateway.alpha.fal.ai/fal/queue/submit/test
```

```python
import requests

response = requests.post("https://123-app.gateway.alpha.fal.ai/fal/queue/submit/test")
data = response.json()
request_id = data.get("request_id")
```

## Getting the status of a request

Once you have the request ID, you can use it to check on the request. This endpoint gives you information about your request's status and its position in the queue.

```python
import requests

response = requests.get(f"https://123-app.gateway.alpha.fal.ai/fal/queue/{request_id}/get")
data = response.json()
queue_position = data.get("queue_position")  # e.g. 5
status = data.get("status")  # e.g. "IN_PROGRESS"
```

## Cancelling a Request

If your request is still in the queue and not already being processed, you may cancel it.

```python
import requests

response = requests.put(f"https://123-app.gateway.alpha.fal.ai/fal/queue/{request_id}/cancel")
```

# Webhooks

Webhooks work in tandem with the queue system explained above; they are another way to interact with our queue. By providing us a webhook endpoint, you get notified when the request is done instead of having to poll for it.

In practice this is very similar to submitting something to the queue, but you pass an extra `fal_webhook` query parameter.

```bash
curl -X POST https://123-app.gateway.alpha.fal.ai/fal/queue/submit/test?fal_webhook=url_that_expects_the_hook
```

```python
import requests

response = requests.post("https://123-app.gateway.alpha.fal.ai/fal/queue/submit/test", params={"fal_webhook": "url_that_expects_the_hook"})
data = response.json()
request_id = data.get("request_id")
```

Once the request has made it through the queue and been processed, the webhook URL is called with the response from the application.
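What the receiving side looks like is up to you; any endpoint that accepts a POST will do. Here is a minimal sketch of a receiver (the route name, port, and payload handling are illustrative assumptions, and the delivered body is whatever your application returned):

```python
from flask import Flask, request

app = Flask("webhook-receiver")

# Hypothetical receiver route; its public URL is what you would pass as the fal_webhook parameter.
@app.route("/fal-hook", methods=["POST"])
def fal_hook():
    payload = request.get_json(silent=True)  # response forwarded from your fal-serverless app (shape assumed)
    print("Request completed:", payload)
    return "", 204

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=9090)
```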
diff --git a/docsite/docs/fal-serverless/web_endpoints/serving.md b/docsite/docs/fal-serverless/web_endpoints/serving.md
deleted file mode 100644
index e665dc52..00000000
--- a/docsite/docs/fal-serverless/web_endpoints/serving.md
+++ /dev/null
@@ -1,113 +0,0 @@
---
sidebar_position: 5
---

# Web Endpoints

Serving an `@isolated` function exposes the function through a webserver managed by `fal-serverless`.

The simplest way to serve a function is to mark it with the `@isolated` decorator's `serve=True` option and register it with the `fal-serverless` CLI.

To serve a function, follow these steps:

1. Mark the function you want to serve using the `@isolated` decorator with the `serve=True` option:

```python
@isolated(serve=True)
def call_text(text):
    return text
```

2. Use the `fal-serverless` CLI command with the following syntax:

```
fal-serverless function serve ./path/to/file call_text --alias call

>> Registered a new revision for function 'call' (revision='21847a72-93e6-4227-ae6f-56bf3a90142d').
>> URL: https://1714827-call.gateway.alpha.fal.ai
```

You'll receive a revision ID in the following format: `XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`. This is the revision ID of your function. Every time you call the `fal-serverless function serve` command, a new revision ID is generated. We keep the old revisions around, so you can still access them.

Serving a function with the `--alias` option will create a URL that includes the alias you specified instead of the revision ID. If you serve a new revision with the same alias, the URL will point to the most recent revision of the function.

Alternatively, you can call the `fal-serverless function serve` command without the `--alias` option. In that case, `fal-serverless` will create an anonymous function that is only accessible by its revision ID.

```
fal-serverless function serve ./path/to/file call_text

Registered anonymous function '37f8658e-b841-4b51-ab1a-92565f3a4c04'.
URL: https://1714827-37f8658e-b841-4b51-ab1a-92565f3a4c04.gateway.alpha.fal.ai
```

### Public URLs

By default, each registered function is private. In other words, it requires the caller to pass a FAL key ID and key secret either in the headers or as query params. If you wish to skip this validation, you can register your function as public. A public URL is open to the internet, and anyone who has access to the URL will be able to call it.

To expose a public URL, set the `--auth` option to `public`:

```
fal-serverless function serve ./path/to/file call_text --auth public
```

## Access Served Function via REST API

To access the served function, make a POST REST API request to the following URL:

```
https://<user-id>-<alias>.gateway.alpha.fal.ai
```

Replace `<user-id>` with your user ID (e.g., `123` if your GitHub ID is `github|123`) and `<alias>` with the alias or the revision ID you received earlier. Additionally, pass your FAL key ID and key secret as headers in the request. You can generate keys by following the instructions [here](https://docs.fal.ai/fal-serverless/authentication/env_var).

Here's an example of a cURL request to call the served function:

```bash
curl -X POST "https://123-d9ff88a9-6ae3-45cf-ab67-022e33e4418e.gateway.alpha.fal.ai" -H "Content-Type: application/json" -H "Authorization: Basic $FAL_KEY_ID:$FAL_KEY_SECRET" -d '{"str":"str to be returned"}'
```

## Expose Function Using Python Web Framework

You can also expose your function using a Python web framework, such as Flask or FastAPI. To do so, provide an `exposed_port` in the `@isolated` decorator instead of `serve`. This option gives you more flexibility to decide which web protocol to use, which ports to expose, and other details.

Here's an example using Flask:

```python
@isolated(requirements=["flask"], exposed_port=8080)
def flask_app():
    from flask import Flask, jsonify, request

    app = Flask(__name__)

    @app.route("/", methods=["POST"])
    def call_str():
        # Read the input string from the JSON request body
        text = request.json.get("str")
        return jsonify({"result": text})

    app.run(host="0.0.0.0", port=8080)
```

In this example, the Flask app is exposed on port 8080 and returns the input string as a JSON response.

## Access Web Endpoint Logs

Logs for web endpoint function calls can be accessed via the fal-serverless CLI.

```bash
fal-serverless function logs
```

By default, the above command prints the latest 100 log entries from all of your web endpoints. You can also specify an endpoint URL:

```bash
fal-serverless function logs --url $MY_ENDPOINT_URL
```

Here's how to specify how many lines should be printed:

```bash
fal-serverless function logs --url $MY_ENDPOINT_URL --lines 20
```

The above command prints the latest 20 lines of logs for the web endpoint deployed at `$MY_ENDPOINT_URL`.
diff --git a/docsite/docusaurus.config.js b/docsite/docusaurus.config.js
deleted file mode 100644
index c1336a2d..00000000
--- a/docsite/docusaurus.config.js
+++ /dev/null
@@ -1,123 +0,0 @@
// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion

const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');

/** @type {import('@docusaurus/types').Config} */
const config = {
  title: 'Features & Labels',
  tagline: 'fal: run python with dbt',
  url: 'https://docs.fal.ai',
  baseUrl: '/',
  onBrokenLinks: 'warn',
  onBrokenMarkdownLinks: 'warn',
  favicon: 'img/new-logo-no-text.png',
  organizationName: 'fal-ai', // Usually your GitHub org/user name.
  projectName: 'fal', // Usually your repo name.
- plugins: [require.resolve("@cmfcmf/docusaurus-search-local")], - presets: [ - [ - 'classic', - /** @type {import('@docusaurus/preset-classic').Options} */ - { - docs: { - routeBasePath: '/', - sidebarPath: require.resolve('./sidebars.js'), - editUrl: 'https://github.com/fal-ai/fal/tree/main/docsite', - }, - blog: { - showReadingTime: true, - editUrl: - 'https://github.com/fal-ai/fal/tree/main/docsite', - }, - theme: { - customCss: require.resolve('./src/css/custom.css'), - }, - gtag: { - trackingID: 'G-343YEVGVHQ', - anonymizeIP: true, - }, - }, - ], - ], - - themeConfig: - /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ - ({ - navbar: { - logo: { - alt: 'fal Logo', - src: 'img/new-logo.png', - }, - items: [ - { - type: 'docSidebar', - position: 'left', - sidebarId: 'serverless', - label: 'fal-serverless', - href: '/', - }, - { - type: 'docSidebar', - position: 'left', - sidebarId: 'adapter', - label: 'dbt-fal', - }, - { - to: 'https://blog.fal.ai', - label: 'Blog', - position: 'left' - }, - { - to: 'https://fal.ai', - label: 'Home', - position: 'right' - }, - { - href: 'https://github.com/fal-ai/fal', - label: 'GitHub', - position: 'right', - }, - ], - }, - footer: { - style: 'dark', - links: [ - { - title: 'Community', - items: [ - { - label: 'GitHub', - href: 'https://github.com/fal-ai', - }, - { - label: 'Discord', - href: 'https://discord.gg/Fyc9PwrccF', - }, - ], - }, - { - title: 'More', - items: [ - { - label: 'fal', - href: 'https://fal.ai', - }, - { - label: 'Blog', - href: 'https://blog.fal.ai', - }, - ], - }, - ], - copyright: `Copyright © ${new Date().getFullYear()} Features & Labels, Inc. Built with Docusaurus.`, - }, - prism: { - theme: lightCodeTheme, - darkTheme: darkCodeTheme, - }, - }), -}; - -module.exports = config; diff --git a/docsite/package-lock.json b/docsite/package-lock.json deleted file mode 100644 index dd47abca..00000000 --- a/docsite/package-lock.json +++ /dev/null @@ -1,11606 +0,0 @@ -{ - "name": "docsite", - "version": "0.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "docsite", - "version": "0.0.0", - "dependencies": { - "@cmfcmf/docusaurus-search-local": "^1.1.0", - "@docusaurus/core": "2.4.0", - "@docusaurus/plugin-google-gtag": "2.4.0", - "@docusaurus/preset-classic": "2.4.0", - "@mdx-js/react": "^1.6.22", - "clsx": "^1.2.1", - "prism-react-renderer": "^1.3.5", - "react": "^17.0.2", - "react-dom": "^17.0.2" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "2.4.0" - } - }, - "node_modules/@algolia/autocomplete-core": { - "version": "1.8.3", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-shared": "1.8.3" - } - }, - "node_modules/@algolia/autocomplete-js": { - "version": "1.8.3", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-core": "1.8.3", - "@algolia/autocomplete-preset-algolia": "1.8.3", - "@algolia/autocomplete-shared": "1.8.3", - "htm": "^3.1.1", - "preact": "^10.0.0" - }, - "peerDependencies": { - "@algolia/client-search": ">= 4.5.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@algolia/autocomplete-preset-algolia": { - "version": "1.8.3", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-shared": "1.8.3" - }, - "peerDependencies": { - "@algolia/client-search": ">= 4.9.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@algolia/autocomplete-shared": { - "version": "1.8.3", - "license": "MIT" - }, - "node_modules/@algolia/autocomplete-theme-classic": { - "version": "1.8.3", - "license": "MIT" - 
}, - "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/cache-common": "4.17.0" - } - }, - "node_modules/@algolia/cache-common": { - "version": "4.17.0", - "license": "MIT" - }, - "node_modules/@algolia/cache-in-memory": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/cache-common": "4.17.0" - } - }, - "node_modules/@algolia/client-account": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "4.17.0", - "@algolia/client-search": "4.17.0", - "@algolia/transporter": "4.17.0" - } - }, - "node_modules/@algolia/client-analytics": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "4.17.0", - "@algolia/client-search": "4.17.0", - "@algolia/requester-common": "4.17.0", - "@algolia/transporter": "4.17.0" - } - }, - "node_modules/@algolia/client-common": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/requester-common": "4.17.0", - "@algolia/transporter": "4.17.0" - } - }, - "node_modules/@algolia/client-personalization": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "4.17.0", - "@algolia/requester-common": "4.17.0", - "@algolia/transporter": "4.17.0" - } - }, - "node_modules/@algolia/client-search": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "4.17.0", - "@algolia/requester-common": "4.17.0", - "@algolia/transporter": "4.17.0" - } - }, - "node_modules/@algolia/events": { - "version": "4.0.1", - "license": "MIT" - }, - "node_modules/@algolia/logger-common": { - "version": "4.17.0", - "license": "MIT" - }, - "node_modules/@algolia/logger-console": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/logger-common": "4.17.0" - } - }, - "node_modules/@algolia/requester-browser-xhr": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/requester-common": "4.17.0" - } - }, - "node_modules/@algolia/requester-common": { - "version": "4.17.0", - "license": "MIT" - }, - "node_modules/@algolia/requester-node-http": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/requester-common": "4.17.0" - } - }, - "node_modules/@algolia/transporter": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/cache-common": "4.17.0", - "@algolia/logger-common": "4.17.0", - "@algolia/requester-common": "4.17.0" - } - }, - "node_modules/@ampproject/remapping": { - "version": "2.2.1", - "license": "Apache-2.0", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/highlight": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/compat-data": { - "version": "7.21.4", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.21.4", - "@babel/generator": "^7.21.4", - "@babel/helper-compilation-targets": "^7.21.4", - "@babel/helper-module-transforms": "^7.21.2", - "@babel/helpers": "^7.21.0", - "@babel/parser": "^7.21.4", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.4", - "@babel/types": "^7.21.4", - 
"convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.2", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/generator": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.21.4", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", - "jsesc": "^2.5.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.18.9", - "license": "MIT", - "dependencies": { - "@babel/helper-explode-assignable-expression": "^7.18.6", - "@babel/types": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-compilation-targets": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.21.4", - "@babel/helper-validator-option": "^7.21.0", - "browserslist": "^4.21.3", - "lru-cache": "^5.1.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-member-expression-to-functions": "^7.21.0", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-replace-supers": "^7.20.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", - "@babel/helper-split-export-declaration": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "regexpu-core": "^5.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.3.3", - "license": "MIT", - "dependencies": { - "@babel/helper-compilation-targets": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - }, - "peerDependencies": { - "@babel/core": "^7.4.0-0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-environment-visitor": { - "version": "7.18.9", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-explode-assignable-expression": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - 
"node_modules/@babel/helper-function-name": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/template": "^7.20.7", - "@babel/types": "^7.21.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.21.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.21.4" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-transforms": { - "version": "7.21.2", - "license": "MIT", - "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-simple-access": "^7.20.2", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/helper-validator-identifier": "^7.19.1", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.2", - "@babel/types": "^7.21.2" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-plugin-utils": { - "version": "7.20.2", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.18.9", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-wrap-function": "^7.18.9", - "@babel/types": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-member-expression-to-functions": "^7.20.7", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.20.7", - "@babel/types": "^7.20.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.20.2", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.20.0", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.20.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-string-parser": { - "version": "7.19.4", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.19.1", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-option": { - "version": "7.21.0", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.20.5", - "license": "MIT", 
- "dependencies": { - "@babel/helper-function-name": "^7.19.0", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.5", - "@babel/types": "^7.20.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helpers": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.0", - "@babel/types": "^7.21.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight/node_modules/ansi-styles": { - "version": "3.2.1", - "license": "MIT", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "license": "MIT", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - "license": "MIT", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "license": "MIT" - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "license": "MIT", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "license": "MIT", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/parser": { - "version": "7.21.4", - "license": "MIT", - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", - "@babel/plugin-proposal-optional-chaining": "^7.20.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-async-generator-functions": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-remap-async-to-generator": "^7.18.9", - "@babel/plugin-syntax-async-generators": "^7.8.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" 
- } - }, - "node_modules/@babel/plugin-proposal-class-static-block": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0" - } - }, - "node_modules/@babel/plugin-proposal-dynamic-import": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-export-namespace-from": { - "version": "7.18.9", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-json-strings": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-json-strings": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-numeric-separator": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.20.5", - "@babel/helper-compilation-targets": "^7.20.7", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.20.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-optional-catch-binding": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-optional-chaining": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - 
"@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-private-methods": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-assertions": { - "version": "7.20.0", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.19.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - 
} - }, - "node_modules/@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-top-level-await": { - "version": "7.14.5", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-remap-async-to-generator": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-compilation-targets": "^7.20.7", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-replace-supers": "^7.20.7", - "@babel/helper-split-export-declaration": "^7.18.6", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - 
} - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/template": "^7.20.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.21.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.18.9", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-function-name": { - "version": "7.18.9", - "license": "MIT", - "dependencies": { - "@babel/helper-compilation-targets": "^7.18.9", - "@babel/helper-function-name": "^7.18.9", - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-literals": { - "version": "7.18.9", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.20.11", - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.20.11", - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.21.2", - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.21.2", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-simple-access": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.20.11", - "license": "MIT", - "dependencies": { - "@babel/helper-hoist-variables": 
"^7.18.6", - "@babel/helper-module-transforms": "^7.20.11", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-identifier": "^7.19.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.20.5", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.20.5", - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-replace-supers": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.21.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-constant-elements": { - "version": "7.21.3", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-display-name": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-jsx": "^7.18.6", - "@babel/types": "^7.21.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx-development": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/plugin-transform-react-jsx": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-pure-annotations": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": 
">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.20.5", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "regenerator-transform": "^0.15.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.21.4", - "@babel/helper-plugin-utils": "^7.20.2", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.18.9", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.18.9", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typescript": { - "version": "7.21.3", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-typescript": "^7.20.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.18.10", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - 
"@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.21.4", - "@babel/helper-compilation-targets": "^7.21.4", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-option": "^7.21.0", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.18.6", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.20.7", - "@babel/plugin-proposal-async-generator-functions": "^7.20.7", - "@babel/plugin-proposal-class-properties": "^7.18.6", - "@babel/plugin-proposal-class-static-block": "^7.21.0", - "@babel/plugin-proposal-dynamic-import": "^7.18.6", - "@babel/plugin-proposal-export-namespace-from": "^7.18.9", - "@babel/plugin-proposal-json-strings": "^7.18.6", - "@babel/plugin-proposal-logical-assignment-operators": "^7.20.7", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.18.6", - "@babel/plugin-proposal-numeric-separator": "^7.18.6", - "@babel/plugin-proposal-object-rest-spread": "^7.20.7", - "@babel/plugin-proposal-optional-catch-binding": "^7.18.6", - "@babel/plugin-proposal-optional-chaining": "^7.21.0", - "@babel/plugin-proposal-private-methods": "^7.18.6", - "@babel/plugin-proposal-private-property-in-object": "^7.21.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.18.6", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.20.0", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-transform-arrow-functions": "^7.20.7", - "@babel/plugin-transform-async-to-generator": "^7.20.7", - "@babel/plugin-transform-block-scoped-functions": "^7.18.6", - "@babel/plugin-transform-block-scoping": "^7.21.0", - "@babel/plugin-transform-classes": "^7.21.0", - "@babel/plugin-transform-computed-properties": "^7.20.7", - "@babel/plugin-transform-destructuring": "^7.21.3", - "@babel/plugin-transform-dotall-regex": "^7.18.6", - "@babel/plugin-transform-duplicate-keys": "^7.18.9", - "@babel/plugin-transform-exponentiation-operator": "^7.18.6", - "@babel/plugin-transform-for-of": "^7.21.0", - "@babel/plugin-transform-function-name": "^7.18.9", - "@babel/plugin-transform-literals": "^7.18.9", - "@babel/plugin-transform-member-expression-literals": "^7.18.6", - "@babel/plugin-transform-modules-amd": "^7.20.11", - "@babel/plugin-transform-modules-commonjs": "^7.21.2", - "@babel/plugin-transform-modules-systemjs": "^7.20.11", - "@babel/plugin-transform-modules-umd": "^7.18.6", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.20.5", - "@babel/plugin-transform-new-target": "^7.18.6", - 
"@babel/plugin-transform-object-super": "^7.18.6", - "@babel/plugin-transform-parameters": "^7.21.3", - "@babel/plugin-transform-property-literals": "^7.18.6", - "@babel/plugin-transform-regenerator": "^7.20.5", - "@babel/plugin-transform-reserved-words": "^7.18.6", - "@babel/plugin-transform-shorthand-properties": "^7.18.6", - "@babel/plugin-transform-spread": "^7.20.7", - "@babel/plugin-transform-sticky-regex": "^7.18.6", - "@babel/plugin-transform-template-literals": "^7.18.9", - "@babel/plugin-transform-typeof-symbol": "^7.18.9", - "@babel/plugin-transform-unicode-escapes": "^7.18.10", - "@babel/plugin-transform-unicode-regex": "^7.18.6", - "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.21.4", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "core-js-compat": "^3.25.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-env/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.5", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-react": { - "version": "7.18.6", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-transform-react-display-name": "^7.18.6", - "@babel/plugin-transform-react-jsx": "^7.18.6", - "@babel/plugin-transform-react-jsx-development": "^7.18.6", - "@babel/plugin-transform-react-pure-annotations": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-typescript": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-option": "^7.21.0", - "@babel/plugin-syntax-jsx": "^7.21.4", - "@babel/plugin-transform-modules-commonjs": "^7.21.2", - "@babel/plugin-transform-typescript": "^7.21.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/regjsgen": { - "version": "0.8.0", - "license": "MIT" - }, - "node_modules/@babel/runtime": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "regenerator-runtime": "^0.13.11" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/runtime-corejs3": { - "version": "7.21.0", - "license": "MIT", - "dependencies": { - "core-js-pure": "^3.25.1", - "regenerator-runtime": "^0.13.11" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/template": { - "version": "7.20.7", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.20.7", - "@babel/types": "^7.20.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/traverse": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.21.4", - "@babel/generator": "^7.21.4", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-hoist-variables": "^7.18.6", - 
"@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.21.4", - "@babel/types": "^7.21.4", - "debug": "^4.1.0", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/types": { - "version": "7.21.4", - "license": "MIT", - "dependencies": { - "@babel/helper-string-parser": "^7.19.4", - "@babel/helper-validator-identifier": "^7.19.1", - "to-fast-properties": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@cmfcmf/docusaurus-search-local": { - "version": "1.1.0", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-js": "^1.8.2", - "@algolia/autocomplete-theme-classic": "^1.8.2", - "@algolia/client-search": "^4.12.0", - "algoliasearch": "^4.12.0", - "cheerio": "^1.0.0-rc.9", - "clsx": "^1.1.1", - "lunr-languages": "^1.4.0", - "mark.js": "^8.11.1" - }, - "peerDependencies": { - "@docusaurus/core": "^2.0.0", - "nodejieba": "^2.5.0" - }, - "peerDependenciesMeta": { - "nodejieba": { - "optional": true - } - } - }, - "node_modules/@colors/colors": { - "version": "1.5.0", - "license": "MIT", - "optional": true, - "engines": { - "node": ">=0.1.90" - } - }, - "node_modules/@cspotcode/source-map-support": { - "version": "0.8.1", - "license": "MIT", - "peer": true, - "dependencies": { - "@jridgewell/trace-mapping": "0.3.9" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { - "version": "0.3.9", - "license": "MIT", - "peer": true, - "dependencies": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" - } - }, - "node_modules/@discoveryjs/json-ext": { - "version": "0.5.7", - "license": "MIT", - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/@docsearch/css": { - "version": "3.3.3", - "license": "MIT" - }, - "node_modules/@docsearch/react": { - "version": "3.3.3", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-core": "1.7.4", - "@algolia/autocomplete-preset-algolia": "1.7.4", - "@docsearch/css": "3.3.3", - "algoliasearch": "^4.0.0" - }, - "peerDependencies": { - "@types/react": ">= 16.8.0 < 19.0.0", - "react": ">= 16.8.0 < 19.0.0", - "react-dom": ">= 16.8.0 < 19.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "react": { - "optional": true - }, - "react-dom": { - "optional": true - } - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/autocomplete-core": { - "version": "1.7.4", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-shared": "1.7.4" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/autocomplete-preset-algolia": { - "version": "1.7.4", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-shared": "1.7.4" - }, - "peerDependencies": { - "@algolia/client-search": ">= 4.9.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/autocomplete-shared": { - "version": "1.7.4", - "license": "MIT" - }, - "node_modules/@docusaurus/core": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@babel/core": "^7.18.6", - "@babel/generator": "^7.18.7", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.18.6", - "@babel/preset-env": "^7.18.6", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.18.6", - "@babel/runtime": "^7.18.6", - "@babel/runtime-corejs3": "^7.18.6", - "@babel/traverse": "^7.18.8", - "@docusaurus/cssnano-preset": "2.4.0", - "@docusaurus/logger": 
"2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "@slorber/static-site-generator-webpack-plugin": "^4.0.7", - "@svgr/webpack": "^6.2.1", - "autoprefixer": "^10.4.7", - "babel-loader": "^8.2.5", - "babel-plugin-dynamic-import-node": "^2.3.3", - "boxen": "^6.2.1", - "chalk": "^4.1.2", - "chokidar": "^3.5.3", - "clean-css": "^5.3.0", - "cli-table3": "^0.6.2", - "combine-promises": "^1.1.0", - "commander": "^5.1.0", - "copy-webpack-plugin": "^11.0.0", - "core-js": "^3.23.3", - "css-loader": "^6.7.1", - "css-minimizer-webpack-plugin": "^4.0.0", - "cssnano": "^5.1.12", - "del": "^6.1.1", - "detect-port": "^1.3.0", - "escape-html": "^1.0.3", - "eta": "^2.0.0", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "html-minifier-terser": "^6.1.0", - "html-tags": "^3.2.0", - "html-webpack-plugin": "^5.5.0", - "import-fresh": "^3.3.0", - "leven": "^3.1.0", - "lodash": "^4.17.21", - "mini-css-extract-plugin": "^2.6.1", - "postcss": "^8.4.14", - "postcss-loader": "^7.0.0", - "prompts": "^2.4.2", - "react-dev-utils": "^12.0.1", - "react-helmet-async": "^1.3.0", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", - "react-loadable-ssr-addon-v5-slorber": "^1.0.1", - "react-router": "^5.3.3", - "react-router-config": "^5.1.1", - "react-router-dom": "^5.3.3", - "rtl-detect": "^1.0.4", - "semver": "^7.3.7", - "serve-handler": "^6.1.3", - "shelljs": "^0.8.5", - "terser-webpack-plugin": "^5.3.3", - "tslib": "^2.4.0", - "update-notifier": "^5.1.0", - "url-loader": "^4.1.1", - "wait-on": "^6.0.1", - "webpack": "^5.73.0", - "webpack-bundle-analyzer": "^4.5.0", - "webpack-dev-server": "^4.9.3", - "webpack-merge": "^5.8.0", - "webpackbar": "^5.0.2" - }, - "bin": { - "docusaurus": "bin/docusaurus.mjs" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/cssnano-preset": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "cssnano-preset-advanced": "^5.3.8", - "postcss": "^8.4.14", - "postcss-sort-media-queries": "^4.2.1", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - } - }, - "node_modules/@docusaurus/logger": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "chalk": "^4.1.2", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - } - }, - "node_modules/@docusaurus/mdx-loader": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.18.8", - "@babel/traverse": "^7.18.8", - "@docusaurus/logger": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@mdx-js/mdx": "^1.6.22", - "escape-html": "^1.0.3", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "image-size": "^1.0.1", - "mdast-util-to-string": "^2.0.0", - "remark-emoji": "^2.2.0", - "stringify-object": "^3.3.0", - "tslib": "^2.4.0", - "unified": "^9.2.2", - "unist-util-visit": "^2.0.3", - "url-loader": "^4.1.1", - "webpack": "^5.73.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/module-type-aliases": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "2.4.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "@types/react-router-dom": "*", - 
"react-helmet-async": "*", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" - }, - "peerDependencies": { - "react": "*", - "react-dom": "*" - } - }, - "node_modules/@docusaurus/plugin-content-blog": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "cheerio": "^1.0.0-rc.12", - "feed": "^4.2.2", - "fs-extra": "^10.1.0", - "lodash": "^4.17.21", - "reading-time": "^1.5.0", - "tslib": "^2.4.0", - "unist-util-visit": "^2.0.3", - "utility-types": "^3.10.0", - "webpack": "^5.73.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/plugin-content-docs": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "@types/react-router-config": "^5.0.6", - "combine-promises": "^1.1.0", - "fs-extra": "^10.1.0", - "import-fresh": "^3.3.0", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "tslib": "^2.4.0", - "utility-types": "^3.10.0", - "webpack": "^5.73.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/plugin-content-pages": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "fs-extra": "^10.1.0", - "tslib": "^2.4.0", - "webpack": "^5.73.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/plugin-debug": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "fs-extra": "^10.1.0", - "react-json-view": "^1.21.3", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-analytics": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-gtag": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/types": "2.4.0", 
- "@docusaurus/utils-validation": "2.4.0", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/plugin-sitemap": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "fs-extra": "^10.1.0", - "sitemap": "^7.1.1", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/preset-classic": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/plugin-debug": "2.4.0", - "@docusaurus/plugin-google-analytics": "2.4.0", - "@docusaurus/plugin-google-gtag": "2.4.0", - "@docusaurus/plugin-google-tag-manager": "2.4.0", - "@docusaurus/plugin-sitemap": "2.4.0", - "@docusaurus/theme-classic": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-search-algolia": "2.4.0", - "@docusaurus/types": "2.4.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/react-loadable": { - "version": "5.5.2", - "license": "MIT", - "dependencies": { - "@types/react": "*", - "prop-types": "^15.6.2" - }, - "peerDependencies": { - "react": "*" - } - }, - "node_modules/@docusaurus/theme-classic": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "2.4.0", - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - "@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-translations": "2.4.0", - "@docusaurus/types": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "@mdx-js/react": "^1.6.22", - "clsx": "^1.2.1", - "copy-text-to-clipboard": "^3.0.1", - "infima": "0.2.0-alpha.43", - "lodash": "^4.17.21", - "nprogress": "^0.2.0", - "postcss": "^8.4.14", - "prism-react-renderer": "^1.3.5", - "prismjs": "^1.28.0", - "react-router-dom": "^5.3.3", - "rtlcss": "^3.5.0", - "tslib": "^2.4.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/theme-common": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/mdx-loader": "2.4.0", - "@docusaurus/module-type-aliases": "2.4.0", - "@docusaurus/plugin-content-blog": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/plugin-content-pages": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-common": "2.4.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "clsx": "^1.2.1", - "parse-numeric-range": "^1.3.0", - "prism-react-renderer": "^1.3.5", - "tslib": "^2.4.0", - "use-sync-external-store": "^1.2.0", - "utility-types": "^3.10.0" - }, - "engines": { - 
"node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docsearch/react": "^3.1.1", - "@docusaurus/core": "2.4.0", - "@docusaurus/logger": "2.4.0", - "@docusaurus/plugin-content-docs": "2.4.0", - "@docusaurus/theme-common": "2.4.0", - "@docusaurus/theme-translations": "2.4.0", - "@docusaurus/utils": "2.4.0", - "@docusaurus/utils-validation": "2.4.0", - "algoliasearch": "^4.13.1", - "algoliasearch-helper": "^3.10.0", - "clsx": "^1.2.1", - "eta": "^2.0.0", - "fs-extra": "^10.1.0", - "lodash": "^4.17.21", - "tslib": "^2.4.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/theme-translations": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "fs-extra": "^10.1.0", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - } - }, - "node_modules/@docusaurus/types": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*", - "commander": "^5.1.0", - "joi": "^17.6.0", - "react-helmet-async": "^1.3.0", - "utility-types": "^3.10.0", - "webpack": "^5.73.0", - "webpack-merge": "^5.8.0" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/utils": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/logger": "2.4.0", - "@svgr/webpack": "^6.2.1", - "escape-string-regexp": "^4.0.0", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "github-slugger": "^1.4.0", - "globby": "^11.1.0", - "gray-matter": "^4.0.3", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "micromatch": "^4.0.5", - "resolve-pathname": "^3.0.0", - "shelljs": "^0.8.5", - "tslib": "^2.4.0", - "url-loader": "^4.1.1", - "webpack": "^5.73.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "@docusaurus/types": "*" - }, - "peerDependenciesMeta": { - "@docusaurus/types": { - "optional": true - } - } - }, - "node_modules/@docusaurus/utils-common": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "@docusaurus/types": "*" - }, - "peerDependenciesMeta": { - "@docusaurus/types": { - "optional": true - } - } - }, - "node_modules/@docusaurus/utils-validation": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "@docusaurus/logger": "2.4.0", - "@docusaurus/utils": "2.4.0", - "joi": "^17.6.0", - "js-yaml": "^4.1.0", - "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" - } - }, - "node_modules/@hapi/hoek": { - "version": "9.3.0", - "license": "BSD-3-Clause" - }, - "node_modules/@hapi/topo": { - "version": "5.1.0", - "license": "BSD-3-Clause", - "dependencies": { - "@hapi/hoek": "^9.0.0" - } - }, - "node_modules/@jest/schemas": { - "version": "29.4.3", - "license": "MIT", - "dependencies": { - "@sinclair/typebox": "^0.25.16" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/types": { - "version": "29.5.0", - "license": "MIT", - "dependencies": { - "@jest/schemas": "^29.4.3", - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^17.0.8", - "chalk": "^4.0.0" - }, - 
"engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.3", - "license": "MIT", - "dependencies": { - "@jridgewell/set-array": "^1.0.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.0", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/source-map": { - "version": "0.3.3", - "license": "MIT", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" - } - }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "license": "MIT" - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.18", - "license": "MIT", - "dependencies": { - "@jridgewell/resolve-uri": "3.1.0", - "@jridgewell/sourcemap-codec": "1.4.14" - } - }, - "node_modules/@jridgewell/trace-mapping/node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "license": "MIT" - }, - "node_modules/@leichtgewicht/ip-codec": { - "version": "2.0.4", - "license": "MIT" - }, - "node_modules/@mdx-js/mdx": { - "version": "1.6.22", - "license": "MIT", - "dependencies": { - "@babel/core": "7.12.9", - "@babel/plugin-syntax-jsx": "7.12.1", - "@babel/plugin-syntax-object-rest-spread": "7.8.3", - "@mdx-js/util": "1.6.22", - "babel-plugin-apply-mdx-type-prop": "1.6.22", - "babel-plugin-extract-import-names": "1.6.22", - "camelcase-css": "2.0.1", - "detab": "2.0.4", - "hast-util-raw": "6.0.1", - "lodash.uniq": "4.5.0", - "mdast-util-to-hast": "10.0.1", - "remark-footnotes": "2.0.0", - "remark-mdx": "1.6.22", - "remark-parse": "8.0.3", - "remark-squeeze-paragraphs": "4.0.0", - "style-to-object": "0.3.0", - "unified": "9.2.0", - "unist-builder": "2.0.3", - "unist-util-visit": "2.0.3" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/@mdx-js/mdx/node_modules/@babel/core": { - "version": "7.12.9", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.10.4", - "@babel/generator": "^7.12.5", - "@babel/helper-module-transforms": "^7.12.1", - "@babel/helpers": "^7.12.5", - "@babel/parser": "^7.12.7", - "@babel/template": "^7.12.7", - "@babel/traverse": "^7.12.9", - "@babel/types": "^7.12.7", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.1", - "json5": "^2.1.2", - "lodash": "^4.17.19", - "resolve": "^1.3.2", - "semver": "^5.4.1", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@mdx-js/mdx/node_modules/@babel/plugin-syntax-jsx": { - "version": "7.12.1", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@mdx-js/mdx/node_modules/semver": { - "version": "5.7.1", - "license": "ISC", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/@mdx-js/mdx/node_modules/source-map": { - "version": "0.5.7", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@mdx-js/mdx/node_modules/unified": { - "version": "9.2.0", - "license": "MIT", - "dependencies": { - "bail": "^1.0.0", - "extend": "^3.0.0", - 
"is-buffer": "^2.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/@mdx-js/react": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-1.6.22.tgz", - "integrity": "sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg==", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - }, - "peerDependencies": { - "react": "^16.13.1 || ^17.0.0" - } - }, - "node_modules/@mdx-js/util": { - "version": "1.6.22", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "license": "MIT", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@polka/url": { - "version": "1.0.0-next.21", - "license": "MIT" - }, - "node_modules/@sideway/address": { - "version": "4.1.4", - "license": "BSD-3-Clause", - "dependencies": { - "@hapi/hoek": "^9.0.0" - } - }, - "node_modules/@sideway/formula": { - "version": "3.0.1", - "license": "BSD-3-Clause" - }, - "node_modules/@sideway/pinpoint": { - "version": "2.0.0", - "license": "BSD-3-Clause" - }, - "node_modules/@sinclair/typebox": { - "version": "0.25.24", - "license": "MIT" - }, - "node_modules/@sindresorhus/is": { - "version": "0.14.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/@slorber/static-site-generator-webpack-plugin": { - "version": "4.0.7", - "license": "MIT", - "dependencies": { - "eval": "^0.1.8", - "p-map": "^4.0.0", - "webpack-sources": "^3.2.2" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/@svgr/babel-plugin-add-jsx-attribute": { - "version": "6.5.1", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { - "version": "7.0.0", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { - "version": "7.0.0", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { - "version": "6.5.1", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-svg-dynamic-title": { - "version": "6.5.1", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - 
}, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-svg-em-dimensions": { - "version": "6.5.1", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-transform-react-native-svg": { - "version": "6.5.1", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-transform-svg-component": { - "version": "6.5.1", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-preset": { - "version": "6.5.1", - "license": "MIT", - "dependencies": { - "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", - "@svgr/babel-plugin-remove-jsx-attribute": "*", - "@svgr/babel-plugin-remove-jsx-empty-expression": "*", - "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", - "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", - "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", - "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", - "@svgr/babel-plugin-transform-svg-component": "^6.5.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/core": { - "version": "6.5.1", - "license": "MIT", - "dependencies": { - "@babel/core": "^7.19.6", - "@svgr/babel-preset": "^6.5.1", - "@svgr/plugin-jsx": "^6.5.1", - "camelcase": "^6.2.0", - "cosmiconfig": "^7.0.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/hast-util-to-babel-ast": { - "version": "6.5.1", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.20.0", - "entities": "^4.4.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/plugin-jsx": { - "version": "6.5.1", - "license": "MIT", - "dependencies": { - "@babel/core": "^7.19.6", - "@svgr/babel-preset": "^6.5.1", - "@svgr/hast-util-to-babel-ast": "^6.5.1", - "svg-parser": "^2.0.4" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@svgr/core": "^6.0.0" - } - }, - "node_modules/@svgr/plugin-svgo": { - "version": "6.5.1", - "license": "MIT", - "dependencies": { - "cosmiconfig": "^7.0.1", - "deepmerge": "^4.2.2", - "svgo": "^2.8.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@svgr/core": "*" - } - }, - "node_modules/@svgr/webpack": { - "version": "6.5.1", - "license": "MIT", - "dependencies": { - "@babel/core": "^7.19.6", - "@babel/plugin-transform-react-constant-elements": "^7.18.12", - "@babel/preset-env": "^7.19.4", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.18.6", - "@svgr/core": "^6.5.1", - "@svgr/plugin-jsx": "^6.5.1", - "@svgr/plugin-svgo": "^6.5.1" - }, - "engines": { - "node": ">=10" - }, - 
"funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "1.1.2", - "license": "MIT", - "dependencies": { - "defer-to-connect": "^1.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@trysound/sax": { - "version": "0.2.0", - "license": "ISC", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/@tsconfig/node10": { - "version": "1.0.9", - "license": "MIT", - "peer": true - }, - "node_modules/@tsconfig/node12": { - "version": "1.0.11", - "license": "MIT", - "peer": true - }, - "node_modules/@tsconfig/node14": { - "version": "1.0.3", - "license": "MIT", - "peer": true - }, - "node_modules/@tsconfig/node16": { - "version": "1.0.3", - "license": "MIT", - "peer": true - }, - "node_modules/@types/body-parser": { - "version": "1.19.2", - "license": "MIT", - "dependencies": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "node_modules/@types/bonjour": { - "version": "3.5.10", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect": { - "version": "3.4.35", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect-history-api-fallback": { - "version": "1.3.5", - "license": "MIT", - "dependencies": { - "@types/express-serve-static-core": "*", - "@types/node": "*" - } - }, - "node_modules/@types/eslint": { - "version": "8.37.0", - "license": "MIT", - "dependencies": { - "@types/estree": "*", - "@types/json-schema": "*" - } - }, - "node_modules/@types/eslint-scope": { - "version": "3.7.4", - "license": "MIT", - "dependencies": { - "@types/eslint": "*", - "@types/estree": "*" - } - }, - "node_modules/@types/estree": { - "version": "0.0.51", - "license": "MIT" - }, - "node_modules/@types/express": { - "version": "4.17.17", - "license": "MIT", - "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.33", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "node_modules/@types/express-serve-static-core": { - "version": "4.17.33", - "license": "MIT", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*" - } - }, - "node_modules/@types/hast": { - "version": "2.3.4", - "license": "MIT", - "dependencies": { - "@types/unist": "*" - } - }, - "node_modules/@types/history": { - "version": "4.7.11", - "license": "MIT" - }, - "node_modules/@types/html-minifier-terser": { - "version": "6.1.0", - "license": "MIT" - }, - "node_modules/@types/http-proxy": { - "version": "1.17.10", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/istanbul-lib-coverage": { - "version": "2.0.4", - "license": "MIT" - }, - "node_modules/@types/istanbul-lib-report": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "@types/istanbul-lib-coverage": "*" - } - }, - "node_modules/@types/istanbul-reports": { - "version": "3.0.1", - "license": "MIT", - "dependencies": { - "@types/istanbul-lib-report": "*" - } - }, - "node_modules/@types/json-schema": { - "version": "7.0.11", - "license": "MIT" - }, - "node_modules/@types/mdast": { - "version": "3.0.11", - "license": "MIT", - "dependencies": { - "@types/unist": "*" - } - }, - "node_modules/@types/mime": { - "version": "3.0.1", - "license": "MIT" - }, - "node_modules/@types/node": { - "version": "18.15.11", - "license": "MIT" - }, - "node_modules/@types/parse-json": { - "version": "4.0.0", - "license": "MIT" - }, - "node_modules/@types/parse5": { - 
"version": "5.0.3", - "license": "MIT" - }, - "node_modules/@types/prop-types": { - "version": "15.7.5", - "license": "MIT" - }, - "node_modules/@types/qs": { - "version": "6.9.7", - "license": "MIT" - }, - "node_modules/@types/range-parser": { - "version": "1.2.4", - "license": "MIT" - }, - "node_modules/@types/react": { - "version": "18.0.35", - "license": "MIT", - "dependencies": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - }, - "node_modules/@types/react-router": { - "version": "5.1.20", - "license": "MIT", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*" - } - }, - "node_modules/@types/react-router-config": { - "version": "5.0.7", - "license": "MIT", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "^5.1.0" - } - }, - "node_modules/@types/react-router-dom": { - "version": "5.3.3", - "license": "MIT", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "*" - } - }, - "node_modules/@types/retry": { - "version": "0.12.0", - "license": "MIT" - }, - "node_modules/@types/sax": { - "version": "1.2.4", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/scheduler": { - "version": "0.16.3", - "license": "MIT" - }, - "node_modules/@types/serve-index": { - "version": "1.9.1", - "license": "MIT", - "dependencies": { - "@types/express": "*" - } - }, - "node_modules/@types/serve-static": { - "version": "1.15.1", - "license": "MIT", - "dependencies": { - "@types/mime": "*", - "@types/node": "*" - } - }, - "node_modules/@types/sockjs": { - "version": "0.3.33", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/unist": { - "version": "2.0.6", - "license": "MIT" - }, - "node_modules/@types/ws": { - "version": "8.5.4", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/yargs": { - "version": "17.0.24", - "license": "MIT", - "dependencies": { - "@types/yargs-parser": "*" - } - }, - "node_modules/@types/yargs-parser": { - "version": "21.0.0", - "license": "MIT" - }, - "node_modules/@webassemblyjs/ast": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1" - } - }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.1", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.1", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.11.1", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.1", - "@webassemblyjs/helper-api-error": "1.11.1", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.1", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1" - } - }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.11.1", - "license": 
"Apache-2.0", - "dependencies": { - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/utf8": { - "version": "1.11.1", - "license": "MIT" - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/helper-wasm-section": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1", - "@webassemblyjs/wasm-opt": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1", - "@webassemblyjs/wast-printer": "1.11.1" - } - }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/ieee754": "1.11.1", - "@webassemblyjs/leb128": "1.11.1", - "@webassemblyjs/utf8": "1.11.1" - } - }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1" - } - }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-api-error": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/ieee754": "1.11.1", - "@webassemblyjs/leb128": "1.11.1", - "@webassemblyjs/utf8": "1.11.1" - } - }, - "node_modules/@webassemblyjs/wast-printer": { - "version": "1.11.1", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@xtuc/ieee754": { - "version": "1.2.0", - "license": "BSD-3-Clause" - }, - "node_modules/@xtuc/long": { - "version": "4.2.2", - "license": "Apache-2.0" - }, - "node_modules/accepts": { - "version": "1.3.8", - "license": "MIT", - "dependencies": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/accepts/node_modules/mime-db": { - "version": "1.52.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/accepts/node_modules/mime-types": { - "version": "2.1.35", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "8.8.2", - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-import-assertions": { - "version": "1.8.0", - "license": "MIT", - "peerDependencies": { - "acorn": "^8" - } - }, - "node_modules/acorn-walk": { - "version": "8.2.0", - "license": "MIT", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/address": { - "version": "1.2.2", - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/aggregate-error": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats": { - "version": "2.1.1", - "license": "MIT", - "dependencies": { - "ajv": 
"^8.0.0" - }, - "peerDependencies": { - "ajv": "^8.0.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - "node_modules/ajv-formats/node_modules/ajv": { - "version": "8.12.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats/node_modules/json-schema-traverse": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "license": "MIT", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/algoliasearch": { - "version": "4.17.0", - "license": "MIT", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.17.0", - "@algolia/cache-common": "4.17.0", - "@algolia/cache-in-memory": "4.17.0", - "@algolia/client-account": "4.17.0", - "@algolia/client-analytics": "4.17.0", - "@algolia/client-common": "4.17.0", - "@algolia/client-personalization": "4.17.0", - "@algolia/client-search": "4.17.0", - "@algolia/logger-common": "4.17.0", - "@algolia/logger-console": "4.17.0", - "@algolia/requester-browser-xhr": "4.17.0", - "@algolia/requester-common": "4.17.0", - "@algolia/requester-node-http": "4.17.0", - "@algolia/transporter": "4.17.0" - } - }, - "node_modules/algoliasearch-helper": { - "version": "3.12.0", - "license": "MIT", - "dependencies": { - "@algolia/events": "^4.0.1" - }, - "peerDependencies": { - "algoliasearch": ">= 3.1 < 6" - } - }, - "node_modules/ansi-align": { - "version": "3.0.1", - "license": "ISC", - "dependencies": { - "string-width": "^4.1.0" - } - }, - "node_modules/ansi-align/node_modules/emoji-regex": { - "version": "8.0.0", - "license": "MIT" - }, - "node_modules/ansi-align/node_modules/string-width": { - "version": "4.2.3", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-html-community": { - "version": "0.0.8", - "engines": [ - "node >= 0.8.0" - ], - "license": "Apache-2.0", - "bin": { - "ansi-html": "bin/ansi-html" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/anymatch": { - "version": "3.1.3", - "license": "ISC", - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/arg": { - "version": "5.0.2", - "license": "MIT" - }, - "node_modules/argparse": { - "version": "2.0.1", - "license": "Python-2.0" - }, - "node_modules/array-flatten": { - "version": "2.1.2", - "license": "MIT" - }, - "node_modules/array-union": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/asap": { - "version": "2.0.6", - "license": "MIT" - }, - "node_modules/at-least-node": { - "version": "1.0.0", - "license": "ISC", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/autoprefixer": { - "version": "10.4.14", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": 
"https://tidelift.com/funding/github/npm/autoprefixer" - } - ], - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.5", - "caniuse-lite": "^1.0.30001464", - "fraction.js": "^4.2.0", - "normalize-range": "^0.1.2", - "picocolors": "^1.0.0", - "postcss-value-parser": "^4.2.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - }, - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/axios": { - "version": "0.25.0", - "license": "MIT", - "dependencies": { - "follow-redirects": "^1.14.7" - } - }, - "node_modules/babel-loader": { - "version": "8.3.0", - "license": "MIT", - "dependencies": { - "find-cache-dir": "^3.3.1", - "loader-utils": "^2.0.0", - "make-dir": "^3.1.0", - "schema-utils": "^2.6.5" - }, - "engines": { - "node": ">= 8.9" - }, - "peerDependencies": { - "@babel/core": "^7.0.0", - "webpack": ">=2" - } - }, - "node_modules/babel-plugin-apply-mdx-type-prop": { - "version": "1.6.22", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "7.10.4", - "@mdx-js/util": "1.6.22" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - }, - "peerDependencies": { - "@babel/core": "^7.11.6" - } - }, - "node_modules/babel-plugin-apply-mdx-type-prop/node_modules/@babel/helper-plugin-utils": { - "version": "7.10.4", - "license": "MIT" - }, - "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "license": "MIT", - "dependencies": { - "object.assign": "^4.1.0" - } - }, - "node_modules/babel-plugin-extract-import-names": { - "version": "1.6.22", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "7.10.4" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/babel-plugin-extract-import-names/node_modules/@babel/helper-plugin-utils": { - "version": "7.10.4", - "license": "MIT" - }, - "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.3.3", - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.17.7", - "@babel/helper-define-polyfill-provider": "^0.3.3", - "semver": "^6.1.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.6.0", - "license": "MIT", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.3", - "core-js-compat": "^3.25.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.4.1", - "license": "MIT", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.3" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/bail": { - "version": "1.0.5", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "license": "MIT" - }, - "node_modules/base16": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/batch": { - "version": "0.6.1", - "license": "MIT" - }, - "node_modules/big.js": { - "version": "5.2.2", - "license": "MIT", - "engines": { - "node": "*" - } - }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/body-parser": { - "version": 
"1.20.1", - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "on-finished": "2.4.1", - "qs": "6.11.0", - "raw-body": "2.5.1", - "type-is": "~1.6.18", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/body-parser/node_modules/bytes": { - "version": "3.1.2", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/debug": { - "version": "2.6.9", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/body-parser/node_modules/ms": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/bonjour-service": { - "version": "1.1.1", - "license": "MIT", - "dependencies": { - "array-flatten": "^2.1.2", - "dns-equal": "^1.0.0", - "fast-deep-equal": "^3.1.3", - "multicast-dns": "^7.2.5" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "license": "ISC" - }, - "node_modules/boxen": { - "version": "6.2.1", - "license": "MIT", - "dependencies": { - "ansi-align": "^3.0.1", - "camelcase": "^6.2.0", - "chalk": "^4.1.2", - "cli-boxes": "^3.0.0", - "string-width": "^5.0.1", - "type-fest": "^2.5.0", - "widest-line": "^4.0.1", - "wrap-ansi": "^8.0.1" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "3.0.2", - "license": "MIT", - "dependencies": { - "fill-range": "^7.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/browserslist": { - "version": "4.21.5", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - } - ], - "license": "MIT", - "dependencies": { - "caniuse-lite": "^1.0.30001449", - "electron-to-chromium": "^1.4.284", - "node-releases": "^2.0.8", - "update-browserslist-db": "^1.0.10" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/buffer-from": { - "version": "1.1.2", - "license": "MIT" - }, - "node_modules/bytes": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cacheable-request": { - "version": "6.1.0", - "license": "MIT", - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/get-stream": { - "version": "5.2.0", - "license": "MIT", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cacheable-request/node_modules/lowercase-keys": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/normalize-url": { - "version": "4.5.1", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind": { - "version": "1.0.2", - "license": 
"MIT", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/callsites": { - "version": "3.1.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/camel-case": { - "version": "4.1.2", - "license": "MIT", - "dependencies": { - "pascal-case": "^3.1.2", - "tslib": "^2.0.3" - } - }, - "node_modules/camelcase": { - "version": "6.3.0", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/camelcase-css": { - "version": "2.0.1", - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/caniuse-api": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, - "node_modules/caniuse-lite": { - "version": "1.0.30001478", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/caniuse-lite" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "CC-BY-4.0" - }, - "node_modules/ccount": { - "version": "1.1.0", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/chalk": { - "version": "4.1.2", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/character-entities": { - "version": "1.2.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/character-entities-legacy": { - "version": "1.1.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/character-reference-invalid": { - "version": "1.1.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/cheerio": { - "version": "1.0.0-rc.12", - "license": "MIT", - "dependencies": { - "cheerio-select": "^2.1.0", - "dom-serializer": "^2.0.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "htmlparser2": "^8.0.1", - "parse5": "^7.0.0", - "parse5-htmlparser2-tree-adapter": "^7.0.0" - }, - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/cheeriojs/cheerio?sponsor=1" - } - }, - "node_modules/cheerio-select": { - "version": "2.1.0", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-select": "^5.1.0", - "css-what": "^6.1.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/chokidar": { - "version": "3.5.3", - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], - "license": "MIT", - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, - "engines": { - "node": ">= 8.10.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" - } - }, - "node_modules/chrome-trace-event": { - "version": "1.0.3", - "license": 
"MIT", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/ci-info": { - "version": "3.8.0", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - } - ], - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/clean-css": { - "version": "5.3.2", - "license": "MIT", - "dependencies": { - "source-map": "~0.6.0" - }, - "engines": { - "node": ">= 10.0" - } - }, - "node_modules/clean-stack": { - "version": "2.2.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/cli-boxes": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-table3": { - "version": "0.6.3", - "license": "MIT", - "dependencies": { - "string-width": "^4.2.0" - }, - "engines": { - "node": "10.* || >= 12.*" - }, - "optionalDependencies": { - "@colors/colors": "1.5.0" - } - }, - "node_modules/cli-table3/node_modules/emoji-regex": { - "version": "8.0.0", - "license": "MIT" - }, - "node_modules/cli-table3/node_modules/string-width": { - "version": "4.2.3", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/clone-deep": { - "version": "4.0.1", - "license": "MIT", - "dependencies": { - "is-plain-object": "^2.0.4", - "kind-of": "^6.0.2", - "shallow-clone": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/clone-response": { - "version": "1.0.3", - "license": "MIT", - "dependencies": { - "mimic-response": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/clsx": { - "version": "1.2.1", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/collapse-white-space": { - "version": "1.0.6", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "license": "MIT", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "license": "MIT" - }, - "node_modules/colord": { - "version": "2.9.3", - "license": "MIT" - }, - "node_modules/colorette": { - "version": "2.0.19", - "license": "MIT" - }, - "node_modules/combine-promises": { - "version": "1.1.0", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/comma-separated-tokens": { - "version": "1.0.8", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/commander": { - "version": "5.1.0", - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/commondir": { - "version": "1.0.1", - "license": "MIT" - }, - "node_modules/compressible": { - "version": "2.0.18", - "license": "MIT", - "dependencies": { - "mime-db": ">= 1.43.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compressible/node_modules/mime-db": { - "version": "1.52.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compression": { - "version": "1.7.4", - "license": "MIT", - "dependencies": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - "debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - 
"node_modules/compression/node_modules/debug": { - "version": "2.6.9", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/compression/node_modules/ms": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/compression/node_modules/safe-buffer": { - "version": "5.1.2", - "license": "MIT" - }, - "node_modules/concat-map": { - "version": "0.0.1", - "license": "MIT" - }, - "node_modules/configstore": { - "version": "5.0.1", - "license": "BSD-2-Clause", - "dependencies": { - "dot-prop": "^5.2.0", - "graceful-fs": "^4.1.2", - "make-dir": "^3.0.0", - "unique-string": "^2.0.0", - "write-file-atomic": "^3.0.0", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/connect-history-api-fallback": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/consola": { - "version": "2.15.3", - "license": "MIT" - }, - "node_modules/content-disposition": { - "version": "0.5.2", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.5", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/convert-source-map": { - "version": "1.9.0", - "license": "MIT" - }, - "node_modules/cookie": { - "version": "0.5.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "license": "MIT" - }, - "node_modules/copy-text-to-clipboard": { - "version": "3.1.0", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/copy-webpack-plugin": { - "version": "11.0.0", - "license": "MIT", - "dependencies": { - "fast-glob": "^3.2.11", - "glob-parent": "^6.0.1", - "globby": "^13.1.1", - "normalize-path": "^3.0.0", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/ajv": { - "version": "8.12.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/copy-webpack-plugin/node_modules/ajv-keywords": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/copy-webpack-plugin/node_modules/glob-parent": { - "version": "6.0.2", - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby": { - "version": "13.1.4", - "license": "MIT", - "dependencies": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.11", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/copy-webpack-plugin/node_modules/json-schema-traverse": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/copy-webpack-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": 
"^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/copy-webpack-plugin/node_modules/slash": { - "version": "4.0.0", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/core-js": { - "version": "3.30.0", - "hasInstallScript": true, - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-compat": { - "version": "3.30.0", - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.5" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-pure": { - "version": "3.30.0", - "hasInstallScript": true, - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-util-is": { - "version": "1.0.3", - "license": "MIT" - }, - "node_modules/cosmiconfig": { - "version": "7.1.0", - "license": "MIT", - "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/cosmiconfig-typescript-loader": { - "version": "4.3.0", - "license": "MIT", - "engines": { - "node": ">=12", - "npm": ">=6" - }, - "peerDependencies": { - "@types/node": "*", - "cosmiconfig": ">=7", - "ts-node": ">=10", - "typescript": ">=3" - } - }, - "node_modules/create-require": { - "version": "1.1.1", - "license": "MIT", - "peer": true - }, - "node_modules/cross-fetch": { - "version": "3.1.5", - "license": "MIT", - "dependencies": { - "node-fetch": "2.6.7" - } - }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/crypto-random-string": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/css-declaration-sorter": { - "version": "6.4.0", - "license": "ISC", - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.0.9" - } - }, - "node_modules/css-loader": { - "version": "6.7.3", - "license": "MIT", - "dependencies": { - "icss-utils": "^5.1.0", - "postcss": "^8.4.19", - "postcss-modules-extract-imports": "^3.0.0", - "postcss-modules-local-by-default": "^4.0.0", - "postcss-modules-scope": "^3.0.0", - "postcss-modules-values": "^4.0.0", - "postcss-value-parser": "^4.2.0", - "semver": "^7.3.8" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/css-minimizer-webpack-plugin": { - "version": "4.2.2", - "license": "MIT", - "dependencies": { - "cssnano": "^5.1.8", - "jest-worker": "^29.1.2", - "postcss": "^8.4.17", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "@parcel/css": { - "optional": true - }, - "@swc/css": { - 
"optional": true - }, - "clean-css": { - "optional": true - }, - "csso": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "lightningcss": { - "optional": true - } - } - }, - "node_modules/css-minimizer-webpack-plugin/node_modules/ajv": { - "version": "8.12.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/css-minimizer-webpack-plugin/node_modules/ajv-keywords": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/css-minimizer-webpack-plugin/node_modules/json-schema-traverse": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/css-select": { - "version": "5.1.0", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.1.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css-tree": { - "version": "1.1.3", - "license": "MIT", - "dependencies": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/css-what": { - "version": "6.1.0", - "license": "BSD-2-Clause", - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/cssesc": { - "version": "3.0.0", - "license": "MIT", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano": { - "version": "5.1.15", - "license": "MIT", - "dependencies": { - "cssnano-preset-default": "^5.2.14", - "lilconfig": "^2.0.3", - "yaml": "^1.10.2" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/cssnano" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/cssnano-preset-advanced": { - "version": "5.3.10", - "license": "MIT", - "dependencies": { - "autoprefixer": "^10.4.12", - "cssnano-preset-default": "^5.2.14", - "postcss-discard-unused": "^5.1.0", - "postcss-merge-idents": "^5.1.1", - "postcss-reduce-idents": "^5.2.0", - "postcss-zindex": "^5.1.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/cssnano-preset-default": { - "version": "5.2.14", - "license": "MIT", - "dependencies": { - "css-declaration-sorter": "^6.3.1", - "cssnano-utils": "^3.1.0", - "postcss-calc": "^8.2.3", - "postcss-colormin": "^5.3.1", - "postcss-convert-values": "^5.1.3", - "postcss-discard-comments": "^5.1.2", - "postcss-discard-duplicates": "^5.1.0", - "postcss-discard-empty": "^5.1.1", - "postcss-discard-overridden": "^5.1.0", - "postcss-merge-longhand": "^5.1.7", - "postcss-merge-rules": "^5.1.4", - "postcss-minify-font-values": "^5.1.0", - "postcss-minify-gradients": "^5.1.1", - "postcss-minify-params": "^5.1.4", - 
"postcss-minify-selectors": "^5.2.1", - "postcss-normalize-charset": "^5.1.0", - "postcss-normalize-display-values": "^5.1.0", - "postcss-normalize-positions": "^5.1.1", - "postcss-normalize-repeat-style": "^5.1.1", - "postcss-normalize-string": "^5.1.0", - "postcss-normalize-timing-functions": "^5.1.0", - "postcss-normalize-unicode": "^5.1.1", - "postcss-normalize-url": "^5.1.0", - "postcss-normalize-whitespace": "^5.1.1", - "postcss-ordered-values": "^5.1.3", - "postcss-reduce-initial": "^5.1.2", - "postcss-reduce-transforms": "^5.1.0", - "postcss-svgo": "^5.1.0", - "postcss-unique-selectors": "^5.1.1" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/cssnano-utils": { - "version": "3.1.0", - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/csso": { - "version": "4.2.0", - "license": "MIT", - "dependencies": { - "css-tree": "^1.1.2" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csstype": { - "version": "3.1.2", - "license": "MIT" - }, - "node_modules/debug": { - "version": "4.3.4", - "license": "MIT", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/decompress-response": { - "version": "3.3.0", - "license": "MIT", - "dependencies": { - "mimic-response": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "license": "MIT", - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/deepmerge": { - "version": "4.3.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/default-gateway": { - "version": "6.0.3", - "license": "BSD-2-Clause", - "dependencies": { - "execa": "^5.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/defer-to-connect": { - "version": "1.1.3", - "license": "MIT" - }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/define-properties": { - "version": "1.2.0", - "license": "MIT", - "dependencies": { - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/del": { - "version": "6.1.1", - "license": "MIT", - "dependencies": { - "globby": "^11.0.1", - "graceful-fs": "^4.2.4", - "is-glob": "^4.0.1", - "is-path-cwd": "^2.2.0", - "is-path-inside": "^3.0.2", - "p-map": "^4.0.0", - "rimraf": "^3.0.2", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/depd": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/destroy": { - "version": "1.2.0", - "license": "MIT", - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/detab": { - "version": "2.0.4", - "license": "MIT", - "dependencies": { - "repeat-string": "^1.5.4" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/detect-node": { - "version": "2.1.0", - "license": "MIT" - }, - "node_modules/detect-port": { - "version": "1.5.1", - "license": "MIT", - "dependencies": { - "address": "^1.0.1", - "debug": "4" - }, - "bin": { - "detect": 
"bin/detect-port.js", - "detect-port": "bin/detect-port.js" - } - }, - "node_modules/detect-port-alt": { - "version": "1.1.6", - "license": "MIT", - "dependencies": { - "address": "^1.0.1", - "debug": "^2.6.0" - }, - "bin": { - "detect": "bin/detect-port", - "detect-port": "bin/detect-port" - }, - "engines": { - "node": ">= 4.2.1" - } - }, - "node_modules/detect-port-alt/node_modules/debug": { - "version": "2.6.9", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/detect-port-alt/node_modules/ms": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/diff": { - "version": "4.0.2", - "license": "BSD-3-Clause", - "peer": true, - "engines": { - "node": ">=0.3.1" - } - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "license": "MIT", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dns-equal": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/dns-packet": { - "version": "5.5.0", - "license": "MIT", - "dependencies": { - "@leichtgewicht/ip-codec": "^2.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/dom-converter": { - "version": "0.2.0", - "license": "MIT", - "dependencies": { - "utila": "~0.4" - } - }, - "node_modules/dom-serializer": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/domelementtype": { - "version": "2.3.0", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "BSD-2-Clause" - }, - "node_modules/domhandler": { - "version": "5.0.3", - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.3.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/domutils": { - "version": "3.0.1", - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/dot-case": { - "version": "3.0.4", - "license": "MIT", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/dot-prop": { - "version": "5.3.0", - "license": "MIT", - "dependencies": { - "is-obj": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dot-prop/node_modules/is-obj": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/duplexer": { - "version": "0.1.2", - "license": "MIT" - }, - "node_modules/duplexer3": { - "version": "0.1.5", - "license": "BSD-3-Clause" - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "license": "MIT" - }, - "node_modules/ee-first": { - "version": "1.1.1", - "license": "MIT" - }, - "node_modules/electron-to-chromium": { - "version": "1.4.359", - "license": "ISC" - }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "license": "MIT" - }, - "node_modules/emojis-list": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/emoticon": { - "version": "3.2.0", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - 
"node_modules/end-of-stream": { - "version": "1.4.4", - "license": "MIT", - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/enhanced-resolve": { - "version": "5.12.0", - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/entities": { - "version": "4.4.0", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "license": "MIT", - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-module-lexer": { - "version": "0.9.3", - "license": "MIT" - }, - "node_modules/escalade": { - "version": "3.1.1", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "2.1.1", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "license": "MIT" - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint-scope": { - "version": "5.1.1", - "license": "BSD-2-Clause", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "license": "BSD-2-Clause", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "license": "BSD-2-Clause", - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.3.0", - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "4.3.0", - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esutils": { - "version": "2.0.3", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eta": { - "version": "2.0.1", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - }, - "funding": { - "url": "https://github.com/eta-dev/eta?sponsor=1" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/eval": { - "version": "0.1.8", - "dependencies": { - "@types/node": "*", - "require-like": ">= 0.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "license": "MIT" - }, - "node_modules/events": { - "version": "3.3.0", - "license": "MIT", - "engines": { - "node": ">=0.8.x" - } - }, - "node_modules/execa": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/execa/node_modules/get-stream": { - "version": "6.0.1", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/express": { - 
"version": "4.18.2", - "license": "MIT", - "dependencies": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "1.20.1", - "content-disposition": "0.5.4", - "content-type": "~1.0.4", - "cookie": "0.5.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "2.0.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "1.2.0", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.7", - "qs": "6.11.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/array-flatten": { - "version": "1.1.1", - "license": "MIT" - }, - "node_modules/express/node_modules/content-disposition": { - "version": "0.5.4", - "license": "MIT", - "dependencies": { - "safe-buffer": "5.2.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/express/node_modules/debug": { - "version": "2.6.9", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/express/node_modules/ms": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.7", - "license": "MIT" - }, - "node_modules/express/node_modules/range-parser": { - "version": "1.2.1", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "license": "MIT" - }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "license": "MIT", - "dependencies": { - "is-extendable": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "license": "MIT" - }, - "node_modules/fast-glob": { - "version": "3.2.12", - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "license": "MIT" - }, - "node_modules/fast-url-parser": { - "version": "1.1.3", - "license": "MIT", - "dependencies": { - "punycode": "^1.3.2" - } - }, - "node_modules/fastq": { - "version": "1.15.0", - "license": "ISC", - "dependencies": { - "reusify": "^1.0.4" - } - }, - "node_modules/faye-websocket": { - "version": "0.11.4", - "license": "Apache-2.0", - "dependencies": { - "websocket-driver": ">=0.5.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/fbemitter": { - "version": "3.0.0", - "license": "BSD-3-Clause", - "dependencies": { - "fbjs": "^3.0.0" - } - }, - "node_modules/fbjs": { - "version": "3.0.4", - "license": "MIT", - "dependencies": { - "cross-fetch": "^3.1.5", - "fbjs-css-vars": "^1.0.0", - "loose-envify": "^1.0.0", - "object-assign": "^4.1.0", - "promise": "^7.1.1", - "setimmediate": "^1.0.5", - "ua-parser-js": "^0.7.30" - } - }, - "node_modules/fbjs-css-vars": { - "version": "1.0.2", - "license": "MIT" - }, - "node_modules/feed": { - "version": "4.2.2", - "license": "MIT", - "dependencies": { - "xml-js": "^1.6.11" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/file-loader": { - "version": "6.2.0", - "license": "MIT", - "dependencies": { - 
"loader-utils": "^2.0.0", - "schema-utils": "^3.0.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/file-loader/node_modules/schema-utils": { - "version": "3.1.1", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/filesize": { - "version": "8.0.7", - "license": "BSD-3-Clause", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/fill-range": { - "version": "7.0.1", - "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/finalhandler": { - "version": "1.2.0", - "license": "MIT", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "statuses": "2.0.1", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/finalhandler/node_modules/debug": { - "version": "2.6.9", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/finalhandler/node_modules/ms": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/find-cache-dir": { - "version": "3.3.2", - "license": "MIT", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/avajs/find-cache-dir?sponsor=1" - } - }, - "node_modules/find-up": { - "version": "4.1.0", - "license": "MIT", - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/flux": { - "version": "4.0.4", - "license": "BSD-3-Clause", - "dependencies": { - "fbemitter": "^3.0.0", - "fbjs": "^3.0.1" - }, - "peerDependencies": { - "react": "^15.0.2 || ^16.0.0 || ^17.0.0" - } - }, - "node_modules/follow-redirects": { - "version": "1.15.2", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "license": "MIT", - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/fork-ts-checker-webpack-plugin": { - "version": "6.5.3", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.8.3", - "@types/json-schema": "^7.0.5", - "chalk": "^4.1.0", - "chokidar": "^3.4.2", - "cosmiconfig": "^6.0.0", - "deepmerge": "^4.2.2", - "fs-extra": "^9.0.0", - "glob": "^7.1.6", - "memfs": "^3.1.2", - "minimatch": "^3.0.4", - "schema-utils": "2.7.0", - "semver": "^7.3.2", - "tapable": "^1.0.0" - }, - "engines": { - "node": ">=10", - "yarn": ">=1.0.0" - }, - "peerDependencies": { - "eslint": ">= 6", - "typescript": ">= 2.7", - "vue-template-compiler": "*", - "webpack": ">= 4" - }, - "peerDependenciesMeta": { - "eslint": { - "optional": true - }, - "vue-template-compiler": { - "optional": true - } - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { - "version": "6.0.0", - "license": "MIT", - "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.1.0", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.7.2" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { - "version": "9.1.0", - "license": "MIT", - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { - "version": "2.7.0", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.4", - "ajv": "^6.12.2", - "ajv-keywords": "^3.4.1" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { - "version": "1.1.3", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/forwarded": { - "version": "0.2.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fraction.js": { - "version": "4.2.0", - "license": "MIT", - "engines": { - "node": "*" - }, - "funding": { - "type": "patreon", - "url": "https://www.patreon.com/infusion" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fs-extra": { - "version": "10.1.0", - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/fs-monkey": { - "version": "1.0.3", - "license": "Unlicense" - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "license": "ISC" - }, - "node_modules/function-bind": { - "version": "1.1.1", - "license": "MIT" - }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/get-intrinsic": { - "version": "1.2.0", - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-own-enumerable-property-symbols": { - "version": "3.0.2", - "license": "ISC" - }, - "node_modules/get-stream": { - "version": "4.1.0", - "license": "MIT", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/github-slugger": { - "version": "1.5.0", - "license": "ISC" - }, - "node_modules/glob": { - "version": "7.2.3", - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/glob-parent": { - "version": "5.1.2", - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/glob-to-regexp": { - "version": "0.4.1", - "license": "BSD-2-Clause" - }, - "node_modules/global-dirs": { - "version": "3.0.1", - "license": "MIT", - "dependencies": { - "ini": "2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/global-dirs/node_modules/ini": { - "version": "2.0.0", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/global-modules": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "global-prefix": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/global-prefix": { - 
"version": "3.0.0", - "license": "MIT", - "dependencies": { - "ini": "^1.3.5", - "kind-of": "^6.0.2", - "which": "^1.3.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/global-prefix/node_modules/which": { - "version": "1.3.1", - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, - "node_modules/globals": { - "version": "11.12.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/globby": { - "version": "11.1.0", - "license": "MIT", - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/got": { - "version": "9.6.0", - "license": "MIT", - "dependencies": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "license": "ISC" - }, - "node_modules/gray-matter": { - "version": "4.0.3", - "license": "MIT", - "dependencies": { - "js-yaml": "^3.13.1", - "kind-of": "^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/gray-matter/node_modules/argparse": { - "version": "1.0.10", - "license": "MIT", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/gray-matter/node_modules/js-yaml": { - "version": "3.14.1", - "license": "MIT", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/gzip-size": { - "version": "6.0.0", - "license": "MIT", - "dependencies": { - "duplexer": "^0.1.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/handle-thing": { - "version": "2.0.1", - "license": "MIT" - }, - "node_modules/has": { - "version": "1.0.3", - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-flag": { - "version": "4.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/has-property-descriptors": { - "version": "1.0.0", - "license": "MIT", - "dependencies": { - "get-intrinsic": "^1.1.1" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-yarn": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/hast-to-hyperscript": { - "version": "9.0.1", - "license": "MIT", - "dependencies": { - "@types/unist": "^2.0.3", - "comma-separated-tokens": "^1.0.0", - "property-information": "^5.3.0", - "space-separated-tokens": "^1.0.0", - "style-to-object": "^0.3.0", - "unist-util-is": "^4.0.0", - "web-namespaces": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-from-parse5": { - 
"version": "6.0.1", - "license": "MIT", - "dependencies": { - "@types/parse5": "^5.0.0", - "hastscript": "^6.0.0", - "property-information": "^5.0.0", - "vfile": "^4.0.0", - "vfile-location": "^3.2.0", - "web-namespaces": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-parse-selector": { - "version": "2.2.5", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-raw": { - "version": "6.0.1", - "license": "MIT", - "dependencies": { - "@types/hast": "^2.0.0", - "hast-util-from-parse5": "^6.0.0", - "hast-util-to-parse5": "^6.0.0", - "html-void-elements": "^1.0.0", - "parse5": "^6.0.0", - "unist-util-position": "^3.0.0", - "vfile": "^4.0.0", - "web-namespaces": "^1.0.0", - "xtend": "^4.0.0", - "zwitch": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-raw/node_modules/parse5": { - "version": "6.0.1", - "license": "MIT" - }, - "node_modules/hast-util-to-parse5": { - "version": "6.0.0", - "license": "MIT", - "dependencies": { - "hast-to-hyperscript": "^9.0.0", - "property-information": "^5.0.0", - "web-namespaces": "^1.0.0", - "xtend": "^4.0.0", - "zwitch": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hastscript": { - "version": "6.0.0", - "license": "MIT", - "dependencies": { - "@types/hast": "^2.0.0", - "comma-separated-tokens": "^1.0.0", - "hast-util-parse-selector": "^2.0.0", - "property-information": "^5.0.0", - "space-separated-tokens": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/he": { - "version": "1.2.0", - "license": "MIT", - "bin": { - "he": "bin/he" - } - }, - "node_modules/history": { - "version": "4.10.1", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.1.2", - "loose-envify": "^1.2.0", - "resolve-pathname": "^3.0.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0", - "value-equal": "^1.0.1" - } - }, - "node_modules/hoist-non-react-statics": { - "version": "3.3.2", - "license": "BSD-3-Clause", - "dependencies": { - "react-is": "^16.7.0" - } - }, - "node_modules/hpack.js": { - "version": "2.1.6", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - } - }, - "node_modules/hpack.js/node_modules/isarray": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/hpack.js/node_modules/readable-stream": { - "version": "2.3.8", - "license": "MIT", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/hpack.js/node_modules/safe-buffer": { - "version": "5.1.2", - "license": "MIT" - }, - "node_modules/hpack.js/node_modules/string_decoder": { - "version": "1.1.1", - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/htm": { - "version": "3.1.1", - "license": "Apache-2.0" - }, - "node_modules/html-entities": { - "version": "2.3.3", - "license": "MIT" - }, - "node_modules/html-minifier-terser": { - "version": "6.1.0", - "license": "MIT", - "dependencies": { - "camel-case": "^4.1.2", - "clean-css": "^5.2.2", - "commander": "^8.3.0", - 
"he": "^1.2.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.10.0" - }, - "bin": { - "html-minifier-terser": "cli.js" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/html-minifier-terser/node_modules/commander": { - "version": "8.3.0", - "license": "MIT", - "engines": { - "node": ">= 12" - } - }, - "node_modules/html-tags": { - "version": "3.3.1", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/html-void-elements": { - "version": "1.0.5", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/html-webpack-plugin": { - "version": "5.5.0", - "license": "MIT", - "dependencies": { - "@types/html-minifier-terser": "^6.0.0", - "html-minifier-terser": "^6.0.2", - "lodash": "^4.17.21", - "pretty-error": "^4.0.0", - "tapable": "^2.0.0" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/html-webpack-plugin" - }, - "peerDependencies": { - "webpack": "^5.20.0" - } - }, - "node_modules/htmlparser2": { - "version": "8.0.2", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "entities": "^4.4.0" - } - }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "license": "BSD-2-Clause" - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "license": "MIT" - }, - "node_modules/http-errors": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/http-parser-js": { - "version": "0.5.8", - "license": "MIT" - }, - "node_modules/http-proxy": { - "version": "1.18.1", - "license": "MIT", - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/http-proxy-middleware": { - "version": "2.0.6", - "license": "MIT", - "dependencies": { - "@types/http-proxy": "^1.17.8", - "http-proxy": "^1.18.1", - "is-glob": "^4.0.1", - "is-plain-obj": "^3.0.0", - "micromatch": "^4.0.2" - }, - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "@types/express": "^4.17.13" - }, - "peerDependenciesMeta": { - "@types/express": { - "optional": true - } - } - }, - "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/human-signals": { - "version": "2.1.0", - "license": "Apache-2.0", - "engines": { - "node": ">=10.17.0" - } - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/icss-utils": { - "version": "5.1.0", - "license": "ISC", - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/ignore": { - "version": "5.2.4", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/image-size": { - "version": "1.0.2", 
- "license": "MIT", - "dependencies": { - "queue": "6.0.2" - }, - "bin": { - "image-size": "bin/image-size.js" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/immer": { - "version": "9.0.21", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/immer" - } - }, - "node_modules/import-fresh": { - "version": "3.3.0", - "license": "MIT", - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/import-lazy": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "license": "MIT", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indent-string": { - "version": "4.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/infima": { - "version": "0.2.0-alpha.43", - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/inflight": { - "version": "1.0.6", - "license": "ISC", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "license": "ISC" - }, - "node_modules/ini": { - "version": "1.3.8", - "license": "ISC" - }, - "node_modules/inline-style-parser": { - "version": "0.1.1", - "license": "MIT" - }, - "node_modules/interpret": { - "version": "1.4.0", - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/invariant": { - "version": "2.2.4", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.0.0" - } - }, - "node_modules/ipaddr.js": { - "version": "2.0.1", - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/is-alphabetical": { - "version": "1.0.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-alphanumerical": { - "version": "1.0.4", - "license": "MIT", - "dependencies": { - "is-alphabetical": "^1.0.0", - "is-decimal": "^1.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "license": "MIT" - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "license": "MIT", - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-buffer": { - "version": "2.0.5", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/is-ci": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "ci-info": "^2.0.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-ci/node_modules/ci-info": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/is-core-module": { - "version": "2.12.0", - "license": "MIT", - "dependencies": { - "has": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-decimal": { - "version": "1.0.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-docker": { - "version": "2.2.1", - "license": "MIT", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - 
}, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-extendable": { - "version": "0.1.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-glob": { - "version": "4.0.3", - "license": "MIT", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-hexadecimal": { - "version": "1.0.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-installed-globally": { - "version": "0.4.0", - "license": "MIT", - "dependencies": { - "global-dirs": "^3.0.0", - "is-path-inside": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-npm": { - "version": "5.0.0", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-number": { - "version": "7.0.0", - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/is-obj": { - "version": "1.0.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-path-cwd": { - "version": "2.2.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-plain-obj": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-plain-object": { - "version": "2.0.4", - "license": "MIT", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-regexp": { - "version": "1.0.0", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-root": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/is-stream": { - "version": "2.0.1", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/is-whitespace-character": { - "version": "1.0.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-word-character": { - "version": "1.0.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-wsl": { - "version": "2.2.0", - "license": "MIT", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-yarn-global": { - "version": "0.3.0", - "license": "MIT" - }, - "node_modules/isarray": { - "version": "0.0.1", - "license": "MIT" - }, - "node_modules/isexe": { - "version": "2.0.0", - "license": "ISC" - }, - "node_modules/isobject": { - "version": "3.0.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/jest-util": { - "version": "29.5.0", - "license": "MIT", - "dependencies": { - "@jest/types": "^29.5.0", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": 
"^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-worker": { - "version": "29.5.0", - "license": "MIT", - "dependencies": { - "@types/node": "*", - "jest-util": "^29.5.0", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-worker/node_modules/supports-color": { - "version": "8.1.1", - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/joi": { - "version": "17.9.1", - "license": "BSD-3-Clause", - "dependencies": { - "@hapi/hoek": "^9.0.0", - "@hapi/topo": "^5.0.0", - "@sideway/address": "^4.1.3", - "@sideway/formula": "^3.0.1", - "@sideway/pinpoint": "^2.0.0" - } - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "license": "MIT" - }, - "node_modules/js-yaml": { - "version": "4.1.0", - "license": "MIT", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsesc": { - "version": "2.5.2", - "license": "MIT", - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/json-buffer": { - "version": "3.0.0", - "license": "MIT" - }, - "node_modules/json-parse-even-better-errors": { - "version": "2.3.1", - "license": "MIT" - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "license": "MIT" - }, - "node_modules/json5": { - "version": "2.2.3", - "license": "MIT", - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/jsonfile": { - "version": "6.1.0", - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/keyv": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "json-buffer": "3.0.0" - } - }, - "node_modules/kind-of": { - "version": "6.0.3", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/kleur": { - "version": "3.0.3", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/klona": { - "version": "2.0.6", - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/latest-version": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "package-json": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/launch-editor": { - "version": "2.6.0", - "license": "MIT", - "dependencies": { - "picocolors": "^1.0.0", - "shell-quote": "^1.7.3" - } - }, - "node_modules/leven": { - "version": "3.1.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/lilconfig": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "license": "MIT" - }, - "node_modules/loader-runner": { - "version": "4.3.0", - "license": "MIT", - "engines": { - "node": ">=6.11.5" - } - }, - "node_modules/loader-utils": { - "version": "2.0.4", - "license": "MIT", - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - }, - "engines": { - "node": ">=8.9.0" - } - }, - "node_modules/locate-path": { - "version": "5.0.0", - "license": "MIT", - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "license": "MIT" - }, - "node_modules/lodash.curry": 
{ - "version": "4.1.1", - "license": "MIT" - }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "license": "MIT" - }, - "node_modules/lodash.flow": { - "version": "3.5.0", - "license": "MIT" - }, - "node_modules/lodash.memoize": { - "version": "4.1.2", - "license": "MIT" - }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "license": "MIT" - }, - "node_modules/loose-envify": { - "version": "1.4.0", - "license": "MIT", - "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" - }, - "bin": { - "loose-envify": "cli.js" - } - }, - "node_modules/lower-case": { - "version": "2.0.2", - "license": "MIT", - "dependencies": { - "tslib": "^2.0.3" - } - }, - "node_modules/lowercase-keys": { - "version": "1.0.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/lru-cache": { - "version": "5.1.1", - "license": "ISC", - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/lunr-languages": { - "version": "1.10.0", - "license": "MPL-1.1" - }, - "node_modules/make-dir": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/make-dir/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/make-error": { - "version": "1.3.6", - "license": "ISC", - "peer": true - }, - "node_modules/mark.js": { - "version": "8.11.1", - "license": "MIT" - }, - "node_modules/markdown-escapes": { - "version": "1.0.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-squeeze-paragraphs": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "unist-util-remove": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-definitions": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "unist-util-visit": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-to-hast": { - "version": "10.0.1", - "license": "MIT", - "dependencies": { - "@types/mdast": "^3.0.0", - "@types/unist": "^2.0.0", - "mdast-util-definitions": "^4.0.0", - "mdurl": "^1.0.0", - "unist-builder": "^2.0.0", - "unist-util-generated": "^1.0.0", - "unist-util-position": "^3.0.0", - "unist-util-visit": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-to-string": { - "version": "2.0.0", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdn-data": { - "version": "2.0.14", - "license": "CC0-1.0" - }, - "node_modules/mdurl": { - "version": "1.0.1", - "license": "MIT" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memfs": { - "version": "3.5.0", - "license": "Unlicense", - "dependencies": { - "fs-monkey": "^1.0.3" - }, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.1", - "license": "MIT" - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/merge2": { - "version": "1.4.1", - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/methods": 
{ - "version": "1.1.2", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/micromatch": { - "version": "4.0.5", - "license": "MIT", - "dependencies": { - "braces": "^3.0.2", - "picomatch": "^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/mime": { - "version": "1.6.0", - "license": "MIT", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/mime-db": { - "version": "1.33.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.18", - "license": "MIT", - "dependencies": { - "mime-db": "~1.33.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/mini-css-extract-plugin": { - "version": "2.7.5", - "license": "MIT", - "dependencies": { - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/ajv": { - "version": "8.12.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/ajv-keywords": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/json-schema-traverse": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/minimalistic-assert": { - "version": "1.0.1", - "license": "ISC" - }, - "node_modules/minimatch": { - "version": "3.1.2", - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/mrmime": { - "version": "1.0.1", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "license": "MIT" - }, - "node_modules/multicast-dns": { - "version": "7.2.5", - "license": "MIT", - "dependencies": { - "dns-packet": "^5.2.2", - "thunky": "^1.0.2" - }, - "bin": { - "multicast-dns": "cli.js" - } - }, - "node_modules/nanoid": { - "version": "3.3.6", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/negotiator": { - "version": "0.6.3", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/neo-async": { - "version": "2.6.2", - "license": 
"MIT" - }, - "node_modules/no-case": { - "version": "3.0.4", - "license": "MIT", - "dependencies": { - "lower-case": "^2.0.2", - "tslib": "^2.0.3" - } - }, - "node_modules/node-emoji": { - "version": "1.11.0", - "license": "MIT", - "dependencies": { - "lodash": "^4.17.21" - } - }, - "node_modules/node-fetch": { - "version": "2.6.7", - "license": "MIT", - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } - } - }, - "node_modules/node-forge": { - "version": "1.3.1", - "license": "(BSD-3-Clause OR GPL-2.0)", - "engines": { - "node": ">= 6.13.0" - } - }, - "node_modules/node-releases": { - "version": "2.0.10", - "license": "MIT" - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-range": { - "version": "0.1.2", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-url": { - "version": "6.1.0", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm-run-path": { - "version": "4.0.1", - "license": "MIT", - "dependencies": { - "path-key": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/nprogress": { - "version": "0.2.0", - "license": "MIT" - }, - "node_modules/nth-check": { - "version": "2.1.1", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0" - }, - "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-inspect": { - "version": "1.12.3", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.assign": { - "version": "4.1.4", - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "has-symbols": "^1.0.3", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/obuf": { - "version": "1.1.2", - "license": "MIT" - }, - "node_modules/on-finished": { - "version": "2.4.1", - "license": "MIT", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/on-headers": { - "version": "1.0.2", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/onetime": { - "version": "5.1.2", - "license": "MIT", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/open": { - "version": "8.4.2", - "license": "MIT", - "dependencies": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/opener": { - "version": "1.5.2", - "license": "(WTFPL OR MIT)", - "bin": { - "opener": "bin/opener-bin.js" - } - }, - "node_modules/p-cancelable": { - "version": "1.1.0", - 
"license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/p-limit": { - "version": "2.3.0", - "license": "MIT", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-locate": { - "version": "4.1.0", - "license": "MIT", - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-map": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-retry": { - "version": "4.6.2", - "license": "MIT", - "dependencies": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json": { - "version": "6.5.0", - "license": "MIT", - "dependencies": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/package-json/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/param-case": { - "version": "3.0.4", - "license": "MIT", - "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/parent-module": { - "version": "1.0.1", - "license": "MIT", - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/parse-entities": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "character-entities": "^1.0.0", - "character-entities-legacy": "^1.0.0", - "character-reference-invalid": "^1.0.0", - "is-alphanumerical": "^1.0.0", - "is-decimal": "^1.0.0", - "is-hexadecimal": "^1.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/parse-json": { - "version": "5.2.0", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/parse-numeric-range": { - "version": "1.3.0", - "license": "ISC" - }, - "node_modules/parse5": { - "version": "7.1.2", - "license": "MIT", - "dependencies": { - "entities": "^4.4.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "7.0.0", - "license": "MIT", - "dependencies": { - "domhandler": "^5.0.2", - "parse5": "^7.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/pascal-case": { - "version": "3.1.2", - "license": "MIT", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/path-exists": { - "version": "4.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-is-inside": { - "version": "1.0.2", - "license": "(WTFPL OR MIT)" - }, - 
"node_modules/path-key": { - "version": "3.1.1", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "license": "MIT" - }, - "node_modules/path-to-regexp": { - "version": "1.8.0", - "license": "MIT", - "dependencies": { - "isarray": "0.0.1" - } - }, - "node_modules/path-type": { - "version": "4.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/picocolors": { - "version": "1.0.0", - "license": "ISC" - }, - "node_modules/picomatch": { - "version": "2.3.1", - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pkg-dir": { - "version": "4.2.0", - "license": "MIT", - "dependencies": { - "find-up": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pkg-up": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pkg-up/node_modules/find-up": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/locate-path": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/p-locate": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/path-exists": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss": { - "version": "8.4.21", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.4", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/postcss-calc": { - "version": "8.2.4", - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.9", - "postcss-value-parser": "^4.2.0" - }, - "peerDependencies": { - "postcss": "^8.2.2" - } - }, - "node_modules/postcss-colormin": { - "version": "5.3.1", - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4", - "caniuse-api": "^3.0.0", - "colord": "^2.9.1", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-convert-values": { - "version": "5.1.3", - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-discard-comments": { - "version": "5.1.2", - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-discard-duplicates": { - "version": "5.1.0", - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-discard-empty": { - "version": "5.1.1", - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - 
"node_modules/postcss-discard-overridden": { - "version": "5.1.0", - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-discard-unused": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-loader": { - "version": "7.2.4", - "license": "MIT", - "dependencies": { - "cosmiconfig": "^8.1.3", - "cosmiconfig-typescript-loader": "^4.3.0", - "klona": "^2.0.6", - "semver": "^7.3.8" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "postcss": "^7.0.0 || ^8.0.1", - "ts-node": ">=10", - "typescript": ">=4", - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "ts-node": { - "optional": true - }, - "typescript": { - "optional": true - } - } - }, - "node_modules/postcss-loader/node_modules/cosmiconfig": { - "version": "8.1.3", - "license": "MIT", - "dependencies": { - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "parse-json": "^5.0.0", - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/d-fischer" - } - }, - "node_modules/postcss-merge-idents": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-merge-longhand": { - "version": "5.1.7", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0", - "stylehacks": "^5.1.1" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-merge-rules": { - "version": "5.1.4", - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4", - "caniuse-api": "^3.0.0", - "cssnano-utils": "^3.1.0", - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-minify-font-values": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-minify-gradients": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "colord": "^2.9.1", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-minify-params": { - "version": "5.1.4", - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-minify-selectors": { - "version": "5.2.1", - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-modules-extract-imports": { - "version": "3.0.0", - "license": "ISC", - "engines": { - "node": "^10 || ^12 
|| >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-local-by-default": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "icss-utils": "^5.0.0", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.1.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-scope": { - "version": "3.0.0", - "license": "ISC", - "dependencies": { - "postcss-selector-parser": "^6.0.4" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-values": { - "version": "4.0.0", - "license": "ISC", - "dependencies": { - "icss-utils": "^5.0.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-normalize-charset": { - "version": "5.1.0", - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-display-values": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-positions": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-repeat-style": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-string": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-timing-functions": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-unicode": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-url": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "normalize-url": "^6.0.1", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-whitespace": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-ordered-values": { - "version": "5.1.3", - "license": "MIT", - "dependencies": { - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-reduce-idents": { - 
"version": "5.2.0", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-reduce-initial": { - "version": "5.1.2", - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4", - "caniuse-api": "^3.0.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-reduce-transforms": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.0.11", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-sort-media-queries": { - "version": "4.3.0", - "license": "MIT", - "dependencies": { - "sort-css-media-queries": "2.1.0" - }, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "postcss": "^8.4.16" - } - }, - "node_modules/postcss-svgo": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0", - "svgo": "^2.7.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-unique-selectors": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "license": "MIT" - }, - "node_modules/postcss-zindex": { - "version": "5.1.0", - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/preact": { - "version": "10.13.2", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/preact" - } - }, - "node_modules/prepend-http": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/pretty-error": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "lodash": "^4.17.20", - "renderkid": "^3.0.0" - } - }, - "node_modules/pretty-time": { - "version": "1.1.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/prism-react-renderer": { - "version": "1.3.5", - "license": "MIT", - "peerDependencies": { - "react": ">=0.14.9" - } - }, - "node_modules/prismjs": { - "version": "1.29.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "license": "MIT" - }, - "node_modules/promise": { - "version": "7.3.1", - "license": "MIT", - "dependencies": { - "asap": "~2.0.3" - } - }, - "node_modules/prompts": { - "version": "2.4.2", - "license": "MIT", - "dependencies": { - "kleur": "^3.0.3", - "sisteransi": "^1.0.5" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/prop-types": { - "version": "15.8.1", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" - } - }, - "node_modules/property-information": { - "version": "5.6.0", - "license": "MIT", - "dependencies": { - "xtend": "^4.0.0" - }, - "funding": { - "type": "github", - "url": 
"https://github.com/sponsors/wooorm" - } - }, - "node_modules/proxy-addr": { - "version": "2.0.7", - "license": "MIT", - "dependencies": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/proxy-addr/node_modules/ipaddr.js": { - "version": "1.9.1", - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/pump": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "1.4.1", - "license": "MIT" - }, - "node_modules/pupa": { - "version": "2.1.1", - "license": "MIT", - "dependencies": { - "escape-goat": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pure-color": { - "version": "1.3.0", - "license": "MIT" - }, - "node_modules/qs": { - "version": "6.11.0", - "license": "BSD-3-Clause", - "dependencies": { - "side-channel": "^1.0.4" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/queue": { - "version": "6.0.2", - "license": "MIT", - "dependencies": { - "inherits": "~2.0.3" - } - }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/randombytes": { - "version": "2.1.0", - "license": "MIT", - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.5.1", - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/bytes": { - "version": "3.1.2", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/rc/node_modules/strip-json-comments": { - "version": "2.0.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/react": { - "version": "17.0.2", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/react-base16-styling": { - "version": "0.6.0", - "license": "MIT", - "dependencies": { - "base16": "^1.0.0", - "lodash.curry": "^4.0.1", - "lodash.flow": "^3.3.0", - "pure-color": "^1.2.0" - } - }, - "node_modules/react-dev-utils": { - "version": "12.0.1", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.16.0", - "address": "^1.1.2", - "browserslist": "^4.18.1", - "chalk": "^4.1.2", - "cross-spawn": "^7.0.3", - "detect-port-alt": "^1.1.6", - "escape-string-regexp": "^4.0.0", - "filesize": "^8.0.6", - "find-up": "^5.0.0", - "fork-ts-checker-webpack-plugin": "^6.5.0", - "global-modules": "^2.0.0", - "globby": "^11.0.4", - "gzip-size": "^6.0.0", - "immer": "^9.0.7", - "is-root": "^2.1.0", - "loader-utils": "^3.2.0", - "open": "^8.4.0", - "pkg-up": "^3.1.0", - "prompts": "^2.4.2", - 
"react-error-overlay": "^6.0.11", - "recursive-readdir": "^2.2.2", - "shell-quote": "^1.7.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/react-dev-utils/node_modules/find-up": { - "version": "5.0.0", - "license": "MIT", - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dev-utils/node_modules/loader-utils": { - "version": "3.2.1", - "license": "MIT", - "engines": { - "node": ">= 12.13.0" - } - }, - "node_modules/react-dev-utils/node_modules/locate-path": { - "version": "6.0.0", - "license": "MIT", - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dev-utils/node_modules/p-limit": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dev-utils/node_modules/p-locate": { - "version": "5.0.0", - "license": "MIT", - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dom": { - "version": "17.0.2", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1", - "scheduler": "^0.20.2" - }, - "peerDependencies": { - "react": "17.0.2" - } - }, - "node_modules/react-error-overlay": { - "version": "6.0.11", - "license": "MIT" - }, - "node_modules/react-fast-compare": { - "version": "3.2.1", - "license": "MIT" - }, - "node_modules/react-helmet-async": { - "version": "1.3.0", - "license": "Apache-2.0", - "dependencies": { - "@babel/runtime": "^7.12.5", - "invariant": "^2.2.4", - "prop-types": "^15.7.2", - "react-fast-compare": "^3.2.0", - "shallowequal": "^1.1.0" - }, - "peerDependencies": { - "react": "^16.6.0 || ^17.0.0 || ^18.0.0", - "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/react-is": { - "version": "16.13.1", - "license": "MIT" - }, - "node_modules/react-json-view": { - "version": "1.21.3", - "license": "MIT", - "dependencies": { - "flux": "^4.0.1", - "react-base16-styling": "^0.6.0", - "react-lifecycles-compat": "^3.0.4", - "react-textarea-autosize": "^8.3.2" - }, - "peerDependencies": { - "react": "^17.0.0 || ^16.3.0 || ^15.5.4", - "react-dom": "^17.0.0 || ^16.3.0 || ^15.5.4" - } - }, - "node_modules/react-lifecycles-compat": { - "version": "3.0.4", - "license": "MIT" - }, - "node_modules/react-loadable": { - "name": "@docusaurus/react-loadable", - "version": "5.5.2", - "license": "MIT", - "dependencies": { - "@types/react": "*", - "prop-types": "^15.6.2" - }, - "peerDependencies": { - "react": "*" - } - }, - "node_modules/react-loadable-ssr-addon-v5-slorber": { - "version": "1.0.1", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.10.3" - }, - "engines": { - "node": ">=10.13.0" - }, - "peerDependencies": { - "react-loadable": "*", - "webpack": ">=4.41.1 || 5.x" - } - }, - "node_modules/react-router": { - "version": "5.3.4", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "hoist-non-react-statics": "^3.1.0", - "loose-envify": "^1.3.1", - "path-to-regexp": "^1.7.0", - "prop-types": "^15.6.2", - "react-is": "^16.6.0", 
- "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "peerDependencies": { - "react": ">=15" - } - }, - "node_modules/react-router-config": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.1.2" - }, - "peerDependencies": { - "react": ">=15", - "react-router": ">=5" - } - }, - "node_modules/react-router-dom": { - "version": "5.3.4", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "loose-envify": "^1.3.1", - "prop-types": "^15.6.2", - "react-router": "5.3.4", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "peerDependencies": { - "react": ">=15" - } - }, - "node_modules/react-textarea-autosize": { - "version": "8.4.1", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.20.13", - "use-composed-ref": "^1.3.0", - "use-latest": "^1.2.1" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/readdirp": { - "version": "3.6.0", - "license": "MIT", - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/reading-time": { - "version": "1.5.0", - "license": "MIT" - }, - "node_modules/rechoir": { - "version": "0.6.2", - "dependencies": { - "resolve": "^1.1.6" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/recursive-readdir": { - "version": "2.2.3", - "license": "MIT", - "dependencies": { - "minimatch": "^3.0.5" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/regenerate": { - "version": "1.4.2", - "license": "MIT" - }, - "node_modules/regenerate-unicode-properties": { - "version": "10.1.0", - "license": "MIT", - "dependencies": { - "regenerate": "^1.4.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regenerator-runtime": { - "version": "0.13.11", - "license": "MIT" - }, - "node_modules/regenerator-transform": { - "version": "0.15.1", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.8.4" - } - }, - "node_modules/regexpu-core": { - "version": "5.3.2", - "license": "MIT", - "dependencies": { - "@babel/regjsgen": "^0.8.0", - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.1.0", - "regjsparser": "^0.9.1", - "unicode-match-property-ecmascript": "^2.0.0", - "unicode-match-property-value-ecmascript": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/registry-auth-token": { - "version": "4.2.2", - "license": "MIT", - "dependencies": { - "rc": "1.2.8" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/registry-url": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/regjsparser": { - "version": "0.9.1", - "license": "BSD-2-Clause", - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "bin": { - "jsesc": "bin/jsesc" - } - }, - "node_modules/relateurl": { - "version": "0.2.7", - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/remark-emoji": { - "version": "2.2.0", - "license": "MIT", - "dependencies": { - "emoticon": "^3.2.0", - "node-emoji": "^1.10.0", - "unist-util-visit": "^2.0.3" - } - 
}, - "node_modules/remark-footnotes": { - "version": "2.0.0", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-mdx": { - "version": "1.6.22", - "license": "MIT", - "dependencies": { - "@babel/core": "7.12.9", - "@babel/helper-plugin-utils": "7.10.4", - "@babel/plugin-proposal-object-rest-spread": "7.12.1", - "@babel/plugin-syntax-jsx": "7.12.1", - "@mdx-js/util": "1.6.22", - "is-alphabetical": "1.0.4", - "remark-parse": "8.0.3", - "unified": "9.2.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-mdx/node_modules/@babel/core": { - "version": "7.12.9", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.10.4", - "@babel/generator": "^7.12.5", - "@babel/helper-module-transforms": "^7.12.1", - "@babel/helpers": "^7.12.5", - "@babel/parser": "^7.12.7", - "@babel/template": "^7.12.7", - "@babel/traverse": "^7.12.9", - "@babel/types": "^7.12.7", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.1", - "json5": "^2.1.2", - "lodash": "^4.17.19", - "resolve": "^1.3.2", - "semver": "^5.4.1", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/remark-mdx/node_modules/@babel/helper-plugin-utils": { - "version": "7.10.4", - "license": "MIT" - }, - "node_modules/remark-mdx/node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.12.1", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.0", - "@babel/plugin-transform-parameters": "^7.12.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/remark-mdx/node_modules/@babel/plugin-syntax-jsx": { - "version": "7.12.1", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/remark-mdx/node_modules/semver": { - "version": "5.7.1", - "license": "ISC", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/remark-mdx/node_modules/source-map": { - "version": "0.5.7", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/remark-mdx/node_modules/unified": { - "version": "9.2.0", - "license": "MIT", - "dependencies": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-parse": { - "version": "8.0.3", - "license": "MIT", - "dependencies": { - "ccount": "^1.0.0", - "collapse-white-space": "^1.0.2", - "is-alphabetical": "^1.0.0", - "is-decimal": "^1.0.0", - "is-whitespace-character": "^1.0.0", - "is-word-character": "^1.0.0", - "markdown-escapes": "^1.0.0", - "parse-entities": "^2.0.0", - "repeat-string": "^1.5.4", - "state-toggle": "^1.0.0", - "trim": "0.0.1", - "trim-trailing-lines": "^1.0.0", - "unherit": "^1.0.4", - "unist-util-remove-position": "^2.0.0", - "vfile-location": "^3.0.0", - "xtend": "^4.0.1" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-squeeze-paragraphs": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "mdast-squeeze-paragraphs": 
"^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/renderkid": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "css-select": "^4.1.3", - "dom-converter": "^0.2.0", - "htmlparser2": "^6.1.0", - "lodash": "^4.17.21", - "strip-ansi": "^6.0.1" - } - }, - "node_modules/renderkid/node_modules/css-select": { - "version": "4.3.0", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.0.1", - "domhandler": "^4.3.1", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/renderkid/node_modules/dom-serializer": { - "version": "1.4.1", - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/domhandler": { - "version": "4.3.1", - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/domutils": { - "version": "2.8.0", - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/entities": { - "version": "2.2.0", - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2": { - "version": "6.1.0", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, - "node_modules/repeat-string": { - "version": "1.6.1", - "license": "MIT", - "engines": { - "node": ">=0.10" - } - }, - "node_modules/require-from-string": { - "version": "2.0.2", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-like": { - "version": "0.1.2", - "engines": { - "node": "*" - } - }, - "node_modules/requires-port": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/resolve": { - "version": "1.22.2", - "license": "MIT", - "dependencies": { - "is-core-module": "^2.11.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/resolve-from": { - "version": "4.0.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-pathname": { - "version": "3.0.0", - "license": "MIT" - }, - "node_modules/responselike": { - "version": "1.0.2", - "license": "MIT", - "dependencies": { - "lowercase-keys": "^1.0.0" - } - }, - "node_modules/retry": { - "version": "0.13.1", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/reusify": { - "version": "1.0.4", - "license": "MIT", - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/rimraf": { - "version": "3.0.2", - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - 
"url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/rtl-detect": { - "version": "1.0.4", - "license": "BSD-3-Clause" - }, - "node_modules/rtlcss": { - "version": "3.5.0", - "license": "MIT", - "dependencies": { - "find-up": "^5.0.0", - "picocolors": "^1.0.0", - "postcss": "^8.3.11", - "strip-json-comments": "^3.1.1" - }, - "bin": { - "rtlcss": "bin/rtlcss.js" - } - }, - "node_modules/rtlcss/node_modules/find-up": { - "version": "5.0.0", - "license": "MIT", - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/rtlcss/node_modules/locate-path": { - "version": "6.0.0", - "license": "MIT", - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/rtlcss/node_modules/p-limit": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/rtlcss/node_modules/p-locate": { - "version": "5.0.0", - "license": "MIT", - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/rxjs": { - "version": "7.8.0", - "license": "Apache-2.0", - "dependencies": { - "tslib": "^2.1.0" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "license": "MIT" - }, - "node_modules/sax": { - "version": "1.2.4", - "license": "ISC" - }, - "node_modules/scheduler": { - "version": "0.20.2", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, - "node_modules/schema-utils": { - "version": "2.7.1", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.5", - "ajv": "^6.12.4", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/section-matter": { - "version": "1.0.0", - "license": "MIT", - "dependencies": { - "extend-shallow": "^2.0.1", - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/select-hose": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/selfsigned": { - "version": "2.1.1", - "license": "MIT", - "dependencies": { - "node-forge": "^1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver": { - "version": "7.4.0", - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver-diff": { - "version": "3.1.1", 
- "license": "MIT", - "dependencies": { - "semver": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/semver-diff/node_modules/semver": { - "version": "6.3.0", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/semver/node_modules/lru-cache": { - "version": "6.0.0", - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver/node_modules/yallist": { - "version": "4.0.0", - "license": "ISC" - }, - "node_modules/send": { - "version": "0.18.0", - "license": "MIT", - "dependencies": { - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "2.4.1", - "range-parser": "~1.2.1", - "statuses": "2.0.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/debug": { - "version": "2.6.9", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/send/node_modules/debug/node_modules/ms": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.3", - "license": "MIT" - }, - "node_modules/send/node_modules/range-parser": { - "version": "1.2.1", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serialize-javascript": { - "version": "6.0.1", - "license": "BSD-3-Clause", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/serve-handler": { - "version": "6.1.5", - "license": "MIT", - "dependencies": { - "bytes": "3.0.0", - "content-disposition": "0.5.2", - "fast-url-parser": "1.1.3", - "mime-types": "2.1.18", - "minimatch": "3.1.2", - "path-is-inside": "1.0.2", - "path-to-regexp": "2.2.1", - "range-parser": "1.2.0" - } - }, - "node_modules/serve-handler/node_modules/path-to-regexp": { - "version": "2.2.1", - "license": "MIT" - }, - "node_modules/serve-index": { - "version": "1.9.1", - "license": "MIT", - "dependencies": { - "accepts": "~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/serve-index/node_modules/debug": { - "version": "2.6.9", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/serve-index/node_modules/depd": { - "version": "1.1.2", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/http-errors": { - "version": "1.6.3", - "license": "MIT", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/inherits": { - "version": "2.0.3", - "license": "ISC" - }, - "node_modules/serve-index/node_modules/ms": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/serve-index/node_modules/setprototypeof": { - "version": "1.1.0", - "license": "ISC" - }, - "node_modules/serve-index/node_modules/statuses": { - "version": "1.5.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-static": { - "version": "1.15.0", - "license": "MIT", - "dependencies": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.18.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/setimmediate": { 
- "version": "1.0.5", - "license": "MIT" - }, - "node_modules/setprototypeof": { - "version": "1.2.0", - "license": "ISC" - }, - "node_modules/shallow-clone": { - "version": "3.0.1", - "license": "MIT", - "dependencies": { - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shallowequal": { - "version": "1.1.0", - "license": "MIT" - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/shell-quote": { - "version": "1.8.1", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/shelljs": { - "version": "0.8.5", - "license": "BSD-3-Clause", - "dependencies": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - }, - "bin": { - "shjs": "bin/shjs" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/side-channel": { - "version": "1.0.4", - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/signal-exit": { - "version": "3.0.7", - "license": "ISC" - }, - "node_modules/sirv": { - "version": "1.0.19", - "license": "MIT", - "dependencies": { - "@polka/url": "^1.0.0-next.20", - "mrmime": "^1.0.0", - "totalist": "^1.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/sisteransi": { - "version": "1.0.5", - "license": "MIT" - }, - "node_modules/sitemap": { - "version": "7.1.1", - "license": "MIT", - "dependencies": { - "@types/node": "^17.0.5", - "@types/sax": "^1.2.1", - "arg": "^5.0.0", - "sax": "^1.2.4" - }, - "bin": { - "sitemap": "dist/cli.js" - }, - "engines": { - "node": ">=12.0.0", - "npm": ">=5.6.0" - } - }, - "node_modules/sitemap/node_modules/@types/node": { - "version": "17.0.45", - "license": "MIT" - }, - "node_modules/slash": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/sockjs": { - "version": "0.3.24", - "license": "MIT", - "dependencies": { - "faye-websocket": "^0.11.3", - "uuid": "^8.3.2", - "websocket-driver": "^0.7.4" - } - }, - "node_modules/sort-css-media-queries": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">= 6.3.0" - } - }, - "node_modules/source-map": { - "version": "0.6.1", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-js": { - "version": "1.0.2", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.21", - "license": "MIT", - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/space-separated-tokens": { - "version": "1.1.5", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/spdy": { - "version": "4.0.2", - "license": "MIT", - "dependencies": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/spdy-transport": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - "hpack.js": "^2.1.6", - "obuf": "^1.1.2", - 
"readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "license": "BSD-3-Clause" - }, - "node_modules/stable": { - "version": "0.1.8", - "license": "MIT" - }, - "node_modules/state-toggle": { - "version": "1.0.3", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/statuses": { - "version": "2.0.1", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/std-env": { - "version": "3.3.2", - "license": "MIT" - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/string-width": { - "version": "5.1.2", - "license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "6.0.1", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "7.0.1", - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/stringify-object": { - "version": "3.3.0", - "license": "BSD-2-Clause", - "dependencies": { - "get-own-enumerable-property-symbols": "^3.0.0", - "is-obj": "^1.0.1", - "is-regexp": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-bom-string": { - "version": "1.0.0", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-final-newline": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/style-to-object": { - "version": "0.3.0", - "license": "MIT", - "dependencies": { - "inline-style-parser": "0.1.1" - } - }, - "node_modules/stylehacks": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4", - "postcss-selector-parser": "^6.0.4" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/svg-parser": { - "version": "2.0.4", - "license": "MIT" - }, - "node_modules/svgo": { - "version": "2.8.0", - "license": "MIT", - "dependencies": { - "@trysound/sax": "0.2.0", - "commander": "^7.2.0", - "css-select": "^4.1.3", - "css-tree": "^1.1.3", - "csso": "^4.2.0", - "picocolors": "^1.0.0", - "stable": "^0.1.8" - }, - "bin": { - "svgo": "bin/svgo" - }, - "engines": { - "node": ">=10.13.0" - } - }, - 
"node_modules/svgo/node_modules/commander": { - "version": "7.2.0", - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/svgo/node_modules/css-select": { - "version": "4.3.0", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.0.1", - "domhandler": "^4.3.1", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/svgo/node_modules/dom-serializer": { - "version": "1.4.1", - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/svgo/node_modules/domhandler": { - "version": "4.3.1", - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/svgo/node_modules/domutils": { - "version": "2.8.0", - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/svgo/node_modules/entities": { - "version": "2.2.0", - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/tapable": { - "version": "2.2.1", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/terser": { - "version": "5.16.9", - "license": "BSD-2-Clause", - "dependencies": { - "@jridgewell/source-map": "^0.3.2", - "acorn": "^8.5.0", - "commander": "^2.20.0", - "source-map-support": "~0.5.20" - }, - "bin": { - "terser": "bin/terser" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/terser-webpack-plugin": { - "version": "5.3.7", - "license": "MIT", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.17", - "jest-worker": "^27.4.5", - "schema-utils": "^3.1.1", - "serialize-javascript": "^6.0.1", - "terser": "^5.16.5" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "uglify-js": { - "optional": true - } - } - }, - "node_modules/terser-webpack-plugin/node_modules/jest-worker": { - "version": "27.5.1", - "license": "MIT", - "dependencies": { - "@types/node": "*", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": ">= 10.13.0" - } - }, - "node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "3.1.1", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/terser-webpack-plugin/node_modules/supports-color": { - "version": "8.1.1", - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/terser/node_modules/commander": { - "version": "2.20.3", - "license": "MIT" - }, - "node_modules/text-table": { - "version": "0.2.0", - "license": 
"MIT" - }, - "node_modules/thunky": { - "version": "1.1.0", - "license": "MIT" - }, - "node_modules/tiny-invariant": { - "version": "1.3.1", - "license": "MIT" - }, - "node_modules/tiny-warning": { - "version": "1.0.3", - "license": "MIT" - }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/to-readable-stream": { - "version": "1.0.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "license": "MIT", - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.1", - "license": "MIT", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/totalist": { - "version": "1.1.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/tr46": { - "version": "0.0.3", - "license": "MIT" - }, - "node_modules/trim": { - "version": "0.0.1" - }, - "node_modules/trim-trailing-lines": { - "version": "1.1.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/trough": { - "version": "1.0.5", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/ts-node": { - "version": "10.9.1", - "license": "MIT", - "peer": true, - "dependencies": { - "@cspotcode/source-map-support": "^0.8.0", - "@tsconfig/node10": "^1.0.7", - "@tsconfig/node12": "^1.0.7", - "@tsconfig/node14": "^1.0.0", - "@tsconfig/node16": "^1.0.2", - "acorn": "^8.4.1", - "acorn-walk": "^8.1.1", - "arg": "^4.1.0", - "create-require": "^1.1.0", - "diff": "^4.0.1", - "make-error": "^1.1.1", - "v8-compile-cache-lib": "^3.0.1", - "yn": "3.1.1" - }, - "bin": { - "ts-node": "dist/bin.js", - "ts-node-cwd": "dist/bin-cwd.js", - "ts-node-esm": "dist/bin-esm.js", - "ts-node-script": "dist/bin-script.js", - "ts-node-transpile-only": "dist/bin-transpile.js", - "ts-script": "dist/bin-script-deprecated.js" - }, - "peerDependencies": { - "@swc/core": ">=1.2.50", - "@swc/wasm": ">=1.2.50", - "@types/node": "*", - "typescript": ">=2.7" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "@swc/wasm": { - "optional": true - } - } - }, - "node_modules/ts-node/node_modules/arg": { - "version": "4.1.3", - "license": "MIT", - "peer": true - }, - "node_modules/tslib": { - "version": "2.5.0", - "license": "0BSD" - }, - "node_modules/type-fest": { - "version": "2.19.0", - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "license": "MIT", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/type-is/node_modules/mime-db": { - "version": "1.52.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/type-is/node_modules/mime-types": { - "version": "2.1.35", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "license": "MIT", - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, - "node_modules/typescript": { - "version": "5.0.4", - "license": "Apache-2.0", - "peer": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=12.20" - } - 
}, - "node_modules/ua-parser-js": { - "version": "0.7.35", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/ua-parser-js" - }, - { - "type": "paypal", - "url": "https://paypal.me/faisalman" - } - ], - "license": "MIT", - "engines": { - "node": "*" - } - }, - "node_modules/unherit": { - "version": "1.1.3", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.0", - "xtend": "^4.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-property-aliases-ecmascript": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unified": { - "version": "9.2.2", - "license": "MIT", - "dependencies": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unique-string": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "crypto-random-string": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/unist-builder": { - "version": "2.0.3", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-generated": { - "version": "1.1.6", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-is": { - "version": "4.1.0", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-position": { - "version": "3.1.0", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-remove": { - "version": "2.1.0", - "license": "MIT", - "dependencies": { - "unist-util-is": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-remove-position": { - "version": "2.0.1", - "license": "MIT", - "dependencies": { - "unist-util-visit": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-stringify-position": { - "version": "2.0.3", - "license": "MIT", - "dependencies": { - "@types/unist": "^2.0.2" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-visit": { - "version": "2.0.3", - "license": "MIT", - "dependencies": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0", - "unist-util-visit-parents": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-visit-parents": { - "version": "3.1.1", - "license": "MIT", - "dependencies": { - 
"@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/universalify": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/update-browserslist-db": { - "version": "1.0.10", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - } - ], - "license": "MIT", - "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" - }, - "bin": { - "browserslist-lint": "cli.js" - }, - "peerDependencies": { - "browserslist": ">= 4.21.0" - } - }, - "node_modules/update-notifier": { - "version": "5.1.0", - "license": "BSD-2-Clause", - "dependencies": { - "boxen": "^5.0.0", - "chalk": "^4.1.0", - "configstore": "^5.0.1", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.4.0", - "is-npm": "^5.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.1.0", - "pupa": "^2.1.1", - "semver": "^7.3.4", - "semver-diff": "^3.1.1", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/yeoman/update-notifier?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/boxen": { - "version": "5.1.2", - "license": "MIT", - "dependencies": { - "ansi-align": "^3.0.0", - "camelcase": "^6.2.0", - "chalk": "^4.1.0", - "cli-boxes": "^2.2.1", - "string-width": "^4.2.2", - "type-fest": "^0.20.2", - "widest-line": "^3.1.0", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/cli-boxes": { - "version": "2.2.1", - "license": "MIT", - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/emoji-regex": { - "version": "8.0.0", - "license": "MIT" - }, - "node_modules/update-notifier/node_modules/string-width": { - "version": "4.2.3", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/type-fest": { - "version": "0.20.2", - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/widest-line": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "string-width": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/wrap-ansi": { - "version": "7.0.0", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "license": "BSD-2-Clause", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/uri-js/node_modules/punycode": { - "version": "2.3.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/url-loader": { - "version": "4.1.1", - "license": "MIT", - 
"dependencies": { - "loader-utils": "^2.0.0", - "mime-types": "^2.1.27", - "schema-utils": "^3.0.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "file-loader": "*", - "webpack": "^4.0.0 || ^5.0.0" - }, - "peerDependenciesMeta": { - "file-loader": { - "optional": true - } - } - }, - "node_modules/url-loader/node_modules/mime-db": { - "version": "1.52.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/url-loader/node_modules/mime-types": { - "version": "2.1.35", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/url-loader/node_modules/schema-utils": { - "version": "3.1.1", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/url-parse-lax": { - "version": "3.0.0", - "license": "MIT", - "dependencies": { - "prepend-http": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/use-composed-ref": { - "version": "1.3.0", - "license": "MIT", - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/use-isomorphic-layout-effect": { - "version": "1.1.2", - "license": "MIT", - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/use-latest": { - "version": "1.2.1", - "license": "MIT", - "dependencies": { - "use-isomorphic-layout-effect": "^1.1.1" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/use-sync-external-store": { - "version": "1.2.0", - "license": "MIT", - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "license": "MIT" - }, - "node_modules/utila": { - "version": "0.4.0", - "license": "MIT" - }, - "node_modules/utility-types": { - "version": "3.10.0", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "license": "MIT", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/uuid": { - "version": "8.3.2", - "license": "MIT", - "bin": { - "uuid": "dist/bin/uuid" - } - }, - "node_modules/v8-compile-cache-lib": { - "version": "3.0.1", - "license": "MIT", - "peer": true - }, - "node_modules/value-equal": { - "version": "1.0.1", - "license": "MIT" - }, - "node_modules/vary": { - "version": "1.1.2", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/vfile": { - "version": "4.2.1", - "license": "MIT", - "dependencies": { - "@types/unist": "^2.0.0", - "is-buffer": "^2.0.0", - "unist-util-stringify-position": "^2.0.0", - "vfile-message": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/vfile-location": { - "version": "3.2.0", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/vfile-message": { - "version": "2.0.4", - "license": "MIT", - "dependencies": { - "@types/unist": "^2.0.0", - "unist-util-stringify-position": "^2.0.0" - 
}, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/wait-on": { - "version": "6.0.1", - "license": "MIT", - "dependencies": { - "axios": "^0.25.0", - "joi": "^17.6.0", - "lodash": "^4.17.21", - "minimist": "^1.2.5", - "rxjs": "^7.5.4" - }, - "bin": { - "wait-on": "bin/wait-on" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/watchpack": { - "version": "2.4.0", - "license": "MIT", - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/wbuf": { - "version": "1.7.3", - "license": "MIT", - "dependencies": { - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/web-namespaces": { - "version": "1.1.4", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/webidl-conversions": { - "version": "3.0.1", - "license": "BSD-2-Clause" - }, - "node_modules/webpack": { - "version": "5.78.0", - "license": "MIT", - "dependencies": { - "@types/eslint-scope": "^3.7.3", - "@types/estree": "^0.0.51", - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/wasm-edit": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1", - "acorn": "^8.7.1", - "acorn-import-assertions": "^1.7.6", - "browserslist": "^4.14.5", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.10.0", - "es-module-lexer": "^0.9.0", - "eslint-scope": "5.1.1", - "events": "^3.2.0", - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.9", - "json-parse-even-better-errors": "^2.3.1", - "loader-runner": "^4.2.0", - "mime-types": "^2.1.27", - "neo-async": "^2.6.2", - "schema-utils": "^3.1.0", - "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.1.3", - "watchpack": "^2.4.0", - "webpack-sources": "^3.2.3" - }, - "bin": { - "webpack": "bin/webpack.js" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependenciesMeta": { - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-bundle-analyzer": { - "version": "4.8.0", - "license": "MIT", - "dependencies": { - "@discoveryjs/json-ext": "0.5.7", - "acorn": "^8.0.4", - "acorn-walk": "^8.0.0", - "chalk": "^4.1.0", - "commander": "^7.2.0", - "gzip-size": "^6.0.0", - "lodash": "^4.17.20", - "opener": "^1.5.2", - "sirv": "^1.0.7", - "ws": "^7.3.1" - }, - "bin": { - "webpack-bundle-analyzer": "lib/bin/analyzer.js" - }, - "engines": { - "node": ">= 10.13.0" - } - }, - "node_modules/webpack-bundle-analyzer/node_modules/commander": { - "version": "7.2.0", - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/webpack-dev-middleware": { - "version": "5.3.3", - "license": "MIT", - "dependencies": { - "colorette": "^2.0.10", - "memfs": "^3.4.3", - "mime-types": "^2.1.31", - "range-parser": "^1.2.1", - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/webpack-dev-middleware/node_modules/ajv": { - "version": "8.12.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack-dev-middleware/node_modules/ajv-keywords": { - 
"version": "5.1.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/webpack-dev-middleware/node_modules/mime-db": { - "version": "1.52.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/mime-types": { - "version": "2.1.35", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/range-parser": { - "version": "1.2.1", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/schema-utils": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/webpack-dev-server": { - "version": "4.13.2", - "license": "MIT", - "dependencies": { - "@types/bonjour": "^3.5.9", - "@types/connect-history-api-fallback": "^1.3.5", - "@types/express": "^4.17.13", - "@types/serve-index": "^1.9.1", - "@types/serve-static": "^1.13.10", - "@types/sockjs": "^0.3.33", - "@types/ws": "^8.5.1", - "ansi-html-community": "^0.0.8", - "bonjour-service": "^1.0.11", - "chokidar": "^3.5.3", - "colorette": "^2.0.10", - "compression": "^1.7.4", - "connect-history-api-fallback": "^2.0.0", - "default-gateway": "^6.0.3", - "express": "^4.17.3", - "graceful-fs": "^4.2.6", - "html-entities": "^2.3.2", - "http-proxy-middleware": "^2.0.3", - "ipaddr.js": "^2.0.1", - "launch-editor": "^2.6.0", - "open": "^8.0.9", - "p-retry": "^4.5.0", - "rimraf": "^3.0.2", - "schema-utils": "^4.0.0", - "selfsigned": "^2.1.1", - "serve-index": "^1.9.1", - "sockjs": "^0.3.24", - "spdy": "^4.0.2", - "webpack-dev-middleware": "^5.3.1", - "ws": "^8.13.0" - }, - "bin": { - "webpack-dev-server": "bin/webpack-dev-server.js" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.37.0 || ^5.0.0" - }, - "peerDependenciesMeta": { - "webpack": { - "optional": true - }, - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-dev-server/node_modules/ajv": { - "version": "8.12.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack-dev-server/node_modules/ajv-keywords": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/webpack-dev-server/node_modules/json-schema-traverse": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/webpack-dev-server/node_modules/schema-utils": { - "version": "4.0.0", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/webpack" - } - }, - "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.13.0", - "license": "MIT", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/webpack-merge": { - "version": "5.8.0", - "license": "MIT", - "dependencies": { - "clone-deep": "^4.0.1", - "wildcard": "^2.0.0" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/webpack-sources": { - "version": "3.2.3", - "license": "MIT", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/webpack/node_modules/mime-db": { - "version": "1.52.0", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack/node_modules/mime-types": { - "version": "2.1.35", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "3.1.1", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/webpackbar": { - "version": "5.0.2", - "license": "MIT", - "dependencies": { - "chalk": "^4.1.0", - "consola": "^2.15.3", - "pretty-time": "^1.1.0", - "std-env": "^3.0.1" - }, - "engines": { - "node": ">=12" - }, - "peerDependencies": { - "webpack": "3 || 4 || 5" - } - }, - "node_modules/websocket-driver": { - "version": "0.7.4", - "license": "Apache-2.0", - "dependencies": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/websocket-extensions": { - "version": "0.1.4", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/whatwg-url": { - "version": "5.0.0", - "license": "MIT", - "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/widest-line": { - "version": "4.0.1", - "license": "MIT", - "dependencies": { - "string-width": "^5.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/wildcard": { - "version": "2.0.0", - "license": "MIT" - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "6.0.1", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.1", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "7.0.1", - "license": "MIT", - 
"dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "license": "ISC" - }, - "node_modules/write-file-atomic": { - "version": "3.0.3", - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/ws": { - "version": "7.5.9", - "license": "MIT", - "engines": { - "node": ">=8.3.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/xdg-basedir": { - "version": "4.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/xml-js": { - "version": "1.6.11", - "license": "MIT", - "dependencies": { - "sax": "^1.2.4" - }, - "bin": { - "xml-js": "bin/cli.js" - } - }, - "node_modules/xtend": { - "version": "4.0.2", - "license": "MIT", - "engines": { - "node": ">=0.4" - } - }, - "node_modules/yallist": { - "version": "3.1.1", - "license": "ISC" - }, - "node_modules/yaml": { - "version": "1.10.2", - "license": "ISC", - "engines": { - "node": ">= 6" - } - }, - "node_modules/yn": { - "version": "3.1.1", - "license": "MIT", - "peer": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/yocto-queue": { - "version": "0.1.0", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/zwitch": { - "version": "1.0.5", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - } - } -} diff --git a/docsite/package.json b/docsite/package.json deleted file mode 100644 index 5bb4b131..00000000 --- a/docsite/package.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "docsite", - "version": "0.0.0", - "private": true, - "scripts": { - "docusaurus": "docusaurus", - "start": "docusaurus start", - "build": "docusaurus build", - "swizzle": "docusaurus swizzle", - "deploy": "docusaurus deploy", - "clear": "docusaurus clear", - "serve": "docusaurus serve", - "write-translations": "docusaurus write-translations", - "write-heading-ids": "docusaurus write-heading-ids" - }, - "dependencies": { - "@cmfcmf/docusaurus-search-local": "^1.1.0", - "@docusaurus/core": "2.4.0", - "@docusaurus/plugin-google-gtag": "2.4.0", - "@docusaurus/preset-classic": "2.4.0", - "@mdx-js/react": "^1.6.22", - "clsx": "^1.2.1", - "prism-react-renderer": "^1.3.5", - "react": "^17.0.2", - "react-dom": "^17.0.2" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "2.4.0" - }, - "resolutions": { - "trim": "0.0.3" - }, - "browserslist": { - "production": [ - ">0.5%", - "not dead", - "not op_mini all" - ], - "development": [ - "last 1 chrome version", - "last 1 firefox version", - "last 1 safari version" - ] - } -} diff --git a/docsite/sidebars.js b/docsite/sidebars.js deleted file mode 100644 index 7d0fc947..00000000 --- a/docsite/sidebars.js +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Creating a sidebar enables you to: - - create an ordered group of docs - - render a sidebar for each doc of that group - - provide next/previous navigation - - The sidebars can be generated from the filesystem, or explicitly defined here. - - Create as many sidebars as you want. 
- */
-
-// @ts-check
-
-/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
-const sidebars = {
-  // By default, Docusaurus generates a sidebar from the docs folder structure
-  serverless: [{type: 'autogenerated', dirName: 'fal-serverless'}],
-  adapter: [{ type: 'autogenerated', dirName: 'dbt-fal' }],
-  // But you can create a sidebar manually
-  /*
-  tutorialSidebar: [
-    {
-      type: 'category',
-      label: 'Tutorial',
-      items: ['hello'],
-    },
-  ],
-   */
-};
-
-module.exports = sidebars;
diff --git a/docsite/src/components/HomepageFeatures.js b/docsite/src/components/HomepageFeatures.js deleted file mode 100644 index 16f820b1..00000000 --- a/docsite/src/components/HomepageFeatures.js +++ /dev/null @@ -1,64 +0,0 @@
-import React from 'react';
-import clsx from 'clsx';
-import styles from './HomepageFeatures.module.css';
-
-const FeatureList = [
-  {
-    title: 'Easy to Use',
-    Svg: require('../../static/img/undraw_docusaurus_mountain.svg').default,
-    description: (
-      <>
-        Docusaurus was designed from the ground up to be easily installed and
-        used to get your website up and running quickly.
-      </>
-    ),
-  },
-  {
-    title: 'Focus on What Matters',
-    Svg: require('../../static/img/undraw_docusaurus_tree.svg').default,
-    description: (
-      <>
-        Docusaurus lets you focus on your docs, and we'll do the chores. Go
-        ahead and move your docs into the docs directory.
-      </>
-    ),
-  },
-  {
-    title: 'Powered by React',
-    Svg: require('../../static/img/undraw_docusaurus_react.svg').default,
-    description: (
-      <>
-        Extend or customize your website layout by reusing React. Docusaurus can
-        be extended while reusing the same header and footer.
-      </>
-    ),
-  },
-];
-
-function Feature({Svg, title, description}) {
-  return (
-    <div className={clsx('col col--4')}>
-      <div className="text--center">
-        <Svg className={styles.featureSvg} alt={title} />
-      </div>
-      <div className="text--center padding-horiz--md">
-        <h3>{title}</h3>
-        <p>{description}</p>
-      </div>
-    </div>
-  );
-}
-
-export default function HomepageFeatures() {
-  return (
-    <section className={styles.features}>
-      <div className="container">
-        <div className="row">
-          {FeatureList.map((props, idx) => (
-            <Feature key={idx} {...props} />
-          ))}
-        </div>
-      </div>
-    </section>
- ); -} diff --git a/docsite/src/components/HomepageFeatures.module.css b/docsite/src/components/HomepageFeatures.module.css deleted file mode 100644 index b248eb2e..00000000 --- a/docsite/src/components/HomepageFeatures.module.css +++ /dev/null @@ -1,11 +0,0 @@ -.features { - display: flex; - align-items: center; - padding: 2rem 0; - width: 100%; -} - -.featureSvg { - height: 200px; - width: 200px; -} diff --git a/docsite/src/css/custom.css b/docsite/src/css/custom.css deleted file mode 100644 index 5d33a154..00000000 --- a/docsite/src/css/custom.css +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Any CSS included here will be global. The classic template - * bundles Infima by default. Infima is a CSS framework designed to - * work well for content-centric websites. - */ - -/* You can override the default Infima variables here. */ -:root { - --ifm-color-primary: #6a28ee; - --ifm-color-primary-dark: #6a28ee; - --ifm-color-primary-darker: #6a28ee; - --ifm-color-primary-darkest: #6a28ee; - --ifm-color-primary-light: #6a28ee; - --ifm-color-primary-lighter: #6a28ee; - --ifm-color-primary-lightest: #6a28ee; - --ifm-code-font-size: 95%; -} - -/* For readability concerns, you should choose a lighter palette in dark mode. */ -html[data-theme="dark"] { - --ifm-color-primary: #fff005; - --ifm-color-primary-dark: #fff005; - --ifm-color-primary-darker: #fff005; - --ifm-color-primary-darkest: #fff005; - --ifm-color-primary-light: #fff005; - --ifm-color-primary-lighter: #fff005; - --ifm-color-primary-lightest: #fff005; -} - -.docusaurus-highlight-code-line { - background-color: rgba(0, 0, 0, 0.1); - display: block; - margin: 0 calc(-1 * var(--ifm-pre-padding)); - padding: 0 var(--ifm-pre-padding); -} - -html[data-theme="dark"] .docusaurus-highlight-code-line { - background-color: rgba(0, 0, 0, 0.3); -} diff --git a/docsite/src/pages/index.module.css b/docsite/src/pages/index.module.css deleted file mode 100644 index 666feb6a..00000000 --- a/docsite/src/pages/index.module.css +++ /dev/null @@ -1,23 +0,0 @@ -/** - * CSS files with the .module.css suffix will be treated as CSS modules - * and scoped locally. 
- */ - -.heroBanner { - padding: 4rem 0; - text-align: center; - position: relative; - overflow: hidden; -} - -@media screen and (max-width: 966px) { - .heroBanner { - padding: 2rem; - } -} - -.buttons { - display: flex; - align-items: center; - justify-content: center; -} diff --git a/docsite/static/.nojekyll b/docsite/static/.nojekyll deleted file mode 100644 index e69de29b..00000000 diff --git a/docsite/static/img/fal-logo.png b/docsite/static/img/fal-logo.png deleted file mode 100644 index c6551bd2..00000000 Binary files a/docsite/static/img/fal-logo.png and /dev/null differ diff --git a/docsite/static/img/fal-logo.svg b/docsite/static/img/fal-logo.svg deleted file mode 100644 index 1129ac0a..00000000 --- a/docsite/static/img/fal-logo.svg +++ /dev/null @@ -1,50 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docsite/static/img/flow_run.png b/docsite/static/img/flow_run.png deleted file mode 100644 index 72656f00..00000000 Binary files a/docsite/static/img/flow_run.png and /dev/null differ diff --git a/docsite/static/img/load_data_graph.png b/docsite/static/img/load_data_graph.png deleted file mode 100644 index c0b45263..00000000 Binary files a/docsite/static/img/load_data_graph.png and /dev/null differ diff --git a/docsite/static/img/new-logo-no-text.png b/docsite/static/img/new-logo-no-text.png deleted file mode 100644 index b9576296..00000000 Binary files a/docsite/static/img/new-logo-no-text.png and /dev/null differ diff --git a/docsite/static/img/new-logo.png b/docsite/static/img/new-logo.png deleted file mode 100644 index d7fdda08..00000000 Binary files a/docsite/static/img/new-logo.png and /dev/null differ diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index d6a188ce..00000000 --- a/examples/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Examples - -To explore what is possible with fal, take a look at the in-depth examples below. We will be adding more examples here over time: - -- [Example 1: Send Slack notifications](slack-example/README.md) -- [Example 2: Use dbt from a Jupyter Notebook](write_jupyter_notebook/README.md) -- [Example 3: Read and parse dbt metadata](read_dbt_metadata/README.md) -- [Example 4: Extract Addresses from Text using Large Language Models](address-extraction-llm/README.md) -- [Example 5: Metric forecasting](metric-forecast/README.md) -- [Example 6: Sentiment analysis on support tickets](sentiment-analysis/README.md) -- [Example 7: Anomaly Detection](anomaly-detection/README.md) -- [Example 8: Incorporate fal in CI/CD workflow](ci_example/README.md) -- [Example 9: Send events to Datadog](datadog_event/README.md) -- [Example 10: Write dbt artifacts to GCS](write_to_gcs/README.md) -- [Example 11: Write dbt artifacts to AWS S3](write_to_aws/README.md) - -Please open a ticket if there are other examples you would like to see here! 
diff --git a/examples/anomaly-detection/2021-11-20-2322-CET.jpg b/examples/anomaly-detection/2021-11-20-2322-CET.jpg deleted file mode 100644 index 27a048a2..00000000 Binary files a/examples/anomaly-detection/2021-11-20-2322-CET.jpg and /dev/null differ diff --git a/examples/anomaly-detection/README.md b/examples/anomaly-detection/README.md deleted file mode 100644 index b272b437..00000000 --- a/examples/anomaly-detection/README.md +++ /dev/null @@ -1,310 +0,0 @@
-# Run anomaly detection on metrics and send to Slack if there are any issues
-
-In this example we will use [sklearn](https://scikit-learn.org/stable/index.html) and [slack_sdk](https://slack.dev/python-slack-sdk/) to find anomalies in a time-series numerical dataset.
-
-See the [slack example](../slack-example/README.md) for instructions on how to set up a minimal Slack bot.
-
-The model we use for this example has two columns: `y` and `ds`, where `y` is a metric measure and `ds` is a timestamp.
-
-## Meta tag
-
-In a `schema.yml` file, a meta tag should be added to the target model in order to connect it to fal:
-
-```yaml
-meta:
-  fal:
-    scripts:
-      - path_to_fal_script.py
-```
-
-## Finding anomalies on a model using DBSCAN
-
-Our model for this example is from [a dataset of Covid-19 cases in Italy](https://www.kaggle.com/sudalairajkumar/covid19-in-italy?select=covid19_italy_region.csv). This kind of dataset is great for anomaly detection, as the Covid-19 cases shot up in forms resembling waves. Having a system that notices such abnormal trends in data is crucial for any use case, including our current fight against Covid-19.
-
-The TestsPerformed column in our dataset is empty in 19% of its rows, so we get rid of it with a quick Python script using Pandas:
-
-```python
-import pandas as pd
-
-df = pd.read_csv('covid19_italy_region.csv')
-df.drop('TestsPerformed', axis=1, inplace=True)
-df.to_csv('covid19_italy_region.csv')
-```
-
-Now, to find anomalies, we need to write a Python script that will:
-
-1. Take our model
-2. Tune the hyperparameters for `DBSCAN` with a little help from our side
-3. Feed it to `DBSCAN`
-4. Plot the anomalies with the data on a Matplotlib figure and save it
-5. Send the figure to a Slack channel using a bot with a message
-
-`DBSCAN`, the clustering algorithm we are using for this example, groups together points that are close to each other. The groups are defined by the two hyperparameters `eps` for neighbourhood size and `min_samples` for the minimum number of points required in a cluster. After forming the groups, or the clusters, it labels the points that are in low-density areas defined by the hyperparameters as noise, which are the anomalous data points for our case.
-
-For the entire Python script, you can use this [link](https://github.com/fal-ai/fal_dbt_examples/blob/main/fal_scripts/anomaly_detection.py).
-
-We first create a function called `anomaly_detection`, which will take the column of metric values of our model, apply sliding windows, find anomalies, and return them in a numpy array:
-
-```python
-def anomaly_detection(X: np.array, eps: float, min_samples: int, window_size: int):
-    # Here we take the given column of values, apply sliding windows, record how many windows we have,
-    # and initialize the anomalies list, where we will record the indices of our anomalies.
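-    # (np.lib.stride_tricks.sliding_window_view returns a read-only view; for an input
-    # column of shape (n, 1), window_shape=window_size on axis 0 yields an array of shape
-    # (n - window_size + 1, 1, window_size), so size_0 below is the number of windows.)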
-    X_windowed = np.lib.stride_tricks.sliding_window_view(x=X, window_shape=window_size, axis=0)
-    (size_0, _, _) = X_windowed.shape
-    anomalies = []
-
-    # For each window, we use DBSCAN and take note of the locations of the anomalous data points,
-    # or 'noise' points to use the right term, which are marked with the value -1 in the labels array.
-    for window in range(size_0):
-        clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(X_windowed[window][0][:].reshape(-1,1))
-        labels = clustering.labels_
-        location = np.where(labels == -1)[0]
-        # If there are anomalies in our current window, we record their indices with respect to the
-        # input dataset, not the current window.
-        if location.size != 0:
-            anomalies.extend(location + window)
-
-    # Because the window slides by one index at a time, anomaly indices are repeated in the list many
-    # times; we keep only the unique values and convert them to a numpy array.
-    anomalies = np.unique(np.array(anomalies))
-
-    # And finally, we return the numpy array of indices of the anomalous data points.
-    return anomalies
-```
-
-Now, as you might have noticed, the `DBSCAN` function takes two arguments, `eps` and `min_samples`, which are passed from the `anomaly_detection` function. These are hyperparameters of `DBSCAN`; `eps` stands for epsilon, and `min_samples` is the minimum number of points that must be within epsilon distance of a given point. This example focuses on the implementation of the function, so the concepts will not be explored further; for a more detailed definition, check out [this Wikipedia article](https://en.wikipedia.org/wiki/DBSCAN).
-
-You can hand-tune these hyperparameters, but we can somewhat automate this time-consuming process and make life easier. [This Medium article](https://medium.com/@mohantysandip/a-step-by-step-approach-to-solve-dbscan-algorithms-by-tuning-its-hyper-parameters-93e693a91289) explains the tuning process; check it out to get a better grasp on the subject.
-
-The first hyperparameter to tune is `min_samples`. To accomplish our goal, we write the function `find_ideal_min_samples`:
-
-```python
-def find_ideal_min_samples(X: np.array, range_min_samples: list, window_size: int):
-    # We apply sliding windows to our column of values, as we will be using DBSCAN in each window. We also
-    # save the number of windows and initialize min_sample_scores, a numpy array in which we will accumulate
-    # the silhouette score for each window, with a separate column for each min_samples value.
-    X_windowed = np.lib.stride_tricks.sliding_window_view(x=X, window_shape=window_size, axis=0)
-    (size_0, _, _) = X_windowed.shape
-
-    min_sample_scores = np.zeros(shape=(1, len(range_min_samples)))
-
-    # Below, we compute and add our silhouette scores to the min_sample_scores array.
-    for window in range(size_0):
-        for i in range(len(range_min_samples)):
-            clustering = KMeans(n_clusters=range_min_samples[i])
-            cluster_labels = clustering.fit_predict(X_windowed[window][0][:].reshape(-1,1))
-            silhouette_avg = silhouette_score(X_windowed[window][0][:].reshape(-1,1), cluster_labels)
-            min_sample_scores[0][i] += silhouette_avg
-
-    # Here, we divide the total scores for all min_samples values to find an average for each. From those,
-    # we select the min_samples value with the highest score, which is our ideal min_samples, and we return it.
-    min_sample_scores = min_sample_scores / size_0
-    # np.argmax gives the index of the highest average score.
-    ideal_min_sample = range_min_samples[int(np.argmax(min_sample_scores))]
-
-    return ideal_min_sample
-```
-
-Next is `eps`. To find the ideal `eps` value, we have two stages: first we find a range of candidate `eps` values, then we compute silhouette scores for each and use the highest-scoring one.
-
-```python
-def find_eps_range(X: np.array, range_const: int):
-    # The first thing we need to do is to calculate the distances between each pair of consecutive samples,
-    # store them in a numpy array and sort them.
-    dists = np.zeros_like(X)
-    for i in range(X.size-1):
-        dist = np.linalg.norm(X[i]-X[i+1])
-        dists[i] = dist
-    dists = np.sort(dists, axis=0)
-
-    # Below, we produce two Matplotlib figures; one shows the entire set of distances, and the other is the
-    # same plot with its distance-value axis bounded. The PATH_PREFIX is a global constant that you need
-    # to set beforehand, which is the path of the fal_dbt_examples repo on your local machine.
-    plt.plot([i for i in range(dists.size)], dists, 'b.', markersize=4)
-    plt.xlabel('Time')
-    plt.ylabel('Value')
-    plt.savefig(f'{PATH_PREFIX}/fal_scripts/anomaly_detection_other/distance_between_samples.jpg')
-    plt.clf()
-
-    bounded_dists_i = np.where(dists<=range_const*5)[0]
-    plt.plot(bounded_dists_i, [dists[i] for i in bounded_dists_i], 'b.', markersize=4)
-    plt.xlabel('Time')
-    plt.ylabel('Value')
-    plt.savefig(f'{PATH_PREFIX}/fal_scripts/anomaly_detection_other/distance_between_samples_bounded.jpg')
-    plt.clf()
-```
-
-This is where our system needs some supervision. This step can also be automated to get a fully unsupervised anomaly detection system, but that requires more dedication; for our example, the extra work can't be justified, and the manual step provides visual insight into how the `eps` is selected.
-
-To find the ideal `eps`, we first need to plot the distance between each pair of consecutive data points. For our case, this distance is the difference in the number of daily positive Covid-19 cases between consecutive time indices. In this plot, we are looking for a curve with an elbow, or a knee, as the exact elbow point is the ideal `eps`.
-
-![An example elbow curve, credit https://en.wikipedia.org/wiki/File:DataClustering_ElbowCriterion.JPG](https://upload.wikimedia.org/wikipedia/commons/c/cd/DataClustering_ElbowCriterion.JPG)
-
-Also, we need the smallest value possible, which means that we need the curve in the smallest range possible. To bound the range, we create a constant, `range_const`, defined for convenience as a fraction of the maximum of our data's range. We use this constant to bound our range now, and later to create our `eps` range in increments of it. We try values for `range_const`, looking for the smallest constant that still produces an elbow curve in our plot. Here we have the first figure, which has not been bounded yet:
-
-![Plot of distances](distance_between_samples.jpg)
-
-As we can see, there is a curve with a really sharp bend. However, as we said before, for an optimal `eps` range we need to bound the plot.
-
-Here we have a bounded curve with a `range_const` roughly equal to 2.5% of the maximum value of the distances. Setting the `range_const` to this value often provides a good starting point. In our case, it is the plot we are looking for:
-
-![Bounded plot](distance_between_samples_bounded.jpg)
-
-If we set the `range_const` even smaller, to roughly 1% of the maximum, we get a parabola-like curve, not the sharp elbow we are looking for.
-
-![Plot with bad range_const](small_range_constant.jpg)
-
-Now all we have to do is to create a relatively small range of `eps` values to test:
-
-```python
-range_const = int(floor(np.amax(column_y)*0.025))
-range_eps = range(range_const, (range_const*5)+1, range_const)
-```
-
-Although you are free to change the `stop` and `step` of the `range` function, a look at the bounded plot with the good range constant shows that a higher `stop` won't be necessary. The `step` is really a matter of compute power; for our example, a `step` size of `range_const` is OK.
-
-Now that we have a range of `eps` values to test, it is time to see how they fare against each other. We set up a function called `find_ideal_eps` to do the job:
-
-```python
-def find_ideal_eps(X: np.array, min_samples: int, window_size: int, range_eps: list):
-    # Again, we apply sliding windows to our column of values, take note of the number of windows, and
-    # initialize an array that is essentially the same as min_sample_scores, but for epsilon values.
-    X_windowed = np.lib.stride_tricks.sliding_window_view(x=X, window_shape=window_size, axis=0)
-    (size_0, _, _) = X_windowed.shape
-
-    eps_scores = np.zeros(shape=(1, len(range_eps)))
-
-    # Here, we compute silhouette scores for each eps value and sum them over all the windows. For the
-    # clustering we use DBSCAN, which means that we will be using our recently found ideal min_samples value,
-    # as the two hyperparameters affect each other.
-    for window in range(size_0):
-        for i in range(len(range_eps)):
-            clustering = DBSCAN(eps=range_eps[i], min_samples=min_samples).fit(X_windowed[window][0][:].reshape(-1,1))
-            labels = clustering.labels_
-            if np.unique(labels).size > 1:
-                silhouette_avg = silhouette_score(X_windowed[window][0][:].reshape(-1,1), labels)
-                eps_scores[0][i] += silhouette_avg
-
-    # We calculate the average and return the ideal eps value.
-    eps_scores = eps_scores / size_0
-
-    ideal_eps = range_eps[int(np.argmax(eps_scores))]
-
-    return ideal_eps
-```
-
-In this section, we first created the `anomaly_detection` function, which takes the model and the `min_samples` and `eps` hyperparameters, then returns the indices of the anomalous points. Then we created the `find_ideal_min_samples` function, which finds the ideal `min_samples` value for `anomaly_detection`. After that, we created the two functions `find_eps_range` and `find_ideal_eps`, which first find the range containing the ideal `eps` value and then pick it based on the `min_samples` value we found before. This is all that is needed to find anomalies on a given model. However, we want our script to also plot the anomalies and send them to us via Slack. So, we need to set up two more functions to get a Slack bot to message us some information and a plot of the anomalies in our model.
-
-## Sending results via Slack
-
-The two functions we need are `plot_anomalies` and `send_slack_file`. The `send_slack_file` function is present in both the [metric forecast](metric-forecast.md) and [slack bot](slack-example.md) examples.
-
-```python
-def plot_anomalies(column_y: np.array, column_date: np.array, anomalies: np.array):
-    # Below, we plot the data and mark the anomalies in a Matplotlib figure. Then, we save the figure with
-    # a name derived from the timestamp at which the anomalies were computed, and we return the path
-    # of the figure for the Slack function.
-    fig = plt.figure(figsize=(15,5))
-    axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
-    axes.plot([column_date[i] for i in range(column_y.size)], column_y, 'b.', markersize=4)
-    axes.plot([column_date[i] for i in anomalies], [column_y[i] for i in anomalies], 'r^', markersize=4)
-    axes.set_xlabel('Time')
-    axes.set_ylabel('Value')
-    axes.legend(['Actual', 'Anomaly Found'])
-    now = str(datetime.datetime.now())
-    now = now[:10]+'-'+now[11:13]+now[14:16]
-    fpath = f'{PATH_PREFIX}/fal_scripts/anomaly_detection/{now}-{TIMEZONE}.jpg'
-    fig.savefig(fname=fpath)
-    plt.clf()
-
-    return fpath
-```
-
-```python
-def send_slack_file(file_path: str, message_text: str, channel_id: str, slack_token: str):
-    # Here, we take the filename of the saved figure and send it with a message.
-    client = WebClient(token=slack_token)
-
-    try:
-        client.files_upload(
-            channels=channel_id,
-            file=file_path,
-            title="fal.ai anomaly detection",
-            initial_comment=message_text,
-        )
-    except SlackApiError as e:
-        assert e.response["error"]
-```
-
-## Finding anomalies on a dbt model with fal
-
-At last, we have all the functions needed for anomaly detection. However, there is a very important thing missing: our model. Using fal, we can easily load our model as a DataFrame with the handy `ref()` function and the `context` object.
-
-```python
-model_df = ref(context.current_model.name).sort_values(by='ds')
-```
-
-Now that we have all our functions and our model, we can write the following to tie everything together for our script:
-
-```python
-# First, we separate the DataFrame by columns into the values, 'y', and the dates, 'ds'. Then we reshape
-# them to fit our functions.
-column_y = model_df['y'].to_numpy(dtype=float).reshape(-1,1)
-column_date = model_df['ds'].to_numpy(dtype=datetime.datetime).reshape(-1,1)
-
-# Here, we set the arbitrarily chosen window size for the sliding windows application. For the size of
-# our model, 100 is a good window size.
-window_size = 100
-
-# Here, we set an arbitrary list of min_samples values; for numerical time-series data, the list below is good.
-# With the range of possible min_samples values, we use our find_ideal_min_samples to find the min_samples for
-# our system.
-range_min_samples = [2,3,4,5]
-min_samples = find_ideal_min_samples(column_y, range_min_samples, window_size)
-
-# With our min_samples value, we find the optimal epsilon value.
-range_const = int(floor(np.amax(column_y)*0.025))
-find_eps_range(column_y, range_const)
-range_eps = range(range_const, (range_const*5)+1, range_const)
-
-eps = find_ideal_eps(column_y, min_samples, window_size, range_eps)
-
-# Finally, we find the anomalies using our anomaly_detection function and send them via Slack, while also
-# printing a handy command-line message.
-anomalies = anomaly_detection(column_y, eps, min_samples, window_size)
-fpath = plot_anomalies(column_y, column_date, anomalies)
-now = str(datetime.datetime.now())
-date = now[:10]
-hour = now[11:13]+now[14:16]
-
-print(f'anomalies: {anomalies.size}\ndata: {column_y.size}\npercentage: {(anomalies.size/column_y.size)*100}%')
-
-# This is to get around a bug, usually it is not needed.
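-# (It disables SSL certificate verification globally, which is insecure; only keep it if Slack
-# uploads fail with certificate errors on your machine.)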
-ssl._create_default_https_context = ssl._create_unverified_context
-
-message = f'fal.ai anomaly detection.\nFound {anomalies.size} anomalies.\nModel: {context.current_model.name}\nDate: {date}\nTime: {hour}-{TIMEZONE}\neps: {eps}, min_samples: {min_samples}, window_size: {window_size}'
-# The CHANNEL_ID and SLACK_TOKEN are global variables that have been set beforehand to the Slack bot and channel
-# that we are using.
-send_slack_file(fpath, message, CHANNEL_ID, SLACK_TOKEN)
-```
-
-![Slack bot message](anomaly_detection_slack_message.png)
-
-And that's it. We have our Slack bot notifying us about the anomalous data points. Now, let's take a look at the anomalies that have been spotted on our data and do a little analysis:
-
-![Plot of anomalies](2021-11-20-2322-CET.jpg)
-
-As we can see, the system finds 3 anomalies in the first wave and 1 while the wave flattens. The first wave's rise is unexpected, but mild enough that only the extreme outliers are reported as anomalous. In the second wave, we can observe a much sharper upward trend, so sharp that the system classifies most of the wave as anomalous.
-
-In this example, we can see the power of fal: it lets us run a vital, time-sensitive system directly from our dbt project.
-
-## Moving further
-
-The next step for this example would be to get the system production-ready by integrating it into your existing dbt-based pipeline. For the example to be a pipeline-ready system, the main bottleneck is finding the range for the ideal `eps`. To solve this issue, a system can be implemented that detects the elbow of the distance curve using [mathematical analysis](https://en.wikipedia.org/wiki/Mathematical_analysis); a sketch of one such heuristic follows. Other than that, the system is a modular block, so the connections in your pipeline can be configured for your specific needs. For example, our Covid-19 example could be used to trigger a PSA or to inform social-distancing rules.
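-One common heuristic is the "max distance to chord" (kneedle-style) method: take the sorted neighbour distances that `find_eps_range` plots, draw a straight line from the first point to the last, and pick the point farthest from that line. Below is a minimal sketch of this idea; the function name and the assumption that you pass it the sorted distance array are ours, not part of the example project:
-
-```python
-import numpy as np
-
-def find_elbow(distances: np.ndarray) -> float:
-    # distances: the sorted (ascending) neighbour distances used for the eps plot
-    n = distances.size
-    points = np.column_stack([np.arange(n), distances])
-    # unit vector along the chord from the first point to the last point
-    chord = (points[-1] - points[0]) / np.linalg.norm(points[-1] - points[0])
-    rel = points - points[0]
-    # perpendicular distance of every point from the chord
-    proj = np.outer(rel @ chord, chord)
-    dist_to_chord = np.linalg.norm(rel - proj, axis=1)
-    # the elbow is the point that bulges farthest away from the chord
-    return float(distances[int(np.argmax(dist_to_chord))])
-```
-
-The returned value could then seed `range_const` directly, removing the manual inspection step from the pipeline.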
diff --git a/examples/anomaly-detection/anomaly_detection_slack_message.png b/examples/anomaly-detection/anomaly_detection_slack_message.png
deleted file mode 100644
index c6a01cb3..00000000
Binary files a/examples/anomaly-detection/anomaly_detection_slack_message.png and /dev/null differ
diff --git a/examples/anomaly-detection/distance_between_samples.jpg b/examples/anomaly-detection/distance_between_samples.jpg
deleted file mode 100644
index 82cdbeae..00000000
Binary files a/examples/anomaly-detection/distance_between_samples.jpg and /dev/null differ
diff --git a/examples/anomaly-detection/distance_between_samples_bounded.jpg b/examples/anomaly-detection/distance_between_samples_bounded.jpg
deleted file mode 100644
index 9332aac0..00000000
Binary files a/examples/anomaly-detection/distance_between_samples_bounded.jpg and /dev/null differ
diff --git a/examples/anomaly-detection/small_range_constant.jpg b/examples/anomaly-detection/small_range_constant.jpg
deleted file mode 100644
index 4640df0f..00000000
Binary files a/examples/anomaly-detection/small_range_constant.jpg and /dev/null differ
diff --git a/examples/ci_example/README.md b/examples/ci_example/README.md
deleted file mode 100644
index dd411219..00000000
--- a/examples/ci_example/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# Incorporate fal into a CI/CD pipeline
-
-You can use fal as part of your CI/CD pipeline. In this example we use [GitHub Actions](https://github.com/features/actions).
-
-## Environment variables
-
-Environment variables need to be provided as [repository secrets](https://docs.github.com/en/actions/security-guides/encrypted-secrets). These will be different depending on which sources and outputs your project uses. In this example we use [BigQuery](https://cloud.google.com/bigquery/) both as a source and as an output, and so we need to provide BigQuery-specific environment variables:
-
-- `SERVICE_ACCOUNT_KEY`: contents of a `keyfile.json`. For more information see [here](https://docs.github.com/en/actions/security-guides/encrypted-secrets)
-- `GCLOUD_PROJECT`: your Google Cloud project ID, necessary for seeding
-- `BQ_DATASET`: name of the BigQuery dataset, necessary for seeding
-
-If your fal scripts require environment variables, these should also be provided as repository secrets.
-
-## Setup dbt project
-
-### `profiles.yml`
-
-Since every dbt setup has its own `profiles.yml`, usually in the `~/.dbt` directory, there can be discrepancies between different setups. We therefore need a standard `profiles.yml` that will be used in the CI/CD workflow. Here's a `profiles.yml` that is specific to this example project:
-
-```yaml
-fal_dbt_examples:
-  target: dev
-  outputs:
-    dev:
-      type: bigquery
-      method: service-account
-      keyfile: "{{ env_var('KEYFILE_DIR') }}/keyfile.json"
-      project: "{{ env_var('GCLOUD_PROJECT') }}"
-      dataset: "{{ env_var('BQ_DATASET') }}"
-      threads: 1
-      timeout_seconds: 300
-      location: US
-      priority: interactive
-```
-
-As you can see, it uses [environment variables](#environment-variables) for some properties, as it's best to keep these secret.
-
-### `requirements.txt`
-
-All the packages that are necessary to run dbt and fal should be put in `requirements.txt`. This includes any packages that are used by user-defined Python scripts. Here's an example of what `requirements.txt` might look like:
-
-```
-# Core dependencies
-dbt-core
-dbt-bigquery
-fal
-
-# Script dependencies
-slack_sdk
-datadog_api_client
-```
-
-## Action Workflow
-
-### Install dependencies
-
-The first step in our workflow is to set up Python and install the dependencies from `requirements.txt`:
-
-```yaml
-- uses: actions/setup-python@v2
-  with:
-    python-version: "3.8"
-
-- name: Install dependencies
-  run: |
-    pip install --upgrade --upgrade-strategy eager -r requirements.txt
-```
-
-### Make secret key available
-
-The `keyfile.json` data needs to be stored in a file and provided as a variable:
-
-```yaml
-- name: Setup secret key
-  env:
-    SERVICE_ACCOUNT_KEY: ${{ secrets.SERVICE_ACCOUNT_KEY }}
-  run: |
-    echo "$SERVICE_ACCOUNT_KEY" > $HOME/keyfile.json
-    ls -la $HOME/keyfile.json
-    echo 'keyfile is ready'
-```
-
-Note the use of secrets.
-
-### Setup variables and run scripts
-
-Finally, we set up the necessary environment variables and trigger the dbt and fal runs:
-
-```yaml
-- name: Run dbt and fal
-  env:
-    GCLOUD_PROJECT: ${{ secrets.GCLOUD_PROJECT }}
-    BQ_DATASET: ${{ secrets.BQ_DATASET }}
-    SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
-    SLACK_BOT_CHANNEL: ${{ secrets.SLACK_BOT_CHANNEL }}
-    DD_API_KEY: ${{ secrets.DD_API_KEY }}
-    DD_APP_KEY: ${{ secrets.DD_APP_KEY }}
-  run: |
-    export KEYFILE_DIR=$HOME
-    dbt seed
-    dbt run --profiles-dir .
-    fal run --profiles-dir .
-```
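-Putting these steps together, the whole job might look roughly like the sketch below. The workflow name, trigger, and checkout step are our assumptions; the remaining steps mirror the snippets above:
-
-```yaml
-name: dbt and fal CI
-
-on: [push]
-
-jobs:
-  run:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
-        with:
-          python-version: "3.8"
-      - name: Install dependencies
-        run: pip install --upgrade --upgrade-strategy eager -r requirements.txt
-      - name: Setup secret key
-        env:
-          SERVICE_ACCOUNT_KEY: ${{ secrets.SERVICE_ACCOUNT_KEY }}
-        run: echo "$SERVICE_ACCOUNT_KEY" > $HOME/keyfile.json
-      - name: Run dbt and fal
-        env:
-          GCLOUD_PROJECT: ${{ secrets.GCLOUD_PROJECT }}
-          BQ_DATASET: ${{ secrets.BQ_DATASET }}
-        run: |
-          export KEYFILE_DIR=$HOME
-          dbt seed
-          dbt run --profiles-dir .
-          fal run --profiles-dir .
-```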
-
-## Full example
-
-The full example of incorporating dbt and fal in a CI/CD pipeline can be found in [our example repository](https://github.com/fal-ai/fal_dbt_examples).
diff --git a/examples/datadog_event/README.md b/examples/datadog_event/README.md
deleted file mode 100644
index 75409eb3..00000000
--- a/examples/datadog_event/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Send Datadog event
-
-## Setting up Datadog WebAPI
-
-Get [API and Application keys](https://docs.datadoghq.com/account_management/api-app-keys/) for your Datadog account. Set them up as environment variables:
-
-```bash
-export DD_API_KEY=your-api-key
-export DD_APP_KEY=your-app-key
-```
-
-Install the Datadog client, if you haven't done so already:
-
-```bash
-pip install datadog_api_client
-```
-
-## Meta tag
-
-In a `schema.yml` file, within a target model, a meta tag should be added in order to connect the model to fal:
-
-```yaml
-meta:
-  fal:
-    scripts:
-      - path_to_fal_script.py
-```
-
-## Fal script
-
-Import the necessary libraries and set up the Datadog client configuration:
-
-```python
-from datadog_api_client.v1 import ApiClient, ApiException, Configuration
-from datadog_api_client.v1.api import events_api
-from datadog_api_client.v1.models import EventCreateRequest
-import os
-import time
-import io
-
-current_time = time.time()
-
-configuration = Configuration()
-configuration.api_key['apiKeyAuth'] = os.getenv("DD_API_KEY")
-configuration.api_key['appKeyAuth'] = os.getenv("DD_APP_KEY")
-```
-
-Get your model as a pandas DataFrame by using the `ref` function and the `context` variable:
-
-```python
-df = ref(context.current_model.name)
-```
-
-Prepare the event message:
-
-```python
-buf = io.StringIO()
-df.info(buf=buf)
-
-text = buf.getvalue()
-tags = ["fal"]
-
-event_body = EventCreateRequest(
-    tags=tags,
-    aggregation_key="fal",
-    title="fal - event",
-    text=text,
-    date_happened=int(current_time)
-)
-```
-
-Send the event to Datadog:
-
-```python
-with ApiClient(configuration) as api_client:
-    # Create an instance of the API class
-    events_api_instance = events_api.EventsApi(api_client)
-    try:
-        events_api_instance.create_event(event_body)
-    except ApiException as e:
-        assert e.response["error"]
-```
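-As a small variation, you could tag the event with the model's name so that events are filterable per model in Datadog. This is only a hedged tweak of the snippet above, reusing the same `EventCreateRequest` fields:
-
-```python
-# tag the event with the current model's name, in addition to the generic "fal" tag
-tags = ["fal", f"model:{context.current_model.name}"]
-
-event_body = EventCreateRequest(
-    tags=tags,
-    aggregation_key="fal",
-    title=f"fal - {context.current_model.name}",
-    text=text,
-    date_happened=int(current_time)
-)
-```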
-
-## Full example
-
-You can find the full code example [here](https://github.com/fal-ai/fal_dbt_examples/blob/main/fal_scripts/send_datadog_event.py).
diff --git a/examples/metric-forecast/README.md b/examples/metric-forecast/README.md
deleted file mode 100644
index 63df3f55..00000000
--- a/examples/metric-forecast/README.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# Run forecasts on metrics and send visualizations to Slack
-
-In this example we use [prophet](https://facebook.github.io/prophet/) and [slack_sdk](https://slack.dev/python-slack-sdk/) in order to make predictions on our models and send the resulting plots to Slack.
-
-See the [slack example](slack-example.md) for instructions on how to set up a minimal Slack bot.
-
-This example is built for a model that has two columns: `y` and `ds`, where `y` is a metric measure and `ds` is a timestamp. The metric that we look at is Agent Wait Time in minutes.
-
-## Meta tag
-
-In a `schema.yml` file, within a target model, a meta tag should be added in order to connect the model to fal:
-
-```yaml
-meta:
-  fal:
-    scripts:
-      - path_to_fal_script.py
-```
-
-## Make a forecast
-
-First, we write a function that will fit a given DataFrame and make a prediction based on that fit. Assuming we already have a DataFrame with historic data, fitting this data using prophet is done like this:
-
-```python
-from fbprophet import Prophet
-
-m = Prophet()
-m.fit(input_dataframe)
-```
-
-We need to decide how far into the future our prediction should run and then create another DataFrame that will hold that future data:
-
-```python
-n_future_days = 30
-ds = dataframe["ds"].max()
-future_dates = []
-for _ in range(n_future_days):
-    ds = ds + datetime.timedelta(days=1)
-    future_dates.append(ds)
-
-df_future = pd.DataFrame({"ds": future_dates})
-```
-
-We are ready to do a forecast:
-
-```python
-forecast = m.predict(df_future)
-```
-
-This `forecast` object can be used to make a plot and save it as a picture:
-
-```python
-from fbprophet.plot import plot_plotly
-
-fig = plot_plotly(m, forecast, xlabel="Date", ylabel="Agent Wait Time")
-fig.write_image("some_file_name.png")
-```
-
-This results in a plot like this:
-
-![Forecast plot](fal_forecast_1636707573.278499.png)
-
-Putting it all together into a function:
-
-```python
-def make_forecast(dataframe: pd.DataFrame, filename: str):
-    """Make forecast on metric data."""
-    m = Prophet()
-    m.fit(dataframe)
-
-    n_future_days = 30
-    ds = dataframe["ds"].max()
-    future_dates = []
-    for _ in range(n_future_days):
-        ds = ds + datetime.timedelta(days=1)
-        future_dates.append(ds)
-    df_future = pd.DataFrame({"ds": future_dates})
-    forecast = m.predict(df_future)
-    fig = plot_plotly(m, forecast, xlabel="Date", ylabel="Agent Wait Time")
-    fig.write_image(filename)
-    return filename
-```
-
-## Send forecast to Slack
-
-Having set up the Slack bot as outlined in our [slack example](slack-example.md), we can use `slack_sdk` to send a file to our Slack channel:
-
-```python
-def send_slack_file(
-    file_path: str, message_text: str, channel_id: str, slack_token: str
-):
-    """Send file to slack."""
-    client = WebClient(token=slack_token)
-
-    try:
-        client.files_upload(
-            channels=channel_id,
-            file=file_path,
-            title="FAL forecast",
-            initial_comment=message_text,
-        )
-    except SlackApiError as e:
-        assert e.response["error"]
-```
-
-## Running the script on a dbt model
-
-We use the `ref` function to get a DataFrame of our associated dbt model (the model that has the fal meta tag):
-
-```python
-model_df = ref(context.current_model.name)
-```
-
-Then we run the forecast and send the result to Slack:
-
-```python
-FORECAST_PREFIX = "fal_forecast_"
-CHANNEL_ID = os.getenv("SLACK_BOT_CHANNEL")
-SLACK_TOKEN = os.getenv("SLACK_BOT_TOKEN")
-
-forecast = make_forecast(
-    dataframe=model_df, filename=f"{FORECAST_PREFIX}{time.time()}.png"
-)
-message = f"Forecast for model: {context.current_model.name}"  # any message text works here
-send_slack_file(
-    file_path=forecast,
-    message_text=message,
-    channel_id=CHANNEL_ID,
-    slack_token=SLACK_TOKEN,
-)
-```
-
-## Full example
-
-You can find the full code example [here](https://github.com/fal-ai/fal_dbt_examples/blob/main/fal_scripts/forecast_slack.py).
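-As a closing tip, the future-dates loop in `make_forecast` can be written more compactly with pandas' `date_range` helper. This is only a minimal sketch under the same assumptions (a DataFrame with a `ds` timestamp column); the helper's name is ours:
-
-```python
-import datetime
-
-import pandas as pd
-
-
-def make_future_frame(dataframe: pd.DataFrame, n_future_days: int = 30) -> pd.DataFrame:
-    # start the day after the last observed timestamp
-    start = dataframe["ds"].max() + datetime.timedelta(days=1)
-    return pd.DataFrame({"ds": pd.date_range(start=start, periods=n_future_days, freq="D")})
-```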
diff --git a/examples/metric-forecast/fal_forecast_1636707573.278499.png b/examples/metric-forecast/fal_forecast_1636707573.278499.png
deleted file mode 100644
index 74abb14d..00000000
Binary files a/examples/metric-forecast/fal_forecast_1636707573.278499.png and /dev/null differ
diff --git a/examples/read_dbt_metadata/README.md b/examples/read_dbt_metadata/README.md
deleted file mode 100644
index f12dfea1..00000000
--- a/examples/read_dbt_metadata/README.md
+++ /dev/null
@@ -1,128 +0,0 @@
-## Read Dbt Model Metadata
-
-Dbt supports a special `meta` tag in configuration definitions to allow engineers to add arbitrary information to their schema.yml and sources.yml files.
-
-This tag can be used in various ways; one common example is to [assign owners to the models](https://docs.getdbt.com/reference/resource-configs/meta#designate-a-model-owner).
-
-```
-version: 2
-
-models:
-  - name: users
-    meta:
-      owner: "@alice"
-```
-
-Dbt users can take advantage of `fal-dbt` to parse the configuration under the `meta` tag and use this data for other purposes. For example, to send a Slack message to the owners of models after a dbt run completes.
-
-In this example we'll leave what to do with this data to the imagination of the reader, but go through how to parse the `meta` tag in a fal script.
-
-## Install Fal
-
-```bash
-pip install fal
-```
-
-## Create a fal script
-
-Now navigate to your dbt project, create a directory for your fal scripts and create a Python file inside that directory. [For example](https://github.com/fal-ai/fal_dbt_examples/tree/main/fal_scripts/list_owners_of_models.py), a directory named `fal_scripts` and a Python file named `list_owners_of_models.py`.
-
-In your Python file you'll write the script that parses the `meta` tag for all the models and prints the owners to the console.
-
-```
-models = list_models()
-for model in models:
-    if model.meta:
-        print(model.meta["owner"])
-```
-
-## Meta tag
-
-Fal-dbt also makes use of the meta tag to configure when this script will run. This part will be different for every use case. Currently there are two types of triggers: a script can be configured per model or globally for the whole project. In this example we want this script to run once, not for any specific model.
-
-To configure this, navigate to your `schema.yml` file (or create one if you don't have one) and add the following YAML entry.
-
-```
-fal:
-  scripts:
-    - fal_scripts/list_owners_of_models.py
-```
-
-so your `schema.yml` might look like:
-
-```yaml
-models:
-  - name: boston
-    meta:
-      owner: "@ali"
-
-  - name: los angeles
-    meta:
-      owner: "@gorkem"
-
-fal:
-  scripts:
-    - fal_scripts/list_owners_of_models.py
-```
-
-## Run fal
-
-After a successful dbt run, invoke the fal CLI.
-
-```
-dbt run
-
-# Found 4 models, 0 tests, 0 snapshots, 0 analyses, 184 macros, 0 operations, 1 seed file, 0 sources, 0 exposures
-# Completed successfully
-# Done. PASS=4 WARN=0 ERROR=0 SKIP=0 TOTAL=4
-
-fal run
-
-# ali
-# gorkem
-```
-
-## Run script per model
-
-Alternatively, instead of running this script once for the whole project, you may want to run it for a specific model.
-In that case you would have to change the script to act on a single model.
-
-```
-model_meta = context.current_model.meta
-if model_meta:
-    print(model_meta["owner"])
-```
-
-Next, modify the trigger so that the script runs with the context of the models you choose.
-
-```yaml
-models:
-  - name: boston
-    meta:
-      owner: "@ali"
-    fal:
-      scripts:
-        - fal_scripts/list_model_owner.py
-
-  - name: los angeles
-    meta:
-      owner: "@gorkem"
-    fal:
-      scripts:
-        - fal_scripts/list_model_owner.py
-```
-
-Similarly, invoke the fal CLI after a successful dbt run.
-
-```
-dbt run
-
-# Found 4 models, 0 tests, 0 snapshots, 0 analyses, 184 macros, 0 operations, 1 seed file, 0 sources, 0 exposures
-# Completed successfully
-# Done. PASS=4 WARN=0 ERROR=0 SKIP=0 TOTAL=4
-
-fal run
-
-# ali
-# gorkem
-```
diff --git a/examples/sentiment-analysis/README.md b/examples/sentiment-analysis/README.md
deleted file mode 100644
index dc7eafe0..00000000
--- a/examples/sentiment-analysis/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# Run sentiment analysis using HuggingFace and store results in your data warehouse
-
-Sentiment analysis is a powerful technique that takes advantage of NLP to understand opinions based on text. It is usually used to gauge customer satisfaction. For simplicity, we take advantage of a pretrained model and use [HuggingFace transformers for sentiment analysis](https://huggingface.co/transformers/quicktour.html).
-
-We are going to use sample data from the [Fivetran dbt-zendesk](https://github.com/fivetran/dbt_zendesk/tree/main/integration_tests/seeds) repo to classify the sentiment of fake customer support ticket reviews.
-
-## Meta tag
-
-In a `schema.yml` file, within a target model, a meta tag should be added in order to connect the model to fal:
-
-```yaml
-# models/schema.yml
-
-models:
-  - name: stg_zendesk_ticket_data
-    description: zendesk ticket data
-    config:
-      materialized: table
-    meta:
-      fal:
-        scripts:
-          - "models/zendesk_sentiment_analysis.py"
-```
-
-## Seeding data to the warehouse
-
-If you are just following this example for some practice with fal or dbt, one useful feature of dbt is to [seed CSV data to your warehouse](https://docs.getdbt.com/docs/building-a-dbt-project/seeds). Instead of adding the dummy data to your warehouse by hand, you can put the [Fivetran dbt-zendesk](https://github.com/fivetran/dbt_zendesk/tree/main/integration_tests/seeds) ticket data in the data folder of your project and run `dbt seed`. Just a friendly warning that seeding should only be used with small amounts of data.
-
-Alternatively, you can load this data into your warehouse in any way you like.
-
-## Using ref() and transformers
-
-Let's first install the transformers library from Hugging Face. Head to your terminal, activate the Python environment where you installed `fal`, and run:
-
-```
-pip install transformers pandas numpy tensorflow
-```
-
-(We are working on better dependency management; head over to this [GitHub issue](https://github.com/fal-ai/fal/issues/10) if you run into any problems with this step)
-
-Once the transformers dependency is installed, create a Python file at the location specified above in your `schema.yml` file: `models/zendesk_sentiment_analysis.py`.
-
-```py
-# models/zendesk_sentiment_analysis.py
-
-from transformers import pipeline
-import pandas as pd
-import numpy as np
-
-ticket_data = ref("stg_zendesk_ticket_data")
-ticket_descriptions = list(ticket_data.description)
-classifier = pipeline("sentiment-analysis")
-description_sentiment_analysis = classifier(ticket_descriptions)
-```
-
-## Writing the analysis results back to your warehouse
-
-To upload data back to the warehouse, we define a source to upload it to.
-We upload to a source because it may need more dbt transformations afterwards, and a source is the perfect place for that.
-
-```yaml
-# models/schema.yml [continuation]
-
-sources:
-  - name: results
-    tables:
-      - name: ticket_data_sentiment_analysis
-```
-
-Then let's organize the resulting DataFrame before uploading it:
-
-```py
-# models/zendesk_sentiment_analysis.py [continuation]
-
-rows = []
-for id, sentiment in zip(ticket_data.id, description_sentiment_analysis):
-    rows.append((int(id), sentiment["label"], sentiment["score"]))
-
-records = np.array(rows, dtype=[("id", int), ("label", "U8"), ("score", float)])
-
-sentiment_df = pd.DataFrame.from_records(records)
-```
-
-And finally, upload it with the handy `write_to_source` function:
-
-```py
-# models/zendesk_sentiment_analysis.py [continuation]
-
-print("Uploading\n", sentiment_df)
-write_to_source(sentiment_df, "results", "ticket_data_sentiment_analysis")
-```
-
-The table `ticket_data_sentiment_analysis` will be created if it's not already present; if it already exists and contains data, the new data will be appended to it.
-
-It can be used from dbt as a regular source with the usual `{{ source('results', 'ticket_data_sentiment_analysis') }}` syntax.
diff --git a/examples/slack-example/README.md b/examples/slack-example/README.md
deleted file mode 100644
index 1d438b16..00000000
--- a/examples/slack-example/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# Send a Slack message about model status
-
-You can use fal to send Slack messages.
-
-## Setting up a Slack App:
-
-### 1. Create a Slack App
-
-Follow the instructions on this page to create an organization-specific Slack app:
-
-https://slack.com/help/articles/115005265703-Create-a-bot-for-your-workspace
-
-Add the following OAuth scopes:
-
-- `channels:join`
-- `chat:write`
-- `files:write`
-- `app_mentions:read`
-- `groups:read`
-
-### 2. Install the app and get the bot token
-
-On the same "OAuth & Permissions" page, click the "Install to Workspace" button, proceed with the installation, and take note of the provided Bot User OAuth Token.
-
-### 3. Get channel ID
-
-In Slack, right-click on the channel that you want fal to publish to, click on "Open channel details" and copy the Channel ID at the bottom of the modal.
-
-### 4. Add your bot to your channel
-
-In your Slack channel, type the following message:
-
-`/add @your_bot_name`
-
-### 5. Set environment variables
-
-In a terminal, set the following two environment variables: `SLACK_BOT_TOKEN` and `SLACK_BOT_CHANNEL`. This can be done with the `export` command:
-
-```bash
-export SLACK_BOT_TOKEN=your-bot-token
-export SLACK_BOT_CHANNEL=your-target-channel
-```
-
-## Meta tag
-
-In a `schema.yml` file, within a target model, a meta tag should be added in order to connect the model to fal:
-
-```yaml
-meta:
-  fal:
-    scripts:
-      - path_to_fal_script.py
-```
-
-## Fal script
-
-This example requires [`slack_sdk`](https://github.com/slackapi/python-slack-sdk) to be installed. We will be using the `WebClient` class for sending messages to our Slack app:
-
-```python
-import os
-from slack_sdk import WebClient
-from slack_sdk.errors import SlackApiError
-
-CHANNEL_ID = os.getenv("SLACK_BOT_CHANNEL")
-SLACK_TOKEN = os.getenv("SLACK_BOT_TOKEN")
-
-client = WebClient(token=SLACK_TOKEN)
-```
-
-Fal provides a magic variable `context` that gives you access to dbt model information, such as the model name and status. We can create a message using this variable:
-
-```python
-message_text = f"Model: {context.current_model.name}. Status: {context.current_model.status}."
-```
-
-And finally, we post this message to our target Slack channel:
-
-```python
-try:
-    response = client.chat_postMessage(
-        channel=CHANNEL_ID,
-        text=message_text
-    )
-except SlackApiError as e:
-    assert e.response["error"]
-```
-
-## Full example
-
-You can find the full code example [here](https://github.com/fal-ai/fal_dbt_examples/blob/main/fal_scripts/slack.py).
diff --git a/examples/write_jupyter_notebook/README.md b/examples/write_jupyter_notebook/README.md
deleted file mode 100644
index 42863bf4..00000000
--- a/examples/write_jupyter_notebook/README.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# Use dbt from a Jupyter Notebook
-
-We also offer fal as an importable package to load in a Python environment to reference and use your dbt models and sources.
-You may want to do this to run some ad-hoc analysis on the data.
-
-We start by importing fal into the project:
-
-```py
-from fal import FalDbt
-```
-
-Then instantiate a new FalDbt project with the dbt project information:
-
-```py
-faldbt = FalDbt(profiles_dir="~/.dbt", project_dir="../my_project")
-
-faldbt.list_sources()
-# [['results', 'ticket_data_sentiment_analysis']]
-
-faldbt.list_models()
-# {
-#   'zendesk_ticket_metrics': <...>,
-#   'stg_o3values': <...>,
-#   'stg_zendesk_ticket_data': <...>,
-#   'stg_counties': <...>
-# }
-```
-
-Reference these objects as you would in a regular fal script, from the `faldbt` object:
-
-```py
-sentiments = faldbt.source('results', 'ticket_data_sentiment_analysis')
-# pandas.DataFrame
-tickets = faldbt.ref('stg_zendesk_ticket_data')
-# pandas.DataFrame
-```
-
-NOTE: You can use any other function available in the fal script runtime through the `faldbt` object.
-
-Then you can just use the data in these Pandas DataFrames to analyze and plot an interesting graph:
-
-```py
-def calc_weighted_label(row):
-    val = 1 if row['label'] == 'POSITIVE' else -1
-    return val * row['score']
-
-joined = sentiments.merge(tickets, on='id')
-joined['weighted_label'] = joined.apply(calc_weighted_label, axis=1).astype(float)
-
-from matplotlib import pyplot as plt
-
-joined.plot(y=['weighted_label'], x='created_at')
-plt.show()
-```
-
-![Weighted label plot](jupyter_notebook_weighted_label.png)
-
-## Full example
-
-You can find the full code example [here](https://github.com/fal-ai/fal_dbt_examples/blob/main/analyze_sentiment.ipynb).
diff --git a/examples/write_jupyter_notebook/jupyter_notebook_weighted_label.png b/examples/write_jupyter_notebook/jupyter_notebook_weighted_label.png
deleted file mode 100644
index 845289c5..00000000
Binary files a/examples/write_jupyter_notebook/jupyter_notebook_weighted_label.png and /dev/null differ
diff --git a/examples/write_to_aws/README.md b/examples/write_to_aws/README.md
deleted file mode 100644
index 96998c9f..00000000
--- a/examples/write_to_aws/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# Write dbt artifacts to AWS S3
-
-Dbt artifacts are files created by the dbt compiler after a run is completed. They contain information about the project, help with documentation, calculate test coverage and much more. In this example we are going to focus on two of these artifacts: `manifest.json` and `run_results.json`.
-
-[`manifest.json`](https://docs.getdbt.com/reference/artifacts/manifest-json) is a full point-in-time representation of your dbt project. This file can be used to pass [state](https://docs.getdbt.com/docs/guides/understanding-state) to a dbt run using the `--state` flag.
-
-The [`run_results.json`](https://docs.getdbt.com/reference/artifacts/run-results-json) file contains timing and status information about a completed dbt run.
-
-There might be several reasons why you might want to store dbt artifacts. The most obvious one is to use the `--state` functionality to pass the previous state back to dbt. Besides that, these artifacts can be stored in a database to be analyzed later.
-
-## Create an S3 Bucket
-
-Navigate to the [S3 console](https://s3.console.aws.amazon.com/s3/home) and create a bucket. In the screenshot below, the bucket is named `fal-example-dbt-artifacts-bucket`; pick a unique name for yourself and complete the next steps as you see fit.
-
-![AWS S3 bucket creation](s3_bucket.png)
-
-## Fal Script
-
-Now navigate to your dbt project, create a directory for your fal scripts and create a Python file inside that directory. [For example](https://github.com/fal-ai/fal_dbt_examples/tree/main/fal_scripts/upload_to_s3.py), a directory named `fal_scripts` and a Python file named `upload_to_s3.py`.
-
-In your Python file you'll write the script that uploads the dbt artifacts to S3.
-
-```python
-import os
-import boto3
-
-s3_client = boto3.client('s3')
-
-bucket_name = "fal-example-dbt-artifacts-bucket"
-manifest_source_file_name = os.path.join(context.config.target_path, "manifest.json")
-run_results_source_file_name = os.path.join(context.config.target_path, "run_results.json")
-manifest_destination_blob_name = "manifest.json"
-run_results_destination_blob_name = "run_results.json"
-
-
-s3_client.upload_file(manifest_source_file_name, bucket_name, manifest_destination_blob_name)
-s3_client.upload_file(run_results_source_file_name, bucket_name, run_results_destination_blob_name)
-```
-
-This script will use the [default credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) set in your environment for AWS; one way to do this is to set `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` as environment variables.
-
-## Meta tag
-
-Next, we need to configure when this script will run. This part will be different for every use case. Currently there are two types of triggers: a script can be configured per model or globally for the whole project. In this example we want this script to run once, not for any specific model.
-
-To configure this, navigate to your `schema.yml` file (or create one if you don't have one) and add the following YAML entry.
-
-```yaml
-fal:
-  scripts:
-    - fal_scripts/upload_to_s3.py
-```
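-To close the loop, the stored manifest can later be pulled back down and handed to dbt's `--state` flag, for example with `dbt run --select state:modified --state previous_state`. Below is a minimal sketch of the download side; the local `previous_state` directory is our assumption:
-
-```python
-import os
-import boto3
-
-s3_client = boto3.client('s3')
-bucket_name = "fal-example-dbt-artifacts-bucket"
-
-# download the previously uploaded manifest into a local state directory
-os.makedirs("previous_state", exist_ok=True)
-s3_client.download_file(bucket_name, "manifest.json", "previous_state/manifest.json")
-```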
-
-## Full example
-
-You can find the full code example [here](https://github.com/fal-ai/fal_dbt_examples/blob/main/fal_scripts).
diff --git a/examples/write_to_aws/s3_bucket.png b/examples/write_to_aws/s3_bucket.png
deleted file mode 100644
index e750081a..00000000
Binary files a/examples/write_to_aws/s3_bucket.png and /dev/null differ
diff --git a/examples/write_to_gcs/README.md b/examples/write_to_gcs/README.md
deleted file mode 100644
index 6cca2f28..00000000
--- a/examples/write_to_gcs/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# Write dbt artifacts to Google Cloud Storage
-
-Dbt artifacts are files created by the dbt compiler after a run is completed. They contain information about the project, help with documentation, calculate test coverage and much more. In this example we are going to focus on two of these artifacts: `manifest.json` and `run_results.json`.
-
-[`manifest.json`](https://docs.getdbt.com/reference/artifacts/manifest-json) is a full point-in-time representation of your dbt project. This file can be used to pass [state](https://docs.getdbt.com/docs/guides/understanding-state) to a dbt run using the `--state` flag.
-
-The [`run_results.json`](https://docs.getdbt.com/reference/artifacts/run-results-json) file contains timing and status information about a completed dbt run.
-
-There might be several reasons why you might want to store dbt artifacts. The most obvious one is to use the `--state` functionality to pass the previous state back to dbt. Besides that, these artifacts can be stored in a database to be analyzed later.
-
-This example assumes you have GCS (Google Cloud Storage) enabled in your project.
-
-## Create a GCS Bucket
-
-Navigate to the [GCS console](https://console.cloud.google.com/storage/browser) and create a bucket. In the screenshot below, the bucket is named `fal_example_dbt_artifacts_bucket`; pick a unique name for yourself and complete the next steps as you see fit.
-
-![GCS bucket creation](gcs_bucket.png)
-
-## Fal Script
-
-Now navigate to your dbt project, create a directory for your fal scripts and create a Python file inside that directory. [For example](https://github.com/fal-ai/fal_dbt_examples/tree/main/fal_scripts/upload_to_gcs.py), a directory named `fal_scripts` and a Python file named `upload_to_gcs.py`.
-
-In your Python file you'll write the script that uploads the dbt artifacts to GCS.
-
-```python
-import os
-from google.cloud import storage
-
-bucket_name = "fal_example_dbt_artifacts_bucket"
-manifest_destination_blob_name = "manifest.json"
-run_results_destination_blob_name = "run_results.json"
-
-manifest_source_file_name = os.path.join(context.config.target_path, "manifest.json")
-run_results_source_file_name = os.path.join(context.config.target_path, "run_results.json")
-
-storage_client = storage.Client()
-bucket = storage_client.bucket(bucket_name)
-manifest_blob = bucket.blob(manifest_destination_blob_name)
-run_results_blob = bucket.blob(run_results_destination_blob_name)
-
-manifest_blob.upload_from_filename(manifest_source_file_name)
-run_results_blob.upload_from_filename(run_results_source_file_name)
-```
-
-This script will use the [default credentials](https://cloud.google.com/docs/authentication/production) set in your environment for GCP.
-
-## Meta tag
-
-Next, we need to configure when this script will run. This part will be different for every use case. Currently there are two types of triggers: a script can be configured per model or globally for the whole project. In this example we want this script to run once, not for any specific model.
-
-To configure this, navigate to your `schema.yml` file (or create one if you don't have one) and add the following YAML entry.
-
-```yaml
-fal:
-  scripts:
-    - fal_scripts/upload_to_gcs.py
-```
-
-## Full example
-
-You can find the full code example [here](https://github.com/fal-ai/fal_dbt_examples/blob/main/fal_scripts).
diff --git a/examples/write_to_gcs/gcs_bucket.png b/examples/write_to_gcs/gcs_bucket.png
deleted file mode 100644
index fb36ad49..00000000
Binary files a/examples/write_to_gcs/gcs_bucket.png and /dev/null differ
diff --git a/projects/adapter/README.md b/projects/adapter/README.md
index e41438eb..8170a708 100644
--- a/projects/adapter/README.md
+++ b/projects/adapter/README.md
@@ -8,15 +8,16 @@ Starting with dbt v1.3, you can now build your dbt models in Python. This leads
 - Building forecasts
 - Building other predictive models such as classification and clustering
 
-This is fantastic! BUT, there is still one issue though! There is no Python support for Postgres.
+This is fantastic! But there is still one issue: there is no Python support for Postgres. dbt-postgres-python provides the best environment to run your Python models against Postgres!
-dbt-postgres-python provides the best environment to run your Python models that works with Postgres! With dbt-postgres-python, you can:
+## Project background
 
-- Build and test your models locally
-- Isolate each model to run in its own environment with its own dependencies
+This project is based on the [dbt-fal](https://github.com/fal-ai/dbt-fal) project. Thank you, [FAL-AI](https://github.com/fal-ai), for starting that project.
+Priorities change, and in April 2024 FAL-AI chose to archive that project and focus their efforts elsewhere.
 
-**NB**: This project is based off the archived [dbt-fal](https://github.com/fal-ai/dbt-fal) project, which initially supported other platforms. This project
-will only support the postgres adaptor. Support for the FAL CLI is dropped as well.
+My team at work used dbt-fal so that we could run Python code on Postgres. As dbt-fal was not being kept up to date with dbt,
+I chose to pick up the project and bring it up to date. As a result, this project will only support Postgres going forward.
+Support for the FAL CLI has also been dropped.
 
 ## Getting Started
@@ -39,5 +40,24 @@ jaffle_shop:
 
 Don't forget to point to your main adapter with the `db_profile` attribute. This is how the fal adapter knows how to connect to your data warehouse.
 
-### 3. `dbt run`!
+### 3. Add your Python models
+
+Add your Python models to your project just as you would your SQL models. Follow dbt's [instructions](https://docs.getdbt.com/docs/build/python-models)
+on how to do so.
+
+```python
+# a_python_model.py
+
+import ...
+
+def model(dbt, session):
+
+    my_sql_model_df = dbt.ref("my_sql_model")
+
+    final_df = ...  # stuff you can't write in SQL!
+
+    return final_df
+```
+
+### 4. `dbt run`!
 
 That is it!
It is really that simple 😊 diff --git a/projects/adapter/cli_tests/.gitignore b/projects/adapter/cli_tests/.gitignore deleted file mode 100644 index 2211df63..00000000 --- a/projects/adapter/cli_tests/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.txt diff --git a/projects/adapter/cli_tests/features/context_info.feature b/projects/adapter/cli_tests/features/context_info.feature deleted file mode 100644 index 3e46a953..00000000 --- a/projects/adapter/cli_tests/features/context_info.feature +++ /dev/null @@ -1,43 +0,0 @@ -Feature: Context object information - Background: Project Setup - Given the project 005_functions_and_variables - # To make sure all data is there for dbt stage - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - - Scenario: Get target info in post hook - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select some_model - """ - Then the following models are calculated: - | some_model | - Then the following scripts are ran: - | some_model.lists.py | some_model.context.py | some_model.execute_sql.py | - And the script some_model.context.py output file has the lines: - | target profile: fal_test | - - Scenario: Get rows affected in post hook for fal flow run - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select some_model - """ - Then the following models are calculated: - | some_model | - Then the following scripts are ran: - | some_model.lists.py | some_model.context.py | some_model.execute_sql.py | - And the script some_model.context.py output file has the lines: - | adapter response: rows affected 1 | - - Scenario: Get rows affected in post hook for fal run - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --select some_model --scripts context.py - """ - Then the following scripts are ran: - | some_model.lists.py | some_model.context.py | some_model.execute_sql.py | - And the script some_model.context.py output file has the lines: - | adapter response: rows affected 1 | - diff --git a/projects/adapter/cli_tests/features/dbt_test.feature b/projects/adapter/cli_tests/features/dbt_test.feature deleted file mode 100644 index fa90b1f5..00000000 --- a/projects/adapter/cli_tests/features/dbt_test.feature +++ /dev/null @@ -1,57 +0,0 @@ -Feature: `dbt test` awareness - Background: Project Setup - Given the project 005_functions_and_variables - - @TODO-duckdb - Scenario: Source tests are present in context - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | some_model.write_to_source_twice.py | other_model.complete_model.py | third_model.complete_model.py | - And the script some_model.write_to_source_twice.py output file has the lines: - | source results.some_source has 2 tests, source status is skipped | - | source results.other_source has 0 tests, source status is skipped | - | model some_model has 2 tests, model status is success | - | model other_model has 0 tests, model status is success | - When the following shell command is invoked: - """ - dbt test --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir 
$baseDir - """ - Then the following scripts are ran: - | some_model.write_to_source_twice.py | - And the script some_model.write_to_source_twice.py output file has the lines: - | source results.some_source has 2 tests, source status is tested | - | source results.other_source has 0 tests, source status is skipped | - | model some_model has 2 tests, model status is tested | - | model other_model has 0 tests, model status is skipped | - - Scenario: Singular tests are present in context - Given the project 002_jaffle_shop - When the following shell command is invoked: - """ - dbt seed --profiles-dir $profilesDir --project-dir $baseDir - """ - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --exclude orders_forecast+ - """ - And the following shell command is invoked: - """ - dbt test --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following shell command is invoked: - """ - python $baseDir/fal_dbt.py $baseDir $profilesDir - """ - Then the script fal_dbt.py output file has the lines: - | There are 21 tests | There are 20 generic tests | There are 1 singular tests | diff --git a/projects/adapter/cli_tests/features/environment.py b/projects/adapter/cli_tests/features/environment.py deleted file mode 100644 index 72ba61aa..00000000 --- a/projects/adapter/cli_tests/features/environment.py +++ /dev/null @@ -1,45 +0,0 @@ -from behave.configuration import Configuration -import os - - -def before_all(context): - os.environ["FAL_STATS_ENABLED"] = "false" - config: Configuration = context.config - config.setup_logging() - - -def after_scenario(context, scenario): - if hasattr(context, "temp_dir"): - context.temp_dir.cleanup() - - if hasattr(context, "added_during_tests"): - for file in context.added_during_tests: - - os.remove(file) - delattr(context, "added_during_tests") - - if hasattr(context, "exc") and context.exc: - from traceback import print_exception - - _etype, exception, _tb = context.exc - - print_exception(*context.exc) - - raise AssertionError("Should have expected exception") from exception - - -def before_tag(context, tag): - if "TODO-logging" == tag: - # print here is not captured by behave - print("WARN: should have thrown an exception (TODO-logging)") - elif "requires-conda" == tag: - # See if conda is available, and if not skip the - # current scenerio. - from fal.dbt.packages.environments.conda import get_conda_executable - - try: - executable = get_conda_executable() - except RuntimeError: - context.scenario.skip( - reason="this test requires conda, but conda is not installed." 
- ) diff --git a/projects/adapter/cli_tests/features/execute_sql_function.feature b/projects/adapter/cli_tests/features/execute_sql_function.feature deleted file mode 100644 index dc9e148e..00000000 --- a/projects/adapter/cli_tests/features/execute_sql_function.feature +++ /dev/null @@ -1,27 +0,0 @@ -Feature: `execute_sql` function - Background: Project Setup - Given the project 009_execute_sql_function - - Scenario: Use execute_sql function - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select +query_other_model.py - """ - Then the following models are calculated: - | execute_sql_model_one | execute_sql_model_two | - And the following scripts are ran: - | execute_sql_model_one.query_other_model.py | - And the script execute_sql_model_one.query_other_model.py output file has the lines: - | Model dataframe information: | RangeIndex: 1 entries, 0 to 0 | - - Scenario: Use execute_sql to run a macro - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select +run_macro.py - """ - Then the following models are calculated: - | execute_sql_model_one | execute_sql_model_two | - And the following scripts are ran: - | execute_sql_model_one.run_macro.py | - And the script execute_sql_model_one.run_macro.py output file has the lines: - | Model dataframe first row: | my_int_times_ten 10 | diff --git a/projects/adapter/cli_tests/features/flow_run.feature b/projects/adapter/cli_tests/features/flow_run.feature deleted file mode 100644 index 87aa6fb5..00000000 --- a/projects/adapter/cli_tests/features/flow_run.feature +++ /dev/null @@ -1,346 +0,0 @@ -Feature: `flow run` command - - Scenario: post hooks cannot be selected - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select check_extra.py - """ - Then no models are calculated - And no scripts are run - - Scenario: fal flow run command with selectors - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select before.py+ - """ - Then the following models are calculated: - | agent_wait_time | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | model_c | - And the following scripts are ran: - | agent_wait_time.before.py | agent_wait_time.after.py | model_c.before.py | - - Scenario: fal flow run command with selectors single model - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select before.py - """ - Then no models are calculated - And the following scripts are ran: - | agent_wait_time.before.py | model_c.before.py | - - Scenario: fal flow run command with complex selectors - Given the project 001_flow_run_with_selectors - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select +intermediate_model_3 - """ - Then the following models are calculated: - | agent_wait_time | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | model_a | model_b | model_c | - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select intermediate_model_1+ - 
""" - Then the following models are calculated: - | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | - - Scenario: fal flow run command with plus operator with number selectors - Given the project 001_flow_run_with_selectors - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select 2+intermediate_model_3 - """ - Then the following models are calculated: - | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select intermediate_model_1+1 - """ - Then the following models are calculated: - | intermediate_model_1 | intermediate_model_2 | - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select intermediate_model_1+0 - """ - Then the following models are calculated: - | intermediate_model_1 | - - Scenario: fal flow run command with selectors single model again - Given the project 001_flow_run_with_selectors - When the data is seeded - And the file $baseDir/models/new_model.sql is created with the content: - """ - select * 1 - """ - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select agent_wait_time - """ - Then the following models are calculated: - | agent_wait_time | - - Scenario: fal flow run command with state selector without state - Given the project 001_flow_run_with_selectors - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir - """ - And the file $baseDir/models/new_model.sql is created with the content: - """ - select 1 - """ - Then the following command will fail: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select state:new - """ - And no models are calculated - - Scenario: fal flow run command with state selector and with state - Given the project 001_flow_run_with_selectors - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir - """ - And state is stored in old_state - And the file $baseDir/models/new_model.sql is created with the content: - """ - select cast(NULL as numeric) AS my_float - """ - And the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select state:new --state $tempDir/old_state - """ - Then the following models are calculated: - | new_model | - - @TODO-logging - Scenario: fal flow run with an error in before - Given the project 003_runtime_errors - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select before.py - """ - Then it throws an exception RuntimeError with message 'Error in scripts' - - @TODO-logging - Scenario: fal flow run with an error in after - Given the project 003_runtime_errors - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select after.py - """ - Then it throws an exception RuntimeError with message 'Error in scripts' - - @TODO-logging - @TODO-duckdb - Scenario: fal flow run with an error in dbt run - Given the project 003_runtime_errors - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select some_model - """ - Then it throws an exception RuntimeError with message 'Error 
running dbt run' - - @TODO-logging - @TODO-duckdb - Scenario: fal flow run with an error in Python model - Given the project 003_runtime_errors - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select runtime_error_model - """ - Then it throws an exception RuntimeError with message 'Error running dbt run' - - Scenario: fal flow run command with selectors with tag selectors - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select tag:daily - """ - Then the following models are calculated: - | agent_wait_time | model_b | model_c | - - Scenario: fal flow run command with tag selector and children - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select tag:daily+ - """ - Then the following models are calculated: - | agent_wait_time | model_b | model_c | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | - And the following scripts are ran: - | agent_wait_time.after.py | - - Scenario: fal flow run post-hooks run with parent model without + - Given the project 001_flow_run_with_selectors - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select zendesk_ticket_data - """ - Then the following models are calculated: - | zendesk_ticket_data | - And the following scripts are ran: - | zendesk_ticket_data.check_extra.py | - - Scenario: fal flow run command with vars - Given the project 001_flow_run_with_selectors - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select zendesk_ticket_data - """ - Then the following models are calculated: - | zendesk_ticket_data | - And the script zendesk_ticket_data.check_extra.py output file has the lines: - | no extra_col | - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select zendesk_ticket_data --vars 'extra_col: true' - """ - Then the following models are calculated: - | zendesk_ticket_data | - And the script zendesk_ticket_data.check_extra.py output file has the lines: - | extra_col: yes | - - Scenario: fal flow run command with exclude arg - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --exclude before.py - """ - Then the following models are calculated: - | agent_wait_time | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | zendesk_ticket_data | model_b | model_a | model_c | - And the following scripts are ran: - | agent_wait_time.after.py | zendesk_ticket_data.check_extra.py | - - Scenario: fal flow run command with exclude arg with children - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --exclude before.py+ - """ - Then the following models are calculated: - | zendesk_ticket_data | model_a | model_b | - And the following scripts are not ran: - | agent_wait_time.before.py | agent_wait_time.after.py | - - Scenario: fal flow run command with exclude arg and select 
arg - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select before.py+ --exclude after.py - """ - Then the following models are calculated: - | agent_wait_time | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | model_c | - And the following scripts are not ran: - | agent_wait_time.after.py | - - Scenario: fal flow run command with select @ - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select @agent_wait_time - """ - Then the following models are calculated: - | agent_wait_time | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | model_a | model_b | model_c | - And the following scripts are ran: - | model_c.before.py | agent_wait_time.before.py | agent_wait_time.after.py | - - Scenario: fal flow run with @ in the middle - Given the project 001_flow_run_with_selectors - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select @model_c - """ - Then the following models are calculated: - | agent_wait_time | intermediate_model_1 | intermediate_model_2 | intermediate_model_3 | model_a | model_b | model_c | - And the following scripts are ran: - | model_c.before.py | agent_wait_time.before.py | - - @broken_profile - @TODO-logging - Scenario: fal flow run with target - Given the project 001_flow_run_with_selectors - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir profiles/broken --project-dir $baseDir --select zendesk_ticket_data - """ - Then it throws an exception RuntimeError with message 'Error running dbt run' - - When the following command is invoked: - """ - fal flow run --profiles-dir profiles/broken --project-dir $baseDir --select zendesk_ticket_data --target custom - """ - Then the following models are calculated: - | zendesk_ticket_data | - - @TODO-logging - Scenario: post hooks run after both successful and failed dbt models - Given the project 003_runtime_errors - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --exclude before.py - """ - Then it throws an exception RuntimeError with message 'Error running dbt run' - And the following scripts are ran: - | working_model.post_hook.py | some_model.post_hook.py | - And the script working_model.post_hook.py output file has the lines: - | Status: success | - And the script some_model.post_hook.py output file has the lines: - | Status: error | - - Scenario: fal flow run command with non-existent selectors - Given the project 001_flow_run_with_selectors - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select xxx yyy - """ - Then no models are calculated - - Scenario: fal flow run command with mixed non-existent selectors - Given the project 001_flow_run_with_selectors - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select intermediate_model_1 xxx yyy - """ - Then the following models are calculated: - | intermediate_model_1 | - - Scenario: fal flow run command with mixed non-existent selectors intersection - Given the project 
001_flow_run_with_selectors - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select intermediate_model_1,xxx yyy intermediate_model_2 - """ - Then the following models are calculated: - | intermediate_model_2 | diff --git a/projects/adapter/cli_tests/features/flow_run_middle_nodes.feature b/projects/adapter/cli_tests/features/flow_run_middle_nodes.feature deleted file mode 100644 index a7b39aa8..00000000 --- a/projects/adapter/cli_tests/features/flow_run_middle_nodes.feature +++ /dev/null @@ -1,117 +0,0 @@ -Feature: `flow run` command with py nodes in the middle - Background: Project Setup - Given the project 002_jaffle_shop - When the data is seeded - - Scenario: fal flow run command with selectors middle nodes - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select middle_1+ - """ - Then the following models are calculated: - | middle_1 | middle_2 | after_middle | - And the following scripts are ran: - | middle_2.middle_script.py | - - Scenario: fal flow run command with selectors with experimental flag - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select middle_1+ - """ - Then the following models are calculated in order: - | middle_1 | middle_2 | after_middle | - And the following scripts are ran: - | middle_2.middle_script.py | - - Scenario: fal flow run with or without experimental flag sends status information to after script updated - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select +middle_script.py - """ - Then the following scripts are ran: - | middle_2.middle_script.py | - And the script middle_2.middle_script.py output file has the lines: - | Status: success | - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select middle_script.py - """ - Then the following scripts are ran: - | middle_2.middle_script.py | - And the script middle_2.middle_script.py output file has the lines: - | Status: skipped | - - @TODO-postgres - @TODO-fal - @TODO-bigquery - @TODO-snowflake - @TODO-redshift - @TODO-duckdb - @TODO-athena - Scenario: fal flow run command with set intersection selector - Given the project 008_pure_python_models - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --exclude broken_model - """ - And the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_a+,model_c+ - """ - Then the following models are calculated in order: - | model_c.py | model_d | model_e.ipynb | - And the following scripts are ran: - | model_c.after.py | model_e.pre_hook.py | model_e.post_hook.py | model_c.post_hook.py | - - Scenario: fal flow run command with triple intersection selectors on parents - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select +customers,+orders_forecast,+orders - """ - Then the following models are calculated: - | stg_orders | stg_payments | - - Scenario: fal flow run command with intersection and union mixed - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select +customers +middle_script.py - """ - And the following command is invoked: 
- """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select stg_customers+,stg_orders+,stg_payments+ middle_script.py - """ - Then the following models are calculated: - | customers | - Then the following scripts are ran: - | middle_2.middle_script.py | customers.send_slack_message.py | - - Scenario: fal flow run command with intersection and union mixed as a single string - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select +customers +middle_script.py - """ - And the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select "stg_customers+,stg_orders+,stg_payments+ middle_script.py" - """ - Then the following models are calculated: - | customers | - Then the following scripts are ran: - | customers.send_slack_message.py | middle_2.middle_script.py | - - # This is technically an outlier case (in the following case, the actual intersection is model_b but - # due to how DBT parses command line arguments the following works like a regular union) so we are - # simply mirroring the exact behavior from DBT. - Scenario: fal flow run command with quoted union groups with a distinct intersection operator - Given the project 008_pure_python_models - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select "model_a model_b" , "model_b model_c" - """ - Then the following models are calculated in order: - | model_a | model_b | model_c.py | diff --git a/projects/adapter/cli_tests/features/flow_run_with_jaffle_shop.feature b/projects/adapter/cli_tests/features/flow_run_with_jaffle_shop.feature deleted file mode 100644 index ea9b68f7..00000000 --- a/projects/adapter/cli_tests/features/flow_run_with_jaffle_shop.feature +++ /dev/null @@ -1,24 +0,0 @@ -Feature: `flow run` command - Background: Project Setup - Given the project 002_jaffle_shop - When the data is seeded - - Scenario: jaffle_shop fal flow run command with selectors - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select load_data.py+ stg_orders stg_payments - """ - Then the following models are calculated: - | stg_customers | customers | stg_orders | stg_payments | - And the following scripts are ran: - | stg_customers.load_data.py | customers.send_slack_message.py | - - Scenario: jaffle_shop fal flow run command with selectors as a mixed single string - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select "load_data.py+ stg_orders" stg_payments - """ - Then the following models are calculated: - | stg_customers | customers | stg_orders | stg_payments | - And the following scripts are ran: - | stg_customers.load_data.py | customers.send_slack_message.py | diff --git a/projects/adapter/cli_tests/features/globals.feature b/projects/adapter/cli_tests/features/globals.feature deleted file mode 100644 index 43a1bf81..00000000 --- a/projects/adapter/cli_tests/features/globals.feature +++ /dev/null @@ -1,80 +0,0 @@ -Feature: global scripts - Scenario: fal run triggers globals by default - Given the project 004_globals - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | 
some_model.after.py | GLOBAL.after.py | - - Scenario: fal run doesn't trigger globals with select flag - Given the project 004_globals - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --select some_model - """ - Then the following scripts are ran: - | some_model.after.py | - - Scenario: fal run triggers globals with select and globals flags - Given the project 004_globals - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --select some_model --globals - """ - Then the following scripts are ran: - | some_model.after.py | GLOBAL.after.py | - - Scenario: fal run triggers globals by default with before - Given the project 004_globals - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --before - """ - Then the following scripts are ran: - | GLOBAL.before.py | GLOBAL.before_b.py | some_model.before.py | - - Scenario: --before script selection doesn't run globals - Given the project 004_globals - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --before --script fal_scripts/before.py - """ - Then the following scripts are ran: - | some_model.before.py | - - Scenario: global before scripts are run with --globals flag and script selection - Given the project 004_globals - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --before --script fal_scripts/before.py --globals - """ - Then the following scripts are ran: - | GLOBAL.before.py | some_model.before.py | - - Scenario: Fal selects global and not-global scripts - Given the project 004_globals - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --before --script fal_scripts/before.py --globals - """ - Then the following scripts are ran: - | GLOBAL.before.py | some_model.before.py | - - Scenario: fal flow run does not trigger globals - Given the project 004_globals - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | some_model.before.py | some_model.after.py | diff --git a/projects/adapter/cli_tests/features/ipynb_scripts.feature b/projects/adapter/cli_tests/features/ipynb_scripts.feature deleted file mode 100644 index 3edf9579..00000000 --- a/projects/adapter/cli_tests/features/ipynb_scripts.feature +++ /dev/null @@ -1,91 +0,0 @@ -@TODO-postgres -@TODO-fal -@TODO-bigquery -@TODO-snowflake -@TODO-redshift -@TODO-duckdb -@TODO-athena -Feature: fal works with ipynb features - - Scenario: fal flow run command for ipynb scripts - Given the project 007_ipynb_scripts - When the data is seeded - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following models are calculated: - | agent_wait_time | zendesk_ticket_data | - And the following scripts are ran: - | agent_wait_time.before.py | agent_wait_time.after.py | zendesk_ticket_data.check_extra.py | zendesk_ticket_data.my_notebook.py | - - Scenario: fal flow run command with vars - Given the project 007_ipynb_scripts - When the data is seeded - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select 
zendesk_ticket_data+ - """ - Then the following models are calculated: - | zendesk_ticket_data | - And the script zendesk_ticket_data.check_extra.py output file has the lines: - | no extra_col | - And the script zendesk_ticket_data.my_notebook.py output file has the lines: - | no extra_col | - - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select zendesk_ticket_data+ --vars 'extra_col: true' - """ - Then the following models are calculated: - | zendesk_ticket_data | - And the script zendesk_ticket_data.check_extra.py output file has the lines: - | extra_col: yes | - And the script zendesk_ticket_data.my_notebook.py output file has the lines: - | extra_col: yes | - - Scenario: fal run works - Given the project 007_ipynb_scripts - When the data is seeded - - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | agent_wait_time.after.py | zendesk_ticket_data.check_extra.py | zendesk_ticket_data.my_notebook.py | - - Scenario: fal run works with model selection - Given the project 007_ipynb_scripts - When the data is seeded - - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --models zendesk_ticket_data - """ - Then the following scripts are ran: - | zendesk_ticket_data.check_extra.py | zendesk_ticket_data.my_notebook.py | - - Scenario: fal run works with script selection - Given the project 007_ipynb_scripts - When the data is seeded - - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --script fal_scripts/notebooks/my_notebook.ipynb - """ - Then the following scripts are ran: - | zendesk_ticket_data.my_notebook.py | diff --git a/projects/adapter/cli_tests/features/main_module.feature b/projects/adapter/cli_tests/features/main_module.feature deleted file mode 100644 index 1b620c29..00000000 --- a/projects/adapter/cli_tests/features/main_module.feature +++ /dev/null @@ -1,13 +0,0 @@ -Feature: __name__ should be __main__ - Background: Project Setup - Given the project 002_jaffle_shop - - Scenario: main check should be present - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select +middle_2+ - """ - Then the following scripts are ran: - | middle_2.middle_script.py | - And the script middle_2.middle_script.py output file has the lines: - | top name: __main__ | inner name: main_check_2 | passed main if | diff --git a/projects/adapter/cli_tests/features/meta_object.feature b/projects/adapter/cli_tests/features/meta_object.feature deleted file mode 100644 index db444fe5..00000000 --- a/projects/adapter/cli_tests/features/meta_object.feature +++ /dev/null @@ -1,28 +0,0 @@ -Feature: Access to meta object from schema - Background: Project Setup - Given the project 005_functions_and_variables - - Scenario: Use meta object from models - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select some_model - """ - Then the following models are 
calculated: - | some_model | - And the following scripts are ran: - | some_model.lists.py | some_model.context.py | some_model.execute_sql.py | - And the script some_model.lists.py output file has the lines: - # only check for 1 of the lines - | model: some_model property: 1 | - - Scenario: Use meta object from sources - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select some_model - """ - Then the following models are calculated: - | some_model | - And the following scripts are ran: - | some_model.lists.py | some_model.context.py | some_model.execute_sql.py | - And the script some_model.lists.py output file has the lines: - | source: results other_source property: 4 | source: results some_source property: None | diff --git a/projects/adapter/cli_tests/features/python_nodes.feature b/projects/adapter/cli_tests/features/python_nodes.feature deleted file mode 100644 index af76db81..00000000 --- a/projects/adapter/cli_tests/features/python_nodes.feature +++ /dev/null @@ -1,65 +0,0 @@ -Feature: Python nodes - Background: Project Setup - Given the project 008_pure_python_models - - @TODO-postgres - @TODO-fal - @TODO-bigquery - @TODO-snowflake - @TODO-redshift - @TODO-duckdb - @TODO-athena - Scenario: Run a project with Python nodes - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --exclude broken_model - """ - Then the following models are calculated in order: - | model_a | model_b | model_c.py | model_d | model_e.ipynb | - Then the following scripts are ran: - | model_a.after.py | model_b.before.py | model_c.after.py | model_e.pre_hook.py | model_e.post_hook.py | model_c.post_hook.py | - - Scenario: Run a project with Python nodes only selecting the Python model - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_c - """ - Then the following models are calculated in order: - | model_c.py | - And the following scripts are not ran: - | model_c.after.py | - And the following scripts are ran: - | model_c.post_hook.py | - And the script model_c.post_hook.py output file has the lines: - | Status: success | - - @TODO-logging - Scenario: Python model post hooks run even when model script fails - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select broken_model - """ - Then the following scripts are ran: - | broken_model.post_hook.py | - And the script broken_model.post_hook.py output file has the lines: - | Status: error | - And it throws an exception RuntimeError with message 'Error in scripts (model: models/staging/broken_model.py)' - - Scenario: Run a Python node without write to model should error - Given the project 012_model_generation_error - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select no_write_model - """ - Then it throws an exception AssertionError with message 'There must be at least one write_to_model call in the Python Model' - - Scenario: Broken DBT model leads to corruption of the dependant Python model - Given the project 014_broken_dbt_models - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select get_data regular_model - """ - Then the following models are calculated: - | regular_model | get_data.py | - And the following scripts are ran: - | regular_model.post_hook.py | diff 
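The `AssertionError` scenario above pins down fal's Python-model contract: a file such as `models/staging/broken_model.py` is compiled as a model node, and the model script must call `write_to_model` at least once. A minimal sketch of such a model, with hypothetical file and column names; `ref` and `write_to_model` are used without imports because the project's scripts elsewhere in this diff rely on them being provided at runtime (the `from fal.dbt.typing import *` seen in the hook scripts only supplies stubs):

```python
import pandas as pd

# Hypothetical fal Python model (e.g. models/staging/model_c.py).
# `ref` and `write_to_model` are assumed to be injected by fal at runtime,
# so they are deliberately not imported here.
df: pd.DataFrame = ref("model_b")   # read an upstream dbt model
df["doubled"] = df["value"] * 2     # any in-Python transformation
write_to_model(df)                  # required: without at least one call,
                                    # fal raises the AssertionError
                                    # exercised in the scenario above
```

As in the `write_to_model.py` script later in this diff, the final call is what persists the DataFrame as the model's relation in the warehouse.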
--git a/projects/adapter/cli_tests/features/run.feature b/projects/adapter/cli_tests/features/run.feature deleted file mode 100644 index 2a7c9425..00000000 --- a/projects/adapter/cli_tests/features/run.feature +++ /dev/null @@ -1,149 +0,0 @@ -Feature: `run` command - Background: - Given the project 000_fal_run - When the data is seeded - - Scenario: fal run works - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir --models agent_wait_time zendesk_ticket_data - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | agent_wait_time.after.py | zendesk_ticket_data.post_hook.py | zendesk_ticket_data.post_hook2.py | - - Scenario: fal run works after selected dbt model run - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir --models agent_wait_time - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | agent_wait_time.after.py | - - Scenario: fal run works with pre-hooks - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --models zendesk_ticket_data --before - """ - Then the following scripts are ran: - | zendesk_ticket_data.pre_hook.py | - - Scenario: fal run works with model selection - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --models zendesk_ticket_data - """ - Then the following scripts are ran: - | zendesk_ticket_data.post_hook.py | zendesk_ticket_data.post_hook2.py | - - Scenario: fal run works with script selection - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir --models agent_wait_time - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --script fal_scripts/after.py - """ - Then the following scripts are ran: - | agent_wait_time.after.py | - - @TODO-redshift - # TODO: use real redshift instance for testing - Scenario: fal run is aware of source freshness - Given the project 010_source_freshness - When the following shell command is invoked: - """ - python $baseDir/load_freshness_table.py $baseDir $profilesDir - """ - And the following shell command is invoked: - """ - dbt source freshness --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --exclude '*' --globals - """ - Then the following scripts are ran: - | GLOBAL.freshness.py | - And the script GLOBAL.freshness.py output file has the lines: - | (freshness_test, freshness_table) pass | - | (freshness_test, freshness_other) runtime error | - - Scenario: fal run works after dbt test - Given the project 000_fal_run - When the data is seeded - And the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following shell command is invoked: - """ - dbt test --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | 
agent_wait_time.after.py | - - Scenario: fal run provides model aliases - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir --models agent_wait_time zendesk_ticket_data - """ - - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | agent_wait_time.after.py | zendesk_ticket_data.post_hook.py | zendesk_ticket_data.post_hook2.py | - And the script agent_wait_time.after.py output file has the lines: - | Model alias without namespace is wait_time | - - Scenario: when false script is selected, nothing runs - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir --models agent_wait_time - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --script fal_scripts/notthere.py - """ - Then no scripts are run - - Scenario: Post hooks with write_to_model will fail - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir --models some_model - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then it throws an exception Exception with message 'Error in scripts' - - Scenario: Post hooks with write_to_source will fail - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir --models some_other_model - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then it throws an exception Exception with message 'Error in scripts' - diff --git a/projects/adapter/cli_tests/features/run_with_custom_target.feature b/projects/adapter/cli_tests/features/run_with_custom_target.feature deleted file mode 100644 index 3d7af23e..00000000 --- a/projects/adapter/cli_tests/features/run_with_custom_target.feature +++ /dev/null @@ -1,53 +0,0 @@ -@broken_profile -Feature: `run` command with a custom profile target - Background: - Given the project 000_fal_run - When the data is seeded to custom target in profile directory profiles/broken - - Scenario: fal run works with custom target - When the following shell command is invoked: - """ - dbt run --profiles-dir profiles/broken --project-dir $baseDir --models agent_wait_time zendesk_ticket_data --target custom - """ - And the following command is invoked: - """ - fal run --profiles-dir profiles/broken --project-dir $baseDir - """ - Then the following scripts are ran: - | agent_wait_time.after.py | zendesk_ticket_data.post_hook.py | zendesk_ticket_data.post_hook2.py | - - Scenario: fal run works after selected dbt model run with custom target - When the following shell command is invoked: - """ - dbt run --profiles-dir profiles/broken --project-dir $baseDir --models agent_wait_time --target custom - """ - And the following command is invoked: - """ - fal run --profiles-dir profiles/broken --project-dir $baseDir - """ - Then the following scripts are ran: - | agent_wait_time.after.py | - - Scenario: fal run works with model selection with custom target - When the following shell command is invoked: - """ - dbt run --profiles-dir profiles/broken --project-dir $baseDir --target custom - """ - And the following command is invoked: - """ - fal run --profiles-dir profiles/broken --project-dir $baseDir --models zendesk_ticket_data - """ - Then the following scripts are ran: 
- | zendesk_ticket_data.post_hook.py | zendesk_ticket_data.post_hook2.py | - - Scenario: fal run works with script selection with custom target - When the following shell command is invoked: - """ - dbt run --profiles-dir profiles/broken --project-dir $baseDir --target custom --models agent_wait_time - """ - And the following command is invoked: - """ - fal run --profiles-dir profiles/broken --project-dir $baseDir --script fal_scripts/after.py - """ - Then the following scripts are ran: - | agent_wait_time.after.py | diff --git a/projects/adapter/cli_tests/features/script_path.feature b/projects/adapter/cli_tests/features/script_path.feature deleted file mode 100644 index e501b40c..00000000 --- a/projects/adapter/cli_tests/features/script_path.feature +++ /dev/null @@ -1,45 +0,0 @@ -Feature: defining script path - Background: Project Setup - Given the project 006_script_paths - When the data is seeded - - Scenario: fal flow run - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following models are calculated: - | some_model | - And the following scripts are ran: - | some_model.before.py | some_model.after.py | - - Scenario: fal flow run with cli var - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --vars "{'fal-scripts-path': 'scripts2'}" - """ - Then the following models are calculated: - | some_model | - And the following scripts are ran: - | some_model.before2.py | some_model.after2.py | - - - Scenario: fal run with before - When the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir --before - """ - Then the following scripts are ran: - | some_model.before.py | - - Scenario: fal run - When the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - And the following command is invoked: - """ - fal run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following scripts are ran: - | some_model.after.py | diff --git a/projects/adapter/cli_tests/features/steps/fal_steps.py b/projects/adapter/cli_tests/features/steps/fal_steps.py deleted file mode 100644 index c1c9bd8a..00000000 --- a/projects/adapter/cli_tests/features/steps/fal_steps.py +++ /dev/null @@ -1,431 +0,0 @@ -from functools import reduce, partial -import os -import shlex -from typing import List, Optional -from behave import * -from fal.dbt.cli import cli -import tempfile -import json -import unittest -from os.path import exists -from pathlib import Path -from datetime import datetime, timezone -import re - - -# The main distinction we can use on an artifact file to determine -# whether it was created by a Python script or a Python model is the number -# of suffixes it has. 
Models use <model_name>.txt and scripts use -# <model_name>.<script_name>.txt - -FAL_MODEL = 1 -FAL_SCRIPT = 2 - - -def temp_path(context, *paths: str): - return str(Path(context.temp_dir.name).joinpath(*paths)) - -def target_path(context, *paths: str): - return temp_path(context, "target", *paths) - -def artifact_glob(context): - return Path(temp_path(context)).glob("*.txt") - -def _command_replace(command: str, context): - return ( - command.replace("$baseDir", context.base_dir) - .replace("$profilesDir", str(_set_profiles_dir(context))) - .replace("$tempDir", temp_path(context)) - ) - -@when("the following shell command is invoked") -def run_command_step(context): - command = _command_replace(context.text, context) - os.system(command) - - -@given("the project {project}") -def set_project_folder(context, project: str): - projects_dir = _find_projects_directory() - project_path = projects_dir / project - if not project_path.exists() or not project_path.is_dir(): - extra = "" - try: - # Try to find the correct option - match = re.match("^(\\d+)_", project) - - if match: - project_number = match.group(1) - found = [r.name for r in projects_dir.glob(project_number + "_*")] - if found: - extra = "Is it " + " or ".join(found) + " ?" - finally: - raise ValueError(f"Project {project} not found. {extra}") - - context.base_dir = str(project_path) - context.temp_dir = tempfile.TemporaryDirectory() - os.environ["project_dir"] = context.base_dir - os.environ["temp_dir"] = context.temp_dir.name - os.environ["DBT_TARGET_PATH"] = target_path(context) - - -@when("the data is seeded") -def seed_data(context): - base_path = Path(context.base_dir) - profiles_dir = _set_profiles_dir(context) - os.system( - f"dbt seed --profiles-dir {profiles_dir} --project-dir {base_path} --full-refresh" - ) - - -@when("the data is seeded to {target} target in profile directory {profiles_dir}") -def seed_data_custom_target(context, target, profiles_dir): - base_path = Path(context.base_dir) - os.system( - f"dbt seed --profiles-dir {profiles_dir} --project-dir {base_path} --full-refresh --target {target}" - ) - - -@when("state is stored in {folder_name}") -def persist_state(context, folder_name): - os.system(f"mv {target_path(context)} {temp_path(context, folder_name)}") - - -@when("the file {file} is created with the content") -def add_model(context, file): - file = file.replace("$baseDir", context.base_dir) - context.added_during_tests = ( - context.added_during_tests + [file] - if hasattr(context, "added_during_tests") - else [file] - ) - parent = os.path.dirname(file) - if not exists(parent): - os.mkdir(parent) - with open(file, "w") as f: - f.write(context.text) - - -def _invoke_command(context, command: str): - _clear_all_artifacts(context) - args = _command_replace(command, context) - args_list = shlex.split(args) - - cli(args_list) - -@when("the following command is invoked") -def invoke_command(context): - context.exc = None - try: - _invoke_command(context, context.text) - except BaseException: - import sys - - context.exc = sys.exc_info() - - -@then("it throws an exception {etype} with message '{msg}'") -def invoke_command_error(context, etype: str, msg: str): - # TODO: Somehow capture logging and check the contents for exceptions - - # from behave.log_capture import LoggingCapture - # from io import StringIO - # log_cap: LoggingCapture = context.log_capture - # out_cap: StringIO = context.stdout_capture - # err_cap: StringIO = context.stderr_capture - - if context.exc: - _etype, exc, _tb = context.exc - if isinstance(exc, 
SystemExit): - if not exc.code: - # zero exit code - raise AssertionError("Should have thrown an exception") - else: - assert isinstance( - exc, eval(etype) - ), f"Invalid exception - expected {etype}, got {type(exc)}" - assert msg in str(exc), "Invalid message - expected " + msg - else: - raise AssertionError("Should have thrown an exception") - - # Clear the exception - context.exc = None - - -@then("the following command will fail") -def invoke_failing_fal_flow(context): - try: - _invoke_command(context, context.text) - assert False, "Command should have failed." - except Exception as e: - print(e) - - -@then("the following scripts are ran") -def check_script_files_exist(context): - python_scripts = _get_fal_scripts(context) - expected_scripts = list(map(_script_filename, context.table.headings)) - unittest.TestCase().assertCountEqual(python_scripts, expected_scripts) - - -@then("the following scripts are not ran") -def check_script_files_dont_exist(context): - python_scripts = set(_get_fal_scripts(context)) - expected_scripts = set(map(_script_filename, context.table.headings)) - - unexpected_runs = expected_scripts & python_scripts - if unexpected_runs: - to_report = ", ".join(unexpected_runs) - assert False, f"Script files {to_report} should NOT BE present" - - -def _clear_all_artifacts(context): - """Clear all artifacts that are left behind by Python scripts and models.""" - for artifact in artifact_glob(context): - artifact.unlink() - - -@then("the script {script} output file has the lines") -def check_file_has_lines(context, script): - filename = _script_filename(script) - with open(temp_path(context, filename)) as handle: - handle_lines = [line.strip().lower() for line in handle] - expected_lines = [line.lower() for line in context.table.headings] - for line in expected_lines: - assert line in handle_lines, f"Line '{line}' not in {handle_lines}" - - -@then("no models are calculated") -def no_models_are_run(context): - fal_results = _get_fal_results_file_name(context) - fal_results_paths = [temp_path(context, res) for res in fal_results] - for fal_result_path in fal_results_paths: - if exists(fal_result_path): - data = json.load(open(fal_result_path)) - assert ( - len(data["results"]) == 0 - ), f"results length is {len(data['results'])}" - else: - assert True - - -@then("no scripts are run") -def no_scripts_are_run(context): - results = list(artifact_glob(context)) - - assert len(results) == 0 - - -@then("the following models are calculated") -def check_model_results(context): - models = _get_all_models(context) - expected_models = list(map(_script_filename, context.table.headings)) - unittest.TestCase().assertCountEqual(models, expected_models) - - -@then("the following models are calculated in order") -def check_model_results(context): - models = _get_all_models(context) - expected_models = list(map(_script_filename, context.table.headings)) - unittest.TestCase().assertCountEqual(models, expected_models) - _verify_node_order(context) - -def _find_projects_directory(): - path = Path(__file__) - while path is not None and not (path / "projects").exists(): - path = path.parent - return (path / "projects") - -def _verify_node_order(context): - import networkx as nx - from fal.dbt import FalDbt - from fal.dbt.node_graph import NodeGraph - - fal_dbt = FalDbt( - profiles_dir=_set_profiles_dir(context), project_dir=context.base_dir - ) - node_graph = NodeGraph.from_fal_dbt(fal_dbt) - - all_nodes = _get_dated_dbt_models(context) + _get_dated_fal_artifacts( - context, FAL_SCRIPT - ) - # 
We need to normalize the suffix for Python models. - all_nodes += [ - (_as_name(name), date) - for name, date in _get_dated_fal_artifacts(context, FAL_MODEL) - ] - ordered_nodes = _unpack_dated_result(all_nodes) - - graph = node_graph.graph - ancestors, post_hooks, pre_hooks, descendants = {}, {}, {}, {} - for node, data in graph.nodes(data=True): - name = _as_name(node) - for container, filter_func in [ - (ancestors, nx.ancestors), - (descendants, nx.descendants), - ]: - container[name] = [ - _as_name(ancestor) - for ancestor in filter_func(graph, node) - if _as_name(ancestor) in ordered_nodes - ] - - for container, hook_type in [ - (pre_hooks, "pre_hook"), - (post_hooks, "post_hook"), - ]: - container[name] = [ - _script_filename(hook.path, name) - for hook in data.get(hook_type, []) - if _script_filename(hook.path, name) in ordered_nodes - ] - - assert_precedes = partial(_assert_precedes, ordered_nodes) - assert_succeeds = partial(_assert_succeeds, ordered_nodes) - for node in ordered_nodes: - # Skip all the nodes that are not part of the graph. - if node not in ancestors: - continue - - # Ancestors (and their hooks) must precede the node - for ancestor in ancestors[node]: - assert_precedes(node, *pre_hooks[ancestor]) - assert_precedes(node, ancestor) - assert_precedes(node, *post_hooks[ancestor]) - - # pre-hooks of the node will precede the node - assert_precedes(node, *pre_hooks[node]) - - # post-hooks of the node will succeed the node - assert_succeeds(node, *post_hooks[node]) - - # Descendants (and their hooks) must succeed the node - for successor in descendants[node]: - assert_succeeds(node, *pre_hooks[successor]) - assert_succeeds(node, successor) - assert_succeeds(node, *post_hooks[successor]) - - -def _assert_succeeds(nodes, node, *successors): - for successor in successors: - assert nodes.index(successor) > nodes.index( - node - ), f"{successor} must come after {node}" - - -def _assert_precedes(nodes, node, *predecessors): - for predecessor in predecessors: - assert nodes.index(predecessor) < nodes.index( - node - ), f"{predecessor} must come before {node}" - - -def _as_name(node): - # format for scripts: script.<model_name>.<position>.<script_name> - if node.startswith("script."): - _, model_name, _, script_name = node.split(".", 3) - return model_name + "." + script_name - elif node.endswith(".txt"): - return node.split(".")[-2] - else: - return node.split(".")[-1] - - -def _script_filename(script: str, model_name: Optional[str] = None): - script_name = script.replace(".ipynb", ".txt").replace(".py", ".txt") - if model_name: - script_name = model_name + "." + script_name - return script_name - - -def _get_all_models(context) -> List[str]: - """Retrieve all models (both DBT and Python).""" - all_models = _get_dated_dbt_models(context) + _get_dated_fal_artifacts( - context, FAL_MODEL - ) - return _unpack_dated_result(all_models) - - -def _get_fal_scripts(context) -> List[str]: - return _unpack_dated_result(_get_dated_fal_artifacts(context, FAL_SCRIPT)) - - -def _unpack_dated_result(dated_result) -> List[str]: - if not dated_result: - return [] - - result, _ = zip(*sorted(dated_result, key=lambda node: node[1])) - return list(result) - - -def _get_dated_dbt_models(context): - return [ - ( - result["unique_id"].split(".")[-1], - datetime.fromisoformat( - result["timing"][-1]["completed_at"].replace("Z", "+00:00") - ), - ) - for result in _load_dbt_result_file(context) - ] - - -def _get_dated_fal_artifacts(context, *kinds): - assert kinds, "Specify at least one artifact kind." 
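- # For example, a Python-model artifact like "model_d.txt" has a single - # suffix (FAL_MODEL), while a script artifact like "model_c.after.txt" has - # two (FAL_SCRIPT); `kinds` selects which suffix counts are kept below.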
- - return [ - # DBT run result files use UTC as the timezone for the timestamps, so - # we need to be careful on using the same method for the local files as well. - ( - artifact.name, - datetime.fromtimestamp(artifact.stat().st_mtime, tz=timezone.utc), - ) - for artifact in artifact_glob(context) - if len(artifact.suffixes) in kinds - ] - - -def _load_dbt_result_file(context): - - temp = target_path(context, "run_results.json") - print(f"RUN_RESULTS location is {temp}") - - with open( - target_path(context, "run_results.json") - ) as stream: - return json.load(stream)["results"] - - -def _get_fal_results_file_name(context): - target = target_path(context) - pattern = re.compile("fal_results_*.\\.json") - target_files = list(os.walk(target))[0][2] - return list(filter(lambda file: pattern.match(file), target_files)) - - -def _set_profiles_dir(context) -> Path: - # TODO: Ideally this needs to change in just one place - available_profiles = [ - "postgres", - "bigquery", - "redshift", - "snowflake", - "duckdb", - "athena", - "fal" - ] - if "profile" in context.config.userdata: - profile = context.config.userdata["profile"] - if profile not in available_profiles: - raise Exception(f"Profile {profile} is not supported") - raw_path = reduce(os.path.join, [os.getcwd(), "profiles", profile]) - path = Path(raw_path).absolute() - elif "profiles_dir" in context: - path = Path(context.profiles_dir).absolute() - else: - # Use postgres profile - path = Path(context.base_dir).parent.absolute() - - os.environ["profiles_dir"] = str(path) - return path diff --git a/projects/adapter/cli_tests/features/structured_hooks.feature b/projects/adapter/cli_tests/features/structured_hooks.feature deleted file mode 100644 index 746d1afc..00000000 --- a/projects/adapter/cli_tests/features/structured_hooks.feature +++ /dev/null @@ -1,88 +0,0 @@ -Feature: Structured Hooks - Background: Project Setup - Given the project 013_structured_hooks - - Scenario: Run a mix of structured/unstructured hooks - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_a - """ - Then the following models are calculated: - | model_a | - And the following scripts are ran: - | model_a.pre_hook_1.py | model_a.pre_hook_2.py | model_a.add.py | model_a.post_hook_1.py | model_a.post_hook_2.py | model_a.sub.py | model_a.types.py | - And the script model_a.add.py output file has the lines - | Calculation result: 5 | - And the script model_a.sub.py output file has the lines - | Calculation result: 3 | - And the script model_a.types.py output file has the lines - | Arguments: number=5, text='type', sequence=[1, 2, 3], mapping={'key': 'value'} | - - Scenario: Run isolated hooks - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_b - """ - Then the following models are calculated: - | model_b | - And the following scripts are ran: - | model_b.local_hook.py | model_b.funny_hook.py | model_b.check_imports.py | - And the script model_b.funny_hook.py output file has the lines - | PyJokes version: 0.6.0 | - - Scenario: Run isolated models - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_c model_d model_e - """ - Then the following models are calculated: - | model_c | model_d.py | model_e.py | - And the following scripts are ran: - | model_c.check_imports.py | model_d.check_imports.py | model_e.check_imports.py | 
model_e.joke_version.py | model_e.funny_hook.py | - And the script model_d.py output file has the lines - | PyJokes version: 0.5.0 | - And the script model_e.py output file has the lines - | PyJokes version: 0.6.0 | - And the script model_e.funny_hook.py output file has the lines - | PyJokes version: 0.5.0 | - And the script model_e.joke_version.py output file has the lines - | PyJokes version: 0.6.0 | - - Scenario: Run local hooks on isolated models - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_f - """ - Then the following models are calculated: - | model_f.py | - And the following scripts are ran: - | model_f.environment_type.py | model_f.environment_type_2.py | model_f.environment_type_3.py | - And the script model_f.environment_type.py output file has the lines - | Environment: local | - And the script model_f.environment_type_2.py output file has the lines - | Environment: local | - And the script model_f.environment_type_3.py output file has the lines - | Environment: venv | - - # Since conda requires an external installation step that we don't - automatically perform (at least not yet), we can't assume all testing - environments have it, so it is guarded by a tag. - # - # We will check whether conda is available at runtime, and if so, we'll - # run the test. Otherwise, we'll skip it. - @requires-conda - Scenario: Run local hooks on isolated models with conda - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_g - """ - Then the following models are calculated: - | model_g | - And the following scripts are ran: - | model_g.check_imports.py | model_g.environment_type.py | model_g.environment_type_2.py | model_g.environment_type_3.py | - And the script model_g.environment_type.py output file has the lines - | Environment: conda | - And the script model_g.environment_type_2.py output file has the lines - | Environment: venv | - And the script model_g.environment_type_3.py output file has the lines - | Environment: local | diff --git a/projects/adapter/cli_tests/features/write_to_model_function.feature b/projects/adapter/cli_tests/features/write_to_model_function.feature deleted file mode 100644 index fbe6c691..00000000 --- a/projects/adapter/cli_tests/features/write_to_model_function.feature +++ /dev/null @@ -1,60 +0,0 @@ -Feature: `write_to_model` function - Background: Project Setup - Given the project 005_functions_and_variables - - @TODO-duckdb - Scenario: Use write_to_model and write_to_source_twice function with mode overwrite - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --exclude model_with_date model_with_array - """ - Then the following models are calculated: - | other_model | some_model | third_model | - And the following scripts are ran: - | some_model.write_to_source_twice.py | some_model.context.py | some_model.lists.py | some_model.execute_sql.py | other_model.complete_model.py | third_model.complete_model.py | - And the script other_model.complete_model.py output file has the lines: - | my_int 0.0 | my_int 3.0 | size 1 | - And the script third_model.complete_model.py output file has the lines: - | my_int 0.0 | my_int 3.0 | size 1 | - And the script some_model.write_to_source_twice.py output file has the lines: - | my_float 1.2 | - - Scenario: Use write_to_model function with mode overwrite - When the following command is invoked: - """ - fal flow run 
--profiles-dir $profilesDir --project-dir $baseDir --select other_model+ --exclude some_model+ - """ - Then the following models are calculated: - | other_model | - And the following scripts are ran: - | other_model.complete_model.py | - And the script other_model.complete_model.py output file has the lines: - | my_int 0.0 | my_int 3.0 | size 1 | - - @TODO-postgres - @TODO-duckdb - @TODO-redshift - @TODO-fal - Scenario: Write a datetime to the datawarehouse - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_with_date - """ - Then the following models are calculated: - | model_with_date.py | - And the script model_with_date.py output file has the lines: - | my_datetime: 2022-01-01t14:50:59+00:00 | my_date: 2022-01-01 | my_time: 14:50:59 | - - @TODO-postgres - @TODO-duckdb - @TODO-redshift - @TODO-fal - Scenario: Write a string and int array to the datawarehouse - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --select model_with_array - """ - Then the following models are calculated: - | model_with_array.py | - And the script model_with_array.py output file has the lines: - | my_array: ['some', 'other'] | other_array: [1, 2, 3] | diff --git a/projects/adapter/cli_tests/features/write_to_source_function.feature b/projects/adapter/cli_tests/features/write_to_source_function.feature deleted file mode 100644 index a167e33c..00000000 --- a/projects/adapter/cli_tests/features/write_to_source_function.feature +++ /dev/null @@ -1,16 +0,0 @@ -Feature: `write_to_source` function - Background: Project Setup - Given the project 005_functions_and_variables - - @TODO-duckdb - Scenario: Use write_to_source function with mode append and overwrite - When the following command is invoked: - """ - fal flow run --profiles-dir $profilesDir --project-dir $baseDir --exclude model_with_date model_with_array - """ - Then the following models are calculated: - | other_model | some_model | third_model | - And the following scripts are ran: - | some_model.write_to_source_twice.py | some_model.context.py | some_model.lists.py | some_model.execute_sql.py | other_model.complete_model.py | third_model.complete_model.py | - And the script some_model.write_to_source_twice.py output file has the lines: - | source size 1 | source size 2 | diff --git a/projects/adapter/cli_tests/profiles/athena/profiles.yml b/projects/adapter/cli_tests/profiles/athena/profiles.yml deleted file mode 100644 index ead16434..00000000 --- a/projects/adapter/cli_tests/profiles/athena/profiles.yml +++ /dev/null @@ -1,15 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: integration_tests - outputs: - integration_tests: - type: athena - s3_staging_dir: "{{ env_var('ATHENA_S3_STAGING_DIR') }}" - region_name: us-east-1 - database: "{{ env_var('ATHENA_DATABASE') }}" - schema: "{{ env_var('ATHENA_SCHEMA') }}" - num_retries: 0 - # work_group: "{{ env_var('ATHENA_WORK_GROUP') }}" - # aws_profile_name: "{{ env_var('ATHENA_PROFILE') }}" diff --git a/projects/adapter/cli_tests/profiles/bigquery/profiles.yml b/projects/adapter/cli_tests/profiles/bigquery/profiles.yml deleted file mode 100644 index e68f2085..00000000 --- a/projects/adapter/cli_tests/profiles/bigquery/profiles.yml +++ /dev/null @@ -1,16 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: integration_tests - outputs: - integration_tests: - type: bigquery - method: service-account - keyfile: "{{ 
env_var('KEYFILE_DIR') }}/keyfile.json" - project: "{{ env_var('GCLOUD_PROJECT') }}" - dataset: "{{ env_var('BQ_DATASET') }}" - threads: 4 - timeout_seconds: 300 - location: US - priority: interactive diff --git a/projects/adapter/cli_tests/profiles/broken/profiles.yml b/projects/adapter/cli_tests/profiles/broken/profiles.yml deleted file mode 100644 index ffb4a37d..00000000 --- a/projects/adapter/cli_tests/profiles/broken/profiles.yml +++ /dev/null @@ -1,24 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: dev - outputs: - dev: - type: postgres - host: localhost - user: broken - password: pass - port: 5433 - dbname: test - schema: dbt_fal - threads: 4 - custom: - type: postgres - host: localhost - user: pguser - password: pass - port: 5432 - dbname: test - schema: dbt_fal - threads: 4 diff --git a/projects/adapter/cli_tests/profiles/duckdb/profiles.yml b/projects/adapter/cli_tests/profiles/duckdb/profiles.yml deleted file mode 100644 index db3434a5..00000000 --- a/projects/adapter/cli_tests/profiles/duckdb/profiles.yml +++ /dev/null @@ -1,9 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: integration_tests - outputs: - integration_tests: - type: duckdb - path: "{{ env_var('DB_PATH') }}" diff --git a/projects/adapter/cli_tests/profiles/fal/profiles.yml b/projects/adapter/cli_tests/profiles/fal/profiles.yml deleted file mode 100755 index b0257b86..00000000 --- a/projects/adapter/cli_tests/profiles/fal/profiles.yml +++ /dev/null @@ -1,18 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: fal - outputs: - fal: - type: fal - db_profile: dev - dev: - type: postgres - host: localhost - port: 5432 - user: pguser - password: pass - dbname: test - schema: dbt_fal - threads: 4 diff --git a/projects/adapter/cli_tests/profiles/postgres/profiles.yml b/projects/adapter/cli_tests/profiles/postgres/profiles.yml deleted file mode 120000 index da786181..00000000 --- a/projects/adapter/cli_tests/profiles/postgres/profiles.yml +++ /dev/null @@ -1 +0,0 @@ -../../projects/profiles.yml \ No newline at end of file diff --git a/projects/adapter/cli_tests/profiles/redshift/profiles.yml b/projects/adapter/cli_tests/profiles/redshift/profiles.yml deleted file mode 100644 index 81ba0154..00000000 --- a/projects/adapter/cli_tests/profiles/redshift/profiles.yml +++ /dev/null @@ -1,24 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: integration_tests - outputs: - dev: - type: redshift - host: localhost - port: 5432 - user: pguser - password: pass - dbname: test - schema: dbt_fal - threads: 4 - integration_tests: - type: redshift - host: "{{ env_var('RS_HOST') }}" - port: 5432 - user: "{{ env_var('RS_USER') }}" - password: "{{ env_var('RS_PASSWORD') }}" - dbname: "{{ env_var('RS_DB_NAME') }}" - schema: "{{ env_var('RS_SCHEMA') }}" - threads: 4 diff --git a/projects/adapter/cli_tests/profiles/snowflake/profiles.yml b/projects/adapter/cli_tests/profiles/snowflake/profiles.yml deleted file mode 100644 index d04fc548..00000000 --- a/projects/adapter/cli_tests/profiles/snowflake/profiles.yml +++ /dev/null @@ -1,16 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: integration_tests - outputs: - integration_tests: - type: snowflake - account: "{{ env_var('SF_ACCOUNT') }}" - user: "{{ env_var('SF_USER') }}" - password: "{{ env_var('SF_PASSWORD') }}" - role: "{{ env_var('SF_ROLE') }}" - warehouse: "{{ env_var('SF_WAREHOUSE') }}" - database: "{{ env_var('SF_DATABASE') }}" - schema: "{{ 
env_var('SF_SCHEMA') }}" - threads: 4 diff --git a/projects/adapter/cli_tests/projects/000_fal_run/README.md b/projects/adapter/cli_tests/projects/000_fal_run/README.md deleted file mode 100644 index 5b18cd2a..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/README.md +++ /dev/null @@ -1,3 +0,0 @@ -## Fal test project - -This is a test project for [fal](https://github.com/fal-ai/fal). For example dbt project that uses fal, see [here](https://github.com/fal-ai/fal_dbt_examples). diff --git a/projects/adapter/cli_tests/projects/000_fal_run/data/raw_zendesk_ticket_data.csv b/projects/adapter/cli_tests/projects/000_fal_run/data/raw_zendesk_ticket_data.csv deleted file mode 100644 index 6aa0ef13..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/data/raw_zendesk_ticket_data.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,_fivetran_synced,allow_channelback,assignee_id,brand_id,created_at,description,due_at,external_id,forum_topic_id,group_id,has_incidents,is_public,organization_id,priority,problem_id,recipient,requester_id,status,subject,submitter_id,system_client,ticket_form_id,type,updated_at,url,via_channel,via_source_from_id,via_source_from_title,via_source_rel,via_source_to_address,via_source_to_name,merged_ticket_ids,via_source_from_address,followup_ids,via_followup_source_id -1595,2020-03-20 2:32:49,FALSE,,360003529474a,2020-02-19 1:54:52,I think this is the 5th one I've purchased. I'm working on getting one in every room of my house. I ...,,,,360006965034a,FALSE,TRUE,370295712714a,,,email@email.com,396331237134a,deleted,subject1,396331237134a,,360002048693a,,2020-02-19 1:55:11,https://zendesk.com/api/v2/tickets/1595.json,web,,,,example@email.com,,[],,, -16988,2021-01-13 20:09:16,FALSE,418284131934a,360003529474,2020-12-22 0:19:23,"Love it! I’ve listened to songs I haven’t heard since childhood! I get the news, weather, informatio...",,,,360013366274,FALSE,TRUE,370469077513,,,email@email.com,1500656884401,solved,subject1,1500656884401,,360002048693,,2021-01-13 18:42:39,https://zendesk.com/api/v2/tickets/16988.json,email,,,,example@email.com,Support,[],,[], -14173,2020-11-11 20:08:45,FALSE,396371699653,360003529474,2020-10-28 12:03:02,"I sent it to my 85 year old Dad, and he talks to it constantly.",,,,360006965034,FALSE,TRUE,370321120273,,,email@email.com,424883466453,closed,subject1,424883466453,,360002048693,,2020-11-11 17:01:32,https://zendesk.com/api/v2/tickets/14173.json,email,,,,example@email.com,Support,[],,, -11071,2020-10-02 14:08:33,FALSE,,360003529474,2020-08-28 18:06:36,"I love it, wife hates it.",,,,,FALSE,TRUE,,,,email@email.com,419755385214,deleted,subject1,419755385214,,360002048693,,2020-09-02 11:01:27,https://zendesk.com/api/v2/tickets/11071.json,email,,,,X,Support,[],,, -1966,2020-03-25 20:32:24,FALSE,396315360434,360003529474,2020-02-27 6:05:08,She doesn’t always listen,,,,360006965034,FALSE,TRUE,370295721514,,,email@email.com,402813302773,closed,subject1,402813302773,,360002048693,,2020-03-25 16:03:26,https://zendesk.com/api/v2/tickets/1966.json,email,,,,example@email.com,Support,[1967],,, -11013,2020-10-02 20:08:20,FALSE,402851697393,360003529474,2020-08-27 23:09:52,I was a little nervous when I received my new Echo as I'm not really Tech savvy. 
I found it a bit in...,,,,360008376313,FALSE,TRUE,370297881854,,,email@email.com,419688934974,deleted,subject1,419688934974,,360002048693,,2020-09-02 15:53:16,https://zendesk.com/api/v2/tickets/11013.json,email,,,,X,Support,[],,, -1404,2020-03-05 4:53:46,FALSE,396371699653,360003529474,2020-02-13 21:43:58,Some major design flaws,,,,360006965034,FALSE,TRUE,370295709874,,,email@email.com,403125197514,closed,subject1,403125197514,,360002048693,,2020-02-28 1:01:57,https://zendesk.com/api/v2/tickets/1404.json,email,,,,example@email.com,Support,,,, -4721,2020-05-14 20:12:36,FALSE,396371706773,360003529474,2020-04-20 14:31:46,Huge disappointment,,,,360006965034,FALSE,TRUE,370295719414,,,email@email.com,402862357193,closed,subject1,402862357193,,360002048693,,2020-05-14 20:04:34,https://zendesk.com/api/v2/tickets/4721.json,email,,,,example@email.com,Support,[],,, -6171,2020-06-01 2:11:40,FALSE,396334400494,360003529474,2020-05-17 17:50:31,"nice hotel expensive parking got good deal stay hotel anniversary, arrived late evening ",,,,360006965034,FALSE,TRUE,370295713034,,,email@email.com,410930434074,closed,subject1,410930434074,,360002048693,,2020-05-31 23:03:46,https://zendesk.com/api/v2/tickets/6171.json,email,,,,example@email.com,Support,[],,, -6605,2020-06-10 2:10:24,FALSE,396315360434,360003529474,2020-05-26 22:29:50,Full display not working in all application.,,,,360006965034,FALSE,TRUE,370295719754,,,email@email.com,410416672973,closed,subject1,410416672973,,360002048693,,2020-06-09 23:03:49,https://zendesk.com/api/v2/tickets/6605.json,email,,,,example@email.com,Support,[],,, diff --git a/projects/adapter/cli_tests/projects/000_fal_run/data/time_series.csv b/projects/adapter/cli_tests/projects/000_fal_run/data/time_series.csv deleted file mode 100644 index d81001dc..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/data/time_series.csv +++ /dev/null @@ -1,77 +0,0 @@ -,y,ds -0,1537,2009-08-04 -1,1796,2009-09-29 -2,1804,2009-09-22 -3,1805,2009-09-23 -4,1554,2009-08-10 -5,1563,2009-08-05 -6,1564,2009-08-13 -7,1564,2009-08-09 -8,1821,2009-09-30 -9,1570,2009-08-03 -10,1572,2009-08-23 -11,1574,2009-08-24 -12,1832,2009-10-01 -13,1580,2009-08-14 -14,1589,2009-08-15 -15,1847,2009-10-03 -16,1595,2009-08-11 -17,1599,2009-08-21 -18,1600,2009-08-28 -19,1604,2009-08-22 -20,1610,2009-08-20 -21,1622,2009-08-16 -22,1624,2009-08-18 -23,1625,2009-08-27 -24,1630,2009-09-10 -25,1635,2009-08-26 -26,1638,2009-08-17 -27,1648,2009-09-08 -28,1650,2009-09-06 -29,1659,2009-08-31 -30,1662,2009-09-02 -31,1666,2009-09-04 -32,1669,2009-09-18 -33,1670,2009-08-25 -34,1417,2009-07-24 -35,1676,2009-09-11 -36,1681,2009-09-09 -37,1681,2009-09-16 -38,1683,2009-08-29 -39,1431,2009-07-23 -40,1435,2009-07-25 -41,1699,2009-09-01 -42,1700,2009-08-30 -43,1702,2009-09-24 -44,1449,2009-07-27 -45,1709,2009-09-03 -46,1459,2009-07-22 -47,1464,2009-07-28 -48,1721,2009-09-07 -49,1477,2009-07-21 -50,1734,2009-09-17 -51,1738,2009-09-05 -52,1741,2009-09-12 -53,1743,2009-10-02 -54,1747,2009-09-27 -55,1494,2009-08-06 -56,1496,2009-08-07 -57,1757,2009-09-21 -58,1757,2009-10-04 -59,1502,2009-08-12 -60,1758,2009-09-20 -61,1759,2009-09-15 -62,1504,2009-07-30 -63,1510,2009-07-29 -64,1511,2009-07-31 -65,1768,2009-09-13 -66,1768,2009-09-25 -67,1770,2009-09-14 -68,1770,2009-09-19 -69,1515,2009-08-02 -70,1520,2009-07-26 -71,1523,2009-08-08 -72,1779,2009-09-28 -73,1530,2009-08-19 -74,1531,2009-08-01 -75,1791,2009-09-26 diff --git a/projects/adapter/cli_tests/projects/000_fal_run/dbt_project.yml 
b/projects/adapter/cli_tests/projects/000_fal_run/dbt_project.yml deleted file mode 100644 index b1fb463c..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/dbt_project.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: "fal_000" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "target" -clean-targets: - - "target" - - "dbt_modules" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/after.py b/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/after.py deleted file mode 100644 index 53684105..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/after.py +++ /dev/null @@ -1,24 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name -model_alias = context.current_model.alias - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -df: pd.DataFrame = ref(model_name) -buf = io.StringIO() -df.info(buf=buf, memory_usage=False) -info = buf.getvalue() - -output = output + f"\nModel dataframe information:\n{info}" -output = output + f"\nModel alias is {model_alias}" -alias_no_namespace = model_alias.split('__ns__')[1] -output = output + f"\nModel alias without namespace is {alias_no_namespace}" -temp_dir = os.getenv("temp_dir", ".") -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".after.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/before.py b/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/before.py deleted file mode 100644 index 7471eb55..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/before.py +++ /dev/null @@ -1,21 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -df: pd.DataFrame = ref(model_name) -buf = io.StringIO() -df.info(buf=buf, memory_usage=False) -info = buf.getvalue() - -output = output + f"\nModel dataframe information:\n{info}" -temp_dir = os.getenv("temp_dir", ".") -print(temp_dir) -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".before.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/post_hook.py b/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/post_hook.py deleted file mode 100644 index 44a75943..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/post_hook.py +++ /dev/null @@ -1,22 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name -model_alias = context.current_model.alias - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -df: pd.DataFrame = ref(model_name) -buf = io.StringIO() -df.info(buf=buf, memory_usage=False) -info = buf.getvalue() - -output = output + f"\nModel dataframe information:\n{info}" -output = output + f"\nModel alias is {model_alias}" -temp_dir = os.getenv("temp_dir", ".") -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".post_hook.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git 
a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/post_hook2.py b/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/post_hook2.py deleted file mode 100644 index 57b131a0..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/post_hook2.py +++ /dev/null @@ -1,22 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name -model_alias = context.current_model.alias - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -df: pd.DataFrame = ref(model_name) -buf = io.StringIO() -df.info(buf=buf, memory_usage=False) -info = buf.getvalue() - -output = output + f"\nModel dataframe information:\n{info}" -output = output + f"\nModel alias is {model_alias}" -temp_dir = os.getenv("temp_dir", ".") -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".post_hook2.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/pre_hook.py b/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/pre_hook.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/pre_hook.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/write_to_model.py b/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/write_to_model.py deleted file mode 100644 index d5368939..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/write_to_model.py +++ /dev/null @@ -1,5 +0,0 @@ -from pandas import DataFrame - -df = DataFrame() -df["a"] = 1 -write_to_model(df) diff --git a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/write_to_source.py b/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/write_to_source.py deleted file mode 100644 index 3670c4a5..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/fal_scripts/write_to_source.py +++ /dev/null @@ -1,6 +0,0 @@ -from pandas import DataFrame -import os - -table_prefix = f"ns__{ os.environ.get('DB_NAMESPACE', '') }__ns__" - -write_to_source(DataFrame({"a": [1, 2, 3]}), "results", table_prefix + "some_source") diff --git a/projects/adapter/cli_tests/projects/000_fal_run/macros b/projects/adapter/cli_tests/projects/000_fal_run/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/000_fal_run/models/agent_wait_time.sql b/projects/adapter/cli_tests/projects/000_fal_run/models/agent_wait_time.sql deleted file mode 100644 index 722ba6e9..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/models/agent_wait_time.sql +++ /dev/null @@ -1,9 +0,0 @@ -{{ config(materialized='table', alias='wait_time') }} - -with source_data as ( - - select * from {{ ref('time_series') }} -) - -select * -from source_data diff --git a/projects/adapter/cli_tests/projects/000_fal_run/models/schema.yml b/projects/adapter/cli_tests/projects/000_fal_run/models/schema.yml deleted file mode 100644 index e0d9cef9..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/models/schema.yml +++ /dev/null @@ -1,55 +0,0 @@ -version: 2 - -sources: - - name: results - database: "{{ env_var('DBT_DATABASE', 'test') 
}}" - schema: "{{ env_var('DBT_SCHEMA', 'dbt_fal') }}" - tables: - - name: "ns__{{ env_var('DB_NAMESPACE', '') }}__ns__some_source" - -models: - - name: zendesk_ticket_data - description: zendesk ticket data - config: - materialized: table - meta: - fal: - pre-hook: - - fal_scripts/pre_hook.py - post-hook: - - fal_scripts/post_hook.py - - fal_scripts/post_hook2.py - - - name: agent_wait_time - description: Agent wait time series - config: - materialized: table - columns: - - name: y - tests: - - not_null - - name: ds - tests: - - not_null - - name: a - tests: - - unique - meta: - fal: - scripts: - before: - - fal_scripts/before.py - after: - - fal_scripts/after.py - - - name: some_model - meta: - fal: - post-hook: - - fal_scripts/write_to_model.py - - - name: some_other_model - meta: - fal: - post-hook: - - fal_scripts/write_to_source.py diff --git a/projects/adapter/cli_tests/projects/000_fal_run/models/some_model.sql b/projects/adapter/cli_tests/projects/000_fal_run/models/some_model.sql deleted file mode 100644 index a7802394..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/models/some_model.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -select 1 as my_int diff --git a/projects/adapter/cli_tests/projects/000_fal_run/models/some_other_model.sql b/projects/adapter/cli_tests/projects/000_fal_run/models/some_other_model.sql deleted file mode 100644 index a7802394..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/models/some_other_model.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -select 1 as my_int diff --git a/projects/adapter/cli_tests/projects/000_fal_run/models/zendesk_ticket_data.sql b/projects/adapter/cli_tests/projects/000_fal_run/models/zendesk_ticket_data.sql deleted file mode 100644 index f8249482..00000000 --- a/projects/adapter/cli_tests/projects/000_fal_run/models/zendesk_ticket_data.sql +++ /dev/null @@ -1,9 +0,0 @@ -{{ config(materialized='table') }} - -with source_data as ( - - select * from {{ ref('raw_zendesk_ticket_data') }} -) - -select * -from source_data diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/data/raw_zendesk_ticket_data.csv b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/data/raw_zendesk_ticket_data.csv deleted file mode 100755 index 6aa0ef13..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/data/raw_zendesk_ticket_data.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,_fivetran_synced,allow_channelback,assignee_id,brand_id,created_at,description,due_at,external_id,forum_topic_id,group_id,has_incidents,is_public,organization_id,priority,problem_id,recipient,requester_id,status,subject,submitter_id,system_client,ticket_form_id,type,updated_at,url,via_channel,via_source_from_id,via_source_from_title,via_source_rel,via_source_to_address,via_source_to_name,merged_ticket_ids,via_source_from_address,followup_ids,via_followup_source_id -1595,2020-03-20 2:32:49,FALSE,,360003529474a,2020-02-19 1:54:52,I think this is the 5th one I've purchased. I'm working on getting one in every room of my house. I ...,,,,360006965034a,FALSE,TRUE,370295712714a,,,email@email.com,396331237134a,deleted,subject1,396331237134a,,360002048693a,,2020-02-19 1:55:11,https://zendesk.com/api/v2/tickets/1595.json,web,,,,example@email.com,,[],,, -16988,2021-01-13 20:09:16,FALSE,418284131934a,360003529474,2020-12-22 0:19:23,"Love it! I’ve listened to songs I haven’t heard since childhood! 
I get the news, weather, informatio...",,,,360013366274,FALSE,TRUE,370469077513,,,email@email.com,1500656884401,solved,subject1,1500656884401,,360002048693,,2021-01-13 18:42:39,https://zendesk.com/api/v2/tickets/16988.json,email,,,,example@email.com,Support,[],,[], -14173,2020-11-11 20:08:45,FALSE,396371699653,360003529474,2020-10-28 12:03:02,"I sent it to my 85 year old Dad, and he talks to it constantly.",,,,360006965034,FALSE,TRUE,370321120273,,,email@email.com,424883466453,closed,subject1,424883466453,,360002048693,,2020-11-11 17:01:32,https://zendesk.com/api/v2/tickets/14173.json,email,,,,example@email.com,Support,[],,, -11071,2020-10-02 14:08:33,FALSE,,360003529474,2020-08-28 18:06:36,"I love it, wife hates it.",,,,,FALSE,TRUE,,,,email@email.com,419755385214,deleted,subject1,419755385214,,360002048693,,2020-09-02 11:01:27,https://zendesk.com/api/v2/tickets/11071.json,email,,,,X,Support,[],,, -1966,2020-03-25 20:32:24,FALSE,396315360434,360003529474,2020-02-27 6:05:08,She doesn’t always listen,,,,360006965034,FALSE,TRUE,370295721514,,,email@email.com,402813302773,closed,subject1,402813302773,,360002048693,,2020-03-25 16:03:26,https://zendesk.com/api/v2/tickets/1966.json,email,,,,example@email.com,Support,[1967],,, -11013,2020-10-02 20:08:20,FALSE,402851697393,360003529474,2020-08-27 23:09:52,I was a little nervous when I received my new Echo as I'm not really Tech savvy. I found it a bit in...,,,,360008376313,FALSE,TRUE,370297881854,,,email@email.com,419688934974,deleted,subject1,419688934974,,360002048693,,2020-09-02 15:53:16,https://zendesk.com/api/v2/tickets/11013.json,email,,,,X,Support,[],,, -1404,2020-03-05 4:53:46,FALSE,396371699653,360003529474,2020-02-13 21:43:58,Some major design flaws,,,,360006965034,FALSE,TRUE,370295709874,,,email@email.com,403125197514,closed,subject1,403125197514,,360002048693,,2020-02-28 1:01:57,https://zendesk.com/api/v2/tickets/1404.json,email,,,,example@email.com,Support,,,, -4721,2020-05-14 20:12:36,FALSE,396371706773,360003529474,2020-04-20 14:31:46,Huge disappointment,,,,360006965034,FALSE,TRUE,370295719414,,,email@email.com,402862357193,closed,subject1,402862357193,,360002048693,,2020-05-14 20:04:34,https://zendesk.com/api/v2/tickets/4721.json,email,,,,example@email.com,Support,[],,, -6171,2020-06-01 2:11:40,FALSE,396334400494,360003529474,2020-05-17 17:50:31,"nice hotel expensive parking got good deal stay hotel anniversary, arrived late evening ",,,,360006965034,FALSE,TRUE,370295713034,,,email@email.com,410930434074,closed,subject1,410930434074,,360002048693,,2020-05-31 23:03:46,https://zendesk.com/api/v2/tickets/6171.json,email,,,,example@email.com,Support,[],,, -6605,2020-06-10 2:10:24,FALSE,396315360434,360003529474,2020-05-26 22:29:50,Full display not working in all application.,,,,360006965034,FALSE,TRUE,370295719754,,,email@email.com,410416672973,closed,subject1,410416672973,,360002048693,,2020-06-09 23:03:49,https://zendesk.com/api/v2/tickets/6605.json,email,,,,example@email.com,Support,[],,, diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/data/time_series.csv b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/data/time_series.csv deleted file mode 100755 index d81001dc..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/data/time_series.csv +++ /dev/null @@ -1,77 +0,0 @@ -,y,ds -0,1537,2009-08-04 -1,1796,2009-09-29 -2,1804,2009-09-22 -3,1805,2009-09-23 -4,1554,2009-08-10 -5,1563,2009-08-05 -6,1564,2009-08-13 -7,1564,2009-08-09 -8,1821,2009-09-30 -9,1570,2009-08-03 
-10,1572,2009-08-23 -11,1574,2009-08-24 -12,1832,2009-10-01 -13,1580,2009-08-14 -14,1589,2009-08-15 -15,1847,2009-10-03 -16,1595,2009-08-11 -17,1599,2009-08-21 -18,1600,2009-08-28 -19,1604,2009-08-22 -20,1610,2009-08-20 -21,1622,2009-08-16 -22,1624,2009-08-18 -23,1625,2009-08-27 -24,1630,2009-09-10 -25,1635,2009-08-26 -26,1638,2009-08-17 -27,1648,2009-09-08 -28,1650,2009-09-06 -29,1659,2009-08-31 -30,1662,2009-09-02 -31,1666,2009-09-04 -32,1669,2009-09-18 -33,1670,2009-08-25 -34,1417,2009-07-24 -35,1676,2009-09-11 -36,1681,2009-09-09 -37,1681,2009-09-16 -38,1683,2009-08-29 -39,1431,2009-07-23 -40,1435,2009-07-25 -41,1699,2009-09-01 -42,1700,2009-08-30 -43,1702,2009-09-24 -44,1449,2009-07-27 -45,1709,2009-09-03 -46,1459,2009-07-22 -47,1464,2009-07-28 -48,1721,2009-09-07 -49,1477,2009-07-21 -50,1734,2009-09-17 -51,1738,2009-09-05 -52,1741,2009-09-12 -53,1743,2009-10-02 -54,1747,2009-09-27 -55,1494,2009-08-06 -56,1496,2009-08-07 -57,1757,2009-09-21 -58,1757,2009-10-04 -59,1502,2009-08-12 -60,1758,2009-09-20 -61,1759,2009-09-15 -62,1504,2009-07-30 -63,1510,2009-07-29 -64,1511,2009-07-31 -65,1768,2009-09-13 -66,1768,2009-09-25 -67,1770,2009-09-14 -68,1770,2009-09-19 -69,1515,2009-08-02 -70,1520,2009-07-26 -71,1523,2009-08-08 -72,1779,2009-09-28 -73,1530,2009-08-19 -74,1531,2009-08-01 -75,1791,2009-09-26 diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/dbt_project.yml b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/dbt_project.yml deleted file mode 100755 index 35c005f5..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/dbt_project.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: "fal_001" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models", "other_models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/after.py b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/after.py deleted file mode 100755 index 9c04ce64..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/after.py +++ /dev/null @@ -1,21 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -df: pd.DataFrame = ref(model_name) -buf = io.StringIO() -df.info(buf=buf, memory_usage=False) -info = buf.getvalue() - -output = output + f"\nModel dataframe information:\n{info}" -temp_dir = os.getenv("temp_dir", ".") -print(temp_dir) -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".after.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/before.py b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/before.py deleted file mode 100755 index a3ee69fa..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/before.py +++ /dev/null @@ -1,16 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -output = output + "\nModel dataframe name: {model_name}" -temp_dir = 
os.getenv("temp_dir", ".") - -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".before.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/check_extra.py b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/check_extra.py deleted file mode 100644 index 66e3da9b..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/fal_scripts/check_extra.py +++ /dev/null @@ -1,19 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name -df: pd.DataFrame = ref(model_name) -df.columns = df.columns.str.lower() # Snowflake has uppercase columns - - -if hasattr(df, "extra_col"): - output = f"extra_col: {df.extra_col[0]}\n" -else: - output = "no extra_col\n" - -temp_dir = os.getenv("temp_dir", ".") -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".check_extra.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/macros b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/agent_wait_time.sql b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/agent_wait_time.sql deleted file mode 100755 index 4c76dafe..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/agent_wait_time.sql +++ /dev/null @@ -1,9 +0,0 @@ -{{ config(materialized='table', tags='daily') }} - -with source_data as ( - - select y, ds from {{ ref('time_series') }} -) - -select * -from source_data diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_1.sql b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_1.sql deleted file mode 100644 index f4be8c7e..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_1.sql +++ /dev/null @@ -1,2 +0,0 @@ --- {{ ref("model_c") }} -select * from {{ ref('agent_wait_time') }} diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_2.sql b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_2.sql deleted file mode 100644 index 124058dd..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_2.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('intermediate_model_1') }} diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_3.sql b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_3.sql deleted file mode 100644 index 4a387c37..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/intermediate_model_3.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('intermediate_model_2') }} diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_a.sql b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_a.sql deleted file mode 100644 index c14585e6..00000000 --- 
a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_a.sql +++ /dev/null @@ -1 +0,0 @@ -select y from {{ ref('time_series') }} diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_b.sql b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_b.sql deleted file mode 100644 index c14585e6..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_b.sql +++ /dev/null @@ -1 +0,0 @@ -select y from {{ ref('time_series') }} diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_c.sql b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_c.sql deleted file mode 100644 index 6169d75e..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/model_c.sql +++ /dev/null @@ -1,3 +0,0 @@ --- {{ ref("model_a") }} --- {{ ref("model_b") }} -select y from {{ ref('time_series') }} diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/schema.yml b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/schema.yml deleted file mode 100755 index 4d643c08..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/schema.yml +++ /dev/null @@ -1,46 +0,0 @@ -version: 2 - -models: - - name: zendesk_ticket_data - meta: - fal: - post-hook: - - fal_scripts/check_extra.py - - - name: model_a - - - name: model_b - config: - tags: daily - - - name: model_c - config: - tags: daily - meta: - fal: - scripts: - before: - - fal_scripts/before.py - - - name: agent_wait_time - description: Agent wait time series - config: - materialized: table - tags: daily - columns: - - name: y - tests: - - not_null - - name: ds - tests: - - not_null - - name: a - tests: - - unique - meta: - fal: - scripts: - before: - - fal_scripts/before.py - after: - - fal_scripts/after.py diff --git a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/zendesk_ticket_data.sql b/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/zendesk_ticket_data.sql deleted file mode 100755 index 1454cc92..00000000 --- a/projects/adapter/cli_tests/projects/001_flow_run_with_selectors/models/zendesk_ticket_data.sql +++ /dev/null @@ -1,13 +0,0 @@ -{{ config(materialized='table') }} - -with source_data as ( - - select id,_fivetran_synced,allow_channelback,assignee_id,brand_id - {% if var('extra_col', False) %} - , 'yes' as extra_col - {% endif %} - from {{ ref('raw_zendesk_ticket_data') }} -) - -select * -from source_data diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_customers.csv b/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_customers.csv deleted file mode 100755 index b3e6747d..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_customers.csv +++ /dev/null @@ -1,101 +0,0 @@ -id,first_name,last_name -1,Michael,P. -2,Shawn,M. -3,Kathleen,P. -4,Jimmy,C. -5,Katherine,R. -6,Sarah,R. -7,Martin,M. -8,Frank,R. -9,Jennifer,F. -10,Henry,W. -11,Fred,S. -12,Amy,D. -13,Kathleen,M. -14,Steve,F. -15,Teresa,H. -16,Amanda,H. -17,Kimberly,R. -18,Johnny,K. -19,Virginia,F. -20,Anna,A. -21,Willie,H. -22,Sean,H. -23,Mildred,A. -24,David,G. -25,Victor,H. -26,Aaron,R. -27,Benjamin,B. -28,Lisa,W. -29,Benjamin,K. -30,Christina,W. -31,Jane,G. -32,Thomas,O. -33,Katherine,M. -34,Jennifer,S. -35,Sara,T. -36,Harold,O. -37,Shirley,J. -38,Dennis,J. -39,Louise,W. -40,Maria,A. -41,Gloria,C. -42,Diana,S. -43,Kelly,N. 
-44,Jane,R. -45,Scott,B. -46,Norma,C. -47,Marie,P. -48,Lillian,C. -49,Judy,N. -50,Billy,L. -51,Howard,R. -52,Laura,F. -53,Anne,B. -54,Rose,M. -55,Nicholas,R. -56,Joshua,K. -57,Paul,W. -58,Kathryn,K. -59,Adam,A. -60,Norma,W. -61,Timothy,R. -62,Elizabeth,P. -63,Edward,G. -64,David,C. -65,Brenda,W. -66,Adam,W. -67,Michael,H. -68,Jesse,E. -69,Janet,P. -70,Helen,F. -71,Gerald,C. -72,Kathryn,O. -73,Alan,B. -74,Harry,A. -75,Andrea,H. -76,Barbara,W. -77,Anne,W. -78,Harry,H. -79,Jack,R. -80,Phillip,H. -81,Shirley,H. -82,Arthur,D. -83,Virginia,R. -84,Christina,R. -85,Theresa,M. -86,Jason,C. -87,Phillip,B. -88,Adam,T. -89,Margaret,J. -90,Paul,P. -91,Todd,W. -92,Willie,O. -93,Frances,R. -94,Gregory,H. -95,Lisa,P. -96,Jacqueline,A. -97,Shirley,D. -98,Nicole,M. -99,Mary,G. -100,Jean,M. diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_orders.csv b/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_orders.csv deleted file mode 100755 index 7c2be078..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_orders.csv +++ /dev/null @@ -1,100 +0,0 @@ -id,user_id,order_date,status -1,1,2018-01-01,returned -2,3,2018-01-02,completed -3,94,2018-01-04,completed -4,50,2018-01-05,completed -5,64,2018-01-05,completed -6,54,2018-01-07,completed -7,88,2018-01-09,completed -8,2,2018-01-11,returned -9,53,2018-01-12,completed -10,7,2018-01-14,completed -11,99,2018-01-14,completed -12,59,2018-01-15,completed -13,84,2018-01-17,completed -14,40,2018-01-17,returned -15,25,2018-01-17,completed -16,39,2018-01-18,completed -17,71,2018-01-18,completed -18,64,2018-01-20,returned -19,54,2018-01-22,completed -20,20,2018-01-23,completed -21,71,2018-01-23,completed -22,86,2018-01-24,completed -23,22,2018-01-26,return_pending -24,3,2018-01-27,completed -25,51,2018-01-28,completed -26,32,2018-01-28,completed -27,94,2018-01-29,completed -28,8,2018-01-29,completed -29,57,2018-01-31,completed -30,69,2018-02-02,completed -31,16,2018-02-02,completed -32,28,2018-02-04,completed -33,42,2018-02-04,completed -34,38,2018-02-06,completed -35,80,2018-02-08,completed -36,85,2018-02-10,completed -37,1,2018-02-10,completed -38,51,2018-02-10,completed -39,26,2018-02-11,completed -40,33,2018-02-13,completed -41,99,2018-02-14,completed -42,92,2018-02-16,completed -43,31,2018-02-17,completed -44,66,2018-02-17,completed -45,22,2018-02-17,completed -46,6,2018-02-19,completed -47,50,2018-02-20,completed -48,27,2018-02-21,completed -49,35,2018-02-21,completed -50,51,2018-02-23,completed -51,71,2018-02-24,completed -52,54,2018-02-25,return_pending -53,34,2018-02-26,completed -54,54,2018-02-26,completed -55,18,2018-02-27,completed -56,79,2018-02-28,completed -57,93,2018-03-01,completed -58,22,2018-03-01,completed -59,30,2018-03-02,completed -60,12,2018-03-03,completed -61,63,2018-03-03,completed -62,57,2018-03-05,completed -63,70,2018-03-06,completed -64,13,2018-03-07,completed -65,26,2018-03-08,completed -66,36,2018-03-10,completed -67,79,2018-03-11,completed -68,53,2018-03-11,completed -69,3,2018-03-11,completed -70,8,2018-03-12,completed -71,42,2018-03-12,shipped -72,30,2018-03-14,shipped -73,19,2018-03-16,completed -74,9,2018-03-17,shipped -75,69,2018-03-18,completed -76,25,2018-03-20,completed -77,35,2018-03-21,shipped -78,90,2018-03-23,shipped -79,52,2018-03-23,shipped -80,11,2018-03-23,shipped -81,76,2018-03-23,shipped -82,46,2018-03-24,shipped -83,54,2018-03-24,shipped -84,70,2018-03-26,placed -85,47,2018-03-26,shipped -86,68,2018-03-26,placed -87,46,2018-03-27,placed -88,91,2018-03-27,shipped 
-89,21,2018-03-28,placed -90,66,2018-03-30,shipped -91,47,2018-03-31,placed -92,84,2018-04-02,placed -93,66,2018-04-03,placed -94,63,2018-04-03,placed -95,27,2018-04-04,placed -96,90,2018-04-06,placed -97,89,2018-04-07,placed -98,41,2018-04-07,placed -99,85,2018-04-09,placed diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_payments.csv b/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_payments.csv deleted file mode 100755 index a587baab..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/data/raw_payments.csv +++ /dev/null @@ -1,114 +0,0 @@ -id,order_id,payment_method,amount -1,1,credit_card,1000 -2,2,credit_card,2000 -3,3,coupon,100 -4,4,coupon,2500 -5,5,bank_transfer,1700 -6,6,credit_card,600 -7,7,credit_card,1600 -8,8,credit_card,2300 -9,9,gift_card,2300 -10,9,bank_transfer,0 -11,10,bank_transfer,2600 -12,11,credit_card,2700 -13,12,credit_card,100 -14,13,credit_card,500 -15,13,bank_transfer,1400 -16,14,bank_transfer,300 -17,15,coupon,2200 -18,16,credit_card,1000 -19,17,bank_transfer,200 -20,18,credit_card,500 -21,18,credit_card,800 -22,19,gift_card,600 -23,20,bank_transfer,1500 -24,21,credit_card,1200 -25,22,bank_transfer,800 -26,23,gift_card,2300 -27,24,coupon,2600 -28,25,bank_transfer,2000 -29,25,credit_card,2200 -30,25,coupon,1600 -31,26,credit_card,3000 -32,27,credit_card,2300 -33,28,bank_transfer,1900 -34,29,bank_transfer,1200 -35,30,credit_card,1300 -36,31,credit_card,1200 -37,32,credit_card,300 -38,33,credit_card,2200 -39,34,bank_transfer,1500 -40,35,credit_card,2900 -41,36,bank_transfer,900 -42,37,credit_card,2300 -43,38,credit_card,1500 -44,39,bank_transfer,800 -45,40,credit_card,1400 -46,41,credit_card,1700 -47,42,coupon,1700 -48,43,gift_card,1800 -49,44,gift_card,1100 -50,45,bank_transfer,500 -51,46,bank_transfer,800 -52,47,credit_card,2200 -53,48,bank_transfer,300 -54,49,credit_card,600 -55,49,credit_card,900 -56,50,credit_card,2600 -57,51,credit_card,2900 -58,51,credit_card,100 -59,52,bank_transfer,1500 -60,53,credit_card,300 -61,54,credit_card,1800 -62,54,bank_transfer,1100 -63,55,credit_card,2900 -64,56,credit_card,400 -65,57,bank_transfer,200 -66,58,coupon,1800 -67,58,gift_card,600 -68,59,gift_card,2800 -69,60,credit_card,400 -70,61,bank_transfer,1600 -71,62,gift_card,1400 -72,63,credit_card,2900 -73,64,bank_transfer,2600 -74,65,credit_card,0 -75,66,credit_card,2800 -76,67,bank_transfer,400 -77,67,credit_card,1900 -78,68,credit_card,1600 -79,69,credit_card,1900 -80,70,credit_card,2600 -81,71,credit_card,500 -82,72,credit_card,2900 -83,73,bank_transfer,300 -84,74,credit_card,3000 -85,75,credit_card,1900 -86,76,coupon,200 -87,77,credit_card,0 -88,77,bank_transfer,1900 -89,78,bank_transfer,2600 -90,79,credit_card,1800 -91,79,credit_card,900 -92,80,gift_card,300 -93,81,coupon,200 -94,82,credit_card,800 -95,83,credit_card,100 -96,84,bank_transfer,2500 -97,85,bank_transfer,1700 -98,86,coupon,2300 -99,87,gift_card,3000 -100,87,credit_card,2600 -101,88,credit_card,2900 -102,89,bank_transfer,2200 -103,90,bank_transfer,200 -104,91,credit_card,1900 -105,92,bank_transfer,1500 -106,92,coupon,200 -107,93,gift_card,2600 -108,94,coupon,700 -109,95,coupon,2400 -110,96,gift_card,1700 -111,97,bank_transfer,1400 -112,98,bank_transfer,1000 -113,99,credit_card,2400 diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/dbt_project.yml b/projects/adapter/cli_tests/projects/002_jaffle_shop/dbt_project.yml deleted file mode 100755 index 75cf8cb8..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/dbt_project.yml +++ 
/dev/null @@ -1,12 +0,0 @@ -name: "fal_002" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-scripts-path: "fal_scripts" - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_dbt.py b/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_dbt.py deleted file mode 100644 index 3112ff28..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_dbt.py +++ /dev/null @@ -1,37 +0,0 @@ -import sys -from fal.dbt import FalDbt -from _fal_testing import create_file - -from fal.dbt.integration.project import DbtGenericTest, DbtSingularTest - -project_dir = "." -if len(sys.argv) >= 2: - project_dir = sys.argv[1] -profiles_dir = "~/.dbt" -if len(sys.argv) >= 3: - profiles_dir = sys.argv[2] - -print(f"project_dir={project_dir}, profiles_dir={profiles_dir}") - -faldbt = FalDbt(project_dir=project_dir, profiles_dir=profiles_dir) -print(faldbt) -print(faldbt.tests) - -for test in faldbt.tests: - print(test) - -for source in faldbt.sources: - # NOTE: removing the namespace prefix - print(source.name, source.table_name.split('__ns__')[1], [(t.name, t.status) for t in source.tests]) - -for model in faldbt.models: - print(model.name, [(t.name, t.status) for t in model.tests]) - -output = f"There are {len(faldbt.tests)} tests\n" -output += f"There are {len([t for t in faldbt.tests if isinstance(t, DbtGenericTest)])} generic tests\n" -output += f"There are {len([t for t in faldbt.tests if isinstance(t, DbtSingularTest)])} singular tests\n" - -for test in faldbt.tests: - output += f"test {test.name} {'generic' if isinstance(test, DbtGenericTest) else 'singular'} \n" - -create_file(output, "fal_dbt.txt") diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_models/orders_forecast.py b/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_models/orders_forecast.py deleted file mode 100644 index 688dc949..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_models/orders_forecast.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Forecast and upload order data -Packages: - - prophet -""" - -import pandas as pd -from prophet import Prophet -import sqlalchemy.types as types - - -def make_forecast(dataframe: pd.DataFrame, periods: int = 30): - """Make forecast on metric data.""" - model = Prophet(daily_seasonality=False, yearly_seasonality=False) - model.fit(dataframe) - - future = model.make_future_dataframe(periods=periods) - prediction = model.predict(future) - - return model, prediction - - -def plot_forecast(model: Prophet, forecast: pd.DataFrame, filename: str): - from prophet.plot import plot_plotly - - fig = plot_plotly(model, forecast) - fig.write_image(f"{context.current_model.name}_{filename}.jpg") - - -df: pd.DataFrame = ref("orders_daily") -print(df) - -df_count = df[["order_date", "order_count"]] -df_count = df_count.rename(columns={"order_date": "ds", "order_count": "y"}) -model_count, forecast_count = make_forecast(df_count, 50) -# plot_forecast(model_count, forecast_count, "count") - -df_amount = df[["order_date", "order_amount"]] -df_amount = df_amount.rename(columns={"order_date": "ds", "order_amount": "y"}) -model_amount, forecast_amount = make_forecast(df_amount, 50) -# plot_forecast(model_amount, forecast_amount, "amount") - -joined_forecast = forecast_count.join( - forecast_amount.set_index("ds"), - on="ds", - lsuffix="_count", - 
rsuffix="_amount", -) -print(joined_forecast.dtypes) - -# HACK: have to figure out how to write dates (or datetimes) to the database -# TODO: The types.DATE did not work when testing for `dtype={"ds": types.DATE}` -joined_forecast["ds"] = joined_forecast["ds"].map(lambda x: x.strftime("%Y-%m-%d")) - -# Generates a table with a BUNCH of columns -# It will use the current model as target, no need to pass it -write_to_model(joined_forecast, mode="overwrite") diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/global_test.py b/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/global_test.py deleted file mode 100644 index 76b496de..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/global_test.py +++ /dev/null @@ -1 +0,0 @@ -print(context) diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/load_data.py b/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/load_data.py deleted file mode 100755 index ff494d03..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/load_data.py +++ /dev/null @@ -1,14 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -temp_dir = os.getenv("temp_dir", ".") -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".load_data.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/main_check_2.py b/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/main_check_2.py deleted file mode 100644 index 4856aac5..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/main_check_2.py +++ /dev/null @@ -1,3 +0,0 @@ -def main_check(output): - output += f"inner name: {__name__}\n" - return output diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/middle_script.py b/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/middle_script.py deleted file mode 100644 index c9fedca3..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/middle_script.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -from functools import reduce -from main_check_2 import main_check - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}\n" - -output += f"top name: {__name__}\n" -output = main_check(output) -if __name__ == "__main__": - output += "passed main if\n" - -temp_dir = os.getenv("temp_dir", ".") -print(temp_dir) -write_dir = open( - reduce(os.path.join, [temp_dir, model_name + ".middle_script.txt"]), "w" -) -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/send_slack_message.py b/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/send_slack_message.py deleted file mode 100755 index 258c616d..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/fal_scripts/send_slack_message.py +++ /dev/null @@ -1,23 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -df: pd.DataFrame = ref(model_name) -buf = io.StringIO() -df.info(buf=buf, memory_usage=False) -info = buf.getvalue() 
- -output = output + f"\nModel dataframe information:\n{info}" -temp_dir = os.getenv("temp_dir", ".") -print(temp_dir) -write_dir = open( - reduce(os.path.join, [temp_dir, model_name + ".send_slack_message.txt"]), "w" -) -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/macros b/projects/adapter/cli_tests/projects/002_jaffle_shop/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/after_middle.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/after_middle.sql deleted file mode 100644 index 7d36634b..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/after_middle.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref ("middle_2") }} diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/customers.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/customers.sql deleted file mode 100755 index 016a004f..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/customers.sql +++ /dev/null @@ -1,69 +0,0 @@ -with customers as ( - - select * from {{ ref('stg_customers') }} - -), - -orders as ( - - select * from {{ ref('stg_orders') }} - -), - -payments as ( - - select * from {{ ref('stg_payments') }} - -), - -customer_orders as ( - - select - customer_id, - - min(order_date) as first_order, - max(order_date) as most_recent_order, - count(order_id) as number_of_orders - from orders - - group by customer_id - -), - -customer_payments as ( - - select - orders.customer_id, - sum(amount) as total_amount - - from payments - - left join orders on - payments.order_id = orders.order_id - - group by orders.customer_id - -), - -final as ( - - select - customers.customer_id, - customers.first_name, - customers.last_name, - customer_orders.first_order, - customer_orders.most_recent_order, - customer_orders.number_of_orders, - customer_payments.total_amount as customer_lifetime_value - - from customers - - left join customer_orders - on customers.customer_id = customer_orders.customer_id - - left join customer_payments - on customers.customer_id = customer_payments.customer_id - -) - -select * from final diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/docs.md b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/docs.md deleted file mode 100755 index c6ae93be..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/docs.md +++ /dev/null @@ -1,14 +0,0 @@ -{% docs orders_status %} - -Orders can be one of the following statuses: - -| status | description | -|----------------|------------------------------------------------------------------------------------------------------------------------| -| placed | The order has been placed but has not yet left the warehouse | -| shipped | The order has been shipped to the customer and is currently in transit | -| completed | The order has been received by the customer | -| return_pending | The customer has indicated that they would like to return the order, but it has not yet been received at the warehouse | -| returned | The order has been returned by the customer and received at the warehouse | - - -{% enddocs %} diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/fal/orders_forecast.sql
b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/fal/orders_forecast.sql deleted file mode 100644 index 7ec49a85..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/fal/orders_forecast.sql +++ /dev/null @@ -1,12 +0,0 @@ - -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED ad0f56029e8f6dc8ac7c39911c276624 - -Script dependencies: - -{{ ref('orders_daily') }} - -*/ - -SELECT * FROM {{ this }} diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/forecast_filter.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/forecast_filter.sql deleted file mode 100644 index 7373ca47..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/forecast_filter.sql +++ /dev/null @@ -1,19 +0,0 @@ -WITH orders_forecast AS ( - - SELECT - * - FROM {{ ref('orders_forecast') }} - -), final AS ( - - SELECT - date(ds) AS forecast_date, - yhat_count AS forecast_count, - yhat_amount AS forecast_amount - FROM orders_forecast - WHERE yhat_amount > 0 - -) - -SELECT * -FROM final diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/middle_1.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/middle_1.sql deleted file mode 100644 index f6220887..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/middle_1.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('raw_customers') }} diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/middle_2.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/middle_2.sql deleted file mode 100644 index cef2f48e..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/middle_2.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref ("middle_1") }} diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/orders.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/orders.sql deleted file mode 100755 index cbb29349..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/orders.sql +++ /dev/null @@ -1,56 +0,0 @@ -{% set payment_methods = ['credit_card', 'coupon', 'bank_transfer', 'gift_card'] %} - -with orders as ( - - select * from {{ ref('stg_orders') }} - -), - -payments as ( - - select * from {{ ref('stg_payments') }} - -), - -order_payments as ( - - select - order_id, - - {% for payment_method in payment_methods -%} - sum(case when payment_method = '{{ payment_method }}' then amount else 0 end) as {{ payment_method }}_amount, - {% endfor -%} - - sum(amount) as total_amount - - from payments - - group by order_id - -), - -final as ( - - select - orders.order_id, - orders.customer_id, - orders.order_date, - orders.status, - - {% for payment_method in payment_methods -%} - - order_payments.{{ payment_method }}_amount, - - {% endfor -%} - - order_payments.total_amount as amount - - from orders - - - left join order_payments - on orders.order_id = order_payments.order_id - -) - -select * from final diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/orders_daily.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/orders_daily.sql deleted file mode 100644 index 2bc802e8..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/orders_daily.sql +++ /dev/null @@ -1,36 +0,0 @@ -{% set order_statuses = ['returned', 'completed', 'return_pending', 'shipped', 'placed'] %} - -with orders as ( - - select * from {{ ref('stg_orders') }} - -), - -payments as ( - - select * from {{ ref('stg_payments') }} - -), - -final as ( - - 
select - orders.order_date, - count(*) as order_count, - - {% for order_status in order_statuses -%} - sum(case when orders.status = '{{ order_status }}' then 1 else 0 end) as {{ order_status }}_status, - {% endfor %} - - sum(payments.amount) as order_amount - from orders - - - left join payments - on orders.order_id = payments.order_id - - group by orders.order_date - -) - -select * from final diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/schema.yml b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/schema.yml deleted file mode 100755 index ced7c381..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/schema.yml +++ /dev/null @@ -1,107 +0,0 @@ -version: 2 - -fal: - scripts: - - global_test.py - -models: - - name: middle_1 - - - name: after_middle - - - name: middle_2 - meta: - fal: - scripts: - - middle_script.py - - - name: customers - description: This table has basic information about a customer, as well as some derived facts based on a customer's orders - - columns: - - name: customer_id - description: This is a unique identifier for a customer - tests: - - unique - - not_null - - - name: first_name - description: Customer's first name. PII. - - - name: last_name - description: Customer's last name. PII. - - - name: first_order - description: Date (UTC) of a customer's first order - - - name: most_recent_order - description: Date (UTC) of a customer's most recent order - - - name: number_of_orders - description: Count of the number of orders a customer has placed - - - name: total_order_amount - description: Total value (AUD) of a customer's orders - - meta: - fal: - post-hook: - - send_slack_message.py - - - name: orders - description: This table has basic information about orders, as well as some derived facts based on payments - - columns: - - name: order_id - tests: - - unique - - not_null - description: This is a unique identifier for an order - - - name: customer_id - description: Foreign key to the customers table - tests: - - not_null - - relationships: - to: ref('customers') - field: customer_id - - - name: order_date - description: Date (UTC) that the order was placed - - - name: status - description: '{{ doc("orders_status") }}' - tests: - - accepted_values: - values: - ["placed", "shipped", "completed", "return_pending", "returned"] - - - name: amount - description: Total amount (AUD) of the order - tests: - - not_null - - - name: credit_card_amount - description: Amount of the order (AUD) paid for by credit card - tests: - - not_null - - - name: coupon_amount - description: Amount of the order (AUD) paid for by coupon - tests: - - not_null - - - name: bank_transfer_amount - description: Amount of the order (AUD) paid for by bank transfer - tests: - - not_null - - - name: gift_card_amount - description: Amount of the order (AUD) paid for by gift card - tests: - - not_null - - - name: orders_forecast - description: fal generated model that forecasts dollar amount and count of orders - - - name: orders_daily diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/schema.yml b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/schema.yml deleted file mode 100755 index 6a077489..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/schema.yml +++ /dev/null @@ -1,38 +0,0 @@ -version: 2 - -models: - - name: stg_customers - columns: - - name: customer_id - tests: - - unique - - not_null - - meta: - fal: - scripts: - before: - - load_data.py - - - name: 
stg_orders - columns: - - name: order_id - tests: - - unique - - not_null - - name: status - tests: - - accepted_values: - values: - ["placed", "shipped", "completed", "return_pending", "returned"] - - - name: stg_payments - columns: - - name: payment_id - tests: - - unique - - not_null - - name: payment_method - tests: - - accepted_values: - values: ["credit_card", "coupon", "bank_transfer", "gift_card"] diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_customers.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_customers.sql deleted file mode 100755 index cad04726..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_customers.sql +++ /dev/null @@ -1,22 +0,0 @@ -with source as ( - - {#- - Normally we would select from the table here, but we are using seeds to load - our data in this project - #} - select * from {{ ref('raw_customers') }} - -), - -renamed as ( - - select - id as customer_id, - first_name, - last_name - - from source - -) - -select * from renamed diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_orders.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_orders.sql deleted file mode 100755 index a654dcb9..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_orders.sql +++ /dev/null @@ -1,23 +0,0 @@ -with source as ( - - {#- - Normally we would select from the table here, but we are using seeds to load - our data in this project - #} - select * from {{ ref('raw_orders') }} - -), - -renamed as ( - - select - id as order_id, - user_id as customer_id, - order_date, - status - - from source - -) - -select * from renamed diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_payments.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_payments.sql deleted file mode 100755 index 700cf7f4..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/models/staging/stg_payments.sql +++ /dev/null @@ -1,25 +0,0 @@ -with source as ( - - {#- - Normally we would select from the table here, but we are using seeds to load - our data in this project - #} - select * from {{ ref('raw_payments') }} - -), - -renamed as ( - - select - id as payment_id, - order_id, - payment_method, - - -- `amount` is currently stored in cents, so we convert it to dollars - amount / 100 as amount - - from source - -) - -select * from renamed diff --git a/projects/adapter/cli_tests/projects/002_jaffle_shop/tests/customer_lifetime_value.sql b/projects/adapter/cli_tests/projects/002_jaffle_shop/tests/customer_lifetime_value.sql deleted file mode 100644 index 75d46da2..00000000 --- a/projects/adapter/cli_tests/projects/002_jaffle_shop/tests/customer_lifetime_value.sql +++ /dev/null @@ -1,4 +0,0 @@ -SELECT - customer_id -FROM {{ ref('customers') }} -WHERE NOT (customer_lifetime_value >= 0) OR customer_lifetime_value IS NULL diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/data/.gitkeep b/projects/adapter/cli_tests/projects/003_runtime_errors/data/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/dbt_project.yml b/projects/adapter/cli_tests/projects/003_runtime_errors/dbt_project.yml deleted file mode 100644 index e994ba5b..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/dbt_project.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: "fal_003" -version: "1.0.0" 
-config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/fal_models/runtime_error_model.py b/projects/adapter/cli_tests/projects/003_runtime_errors/fal_models/runtime_error_model.py deleted file mode 100644 index 32baf2cc..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/fal_models/runtime_error_model.py +++ /dev/null @@ -1,5 +0,0 @@ -df = ref("working_model") - -assert False, "expected" - -write_to_model(df) diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/after.py b/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/after.py deleted file mode 100644 index c9a263ec..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/after.py +++ /dev/null @@ -1 +0,0 @@ -raise Exception("After error") diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/before.py b/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/before.py deleted file mode 100644 index 54d914e7..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/before.py +++ /dev/null @@ -1 +0,0 @@ -raise Exception("Before error") diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/post_hook.py b/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/post_hook.py deleted file mode 100644 index ab14f0d7..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/fal_scripts/post_hook.py +++ /dev/null @@ -1,13 +0,0 @@ -import io -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -temp_dir = os.getenv("temp_dir", ".") -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".post_hook.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/macros b/projects/adapter/cli_tests/projects/003_runtime_errors/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/models/fal/runtime_error_model.sql b/projects/adapter/cli_tests/projects/003_runtime_errors/models/fal/runtime_error_model.sql deleted file mode 100644 index b1b83a8c..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/models/fal/runtime_error_model.sql +++ /dev/null @@ -1,12 +0,0 @@ - -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED 35d7f6579989a8628c2b64b496c8e3d9 - -Script dependencies: - -{{ ref('working_model') }} - -*/ - -SELECT * FROM {{ this }} diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/models/schema.yml b/projects/adapter/cli_tests/projects/003_runtime_errors/models/schema.yml deleted file mode 100644 index f5585096..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/models/schema.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 2 - -models: - - name: some_model - meta: - fal: - post-hook: - - fal_scripts/post_hook.py - - name: working_model - meta: - fal: - post-hook: - - fal_scripts/post_hook.py - scripts: - before: - - fal_scripts/before.py - 
after: - - fal_scripts/after.py diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/models/some_model.sql b/projects/adapter/cli_tests/projects/003_runtime_errors/models/some_model.sql deleted file mode 100644 index 8b6765d4..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/models/some_model.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -select 1/0 as my_int diff --git a/projects/adapter/cli_tests/projects/003_runtime_errors/models/working_model.sql b/projects/adapter/cli_tests/projects/003_runtime_errors/models/working_model.sql deleted file mode 100644 index 6947f788..00000000 --- a/projects/adapter/cli_tests/projects/003_runtime_errors/models/working_model.sql +++ /dev/null @@ -1,9 +0,0 @@ -{{ config(materialized='table') }} - -WITH data AS ( - SELECT - cast(1 AS integer) AS my_int -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/004_globals/data/.gitkeep b/projects/adapter/cli_tests/projects/004_globals/data/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/004_globals/dbt_project.yml b/projects/adapter/cli_tests/projects/004_globals/dbt_project.yml deleted file mode 100644 index d3fa5300..00000000 --- a/projects/adapter/cli_tests/projects/004_globals/dbt_project.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: "fal_004" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/004_globals/fal_scripts/after.py b/projects/adapter/cli_tests/projects/004_globals/fal_scripts/after.py deleted file mode 100644 index e0afeda3..00000000 --- a/projects/adapter/cli_tests/projects/004_globals/fal_scripts/after.py +++ /dev/null @@ -1,12 +0,0 @@ -import os -from functools import reduce - -model_name = context.current_model.name if context.current_model else "GLOBAL" - -output = f"Model name: {model_name}" - -temp_dir = os.getenv("temp_dir", ".") -print(temp_dir) -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".after.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/004_globals/fal_scripts/before.py b/projects/adapter/cli_tests/projects/004_globals/fal_scripts/before.py deleted file mode 100644 index ecd114e2..00000000 --- a/projects/adapter/cli_tests/projects/004_globals/fal_scripts/before.py +++ /dev/null @@ -1,12 +0,0 @@ -import os -from functools import reduce - -model_name = context.current_model.name if context.current_model else "GLOBAL" - -output = f"Model name: {model_name}" - -temp_dir = os.getenv("temp_dir", ".") -print(temp_dir) -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".before.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/004_globals/fal_scripts/before_b.py b/projects/adapter/cli_tests/projects/004_globals/fal_scripts/before_b.py deleted file mode 100644 index 21b6e948..00000000 --- a/projects/adapter/cli_tests/projects/004_globals/fal_scripts/before_b.py +++ /dev/null @@ -1,12 +0,0 @@ -import os -from functools import reduce - -model_name = context.current_model.name if context.current_model else "GLOBAL" - -output = f"Model name: {model_name}" - -temp_dir = os.getenv("temp_dir", ".") -print(temp_dir) -write_dir = open(reduce(os.path.join, [temp_dir, 
model_name + ".before_b.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/004_globals/macros b/projects/adapter/cli_tests/projects/004_globals/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/004_globals/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/004_globals/models/other_schema.yaml b/projects/adapter/cli_tests/projects/004_globals/models/other_schema.yaml deleted file mode 100644 index 56df1c81..00000000 --- a/projects/adapter/cli_tests/projects/004_globals/models/other_schema.yaml +++ /dev/null @@ -1,6 +0,0 @@ -version: 2 - -fal: - scripts: - before: - - fal_scripts/before_b.py diff --git a/projects/adapter/cli_tests/projects/004_globals/models/schema.yml b/projects/adapter/cli_tests/projects/004_globals/models/schema.yml deleted file mode 100644 index 3743bdcb..00000000 --- a/projects/adapter/cli_tests/projects/004_globals/models/schema.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 2 - -models: - - name: some_model - meta: - fal: - scripts: - before: - - fal_scripts/before.py - after: - - fal_scripts/after.py - -fal: - scripts: - before: - - fal_scripts/before.py - after: - - fal_scripts/after.py diff --git a/projects/adapter/cli_tests/projects/004_globals/models/some_model.sql b/projects/adapter/cli_tests/projects/004_globals/models/some_model.sql deleted file mode 100644 index a7802394..00000000 --- a/projects/adapter/cli_tests/projects/004_globals/models/some_model.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -select 1 as my_int diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/.gitignore b/projects/adapter/cli_tests/projects/005_functions_and_variables/.gitignore deleted file mode 100644 index 204ea9b4..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/.gitignore +++ /dev/null @@ -1 +0,0 @@ -models/fal/ diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/data/.gitkeep b/projects/adapter/cli_tests/projects/005_functions_and_variables/data/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/dbt_project.yml b/projects/adapter/cli_tests/projects/005_functions_and_variables/dbt_project.yml deleted file mode 100644 index 19c5576f..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/dbt_project.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: "fal_005" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_models/model_with_array.py b/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_models/model_with_array.py deleted file mode 100644 index a473db91..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_models/model_with_array.py +++ /dev/null @@ -1,28 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing.utils import create_model_artifact - -import pandas as pd - -df = pd.DataFrame( - { - "my_array": [["some", "other"], []], - "other_array": [[1, 2, 3], []], - } -) -df.info() - -model_name = context.current_model.name - -write_to_model(df) -df = ref(model_name) 
-df.columns = df.columns.str.lower() # Snowflake has uppercase columns -df.info() - -write_to_model(df) -df = ref(model_name) -df.columns = df.columns.str.lower() # Snowflake has uppercase columns -df.info() - -output = f"my_array: {list(df['my_array'][0])}" -output += f"\nother_array: {list(df['other_array'][0])}" -create_model_artifact(context, additional_data=output) diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_models/model_with_date.py b/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_models/model_with_date.py deleted file mode 100644 index 01ea0d4f..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_models/model_with_date.py +++ /dev/null @@ -1,36 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing.utils import create_model_artifact - -import pandas as pd -import datetime as dt - - -arr = [dt.datetime(2022, 1, 1, 14, 50, 59), dt.datetime.now()] -df = pd.DataFrame( - { - # NOTE: timestamp without time zone fail to write to Snowflake - # https://github.com/snowflakedb/snowflake-connector-python/issues/600#issuecomment-844524183 - "my_datetime": map(lambda d: pd.Timestamp(d, unit="ms", tz="UTC"), arr), - "my_date": map(lambda d: d.date(), arr), - "my_time": map(lambda d: d.time(), arr), - } -) - -df.info() - -model_name = context.current_model.name - -write_to_model(df) -df = ref(model_name) -df.columns = df.columns.str.lower() # Snowflake has uppercase columns -df.info() - - -# HACK: Snowflake returns without the time zone information -df["my_datetime"] = df["my_datetime"].apply( - lambda d: d.tz_localize("UTC") if not d.tz else d -) -output = f"my_datetime: {df['my_datetime'][0].isoformat()}" -output += f"\nmy_date: {df['my_date'][0].isoformat()}" -output += f"\nmy_time: {df['my_time'][0].isoformat()}" -create_model_artifact(context, additional_data=output) diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/complete_model.py b/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/complete_model.py deleted file mode 100644 index 066a58f2..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/complete_model.py +++ /dev/null @@ -1,34 +0,0 @@ -import pandas as pd -from functools import reduce -import os - -model_name = context.current_model.name - - -output = "" - -df: pd.DataFrame = ref(model_name).fillna(0) -df.columns = df.columns.str.lower() # Snowflake has uppercase columns -df = df.astype({"my_int": float}) -output += f"my_int {df.my_int[0]}\n" - -df.my_int = 3 -write_to_model(df, mode="append") - -float_df = df.astype({"my_int": float}) -write_to_model(float_df) - -write_to_model(df) # default: overwrite - -df: pd.DataFrame = ref(model_name).fillna(0) -df.columns = df.columns.str.lower() # Snowflake has uppercase columns -df = df.astype({"my_int": float}) -output += f"my_int {df.my_int[0]}\n" -output += f"size {len(df)}\n" - - -path = reduce( - os.path.join, [os.getenv("temp_dir", "."), model_name + ".complete_model.txt"] -) -with open(path, "w") as file: - file.write(output) diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/context.py b/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/context.py deleted file mode 100644 index 2c37a3da..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/context.py +++ /dev/null @@ -1,20 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing.utils import 
create_dynamic_artifact - -assert context.current_model - -extra = "" -extra += f"context: {context}\n" -extra += f"model: {context.current_model}\n" -extra += f"target: {context.target}\n" -extra += f"target name: {context.target.name}\n" -extra += f"target profile: {context.target.profile_name}\n" -extra += f"target database: {context.target.database}\n" - -response = context.current_model.adapter_response -assert response - -extra += f"adapter response: {response}\n" -extra += f"adapter response: rows affected {response.rows_affected}\n" - -create_dynamic_artifact(context, additional_data=extra) diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/execute_sql.py b/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/execute_sql.py deleted file mode 100644 index dcb5b012..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/execute_sql.py +++ /dev/null @@ -1,11 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing.utils import create_dynamic_artifact - -df = execute_sql('SELECT 1 as a, 2 as b, 3 as c') -df.columns = df.columns.str.lower() # Snowflake has uppercase columns - -assert 2 == df['b'][0] - -df.info() - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/lists.py b/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/lists.py deleted file mode 100644 index 11fecd68..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/lists.py +++ /dev/null @@ -1,19 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing.utils import create_dynamic_artifact - -assert context.current_model - -lines = [] - -for model in list_models(): - lines.append( - f"model: {model.name} property: {model.meta['property']['other'] if model.meta else None}" - ) - -for source in list_sources(): - lines.append( - # NOTE: removing the namespace prefix - f"source: {source.name} {source.table_name.split('__ns__')[1]} property: {source.meta['property']['other'] if source.meta else None}" - ) - -create_dynamic_artifact(context, additional_data="\n".join(lines)) diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/write_to_source_twice.py b/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/write_to_source_twice.py deleted file mode 100644 index 64ca08be..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/fal_scripts/write_to_source_twice.py +++ /dev/null @@ -1,43 +0,0 @@ -import pandas as pd -from functools import reduce -import os - -model_name = context.current_model.name - - -output = "" - -df: pd.DataFrame = ref(model_name) -df.columns = df.columns.str.lower() # Snowflake has uppercase columns - -output += f"my_float {df.my_float[0]}\n" - -table_prefix = f"ns__{ os.environ.get('DB_NAMESPACE', '') }__ns__" - -write_to_source(df, "results", table_prefix + "some_source", mode="overwrite") -source_size = len(source("results", table_prefix + "some_source")) -output += f"source size {source_size}\n" - -write_to_source(df, "results", table_prefix + "some_source", mode="append") -source_size = len(source("results", table_prefix + "some_source")) -output += f"source size {source_size}\n" - -for source in list_sources(): - output += ( - # NOTE: removing the namespace prefix - f"source {source.name}.{source.table_name.split('__ns__')[1]} has {len(source.tests)} tests," - f" source status is {source.status}\n" - 
) - -for model in list_models(): - output += ( - f"model {model.name} has {len(model.tests)} tests," - f" model status is {model.status}\n" - ) - -path = reduce( - os.path.join, [os.getenv("temp_dir", "."), model_name + ".write_to_source_twice.txt"] -) - -with open(path, "w") as file: - file.write(output) diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/macros b/projects/adapter/cli_tests/projects/005_functions_and_variables/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/models/other_model.sql b/projects/adapter/cli_tests/projects/005_functions_and_variables/models/other_model.sql deleted file mode 100644 index 55500879..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/models/other_model.sql +++ /dev/null @@ -1,12 +0,0 @@ -{{ config(materialized='table') }} - -WITH data AS ( - - SELECT - 'some text' AS my_text, - -- The following column will be filled in an after script with fal - cast(NULL AS integer) AS my_int -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/models/schema.yml b/projects/adapter/cli_tests/projects/005_functions_and_variables/models/schema.yml deleted file mode 100644 index 840c1602..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/models/schema.yml +++ /dev/null @@ -1,61 +0,0 @@ -version: 2 - -sources: - - name: results - database: "{{ env_var('DBT_DATABASE', 'test') }}" - schema: "{{ env_var('DBT_SCHEMA', 'dbt_fal') }}" - tables: - - name: "ns__{{ env_var('DB_NAMESPACE', '') }}__ns__other_source" - meta: - property: - other: 4 - - name: "ns__{{ env_var('DB_NAMESPACE', '') }}__ns__some_source" - columns: - - name: my_text - tests: - - not_null - - name: my_int - tests: - - not_null - -models: - - name: some_model - columns: - - name: my_text - tests: - - not_null - - name: my_int - tests: - - not_null - meta: - property: - name: some_model - other: 1 - fal: - scripts: - after: - - fal_scripts/write_to_source_twice.py - post-hook: - - fal_scripts/execute_sql.py - - fal_scripts/context.py - - fal_scripts/lists.py - - - name: other_model - meta: - property: - name: other_model - other: 2 - fal: - scripts: - after: - - fal_scripts/complete_model.py - - - name: third_model - meta: - property: - name: third_model - other: 3 - fal: - scripts: - after: - - fal_scripts/complete_model.py diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/models/some_model.sql b/projects/adapter/cli_tests/projects/005_functions_and_variables/models/some_model.sql deleted file mode 100644 index 116581cf..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/models/some_model.sql +++ /dev/null @@ -1,13 +0,0 @@ -{{ config(materialized='table') }} - -WITH data AS ( - SELECT - 1.2 AS my_float, - my_text, - -- the after script value should reflect here - my_int - FROM {{ ref('other_model') }} -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/005_functions_and_variables/models/third_model.sql b/projects/adapter/cli_tests/projects/005_functions_and_variables/models/third_model.sql deleted file mode 100644 index c71573a8..00000000 --- a/projects/adapter/cli_tests/projects/005_functions_and_variables/models/third_model.sql +++ /dev/null @@ -1,12 +0,0 @@ -{{ 
config(materialized='table', alias='third') }} - -WITH data AS ( - - SELECT - 'some text' AS my_text, - -- The following column will be filled in an after script with fal - cast(NULL AS integer) AS my_int -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/006_script_paths/data/.gitkeep b/projects/adapter/cli_tests/projects/006_script_paths/data/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/006_script_paths/dbt_project.yml b/projects/adapter/cli_tests/projects/006_script_paths/dbt_project.yml deleted file mode 100644 index 2a393a66..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/dbt_project.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: "fal_006" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-scripts-path: "scripts" - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/006_script_paths/macros b/projects/adapter/cli_tests/projects/006_script_paths/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/006_script_paths/models/schema.yml b/projects/adapter/cli_tests/projects/006_script_paths/models/schema.yml deleted file mode 100644 index 8f9aaf8f..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/models/schema.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - -models: - - name: some_model - meta: - fal: - scripts: - before: - - before.py - after: - - after.py diff --git a/projects/adapter/cli_tests/projects/006_script_paths/models/some_model.sql b/projects/adapter/cli_tests/projects/006_script_paths/models/some_model.sql deleted file mode 100644 index 1a952fe1..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/models/some_model.sql +++ /dev/null @@ -1,12 +0,0 @@ -{{ config(materialized='table') }} - -with data as ( - - SELECT - cast(1 AS integer) as my_int, - 'some text' as my_text, - cast(0.1 AS numeric) as my_float -) - -select * -from data diff --git a/projects/adapter/cli_tests/projects/006_script_paths/scripts/after.py b/projects/adapter/cli_tests/projects/006_script_paths/scripts/after.py deleted file mode 100644 index 9d0575eb..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/scripts/after.py +++ /dev/null @@ -1,7 +0,0 @@ -print("next: 'from utils.blah import x'") -from utils.process.process_df import process_data - -print("next: 'import utils.blah as y'") -import utils.process.process_df as process_df - -process_data(context, ref) diff --git a/projects/adapter/cli_tests/projects/006_script_paths/scripts/before.py b/projects/adapter/cli_tests/projects/006_script_paths/scripts/before.py deleted file mode 100755 index db9abe08..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/scripts/before.py +++ /dev/null @@ -1,12 +0,0 @@ -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" -temp_dir = os.getenv("temp_dir", ".") - -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".before.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git 
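Nearly every before/after script in these fixtures follows the same artifact convention: write a small text file keyed by model name into temp_dir so the test suite can assert on it later. A minimal sketch of that pattern, assuming the temp_dir environment variable set by the test harness and the injected context:

import os

model_name = context.current_model.name
artifact_path = os.path.join(os.getenv("temp_dir", "."), f"{model_name}.after.txt")
with open(artifact_path, "w") as f:  # context-managed, unlike the fixtures' open/close pairs
    f.write(f"Model name: {model_name}\nStatus: {context.current_model.status}")
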
a/projects/adapter/cli_tests/projects/006_script_paths/scripts/utils/my_utils.py b/projects/adapter/cli_tests/projects/006_script_paths/scripts/utils/my_utils.py deleted file mode 100644 index 9cfe7f6b..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/scripts/utils/my_utils.py +++ /dev/null @@ -1,10 +0,0 @@ -import os -from functools import reduce - - -def write_data(data, model_name): - temp_dir = os.getenv("temp_dir", ".") - - write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".after.txt"]), "w") - write_dir.write(data) - write_dir.close() diff --git a/projects/adapter/cli_tests/projects/006_script_paths/scripts/utils/process/process_df.py b/projects/adapter/cli_tests/projects/006_script_paths/scripts/utils/process/process_df.py deleted file mode 100644 index dbe34ace..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/scripts/utils/process/process_df.py +++ /dev/null @@ -1,23 +0,0 @@ -import pandas as pd -import io - -print("relative import in inner directories") -from ..my_utils import write_data - -print("use 'utils' as base even in inner directories") -from utils.my_utils import write_data - - -def process_data(context, ref): - model_name = context.current_model.name - - output = f"Model name: {model_name}" - output = output + f"\nStatus: {context.current_model.status}" - - df: pd.DataFrame = ref(model_name) - buf = io.StringIO() - df.info(buf=buf, memory_usage=False) - info = buf.getvalue() - - output = output + f"\nModel dataframe information:\n{info}" - write_data(output, model_name) diff --git a/projects/adapter/cli_tests/projects/006_script_paths/scripts2/after.py b/projects/adapter/cli_tests/projects/006_script_paths/scripts2/after.py deleted file mode 100644 index e34f5e71..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/scripts2/after.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -from functools import reduce -import pandas as pd -import io - - -def write_data(data, model_name): - temp_dir = os.getenv("temp_dir", ".") - - write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".after2.txt"]), "w") - write_dir.write(data) - write_dir.close() - - -def process_data(context, ref): - model_name = context.current_model.name - - output = f"Model name: {model_name}" - output = output + f"\nStatus: {context.current_model.status}" - - df: pd.DataFrame = ref(model_name) - buf = io.StringIO() - df.info(buf=buf, memory_usage=False) - info = buf.getvalue() - - output = output + f"\nModel dataframe information:\n{info}" - write_data(output, model_name) - - -process_data(context, ref) diff --git a/projects/adapter/cli_tests/projects/006_script_paths/scripts2/before.py b/projects/adapter/cli_tests/projects/006_script_paths/scripts2/before.py deleted file mode 100755 index 5cd5e8f7..00000000 --- a/projects/adapter/cli_tests/projects/006_script_paths/scripts2/before.py +++ /dev/null @@ -1,12 +0,0 @@ -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" -temp_dir = os.getenv("temp_dir", ".") - -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".before2.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/data/raw_zendesk_ticket_data.csv b/projects/adapter/cli_tests/projects/007_ipynb_scripts/data/raw_zendesk_ticket_data.csv deleted file mode 100755 index 6aa0ef13..00000000 --- 
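The 006_script_paths scripts above depend on how fal resolves imports for hook scripts: the directory named by the fal-scripts-path var appears to be placed on sys.path, so modules under it import as top-level packages. A sketch of the layout and the two import styles the fixture demonstrates (all names are from the fixture itself):

# layout under fal-scripts-path ("scripts" in dbt_project.yml):
#   scripts/after.py
#   scripts/utils/my_utils.py
#   scripts/utils/process/process_df.py
from utils.process.process_df import process_data  # absolute, rooted at scripts/

# inside utils/process/process_df.py both forms resolve:
#   from ..my_utils import write_data      # relative, within the utils package
#   from utils.my_utils import write_data  # absolute, rooted at scripts/

process_data(context, ref)  # context and ref are injected by fal
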
a/projects/adapter/cli_tests/projects/007_ipynb_scripts/data/raw_zendesk_ticket_data.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,_fivetran_synced,allow_channelback,assignee_id,brand_id,created_at,description,due_at,external_id,forum_topic_id,group_id,has_incidents,is_public,organization_id,priority,problem_id,recipient,requester_id,status,subject,submitter_id,system_client,ticket_form_id,type,updated_at,url,via_channel,via_source_from_id,via_source_from_title,via_source_rel,via_source_to_address,via_source_to_name,merged_ticket_ids,via_source_from_address,followup_ids,via_followup_source_id -1595,2020-03-20 2:32:49,FALSE,,360003529474a,2020-02-19 1:54:52,I think this is the 5th one I've purchased. I'm working on getting one in every room of my house. I ...,,,,360006965034a,FALSE,TRUE,370295712714a,,,email@email.com,396331237134a,deleted,subject1,396331237134a,,360002048693a,,2020-02-19 1:55:11,https://zendesk.com/api/v2/tickets/1595.json,web,,,,example@email.com,,[],,, -16988,2021-01-13 20:09:16,FALSE,418284131934a,360003529474,2020-12-22 0:19:23,"Love it! I’ve listened to songs I haven’t heard since childhood! I get the news, weather, informatio...",,,,360013366274,FALSE,TRUE,370469077513,,,email@email.com,1500656884401,solved,subject1,1500656884401,,360002048693,,2021-01-13 18:42:39,https://zendesk.com/api/v2/tickets/16988.json,email,,,,example@email.com,Support,[],,[], -14173,2020-11-11 20:08:45,FALSE,396371699653,360003529474,2020-10-28 12:03:02,"I sent it to my 85 year old Dad, and he talks to it constantly.",,,,360006965034,FALSE,TRUE,370321120273,,,email@email.com,424883466453,closed,subject1,424883466453,,360002048693,,2020-11-11 17:01:32,https://zendesk.com/api/v2/tickets/14173.json,email,,,,example@email.com,Support,[],,, -11071,2020-10-02 14:08:33,FALSE,,360003529474,2020-08-28 18:06:36,"I love it, wife hates it.",,,,,FALSE,TRUE,,,,email@email.com,419755385214,deleted,subject1,419755385214,,360002048693,,2020-09-02 11:01:27,https://zendesk.com/api/v2/tickets/11071.json,email,,,,X,Support,[],,, -1966,2020-03-25 20:32:24,FALSE,396315360434,360003529474,2020-02-27 6:05:08,She doesn’t always listen,,,,360006965034,FALSE,TRUE,370295721514,,,email@email.com,402813302773,closed,subject1,402813302773,,360002048693,,2020-03-25 16:03:26,https://zendesk.com/api/v2/tickets/1966.json,email,,,,example@email.com,Support,[1967],,, -11013,2020-10-02 20:08:20,FALSE,402851697393,360003529474,2020-08-27 23:09:52,I was a little nervous when I received my new Echo as I'm not really Tech savvy. 
I found it a bit in...,,,,360008376313,FALSE,TRUE,370297881854,,,email@email.com,419688934974,deleted,subject1,419688934974,,360002048693,,2020-09-02 15:53:16,https://zendesk.com/api/v2/tickets/11013.json,email,,,,X,Support,[],,, -1404,2020-03-05 4:53:46,FALSE,396371699653,360003529474,2020-02-13 21:43:58,Some major design flaws,,,,360006965034,FALSE,TRUE,370295709874,,,email@email.com,403125197514,closed,subject1,403125197514,,360002048693,,2020-02-28 1:01:57,https://zendesk.com/api/v2/tickets/1404.json,email,,,,example@email.com,Support,,,, -4721,2020-05-14 20:12:36,FALSE,396371706773,360003529474,2020-04-20 14:31:46,Huge disappointment,,,,360006965034,FALSE,TRUE,370295719414,,,email@email.com,402862357193,closed,subject1,402862357193,,360002048693,,2020-05-14 20:04:34,https://zendesk.com/api/v2/tickets/4721.json,email,,,,example@email.com,Support,[],,, -6171,2020-06-01 2:11:40,FALSE,396334400494,360003529474,2020-05-17 17:50:31,"nice hotel expensive parking got good deal stay hotel anniversary, arrived late evening ",,,,360006965034,FALSE,TRUE,370295713034,,,email@email.com,410930434074,closed,subject1,410930434074,,360002048693,,2020-05-31 23:03:46,https://zendesk.com/api/v2/tickets/6171.json,email,,,,example@email.com,Support,[],,, -6605,2020-06-10 2:10:24,FALSE,396315360434,360003529474,2020-05-26 22:29:50,Full display not working in all application.,,,,360006965034,FALSE,TRUE,370295719754,,,email@email.com,410416672973,closed,subject1,410416672973,,360002048693,,2020-06-09 23:03:49,https://zendesk.com/api/v2/tickets/6605.json,email,,,,example@email.com,Support,[],,, diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/data/time_series.csv b/projects/adapter/cli_tests/projects/007_ipynb_scripts/data/time_series.csv deleted file mode 100755 index d81001dc..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/data/time_series.csv +++ /dev/null @@ -1,77 +0,0 @@ -,y,ds -0,1537,2009-08-04 -1,1796,2009-09-29 -2,1804,2009-09-22 -3,1805,2009-09-23 -4,1554,2009-08-10 -5,1563,2009-08-05 -6,1564,2009-08-13 -7,1564,2009-08-09 -8,1821,2009-09-30 -9,1570,2009-08-03 -10,1572,2009-08-23 -11,1574,2009-08-24 -12,1832,2009-10-01 -13,1580,2009-08-14 -14,1589,2009-08-15 -15,1847,2009-10-03 -16,1595,2009-08-11 -17,1599,2009-08-21 -18,1600,2009-08-28 -19,1604,2009-08-22 -20,1610,2009-08-20 -21,1622,2009-08-16 -22,1624,2009-08-18 -23,1625,2009-08-27 -24,1630,2009-09-10 -25,1635,2009-08-26 -26,1638,2009-08-17 -27,1648,2009-09-08 -28,1650,2009-09-06 -29,1659,2009-08-31 -30,1662,2009-09-02 -31,1666,2009-09-04 -32,1669,2009-09-18 -33,1670,2009-08-25 -34,1417,2009-07-24 -35,1676,2009-09-11 -36,1681,2009-09-09 -37,1681,2009-09-16 -38,1683,2009-08-29 -39,1431,2009-07-23 -40,1435,2009-07-25 -41,1699,2009-09-01 -42,1700,2009-08-30 -43,1702,2009-09-24 -44,1449,2009-07-27 -45,1709,2009-09-03 -46,1459,2009-07-22 -47,1464,2009-07-28 -48,1721,2009-09-07 -49,1477,2009-07-21 -50,1734,2009-09-17 -51,1738,2009-09-05 -52,1741,2009-09-12 -53,1743,2009-10-02 -54,1747,2009-09-27 -55,1494,2009-08-06 -56,1496,2009-08-07 -57,1757,2009-09-21 -58,1757,2009-10-04 -59,1502,2009-08-12 -60,1758,2009-09-20 -61,1759,2009-09-15 -62,1504,2009-07-30 -63,1510,2009-07-29 -64,1511,2009-07-31 -65,1768,2009-09-13 -66,1768,2009-09-25 -67,1770,2009-09-14 -68,1770,2009-09-19 -69,1515,2009-08-02 -70,1520,2009-07-26 -71,1523,2009-08-08 -72,1779,2009-09-28 -73,1530,2009-08-19 -74,1531,2009-08-01 -75,1791,2009-09-26 diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/dbt_project.yml 
b/projects/adapter/cli_tests/projects/007_ipynb_scripts/dbt_project.yml deleted file mode 100755 index 01af3df6..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/dbt_project.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: "fal_007" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models", "other_models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/after.py b/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/after.py deleted file mode 100755 index 9c04ce64..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/after.py +++ /dev/null @@ -1,21 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -df: pd.DataFrame = ref(model_name) -buf = io.StringIO() -df.info(buf=buf, memory_usage=False) -info = buf.getvalue() - -output = output + f"\nModel dataframe information:\n{info}" -temp_dir = os.getenv("temp_dir", ".") -print(temp_dir) -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".after.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/before.py b/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/before.py deleted file mode 100755 index a3ee69fa..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/before.py +++ /dev/null @@ -1,16 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name - -output = f"Model name: {model_name}" -output = output + f"\nStatus: {context.current_model.status}" - -output = output + "\nModel dataframe name: {model_name}" -temp_dir = os.getenv("temp_dir", ".") - -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".before.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/check_extra.py b/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/check_extra.py deleted file mode 100644 index 66e3da9b..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/check_extra.py +++ /dev/null @@ -1,19 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -model_name = context.current_model.name -df: pd.DataFrame = ref(model_name) -df.columns = df.columns.str.lower() # Snowflake has uppercase columns - - -if hasattr(df, "extra_col"): - output = f"extra_col: {df.extra_col[0]}\n" -else: - output = "no extra_col\n" - -temp_dir = os.getenv("temp_dir", ".") -write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".check_extra.txt"]), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/notebooks/my_notebook.ipynb b/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/notebooks/my_notebook.ipynb deleted file mode 100644 index 80cccccd..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/fal_scripts/notebooks/my_notebook.ipynb +++ /dev/null @@ -1,186 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "62a31b37", - "metadata": { - 
"collapsed": false - }, - "source": [ - "Do imports" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7a3a70d", - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import pandas as pd\n", - "import io\n", - "import os\n", - "from functools import reduce\n", - "from fal.dbt.integration.magics import init_fal" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "256f078d", - "metadata": { - "collapsed": false - }, - "source": [ - "Initiate FalDbt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8a3d27de", - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%init_fal project_dir=../.. profiles_dir=../../.. default_model_name=zendesk_ticket_data" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "e3cc7e7c", - "metadata": { - "collapsed": false - }, - "source": [ - "Function for finding a model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f998243f", - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def find_model(model_name: str):\n", - " models = list_models()\n", - " return [model for model in models if model.name == model_name][0]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "e32e0e2b", - "metadata": { - "collapsed": false - }, - "source": [ - "Function for writing dataframe data in a file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "021f4d26", - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def check_and_write_data(model_name: str):\n", - " output = f\"Model name: {model_name}\"\n", - " model = find_model(model_name)\n", - " output = output + f\"\\nStatus: {model.status}\"\n", - " df = ref(model_name)\n", - " df.columns = df.columns.str.lower()\n", - "\n", - " if hasattr(df, \"extra_col\"):\n", - " output = f\"extra_col: {df.extra_col[0]}\\n\"\n", - " else:\n", - " output = \"no extra_col\\n\"\n", - "\n", - "\n", - " temp_dir = os.environ[\"temp_dir\"]\n", - " write_dir = open(reduce(os.path.join, [temp_dir, model_name + \".my_notebook.txt\"]), \"w\")\n", - " write_dir.write(output)\n", - " write_dir.close()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "41f47376", - "metadata": { - "collapsed": false - }, - "source": [ - "Model name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9c99dab1", - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_name = \"zendesk_ticket_data\"" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "f8e7490d", - "metadata": { - "collapsed": false - }, - "source": [ - "Process dataframe" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5f3e47dc", - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "check_and_write_data(model_name)" - ] - } - ], - "metadata": { - "kernelspec": { - "argv": [ - "python", - "-m", - "ipykernel_launcher", - "-f", - "{connection_file}" - ], - "display_name": "Python 3 (ipykernel)", - "env": null, - "interrupt_mode": "signal", - "language": "python", - "metadata": { - "debugger": true - }, - "name": "python3" - }, - "name": "my_notebook.ipynb" - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/macros b/projects/adapter/cli_tests/projects/007_ipynb_scripts/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/macros 
+++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/agent_wait_time.sql b/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/agent_wait_time.sql deleted file mode 100755 index 4c76dafe..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/agent_wait_time.sql +++ /dev/null @@ -1,9 +0,0 @@ -{{ config(materialized='table', tags='daily') }} - -with source_data as ( - - select y, ds from {{ ref('time_series') }} -) - -select * -from source_data diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/schema.yaml b/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/schema.yaml deleted file mode 100755 index 9976db0e..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/schema.yaml +++ /dev/null @@ -1,33 +0,0 @@ -version: 2 - -models: - - name: zendesk_ticket_data - meta: - fal: - scripts: - after: - - fal_scripts/check_extra.py - - fal_scripts/notebooks/my_notebook.ipynb - - - name: agent_wait_time - description: Agent wait time series - config: - materialized: table - tags: daily - columns: - - name: y - tests: - - not_null - - name: ds - tests: - - not_null - - name: a - tests: - - unique - meta: - fal: - scripts: - before: - - fal_scripts/before.py - after: - - fal_scripts/after.py diff --git a/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/zendesk_ticket_data.sql b/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/zendesk_ticket_data.sql deleted file mode 100755 index 1454cc92..00000000 --- a/projects/adapter/cli_tests/projects/007_ipynb_scripts/models/zendesk_ticket_data.sql +++ /dev/null @@ -1,13 +0,0 @@ -{{ config(materialized='table') }} - -with source_data as ( - - select id,_fivetran_synced,allow_channelback,assignee_id,brand_id - {% if var('extra_col', False) %} - , 'yes' as extra_col - {% endif %} - from {{ ref('raw_zendesk_ticket_data') }} -) - -select * -from source_data diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/.gitignore b/projects/adapter/cli_tests/projects/008_pure_python_models/.gitignore deleted file mode 100644 index a27fe0f4..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/.gitignore +++ /dev/null @@ -1 +0,0 @@ -models/fal/staging/broken_model.sql diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/dbt_project.yml b/projects/adapter/cli_tests/projects/008_pure_python_models/dbt_project.yml deleted file mode 100644 index edb3ba07..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/dbt_project.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: "fal_008" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-scripts-path: "scripts" - fal-models-paths: ["fal_models"] - -models: - +schema: custom diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/model_e.ipynb b/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/model_e.ipynb deleted file mode 100644 index c2bad7d7..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/model_e.ipynb +++ /dev/null @@ -1,103 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from functools import reduce\n", - "from 
fal.dbt.integration.magics import init_fal\n", - "from fal.dbt.typing import *\n", - "%init_fal project_dir=../.. profiles_dir=../../.. default_model_name=model_e_notebook" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Get the models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df = ref(\"model_c\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Add the new data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df[\"my_null\"] = None" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Write back to data warehouse" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "write_to_model(df)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Leave behind an artifact for tests" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model_name = context.current_model.name\n", - "\n", - "output = f\"Model name: {model_name}\"\n", - "output = output + f\"\\nStatus: {context.current_model.status}\"\n", - "\n", - "output = output + \"\\nModel dataframe name: {model_name}\"\n", - "temp_dir = os.environ[\"temp_dir\"]\n", - "\n", - "write_dir = open(reduce(os.path.join, [temp_dir, model_name + \".txt\"]), \"w\")\n", - "write_dir.write(output)\n", - "write_dir.close()" - ] - } - ], - "metadata": { - "language_info": { - "name": "python" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/staging/broken_model.py b/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/staging/broken_model.py deleted file mode 100644 index 36512852..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/staging/broken_model.py +++ /dev/null @@ -1,2 +0,0 @@ -a = 1 / 0 -write_to_model() diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/staging/model_c.py b/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/staging/model_c.py deleted file mode 100644 index bc58ef4d..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/fal_models/staging/model_c.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -DEPENDENCY: -# not really used, but we put it to make sure it is picked up -- ref('model_a') -""" -from _fal_testing.utils import create_model_artifact -from fal.dbt.typing import * - -df = ref("model_b") - -df["my_bool"] = True - -write_to_model(df) - -create_model_artifact(context) diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/macros b/projects/adapter/cli_tests/projects/008_pure_python_models/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/models/fal/model_e.sql b/projects/adapter/cli_tests/projects/008_pure_python_models/models/fal/model_e.sql deleted file mode 100644 index 3f11eedd..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/models/fal/model_e.sql +++ /dev/null @@ -1,12 +0,0 @@ - -{{ 
config(materialized='ephemeral') }} -/* -FAL_GENERATED d5d59e7be72d81f154140338f730e38c - -Script dependencies: - -{{ ref('model_c') }} - -*/ - -SELECT * FROM {{ this }} diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/models/fal/staging/model_c.sql b/projects/adapter/cli_tests/projects/008_pure_python_models/models/fal/staging/model_c.sql deleted file mode 100644 index 05cb9ccb..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/models/fal/staging/model_c.sql +++ /dev/null @@ -1,13 +0,0 @@ - -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED 2d1b785862f633481ecaa4b410a5a8a6 - -Script dependencies: - -{{ ref('model_a') }} -{{ ref('model_b') }} - -*/ - -SELECT * FROM {{ this }} diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_a.sql b/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_a.sql deleted file mode 100644 index 89c755f9..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_a.sql +++ /dev/null @@ -1,8 +0,0 @@ -WITH data AS ( - - SELECT - 'some text' AS my_text -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_b.sql b/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_b.sql deleted file mode 100644 index c83eca3b..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_b.sql +++ /dev/null @@ -1,9 +0,0 @@ -WITH data AS ( - SELECT - cast(1 AS integer) AS my_int, - my_text - FROM {{ ref('model_a') }} -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_d.sql b/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_d.sql deleted file mode 100644 index 700c7528..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/models/model_d.sql +++ /dev/null @@ -1,14 +0,0 @@ --- To test full-refresh -{{ config(materialized='incremental') }} - -WITH data AS ( - SELECT - my_int, - my_text, - my_bool, - cast('2022-05-11' AS date) AS my_date - FROM {{ ref('model_c') }} -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/models/schema.yml b/projects/adapter/cli_tests/projects/008_pure_python_models/models/schema.yml deleted file mode 100644 index 861468f1..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/models/schema.yml +++ /dev/null @@ -1,35 +0,0 @@ -version: 2 - -models: - - name: model_a - meta: - fal: - scripts: - - after.py - - name: model_b - meta: - fal: - scripts: - before: - - before.py - - name: model_c # Python model - meta: - fal: - post-hook: - - post_hook.py - scripts: - - post_hook.py - - after.py - - name: model_d - - name: model_e # Python (notebook) model - meta: - fal: - pre-hook: - - pre_hook.py - post-hook: - - post_hook.py - - name: broken_model # Python model that will throw an exception - meta: - fal: - post-hook: - - post_hook.py diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/after.py b/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/after.py deleted file mode 100644 index 279b640e..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/after.py +++ /dev/null @@ -1,12 +0,0 @@ -import pandas as pd -from functools import reduce -import os - -model_name = context.current_model.name -df: pd.DataFrame = ref(model_name) - -output = str(df) - -path = reduce(os.path.join, 
[os.getenv("temp_dir", "."), model_name + ".after.txt"]) -with open(path, "w") as file: - file.write(output) diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/before.py b/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/before.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/before.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/post_hook.py b/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/post_hook.py deleted file mode 100644 index e6c7d5b7..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/post_hook.py +++ /dev/null @@ -1,9 +0,0 @@ -import time -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -# Delay just a little bit in order to make sure the file is created -# just after the model file. -time.sleep(0.05) - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/pre_hook.py b/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/pre_hook.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/008_pure_python_models/scripts/pre_hook.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/custom_macros/multiply_by_ten.sql b/projects/adapter/cli_tests/projects/009_execute_sql_function/custom_macros/multiply_by_ten.sql deleted file mode 100644 index 3f74bf83..00000000 --- a/projects/adapter/cli_tests/projects/009_execute_sql_function/custom_macros/multiply_by_ten.sql +++ /dev/null @@ -1,3 +0,0 @@ -{% macro multiply_by_ten(column_name) %} - cast({{ column_name }} * 10 as INT) -{% endmacro %} diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/data/.gitkeep b/projects/adapter/cli_tests/projects/009_execute_sql_function/data/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/dbt_project.yml b/projects/adapter/cli_tests/projects/009_execute_sql_function/dbt_project.yml deleted file mode 100644 index 7a82bb73..00000000 --- a/projects/adapter/cli_tests/projects/009_execute_sql_function/dbt_project.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: "fal_test" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros", "custom_macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/fal_scripts/query_other_model.py b/projects/adapter/cli_tests/projects/009_execute_sql_function/fal_scripts/query_other_model.py deleted file mode 100644 index 205e3e32..00000000 --- a/projects/adapter/cli_tests/projects/009_execute_sql_function/fal_scripts/query_other_model.py +++ /dev/null @@ -1,20 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -df: pd.DataFrame = execute_sql('SELECT * FROM {{ ref("execute_sql_model_one")}}') - -buf = io.StringIO() -df.info(buf=buf, memory_usage=False) -info = buf.getvalue() -output = 
f"\nModel dataframe information:\n{info}" -temp_dir = os.getenv("temp_dir", ".") -write_dir = open( - reduce( - os.path.join, [temp_dir, context.current_model.name + ".query_other_model.txt"] - ), - "w", -) -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/fal_scripts/run_macro.py b/projects/adapter/cli_tests/projects/009_execute_sql_function/fal_scripts/run_macro.py deleted file mode 100644 index 7a19f2ab..00000000 --- a/projects/adapter/cli_tests/projects/009_execute_sql_function/fal_scripts/run_macro.py +++ /dev/null @@ -1,23 +0,0 @@ -import pandas as pd -import io -import os -from functools import reduce - -query = """ - select {{ multiply_by_ten('my_int') }} as my_int_times_ten - from {{ ref('execute_sql_model_two') }} - """ - -df: pd.DataFrame = execute_sql(query) -df.columns = df.columns.str.lower() # Snowflake has uppercase columns -# Cast since some get float -df = df.astype({"my_int_times_ten": int}) - -output = f"\nModel dataframe first row:\n{df.iloc[0]}" -temp_dir = os.getenv("temp_dir", ".") -write_dir = open( - reduce(os.path.join, [temp_dir, context.current_model.name + ".run_macro.txt"]), - "w", -) -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/macros b/projects/adapter/cli_tests/projects/009_execute_sql_function/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/009_execute_sql_function/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/models/execute_sql_model_one.sql b/projects/adapter/cli_tests/projects/009_execute_sql_function/models/execute_sql_model_one.sql deleted file mode 100644 index 84cc6951..00000000 --- a/projects/adapter/cli_tests/projects/009_execute_sql_function/models/execute_sql_model_one.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ config(materialized='table') }} --- {{ ref("execute_sql_model_two") }} - -WITH data AS ( - SELECT - 'some text' AS my_text -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/models/execute_sql_model_two.sql b/projects/adapter/cli_tests/projects/009_execute_sql_function/models/execute_sql_model_two.sql deleted file mode 100644 index 6947f788..00000000 --- a/projects/adapter/cli_tests/projects/009_execute_sql_function/models/execute_sql_model_two.sql +++ /dev/null @@ -1,9 +0,0 @@ -{{ config(materialized='table') }} - -WITH data AS ( - SELECT - cast(1 AS integer) AS my_int -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/009_execute_sql_function/models/schema.yaml b/projects/adapter/cli_tests/projects/009_execute_sql_function/models/schema.yaml deleted file mode 100644 index 3b171a05..00000000 --- a/projects/adapter/cli_tests/projects/009_execute_sql_function/models/schema.yaml +++ /dev/null @@ -1,18 +0,0 @@ -version: 2 - -sources: - - name: results - database: "{{ env_var('DBT_DATABASE', 'test') }}" - schema: "{{ env_var('DBT_SCHEMA', 'dbt_fal') }}" - tables: - - name: "ns__{{ env_var('DB_NAMESPACE', '') }}__ns__some_source" - -models: - - name: execute_sql_model_one - meta: - fal: - scripts: - after: - - fal_scripts/query_other_model.py - - fal_scripts/run_macro.py - - name: execute_sql_model_two diff --git a/projects/adapter/cli_tests/projects/010_source_freshness/dbt_project.yml b/projects/adapter/cli_tests/projects/010_source_freshness/dbt_project.yml deleted file mode 
100644 index 31433c7f..00000000 --- a/projects/adapter/cli_tests/projects/010_source_freshness/dbt_project.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: "fal_010" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "target" -clean-targets: - - "target" - - "dbt_modules" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/010_source_freshness/fal_scripts/freshness.py b/projects/adapter/cli_tests/projects/010_source_freshness/fal_scripts/freshness.py deleted file mode 100644 index 0395913c..00000000 --- a/projects/adapter/cli_tests/projects/010_source_freshness/fal_scripts/freshness.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import List -import os -from fal.dbt import DbtSource - -output = "" -# TODO add real test for freshness -sources: List[DbtSource] = list_sources() -for node in sources: - if node.freshness: - # NOTE: removing the namespace prefix - output += f"({node.name}, {node.table_name.split('__ns__')[1]}) {node.freshness.status}\n" - -temp_dir = os.getenv("temp_dir", ".") -write_dir = open(os.path.join(temp_dir, "GLOBAL.freshness.txt"), "w") -write_dir.write(output) -write_dir.close() diff --git a/projects/adapter/cli_tests/projects/010_source_freshness/load_freshness_table.py b/projects/adapter/cli_tests/projects/010_source_freshness/load_freshness_table.py deleted file mode 100644 index c30cb637..00000000 --- a/projects/adapter/cli_tests/projects/010_source_freshness/load_freshness_table.py +++ /dev/null @@ -1,25 +0,0 @@ -from datetime import datetime as dt, timezone as tz -from pandas import DataFrame -from fal.dbt import FalDbt -from sys import argv -import os - -project_dir = argv[1] if len(argv) >= 2 else "." -profiles_dir = argv[2] if len(argv) >= 3 else ".." 
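# Unlike the hook scripts above, load_freshness_table.py runs standalone: it
# builds a FalDbt(project_dir=..., profiles_dir=...) instance itself and then
# calls the same API surface (write_to_source, ref, list_sources) as methods
# on that instance rather than as injected globals.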
- -faldbt = FalDbt(project_dir=project_dir, profiles_dir=profiles_dir) - -# 10 rows -df = DataFrame({"loaded_at": dt.now(tz=tz.utc).isoformat(), "info": range(0, 10)}) - -table_prefix = f"ns__{ os.environ.get('DB_NAMESPACE', '') }__ns__" - -print(df) -faldbt.write_to_source(df, "freshness_test", table_prefix + "freshness_table", mode="overwrite") - -from time import sleep - -# Let BigQuery cache load it -sleep(5) - -print("Loaded") diff --git a/projects/adapter/cli_tests/projects/010_source_freshness/macros b/projects/adapter/cli_tests/projects/010_source_freshness/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/010_source_freshness/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/010_source_freshness/models/schema.yml b/projects/adapter/cli_tests/projects/010_source_freshness/models/schema.yml deleted file mode 100644 index 9377c5d9..00000000 --- a/projects/adapter/cli_tests/projects/010_source_freshness/models/schema.yml +++ /dev/null @@ -1,22 +0,0 @@ -version: 2 - -sources: - - name: freshness_test - database: "{{ env_var('DBT_DATABASE', 'test') }}" - schema: "{{ env_var('DBT_SCHEMA', 'dbt_fal') }}" - freshness: - warn_after: { "count": 5, "period": minute } - error_after: { "count": 30, "period": minute } - tables: - - name: "ns__{{ env_var('DB_NAMESPACE', '') }}__ns__freshness_table" - loaded_at_field: "current_timestamp" - columns: - - name: info - tests: - - unique - - - name: "ns__{{ env_var('DB_NAMESPACE', '') }}__ns__freshness_xther" # does not exist (run time error) - loaded_at_field: "current_timestamp" -fal: - scripts: - - fal_scripts/freshness.py diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/.gitignore b/projects/adapter/cli_tests/projects/011_highly_parallelizable/.gitignore deleted file mode 100644 index 49f147cb..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -target/ -dbt_packages/ -logs/ diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/README.md b/projects/adapter/cli_tests/projects/011_highly_parallelizable/README.md deleted file mode 100644 index 7874ac84..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Welcome to your new dbt project! 
- -### Using the starter project - -Try running the following commands: -- dbt run -- dbt test - - -### Resources: -- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction) -- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers -- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support -- Find [dbt events](https://events.getdbt.com) near you -- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/analyses/.gitkeep b/projects/adapter/cli_tests/projects/011_highly_parallelizable/analyses/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/dbt_project.yml b/projects/adapter/cli_tests/projects/011_highly_parallelizable/dbt_project.yml deleted file mode 100644 index c7d8b08e..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/dbt_project.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: "fal_011" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-scripts-path: "scripts" - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/fal_models/flow_1/model_b1.py b/projects/adapter/cli_tests/projects/011_highly_parallelizable/fal_models/flow_1/model_b1.py deleted file mode 100644 index 8997e6a1..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/fal_models/flow_1/model_b1.py +++ /dev/null @@ -1,8 +0,0 @@ -from _fal_testing import create_model_artifact - -# weird way to call, but added in the docstring -df = ref("model_a1") -df["b1_data"] = 1 -write_to_model(df) - -create_model_artifact(context) diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/macros b/projects/adapter/cli_tests/projects/011_highly_parallelizable/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/fal/flow_1/model_b1.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/fal/flow_1/model_b1.sql deleted file mode 100644 index 3a657211..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/fal/flow_1/model_b1.sql +++ /dev/null @@ -1,12 +0,0 @@ - -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED 8cbbef9c1ac3aacaddb5c94d55d2ce6c - -Script dependencies: - -{{ ref('model_a1') }} - -*/ - -SELECT * FROM {{ target.schema }}.{{ model.alias }} diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/final.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/final.sql deleted file mode 100644 index 794fadd0..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/final.sql +++ /dev/null @@ -1,11 +0,0 @@ --- depends_on: {{ ref('model_e1') }} --- depends_on: {{ ref('model_h2') }} --- depends_on: {{ ref('model_i2') }} --- depends_on: {{ ref('model_j2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS final_data -) - -SELECT * -FROM data diff --git 
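model_b1.py and its FAL_GENERATED companion model_b1.sql above show how fal slots Python models into the dbt DAG: the script body is the model, and fal emits a checksummed ephemeral SQL stub carrying the script's ref() edges so dbt can schedule it correctly. A minimal sketch of the Python side, using the fixture's names:

# fal_models/flow_1/model_b1.py -- ref and write_to_model are injected by fal
df = ref("model_a1")  # declares the DAG edge model_a1 -> model_b1
df["b1_data"] = 1     # derive the new column
write_to_model(df)    # materialize model_b1 from this script
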
a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_a1.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_a1.sql deleted file mode 100644 index a3bc02fe..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_a1.sql +++ /dev/null @@ -1,6 +0,0 @@ -WITH data AS ( - SELECT cast(1 AS integer) AS a1_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_c1.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_c1.sql deleted file mode 100644 index 8e24e025..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_c1.sql +++ /dev/null @@ -1,8 +0,0 @@ -WITH data AS ( - SELECT - cast(1 AS integer) AS c1_data - FROM {{ ref('model_a1') }} -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_d1.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_d1.sql deleted file mode 100644 index 34fc6233..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_d1.sql +++ /dev/null @@ -1,8 +0,0 @@ -WITH data AS ( - SELECT - cast(1 AS integer) AS d1_data - FROM {{ ref('model_a1') }} -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_e1.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_e1.sql deleted file mode 100644 index 17d57a7e..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_1/model_e1.sql +++ /dev/null @@ -1,10 +0,0 @@ --- depends_on: {{ ref('model_b1') }} --- depends_on: {{ ref('model_c1') }} --- depends_on: {{ ref('model_d1') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS e1_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_a2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_a2.sql deleted file mode 100644 index c2f5249d..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_a2.sql +++ /dev/null @@ -1,6 +0,0 @@ -WITH data AS ( - SELECT cast(1 AS integer) AS a2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_b2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_b2.sql deleted file mode 100644 index eac6c871..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_b2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_a2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS b2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_c2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_c2.sql deleted file mode 100644 index 1072769a..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_c2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_a2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS c2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_d2.sql 
b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_d2.sql deleted file mode 100644 index 3e501949..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_d2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_a2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS d2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_e2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_e2.sql deleted file mode 100644 index 879255ac..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_e2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_b2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS e2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_f2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_f2.sql deleted file mode 100644 index 3127e956..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_f2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_c2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS f2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_g2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_g2.sql deleted file mode 100644 index ad8dd2d8..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_g2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_d2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS g2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_h2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_h2.sql deleted file mode 100644 index 4a7b8ff7..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_h2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_e2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS h2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_i2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_i2.sql deleted file mode 100644 index ab3082fe..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_i2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_f2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS i2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_j2.sql b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_j2.sql deleted file mode 100644 index 767bed2c..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/flow_2/model_j2.sql +++ /dev/null @@ -1,8 +0,0 @@ --- depends_on: {{ ref('model_g2') }} - -WITH data AS ( - SELECT cast(1 AS integer) AS j2_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/schema.yaml b/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/schema.yaml deleted file mode 100644 index 0b69d265..00000000 --- 
a/projects/adapter/cli_tests/projects/011_highly_parallelizable/models/schema.yaml +++ /dev/null @@ -1,63 +0,0 @@ -version: 2 - -models: - - name: model_a1 - - name: model_b1 - - name: model_c1 - meta: - fal: - post-hook: - - post_hook1.py - - post_hook2.py - - post_hook3.py - - name: model_d1 - - name: model_e1 - meta: - fal: - post-hook: - - post_hook1.py - - name: model_a2 - - name: model_b2 - meta: - fal: - post-hook: - - post_hook1.py - - name: model_c2 - meta: - fal: - post-hook: - - post_hook1.py - - name: model_d2 - - name: model_e2 - meta: - fal: - post-hook: - - post_hook1.py - - post_hook2.py - - name: model_f2 - meta: - fal: - post-hook: - - post_hook1.py - - post_hook2.py - - name: model_g2 - - name: model_h2 - meta: - fal: - post-hook: - - post_hook1.py - - post_hook2.py - - post_hook3.py - - name: model_i2 - meta: - fal: - post-hook: - - post_hook1.py - - post_hook2.py - - post_hook3.py - - name: model_j2 - - name: final - meta: - fal: - post-hook: - - post_hook1.py diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook1.py b/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook1.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook1.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook2.py b/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook2.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook2.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook3.py b/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook3.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/011_highly_parallelizable/scripts/post_hook3.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/seeds/.gitkeep b/projects/adapter/cli_tests/projects/011_highly_parallelizable/seeds/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/snapshots/.gitkeep b/projects/adapter/cli_tests/projects/011_highly_parallelizable/snapshots/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/011_highly_parallelizable/tests/.gitkeep b/projects/adapter/cli_tests/projects/011_highly_parallelizable/tests/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/012_model_generation_error/data/.gitkeep b/projects/adapter/cli_tests/projects/012_model_generation_error/data/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/cli_tests/projects/012_model_generation_error/dbt_project.yml b/projects/adapter/cli_tests/projects/012_model_generation_error/dbt_project.yml deleted file mode 100644 index 584142ab..00000000 --- 
a/projects/adapter/cli_tests/projects/012_model_generation_error/dbt_project.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: "fal_012" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-models-paths: ["fal_models"] diff --git a/projects/adapter/cli_tests/projects/012_model_generation_error/fal_models/no_write_model.py b/projects/adapter/cli_tests/projects/012_model_generation_error/fal_models/no_write_model.py deleted file mode 100644 index a3ace384..00000000 --- a/projects/adapter/cli_tests/projects/012_model_generation_error/fal_models/no_write_model.py +++ /dev/null @@ -1 +0,0 @@ -print("something") diff --git a/projects/adapter/cli_tests/projects/012_model_generation_error/macros b/projects/adapter/cli_tests/projects/012_model_generation_error/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/012_model_generation_error/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/012_model_generation_error/models/schema.yml b/projects/adapter/cli_tests/projects/012_model_generation_error/models/schema.yml deleted file mode 100644 index 4edea1a1..00000000 --- a/projects/adapter/cli_tests/projects/012_model_generation_error/models/schema.yml +++ /dev/null @@ -1,4 +0,0 @@ -version: 2 - -models: - - name: no_write_model # Python model diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/.gitignore b/projects/adapter/cli_tests/projects/013_structured_hooks/.gitignore deleted file mode 100644 index a27fe0f4..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/.gitignore +++ /dev/null @@ -1 +0,0 @@ -models/fal/staging/broken_model.sql diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/dbt_project.yml b/projects/adapter/cli_tests/projects/013_structured_hooks/dbt_project.yml deleted file mode 100644 index 78b6d1ee..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/dbt_project.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: "fal_013" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - fal-scripts-path: "scripts" - fal-models-paths: ["fal_models"] - -models: - +schema: custom diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_d.py b/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_d.py deleted file mode 100644 index ea1de644..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_d.py +++ /dev/null @@ -1,11 +0,0 @@ -import pyjokes -from fal.dbt.typing import * -from _fal_testing import create_model_artifact - -df = ref("model_c") - -df["model_e_data"] = True - -write_to_model(df) - -create_model_artifact(context, f"PyJokes version: {pyjokes.__version__}") diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_e.py b/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_e.py deleted file mode 100644 index ea1de644..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_e.py +++ /dev/null @@ -1,11 +0,0 @@ -import pyjokes -from fal.dbt.typing import * -from _fal_testing import create_model_artifact - -df = 
ref("model_c") - -df["model_e_data"] = True - -write_to_model(df) - -create_model_artifact(context, f"PyJokes version: {pyjokes.__version__}") diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_f.py b/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_f.py deleted file mode 100644 index 3ffe7ac7..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/fal_models/model_f.py +++ /dev/null @@ -1,21 +0,0 @@ -import sys -from pathlib import Path - -from fal.dbt.typing import * -from fal.dbt.packages.environments.virtual_env import _BASE_VENV_DIR -from _fal_testing import create_model_artifact - -# To determine whether this is a fal-created environment or not -# we'll check whether the executable that is running this script -# is located under _BASE_VENV_DIR - -executable_path = Path(sys.executable) -environment_type = "venv" if _BASE_VENV_DIR in executable_path.parents else "local" - -df = ref("model_c") - -df["model_e_data"] = True - -write_to_model(df) - -create_model_artifact(context, additional_data=f"Environment: {environment_type}") diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/fal_project.yml b/projects/adapter/cli_tests/projects/013_structured_hooks/fal_project.yml deleted file mode 100644 index 1f0f0514..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/fal_project.yml +++ /dev/null @@ -1,23 +0,0 @@ -environments: - - name: not-funny - type: venv - - - name: funny - type: venv - requirements: - - pyjokes==0.6.0 - - - name: pyjokes-0.5.0 - type: venv - requirements: - - pyjokes==0.5.0 - - - name: pyjokes-0.6.0 - type: venv - requirements: - - pyjokes==0.6.0 - - - name: funny-conda - type: conda - packages: - - pyjokes=0.6.0 diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/macros b/projects/adapter/cli_tests/projects/013_structured_hooks/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_d.sql b/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_d.sql deleted file mode 100644 index 3f11eedd..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_d.sql +++ /dev/null @@ -1,12 +0,0 @@ - -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED d5d59e7be72d81f154140338f730e38c - -Script dependencies: - -{{ ref('model_c') }} - -*/ - -SELECT * FROM {{ this }} diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_e.sql b/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_e.sql deleted file mode 100644 index 3f11eedd..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_e.sql +++ /dev/null @@ -1,12 +0,0 @@ - -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED d5d59e7be72d81f154140338f730e38c - -Script dependencies: - -{{ ref('model_c') }} - -*/ - -SELECT * FROM {{ this }} diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_f.sql b/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_f.sql deleted file mode 100644 index 3f11eedd..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/models/fal/model_f.sql +++ /dev/null @@ -1,12 +0,0 @@ - -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED 
d5d59e7be72d81f154140338f730e38c - -Script dependencies: - -{{ ref('model_c') }} - -*/ - -SELECT * FROM {{ this }} diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_a.sql b/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_a.sql deleted file mode 100644 index 89c755f9..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_a.sql +++ /dev/null @@ -1,8 +0,0 @@ -WITH data AS ( - - SELECT - 'some text' AS my_text -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_b.sql b/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_b.sql deleted file mode 100644 index 89c755f9..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_b.sql +++ /dev/null @@ -1,8 +0,0 @@ -WITH data AS ( - - SELECT - 'some text' AS my_text -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_c.sql b/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_c.sql deleted file mode 100644 index a8734840..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_c.sql +++ /dev/null @@ -1,8 +0,0 @@ -WITH model_c AS ( - - SELECT - 'some text' AS my_text -) - -SELECT * -FROM model_c diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_g.sql b/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_g.sql deleted file mode 100644 index 077957ea..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/models/model_g.sql +++ /dev/null @@ -1,8 +0,0 @@ -WITH model_g AS ( - - SELECT - 'some text' AS my_text -) - -SELECT * -FROM model_g diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/models/schema.yml b/projects/adapter/cli_tests/projects/013_structured_hooks/models/schema.yml deleted file mode 100644 index 7ce2daf0..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/models/schema.yml +++ /dev/null @@ -1,127 +0,0 @@ -version: 2 - -models: - - name: model_a - meta: - fal: - pre-hook: - - pre_hook_1.py - - path: pre_hook_2.py - - path: add.py - with: - left: 2 - right: 3 - - post-hook: - - post_hook_1.py - - path: post_hook_2.py - - path: sub.py - with: - left: 5 - right: 2 - - path: types.py - with: - number: 5 - text: "type" - sequence: - - 1 - - 2 - - 3 - mapping: - key: value - - # Hook with environment overrides - - name: model_b - meta: - fal: - pre-hook: - - path: local_hook.py - - path: funny_hook.py - environment: funny - post-hook: - - path: check_imports.py - environment: not-funny - with: - import: pyjokes - expected_success: false - - # A regular DBT model on a new environment - - name: model_c - meta: - fal: - environment: pyjokes-0.6.0 - post-hook: - - path: check_imports.py - with: - import: pyjokes - expected_success: true - - # A Python model on an environment w/pyjokes==0.5.0 - - name: model_d - meta: - fal: - environment: pyjokes-0.5.0 - post-hook: - - path: check_imports.py - with: - import: pyjokes - expected_success: true - - # A Python model on an environment w/pyjokes==0.6.0 - - name: model_e - meta: - fal: - environment: pyjokes-0.6.0 - pre-hook: - # We override the default environment here - # for only this hook, and expect the import - # of pyjokes to fail under 'not-funny' environment. 
- - path: check_imports.py - environment: not-funny - with: - import: pyjokes - expected_success: false - - # But all other hooks should still be able to - # access the model's environment - - joke_version.py - - # And they can even customize the version - # with another override - - path: funny_hook.py - environment: pyjokes-0.5.0 - - # A node-scoped environment with local hooks - - name: model_f - meta: - fal: - environment: not-funny - pre-hook: - - path: environment_type.py - environment: local - - post-hook: - - path: environment_type_2.py - environment: local - - - path: environment_type_3.py - - - name: model_g - meta: - fal: - environment: funny-conda - pre-hook: - - path: environment_type.py - - - path: environment_type_2.py - environment: funny - - - path: environment_type_3.py - environment: local - - post-hook: - - path: check_imports.py - with: - import: pyjokes - expected_success: true - version: "0.6.0" diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/add.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/add.py deleted file mode 100644 index 30c5980b..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/add.py +++ /dev/null @@ -1,8 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact( - context, - "Calculation result: " - + str(context.arguments["left"] + context.arguments["right"]), -) diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/check_imports.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/check_imports.py deleted file mode 100644 index a88cbf0f..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/check_imports.py +++ /dev/null @@ -1,24 +0,0 @@ -import importlib -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) - -module_name = context.arguments["import"] -expectation = "succeed" if context.arguments["expected_success"] else "fail" - -try: - module = importlib.import_module(module_name) -except ImportError: - result = "fail" -else: - result = "succeed" - -assert ( - expectation == result -), f"Expected import {module_name} to {expectation}, but it {result}." - -if "version" in context.arguments: - expected_version = context.arguments["version"] - actual_version = module.__version__ - assert expected_version == actual_version, f"Expected version {expected_version} of {module_name}, but got {actual_version}."
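For reference, hooks such as the deleted check_imports.py above receive the parameters declared in their schema.yml `with:` block through the `context.arguments` mapping that fal injects into the script. The following is a minimal, self-contained sketch of that pattern, not part of the patch: `FakeContext` and the example argument values are stand-ins invented for illustration, not the fal API.

import importlib

class FakeContext:
    # Stand-in for the `context` object that fal.dbt.typing provides to hooks;
    # these argument values mirror what a schema.yml `with:` block would supply.
    arguments = {"import": "json", "expected_success": True}

context = FakeContext()

module_name = context.arguments["import"]
expectation = "succeed" if context.arguments["expected_success"] else "fail"

try:
    importlib.import_module(module_name)
except ImportError:
    result = "fail"
else:
    result = "succeed"

# Fail loudly if the environment does not match the declared expectation,
# which is how the fixture verifies per-hook environment overrides.
assert expectation == result, f"Expected import {module_name} to {expectation}, but it {result}."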
diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type.py deleted file mode 100644 index cde0e2a0..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact, get_environment_type - -create_dynamic_artifact(context, additional_data=f"Environment: {get_environment_type()}") diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type_2.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type_2.py deleted file mode 100644 index cde0e2a0..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type_2.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact, get_environment_type - -create_dynamic_artifact(context, additional_data=f"Environment: {get_environment_type()}") diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type_3.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type_3.py deleted file mode 100644 index 385e7712..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/environment_type_3.py +++ /dev/null @@ -1,7 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact, get_environment_type - -df = ref('model_c') -print(f"Model c has {len(df)} rows") - -create_dynamic_artifact(context, additional_data=f"Environment: {get_environment_type()}") diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/funny_hook.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/funny_hook.py deleted file mode 100644 index 33bacd0a..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/funny_hook.py +++ /dev/null @@ -1,9 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -import pyjokes - -print("While we are preparing your artifacts, here is a joke for you: ", pyjokes.get_joke()) - -create_dynamic_artifact(context, f"PyJokes version: {pyjokes.__version__}") - diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/joke_version.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/joke_version.py deleted file mode 100644 index d16c502d..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/joke_version.py +++ /dev/null @@ -1,7 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -import pyjokes - -create_dynamic_artifact(context, f"PyJokes version: {pyjokes.__version__}") - diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/local_hook.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/local_hook.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/local_hook.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/post_hook_1.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/post_hook_1.py deleted file mode 100644 index 860d4771..00000000 --- 
a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/post_hook_1.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/post_hook_2.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/post_hook_2.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/post_hook_2.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/pre_hook_1.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/pre_hook_1.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/pre_hook_1.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/pre_hook_2.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/pre_hook_2.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/pre_hook_2.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/sub.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/sub.py deleted file mode 100644 index e5648d37..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/sub.py +++ /dev/null @@ -1,8 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact( - context, - "Calculation result: " - + str(context.arguments["left"] - context.arguments["right"]), -) diff --git a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/types.py b/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/types.py deleted file mode 100644 index 471a9d5c..00000000 --- a/projects/adapter/cli_tests/projects/013_structured_hooks/scripts/types.py +++ /dev/null @@ -1,8 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact( - context, - "Arguments: " - + ", ".join(f"{key}={value!r}" for key, value in context.arguments.items()), -) diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/.gitignore b/projects/adapter/cli_tests/projects/014_broken_dbt_models/.gitignore deleted file mode 100644 index a27fe0f4..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/.gitignore +++ /dev/null @@ -1 +0,0 @@ -models/fal/staging/broken_model.sql diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/dbt_project.yml b/projects/adapter/cli_tests/projects/014_broken_dbt_models/dbt_project.yml deleted file mode 100644 index 8e85fb40..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/dbt_project.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: "fal_014" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -seed-paths: ["data"] -snapshot-paths: ["snapshots"] -target-path: "{{ env_var('temp_dir') }}/target" -vars: - 
fal-scripts-path: "scripts" - fal-models-paths: ["fal_models"] - -models: - +schema: custom diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/fal_models/get_data.py b/projects/adapter/cli_tests/projects/014_broken_dbt_models/fal_models/get_data.py deleted file mode 100644 index a02342b0..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/fal_models/get_data.py +++ /dev/null @@ -1,18 +0,0 @@ -import pandas -from fal.dbt.typing import * -from _fal_testing import create_model_artifact - -df = pandas.DataFrame() - -df["negative_data"] = [ - -1, - -2, - -3, -] -df["positive_data"] = [ - 1, - 2, - 3, -] -write_to_model(df) -create_model_artifact(context) diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/macros b/projects/adapter/cli_tests/projects/014_broken_dbt_models/macros deleted file mode 120000 index dc8c6dea..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/macros +++ /dev/null @@ -1 +0,0 @@ -../default/macros \ No newline at end of file diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/fal/get_data.sql b/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/fal/get_data.sql deleted file mode 100644 index fe0c495f..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/fal/get_data.sql +++ /dev/null @@ -1,12 +0,0 @@ - -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED d3c058017f3fae45d61db3f40f814d7c - -Script dependencies: - - - -*/ - -SELECT * FROM {{ this }} diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/process_data.sql b/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/process_data.sql deleted file mode 100644 index e389d8c6..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/process_data.sql +++ /dev/null @@ -1,10 +0,0 @@ --- depends_on: {{ ref('regular_model') }} - -WITH data AS ( - SELECT - BROKEN_MODEL, - FROM {{ ref('get_data') }} -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/regular_model.sql b/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/regular_model.sql deleted file mode 100644 index 47bff564..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/regular_model.sql +++ /dev/null @@ -1,6 +0,0 @@ -WITH data AS ( - SELECT cast(1 AS integer) AS regular_data -) - -SELECT * -FROM data diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/schema.yml b/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/schema.yml deleted file mode 100644 index 0960d54b..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/models/schema.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 - -models: - - name: get_data - - name: regular_model - meta: - fal: - post-hook: - - post_hook.py - - name: process_data diff --git a/projects/adapter/cli_tests/projects/014_broken_dbt_models/scripts/post_hook.py b/projects/adapter/cli_tests/projects/014_broken_dbt_models/scripts/post_hook.py deleted file mode 100644 index 860d4771..00000000 --- a/projects/adapter/cli_tests/projects/014_broken_dbt_models/scripts/post_hook.py +++ /dev/null @@ -1,4 +0,0 @@ -from fal.dbt.typing import * -from _fal_testing import create_dynamic_artifact - -create_dynamic_artifact(context) diff --git a/projects/adapter/cli_tests/projects/default/macros/generate_alias_name.sql b/projects/adapter/cli_tests/projects/default/macros/generate_alias_name.sql 
deleted file mode 100644 index 8a7a9475..00000000 --- a/projects/adapter/cli_tests/projects/default/macros/generate_alias_name.sql +++ /dev/null @@ -1,13 +0,0 @@ -{% macro generate_alias_name(custom_alias_name=none, node=none) -%} - - {%- if custom_alias_name is none -%} - - ns__{{ env_var('DB_NAMESPACE', '') }}__ns__{{ project_name }}_{{ node.name }} - - {%- else -%} - - ns__{{ env_var('DB_NAMESPACE', '') }}__ns__{{ custom_alias_name | trim }} - - {%- endif -%} - -{%- endmacro %} diff --git a/projects/adapter/cli_tests/projects/profiles.yml b/projects/adapter/cli_tests/projects/profiles.yml deleted file mode 100755 index 67ffe3d1..00000000 --- a/projects/adapter/cli_tests/projects/profiles.yml +++ /dev/null @@ -1,15 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: dev - outputs: - dev: - type: postgres - host: localhost - port: 5432 - user: pguser - password: pass - dbname: test - schema: dbt_fal - threads: 4 diff --git a/projects/adapter/integration_tests/configs/sqlserver/docker-compose.yml b/projects/adapter/integration_tests/configs/sqlserver/docker-compose.yml deleted file mode 100644 index 5629296b..00000000 --- a/projects/adapter/integration_tests/configs/sqlserver/docker-compose.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: '3.9' - -services: - sqlserver: - image: mcr.microsoft.com/mssql/server:2022-latest - container_name: fal_sql_server - environment: - ACCEPT_EULA: Y - MSSQL_SA_PASSWORD: strongPassword1@ - ports: - - 1433:1433 diff --git a/projects/adapter/integration_tests/configs/trino/catalog/postgresql.properties b/projects/adapter/integration_tests/configs/trino/catalog/postgresql.properties deleted file mode 100644 index c8a1688c..00000000 --- a/projects/adapter/integration_tests/configs/trino/catalog/postgresql.properties +++ /dev/null @@ -1,4 +0,0 @@ -connector.name=postgresql -connection-url=jdbc:postgresql://db:5432/test -connection-user=pguser -connection-password=pass diff --git a/projects/adapter/integration_tests/configs/trino/docker-compose.yml b/projects/adapter/integration_tests/configs/trino/docker-compose.yml deleted file mode 100644 index a042abcc..00000000 --- a/projects/adapter/integration_tests/configs/trino/docker-compose.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: '3.9' - -services: - db: - image: postgres:12 - container_name: fal_db - environment: - POSTGRES_USER: pguser - POSTGRES_PASSWORD: pass - POSTGRES_DB: test - ports: - - 5432:5432 - trino: - image: trinodb/trino:latest - volumes: - - ./catalog:/etc/trino/catalog - ports: - - 8080:8080 diff --git a/projects/adapter/cli_tests/docker-compose.yml b/projects/adapter/integration_tests/docker-compose.yml similarity index 100% rename from projects/adapter/cli_tests/docker-compose.yml rename to projects/adapter/integration_tests/docker-compose.yml diff --git a/projects/adapter/integration_tests/features/cloud.feature b/projects/adapter/integration_tests/features/cloud.feature deleted file mode 100644 index b9f9cb40..00000000 --- a/projects/adapter/integration_tests/features/cloud.feature +++ /dev/null @@ -1,23 +0,0 @@ -@skip # FAL Cloud is not supported -@cloud -Feature: isolate cloud - Background: Project Setup - Given the project env_project - - Scenario: Run a Python model with Isolate Cloud - When the following shell command is invoked: - """ - dbt --debug run --profiles-dir $profilesDir --project-dir $baseDir -t prod --select +model_c+ - """ - Then there should be no errors - Then the following models are calculated in order: - | model_a | model_c | model_d | - - 
Scenario: Run a Python model with Isolate Cloud and conda - When the following shell command is invoked: - """ - dbt --debug run --profiles-dir $profilesDir --project-dir $baseDir -t prod --select +model_e - """ - Then there should be no errors - Then the following models are calculated in order: - | model_a | model_e | diff --git a/projects/adapter/integration_tests/features/source.feature b/projects/adapter/integration_tests/features/source.feature deleted file mode 100644 index 57c0ef35..00000000 --- a/projects/adapter/integration_tests/features/source.feature +++ /dev/null @@ -1,33 +0,0 @@ -@TODO-duckdb -@TODO-bigquery -@TODO-trino -@TODO-athena -Feature: dbt-fal can query sources - Background: Project Setup - Given the project source_freshness - - Scenario: Run a Python model that queries a source in local environment - When the following shell command is invoked: - """ - python $baseDir/load_freshness_table.py $baseDir $profilesDir - """ - And the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir - """ - Then the following models are calculated in order: - | model_a | - - @skip # FAL cloud not supported - @cloud - Scenario: Run a Python model that queries a source with Isolate Cloud - When the following shell command is invoked: - """ - python $baseDir/load_freshness_table.py $baseDir $profilesDir - """ - And the following shell command is invoked: - """ - dbt run --profiles-dir $profilesDir --project-dir $baseDir -t prod - """ - Then the following models are calculated in order: - | model_a | diff --git a/projects/adapter/integration_tests/features/teleport.feature b/projects/adapter/integration_tests/features/teleport.feature deleted file mode 100644 index 8b9caef7..00000000 --- a/projects/adapter/integration_tests/features/teleport.feature +++ /dev/null @@ -1,13 +0,0 @@ -@skip # Not supported. Presently no DBT target "staging_with_teleport" exists. 
-@teleport -Feature: Teleporting data - Background: Project Setup - Given the project env_project - - Scenario: Run a Python model with Isolate and teleport - When the following shell command is invoked: - """ - dbt --debug run --profiles-dir $profilesDir --project-dir $baseDir -t staging_with_teleport --select +model_c+ - """ - Then the following models are calculated in order: - | model_a | model_c | model_d | diff --git a/projects/adapter/integration_tests/profiles/athena/profiles.yml b/projects/adapter/integration_tests/profiles/athena/profiles.yml deleted file mode 100644 index 37cdef31..00000000 --- a/projects/adapter/integration_tests/profiles/athena/profiles.yml +++ /dev/null @@ -1,17 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: staging - outputs: - staging: - type: fal - db_profile: db - db: - type: athena - s3_staging_dir: "{{ env_var('ATHENA_S3_STAGING_DIR') }}" - region_name: us-east-1 - database: "{{ env_var('ATHENA_DATABASE') }}" - schema: "{{ env_var('ATHENA_SCHEMA') }}" - work_group: primary - num_retries: 0 diff --git a/projects/adapter/integration_tests/profiles/bigquery/profiles.yml b/projects/adapter/integration_tests/profiles/bigquery/profiles.yml deleted file mode 100644 index 8d3bfbe8..00000000 --- a/projects/adapter/integration_tests/profiles/bigquery/profiles.yml +++ /dev/null @@ -1,31 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: staging - outputs: - staging: - type: fal - db_profile: db - prod: - type: fal - db_profile: db - host: "{{ env_var('FAL_HOST') }}" - key_secret: "{{ env_var('FAL_KEY_SECRET') }}" - key_id: "{{ env_var('FAL_KEY_ID') }}" - db: - type: bigquery - method: service-account-json - project: "{{ env_var('GCLOUD_PROJECT') }}" - dataset: "{{ env_var('BQ_DATASET') }}" - keyfile_json: - type: service_account - project_id: "{{ env_var('GCLOUD_PROJECT') }}" - private_key_id: "{{ env_var('GCLOUD_PRIVATE_KEY_ID') }}" - private_key: "{{ env_var('GCLOUD_PRIVATE_KEY') }}" - client_email: "{{ env_var('GCLOUD_CLIENT_EMAIL') }}" - client_id: "{{ env_var('GCLOUD_CLIENT_ID') }}" - auth_uri: https://accounts.google.com/o/oauth2/auth - token_uri: https://oauth2.googleapis.com/token - auth_provider_x509_cert_url: https://www.googleapis.com/oauth2/v1/certs - client_x509_cert_url: "{{ env_var('GCLOUD_X509_CERT_URL') }}" diff --git a/projects/adapter/integration_tests/profiles/duckdb/profiles.yml b/projects/adapter/integration_tests/profiles/duckdb/profiles.yml deleted file mode 100644 index 5eb2ddd5..00000000 --- a/projects/adapter/integration_tests/profiles/duckdb/profiles.yml +++ /dev/null @@ -1,18 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: staging - outputs: - staging: - type: fal - db_profile: db - prod: - type: fal - db_profile: db - host: "{{ env_var('FAL_HOST') }}" - key_secret: "{{ env_var('FAL_KEY_SECRET') }}" - key_id: "{{ env_var('FAL_KEY_ID') }}" - db: - type: duckdb - path: "{{ env_var('DB_PATH') }}" diff --git a/projects/adapter/integration_tests/profiles/snowflake/profiles.yml b/projects/adapter/integration_tests/profiles/snowflake/profiles.yml deleted file mode 100644 index b3e6de9b..00000000 --- a/projects/adapter/integration_tests/profiles/snowflake/profiles.yml +++ /dev/null @@ -1,48 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: staging - outputs: - staging: - type: fal - db_profile: db - prod: - type: fal - db_profile: db - host: "{{ env_var('FAL_HOST') }}" - key_secret: "{{ env_var('FAL_KEY_SECRET') }}" - key_id: "{{ 
env_var('FAL_KEY_ID') }}" - staging_with_teleport: - type: fal - db_profile: db - teleport: - type: s3 - s3_bucket: fal-dbt-test - s3_region: us-east-1 - s3_access_key_id: "{{ env_var('AWS_ACCESS_KEY_ID') }}" - s3_access_key: "{{ env_var('AWS_SECRET_ACCESS_KEY') }}" - prod_with_teleport: - type: fal - db_profile: db - host: "{{ env_var('FAL_HOST') }}" - key_secret: "{{ env_var('FAL_KEY_SECRET') }}" - key_id: "{{ env_var('FAL_KEY_ID') }}" - teleport: - type: s3 - s3_bucket: fal-dbt-test - s3_region: us-east-1 - s3_access_key_id: "{{ env_var('AWS_ACCESS_KEY_ID') }}" - s3_access_key: "{{ env_var('AWS_SECRET_ACCESS_KEY') }}" - db: - type: snowflake - account: "{{ env_var('SF_ACCOUNT') }}" - user: "{{ env_var('SF_USER') }}" - password: "{{ env_var('SF_PASSWORD') }}" - role: "{{ env_var('SF_ROLE') }}" - warehouse: "{{ env_var('SF_WAREHOUSE') }}" - database: "{{ env_var('SF_DATABASE') }}" - schema: "{{ env_var('SF_SCHEMA') }}" - threads: 4 -config: - send_anonymous_usage_stats: False diff --git a/projects/adapter/integration_tests/profiles/sqlserver/profiles.yml b/projects/adapter/integration_tests/profiles/sqlserver/profiles.yml deleted file mode 100644 index 16b36e26..00000000 --- a/projects/adapter/integration_tests/profiles/sqlserver/profiles.yml +++ /dev/null @@ -1,25 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: staging - outputs: - staging: - type: fal - db_profile: db - prod: - type: fal - db_profile: db - host: "{{ env_var('FAL_HOST') }}" - key_secret: "{{ env_var('FAL_KEY_SECRET') }}" - key_id: "{{ env_var('FAL_KEY_ID') }}" - db: - type: sqlserver - driver: 'ODBC Driver 18 for SQL Server' - server: localhost - port: 1433 - database: tempdb - schema: dbo - user: sa - password: strongPassword1@ - trust_cert: true diff --git a/projects/adapter/integration_tests/profiles/trino/profiles.yml b/projects/adapter/integration_tests/profiles/trino/profiles.yml deleted file mode 100644 index 9b088006..00000000 --- a/projects/adapter/integration_tests/profiles/trino/profiles.yml +++ /dev/null @@ -1,27 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: staging - outputs: - staging: - type: fal - db_profile: db - prod: - type: fal - db_profile: db - host: "{{ env_var('FAL_HOST') }}" - key_secret: "{{ env_var('FAL_KEY_SECRET') }}" - key_id: "{{ env_var('FAL_KEY_ID') }}" - db: - type: trino - user: user - host: localhost - port: 8080 - database: postgresql - schema: dbt_fal - threads: 8 - http_scheme: http - session_properties: - query_max_run_time: 5d - exchange_compression: True diff --git a/projects/adapter/integration_tests/projects/source_freshness/dbt_project.yml b/projects/adapter/integration_tests/projects/source_freshness/dbt_project.yml deleted file mode 100644 index c844f12d..00000000 --- a/projects/adapter/integration_tests/projects/source_freshness/dbt_project.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: "source_test" -version: "1.0.0" -config-version: 2 -profile: "fal_test" - -model-paths: ["models"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] - -vars: - fal-scripts-path: "fal_scripts" - -models: - +schema: custom diff --git a/projects/adapter/integration_tests/projects/source_freshness/fal_project.yml b/projects/adapter/integration_tests/projects/source_freshness/fal_project.yml deleted file mode 100644 index 9025297d..00000000 --- a/projects/adapter/integration_tests/projects/source_freshness/fal_project.yml +++ /dev/null @@ -1,8 +0,0 @@ -environments: - - name: not-funny - type: venv - - - name: funny 
- type: venv - requirements: - - pyjokes==0.6.0 diff --git a/projects/adapter/integration_tests/projects/source_freshness/load_freshness_table.py b/projects/adapter/integration_tests/projects/source_freshness/load_freshness_table.py deleted file mode 100644 index e143c5eb..00000000 --- a/projects/adapter/integration_tests/projects/source_freshness/load_freshness_table.py +++ /dev/null @@ -1,22 +0,0 @@ -from datetime import datetime as dt, timezone as tz -from pandas import DataFrame -from fal.dbt import FalDbt -from sys import argv - -project_dir = argv[1] if len(argv) >= 2 else "." -profiles_dir = argv[2] if len(argv) >= 3 else ".." - -faldbt = FalDbt(project_dir=project_dir, profiles_dir=profiles_dir) - -# 10 rows -df = DataFrame({"loaded_at": dt.now(tz=tz.utc).isoformat(), "info": range(0, 10)}) - -print(df) -faldbt.write_to_source(df, "freshness_test", "freshness_table", mode="overwrite") - -from time import sleep - -# Let BigQuery cache load it -sleep(5) - -print("Loaded") diff --git a/projects/adapter/integration_tests/projects/source_freshness/models/model_a.py b/projects/adapter/integration_tests/projects/source_freshness/models/model_a.py deleted file mode 100644 index 89176fb2..00000000 --- a/projects/adapter/integration_tests/projects/source_freshness/models/model_a.py +++ /dev/null @@ -1,8 +0,0 @@ -def model(dbt, fal): - dbt.config(materialized="table") - dbt.config(fal_environment="funny") - import pyjokes - joke = pyjokes.get_joke() - df = dbt.source("freshness_test", "freshness_table") - df["my_joke"] = joke - return df diff --git a/projects/adapter/integration_tests/projects/source_freshness/models/schema.yml b/projects/adapter/integration_tests/projects/source_freshness/models/schema.yml deleted file mode 100644 index 8ae98c06..00000000 --- a/projects/adapter/integration_tests/projects/source_freshness/models/schema.yml +++ /dev/null @@ -1,19 +0,0 @@ -version: 2 - -sources: - - name: freshness_test - database: "{{ env_var('DBT_DATABASE', 'test') }}" - schema: "{{ env_var('DBT_SCHEMA', 'dbt_fal_custom') }}" - freshness: - warn_after: { "count": 5, "period": minute } - error_after: { "count": 30, "period": minute } - tables: - - name: freshness_table - loaded_at_field: "current_timestamp" - columns: - - name: info - tests: - - unique - -models: - - name: model_a diff --git a/projects/adapter/poetry.lock b/projects/adapter/poetry.lock index c15615ad..6fc7dd97 100644 --- a/projects/adapter/poetry.lock +++ b/projects/adapter/poetry.lock @@ -1,10 +1,15 @@ +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. + [[package]] name = "agate" version = "1.7.0" description = "A data analysis library that is optimized for humans instead of machines." -category = "main" optional = false python-versions = "*" +files = [ + {file = "agate-1.7.0-py2.py3-none-any.whl", hash = "sha256:ad529c80fe6943906ab3d3bc59c12307e1181d35993e6055db59fa72dc79a6bd"}, + {file = "agate-1.7.0.tar.gz", hash = "sha256:a835a1069247b39b0c340e31eb56e1a95e79f679ad37512192118a5ea3336020"}, +] [package.dependencies] Babel = ">=2.0" @@ -18,29 +23,16 @@ pytimeparse = ">=1.1.5" docs = ["Sphinx (>=1.2.2)", "sphinx-rtd-theme (>=0.1.6)"] test = ["PyICU (>=2.4.2)", "coverage (>=3.7.1)", "cssselect (>=0.9.1)", "lxml (>=3.6.0)", "pytest", "pytest-cov", "pytz (>=2015.4)"] -[[package]] -name = "agate-sql" -version = "0.5.9" -description = "agate-sql adds SQL read/write support to agate." 
-category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -agate = ">=1.5.0" -sqlalchemy = "<2" - -[package.extras] -docs = ["Sphinx (>=1.2.2)", "sphinx-rtd-theme (>=0.1.6)"] -test = ["crate", "geojson", "pytest", "pytest-cov"] - [[package]] name = "aiobotocore" version = "2.4.2" description = "Async client for aws services using botocore and aiohttp" -category = "main" optional = true python-versions = ">=3.7" +files = [ + {file = "aiobotocore-2.4.2-py3-none-any.whl", hash = "sha256:4acd1ebe2e44be4b100aa553910bda899f6dc090b3da2bc1cf3d5de2146ed208"}, + {file = "aiobotocore-2.4.2.tar.gz", hash = "sha256:0603b74a582dffa7511ce7548d07dc9b10ec87bc5fb657eb0b34f9bd490958bf"}, +] [package.dependencies] aiohttp = ">=3.3.1" @@ -56,9 +48,97 @@ boto3 = ["boto3 (>=1.24.59,<1.24.60)"] name = "aiohttp" version = "3.8.4" description = "Async http client/server framework (asyncio)" -category = "main" optional = true python-versions = ">=3.6" +files = [ + {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1"}, + {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a"}, + {file = "aiohttp-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b"}, + {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3"}, + {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc"}, + {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd"}, + {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5"}, + {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e"}, + {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd"}, + {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6"}, + {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9"}, + {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949"}, + {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea"}, + {file = "aiohttp-3.8.4-cp310-cp310-win32.whl", hash = "sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1"}, + {file = "aiohttp-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f"}, + {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4"}, + {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4"}, + {file = "aiohttp-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef"}, + {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f"}, + {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e"}, + {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f"}, + {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05"}, + {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654"}, + {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a"}, + {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb"}, + {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531"}, + {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b"}, + {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24"}, + {file = "aiohttp-3.8.4-cp311-cp311-win32.whl", hash = "sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d"}, + {file = "aiohttp-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc"}, + {file = "aiohttp-3.8.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51"}, + {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6"}, + {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131"}, + {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75"}, + {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01"}, + {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622"}, + {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41"}, + {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36"}, + {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99"}, 
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71"}, + {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff"}, + {file = "aiohttp-3.8.4-cp36-cp36m-win32.whl", hash = "sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777"}, + {file = "aiohttp-3.8.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e"}, + {file = "aiohttp-3.8.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519"}, + {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f"}, + {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9"}, + {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b"}, + {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab"}, + {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332"}, + {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333"}, + {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9"}, + {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699"}, + {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6"}, + {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241"}, + {file = "aiohttp-3.8.4-cp37-cp37m-win32.whl", hash = "sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a"}, + {file = "aiohttp-3.8.4-cp37-cp37m-win_amd64.whl", hash = "sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480"}, + {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f"}, + {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15"}, + {file = "aiohttp-3.8.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945"}, + {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da"}, + {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd"}, + {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10"}, + {file = 
"aiohttp-3.8.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8"}, + {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a"}, + {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074"}, + {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52"}, + {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71"}, + {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275"}, + {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d"}, + {file = "aiohttp-3.8.4-cp38-cp38-win32.whl", hash = "sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54"}, + {file = "aiohttp-3.8.4-cp38-cp38-win_amd64.whl", hash = "sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f"}, + {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed"}, + {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567"}, + {file = "aiohttp-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643"}, + {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a"}, + {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf"}, + {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719"}, + {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2"}, + {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e"}, + {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57"}, + {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391"}, + {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2"}, + {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14"}, + {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4"}, + {file = "aiohttp-3.8.4-cp39-cp39-win32.whl", hash = "sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a"}, + {file = "aiohttp-3.8.4-cp39-cp39-win_amd64.whl", hash = 
"sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04"}, + {file = "aiohttp-3.8.4.tar.gz", hash = "sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c"}, +] [package.dependencies] aiosignal = ">=1.1.2" @@ -76,9 +156,12 @@ speedups = ["Brotli", "aiodns", "cchardet"] name = "aioitertools" version = "0.11.0" description = "itertools and builtins for AsyncIO and mixed iterables" -category = "main" optional = true python-versions = ">=3.6" +files = [ + {file = "aioitertools-0.11.0-py3-none-any.whl", hash = "sha256:04b95e3dab25b449def24d7df809411c10e62aab0cbe31a50ca4e68748c43394"}, + {file = "aioitertools-0.11.0.tar.gz", hash = "sha256:42c68b8dd3a69c2bf7f2233bf7df4bb58b557bca5252ac02ed5187bbc67d6831"}, +] [package.dependencies] typing_extensions = {version = ">=4.0", markers = "python_version < \"3.10\""} @@ -87,9 +170,12 @@ typing_extensions = {version = ">=4.0", markers = "python_version < \"3.10\""} name = "aiosignal" version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" -category = "main" optional = true python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] [package.dependencies] frozenlist = ">=1.1.0" @@ -98,9 +184,12 @@ frozenlist = ">=1.1.0" name = "anyio" version = "3.7.1" description = "High level compatibility layer for multiple asynchronous event loop implementations" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, + {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, +] [package.dependencies] exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} @@ -116,41 +205,34 @@ trio = ["trio (<0.22)"] name = "asn1crypto" version = "1.5.1" description = "Fast ASN.1 parser and serializer with definitions for private keys, public keys, certificates, CRL, OCSP, CMS, PKCS#3, PKCS#7, PKCS#8, PKCS#12, PKCS#5, X.509 and TSP" -category = "main" optional = true python-versions = "*" - -[[package]] -name = "astor" -version = "0.8.1" -description = "Read/rewrite/write Python ASTs" -category = "main" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +files = [ + {file = "asn1crypto-1.5.1-py2.py3-none-any.whl", hash = "sha256:db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67"}, + {file = "asn1crypto-1.5.1.tar.gz", hash = "sha256:13ae38502be632115abf8a24cbe5f4da52e3b5231990aff31123c805306ccb9c"}, +] [[package]] name = "async-timeout" version = "4.0.2" description = "Timeout context manager for asyncio programs" -category = "main" optional = true python-versions = ">=3.6" - -[[package]] -name = "atomicwrites" -version = "1.4.1" -description = "Atomic file writes." 
-category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, + {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, +] [[package]] name = "attrs" version = "22.2.0" description = "Classes Without Boilerplate" -category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, + {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, +] [package.extras] cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] @@ -163,9 +245,12 @@ tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy name = "auth0-python" version = "4.1.0" description = "Auth0 Python SDK" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "auth0-python-4.1.0.tar.gz", hash = "sha256:457009bbbd660b7244247b5e853506d147ecf4cf7dc72a0766330e3057f1180c"}, + {file = "auth0_python-4.1.0-py2.py3-none-any.whl", hash = "sha256:efe5def4d5829dde85d24dbacb5f793468b15dc2ef3cd2032dfdac20096e099a"}, +] [package.dependencies] pyjwt = {version = ">=2.6.0", extras = ["crypto"]} @@ -178,9 +263,12 @@ test = ["coverage", "pre-commit"] name = "awswrangler" version = "3.0.0" description = "Pandas on AWS." -category = "main" optional = true python-versions = ">=3.8,<4.0" +files = [ + {file = "awswrangler-3.0.0-py3-none-any.whl", hash = "sha256:cccab37dc37dfe0f4a19e00e836ba734601ccb73a114fabdff53cb60e70f8c2c"}, + {file = "awswrangler-3.0.0.tar.gz", hash = "sha256:9a2384ec4dbfd83ac8cef859d127924f89b3b3f1b6242da89606a296af3d68ec"}, +] [package.dependencies] boto3 = ">=1.20.32,<2.0.0" @@ -211,9 +299,12 @@ sqlserver = ["pyodbc (>=4.0.0,<5.0.0)"] name = "babel" version = "2.11.0" description = "Internationalization utilities" -category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "Babel-2.11.0-py3-none-any.whl", hash = "sha256:1ad3eca1c885218f6dce2ab67291178944f810a10a9b5f3cb8382a5a232b64fe"}, + {file = "Babel-2.11.0.tar.gz", hash = "sha256:5ef4b3226b0180dedded4229651c8b0e1a3a6a2837d45a073272f313e4cf97f6"}, +] [package.dependencies] pytz = ">=2015.7" @@ -222,40 +313,38 @@ pytz = ">=2015.7" name = "backoff" version = "1.11.1" description = "Function decoration for backoff and retry" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "backoff-1.11.1-py2.py3-none-any.whl", hash = "sha256:61928f8fa48d52e4faa81875eecf308eccfb1016b018bb6bd21e05b5d90a96c5"}, + {file = "backoff-1.11.1.tar.gz", hash = "sha256:ccb962a2378418c667b3c979b504fdeb7d9e0d29c0579e3b13b86467177728cb"}, +] [[package]] name = "backports-functools-lru-cache" -version = "1.6.4" +version = "1.6.6" description = "Backport of functools.lru_cache" -category = "main" optional = false python-versions = ">=2.6" +files = [ + {file = "backports.functools_lru_cache-1.6.6-py2.py3-none-any.whl", hash = "sha256:77e27d0ffbb463904bdd5ef8b44363f6cd5ef503e664b3f599a3bf5843ed37cf"}, + {file = "backports.functools_lru_cache-1.6.6.tar.gz", hash = "sha256:7b70e701ba4db58c0ed8671a9d3391b0abb9bd1bc24d4e90c3480f4baafcc2dc"}, +] [package.extras] -docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", 
"sphinx"] -testing = ["pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] - -[[package]] -name = "backports-zoneinfo" -version = "0.2.1" -description = "Backport of the standard library zoneinfo module" -category = "main" -optional = true -python-versions = ">=3.6" - -[package.extras] -tzdata = ["tzdata"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [[package]] name = "beautifulsoup4" version = "4.12.2" description = "Screen-scraping library" -category = "main" optional = true python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, + {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, +] [package.dependencies] soupsieve = ">1.2" @@ -268,9 +357,12 @@ lxml = ["lxml"] name = "behave" version = "1.2.6" description = "behave is behaviour-driven development, Python style" -category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "behave-1.2.6-py2.py3-none-any.whl", hash = "sha256:ebda1a6c9e5bfe95c5f9f0a2794e01c7098b3dde86c10a95d8621c5907ff6f1c"}, + {file = "behave-1.2.6.tar.gz", hash = "sha256:b9662327aa53294c1351b0a9c369093ccec1d21026f050c3bd9b3e5cccf81a86"}, +] [package.dependencies] parse = ">=1.8.2" @@ -281,35 +373,16 @@ six = ">=1.11" develop = ["coverage", "invoke (>=0.21.0)", "modernize (>=0.5)", "path.py (>=8.1.2)", "pathlib", "pycmd", "pylint", "pytest (>=3.0)", "pytest-cov", "tox"] docs = ["sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6)"] -[[package]] -name = "black" -version = "22.12.0" -description = "The uncompromising code formatter." -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -pathspec = ">=0.9.0" -platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} -typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)"] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - [[package]] name = "boto3" version = "1.24.59" description = "The AWS SDK for Python" -category = "main" optional = true python-versions = ">= 3.7" +files = [ + {file = "boto3-1.24.59-py3-none-any.whl", hash = "sha256:34ab44146a2c4e7f4e72737f4b27e6eb5e0a7855c2f4599e3d9199b6a0a2d575"}, + {file = "boto3-1.24.59.tar.gz", hash = "sha256:a50b4323f9579cfe22fcf5531fbd40b567d4d74c1adce06aeb5c95fce2a6fb40"}, +] [package.dependencies] botocore = ">=1.27.59,<1.28.0" @@ -323,9 +396,12 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] name = "botocore" version = "1.27.59" description = "Low-level, data-driven core of boto 3." 
-category = "main" optional = true python-versions = ">= 3.7" +files = [ + {file = "botocore-1.27.59-py3-none-any.whl", hash = "sha256:69d756791fc024bda54f6c53f71ae34e695ee41bbbc1743d9179c4837a4929da"}, + {file = "botocore-1.27.59.tar.gz", hash = "sha256:eda4aed6ee719a745d1288eaf1beb12f6f6448ad1fa12f159405db14ba9c92cf"}, +] [package.dependencies] jmespath = ">=0.7.1,<2.0.0" @@ -335,29 +411,89 @@ urllib3 = ">=1.25.4,<1.27" [package.extras] crt = ["awscrt (==0.14.0)"] -[[package]] -name = "cachetools" -version = "5.3.0" -description = "Extensible memoizing collections and decorators" -category = "main" -optional = true -python-versions = "~=3.7" - [[package]] name = "certifi" version = "2022.12.7" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, + {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, +] [[package]] name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." -category = "main" optional = false python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + 
{file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = 
"cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] [package.dependencies] pycparser = "*" @@ -366,9 +502,12 @@ pycparser = "*" name = "charset-normalizer" version = "2.1.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.6.0" +files = [ + {file = "charset-normalizer-2.1.1.tar.gz", hash = "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845"}, + {file = "charset_normalizer-2.1.1-py3-none-any.whl", hash = "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"}, +] [package.extras] unicode-backport = ["unicodedata2"] @@ -377,9 +516,12 @@ unicode-backport = ["unicodedata2"] name = "click" version = "8.1.3" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} @@ -388,25 +530,55 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] [[package]] name = "cryptography" version = "38.0.4" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-category = "main" optional = false python-versions = ">=3.6" - -[package.dependencies] -cffi = ">=1.12" - -[package.extras] -docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] +files = [ + {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"}, + {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"}, + {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"}, + {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"}, + {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"}, + {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"}, + {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"}, + {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"}, + {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = 
"sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"}, + {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"}, +] + +[package.dependencies] +cffi = ">=1.12" + +[package.extras] +docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] sdist = ["setuptools-rust (>=0.11.4)"] ssh = ["bcrypt (>=3.1.5)"] test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] @@ -415,17 +587,23 @@ test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0 name = "cycler" version = "0.11.0" description = "Composable style cycles" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, + {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, +] [[package]] name = "datadog-api-client" version = "2.12.0" description = "Collection of all Datadog Public endpoints" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "datadog-api-client-2.12.0.tar.gz", hash = "sha256:8d139a535249b8bea516537b8ada7c746eadd59187f9e02de8c2ae85c3ae0b31"}, + {file = "datadog_api_client-2.12.0-py3-none-any.whl", hash = "sha256:28b3bd1f4dfe27d44c71ae13f8906fce8483f69c950a078c330778de49f10746"}, +] [package.dependencies] certifi = "*" @@ -439,67 +617,99 @@ async = ["aiosonic (==0.15.1)"] tests = ["aiosonic (==0.15.1)", "glom", "jinja2", "mypy", "pytest", "pytest-asyncio", "pytest-bdd (==6.0.1)", "pytest-randomly", "pytest-recording", "python-dateutil", "types-python-dateutil", "zstandard"] zstandard = ["zstandard"] -[[package]] -name = "db-dtypes" -version = "1.0.5" -description = "Pandas Data Types for SQL systems (BigQuery, Spanner)" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -numpy = ">=1.16.6,<2.0dev" -packaging = ">=17.0" -pandas = ">=0.24.2,<2.0dev" -pyarrow = ">=3.0.0" - [[package]] name = "dbt-core" -version = "1.5.0" +version = "1.6.9" description = "With dbt, data analysts and engineers can build analytics the way engineers build applications." 
-category = "main"
optional = false
-python-versions = ">=3.7.2"
+python-versions = ">=3.8"
+files = [
+ {file = "dbt-core-1.6.9.tar.gz", hash = "sha256:de0d4cc695483ac33b36693c84ab1be9b2eab9ebc01e249ad1834804ff59ad16"},
+ {file = "dbt_core-1.6.9-py3-none-any.whl", hash = "sha256:3f82308b8bf2f1154292d3a46e763f4860af608ac51160a97e1857d0bd1468f3"},
+]

[package.dependencies]
-agate = ">=1.6,<1.7.1"
+agate = ">=1.7.0,<1.8.0"
cffi = ">=1.9,<2.0.0"
-click = ">=7.0,<9"
-colorama = ">=0.3.9,<0.4.7"
+click = "<9"
+colorama = ">=0.3.9,<0.5"
dbt-extractor = ">=0.4.1,<0.5.0"
-hologram = ">=0.0.14,<=0.0.16"
+dbt-semantic-interfaces = ">=0.2.0,<0.3.0"
+hologram = ">=0.0.16,<0.1.0"
idna = ">=2.5,<4"
isodate = ">=0.6,<0.7"
-Jinja2 = "3.1.2"
+Jinja2 = ">=3.1.2,<3.2.0"
logbook = ">=1.5,<1.6"
-mashumaro = {version = "3.6", extras = ["msgpack"]}
-minimal-snowplow-tracker = "0.0.2"
-networkx = {version = ">=2.3,<3", markers = "python_version >= \"3.8\""}
+mashumaro = {version = ">=3.8.1,<3.9.0", extras = ["msgpack"]}
+minimal-snowplow-tracker = ">=0.0.2,<0.1.0"
+networkx = ">=2.3,<4"
packaging = ">20.9"
pathspec = ">=0.9,<0.12"
-protobuf = ">=3.18.3"
+protobuf = ">=4.0.0"
pytz = ">=2015.7"
pyyaml = ">=6.0"
requests = "<3.0.0"
-sqlparse = ">=0.2.3,<0.4.4"
+sqlparse = ">=0.2.3,<0.5"
typing-extensions = ">=3.7.4"
-werkzeug = ">=1,<3"
+urllib3 = ">=1.0,<2.0"

[[package]]
name = "dbt-extractor"
version = "0.4.1"
description = "A tool to analyze and extract information from Jinja used in dbt projects."
-category = "main"
optional = false
python-versions = ">=3.6.1"
+files = [
+ {file = "dbt_extractor-0.4.1-cp36-abi3-macosx_10_7_x86_64.whl", hash = "sha256:4dc715bd740e418d8dc1dd418fea508e79208a24cf5ab110b0092a3cbe96bf71"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:bc9e0050e3a2f4ea9fe58e8794bc808e6709a0c688ed710fc7c5b6ef3e5623ec"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76872cdee659075d6ce2df92dc62e59a74ba571be62acab2e297ca478b49d766"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:81435841610be1b07806d72cd89b1956c6e2a84c360b9ceb3f949c62a546d569"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7c291f9f483eae4f60dd5859097d7ba51d5cb6c4725f08973ebd18cdea89d758"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:822b1e911db230e1b9701c99896578e711232001027b518c44c32f79a46fa3f9"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:554d27741a54599c39e5c0b7dbcab77400d83f908caba284a3e960db812e5814"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a805d51a25317f53cbff951c79b9cf75421cf48e4b3e1dfb3e9e8de6d824b76c"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cad90ddc708cb4182dc16fe2c87b1f088a1679877b93e641af068eb68a25d582"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:34783d788b133f223844e280e37b3f5244f2fb60acc457aa75c2667e418d5442"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:9da211869a1220ea55c5552c1567a3ea5233a6c52fa89ca87a22465481c37bc9"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-musllinux_1_2_i686.whl", hash = "sha256:7d7c47774dc051b8c18690281a55e2e3d3320e823b17e04b06bc3ff81b1874ba"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:037907a7c7ae0391045d81338ca77ddaef899a91d80f09958f09fe374594e19b"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-win32.whl", hash = "sha256:3fe8d8e28a7bd3e0884896147269ca0202ca432d8733113386bdc84c824561bf"},
+ {file = "dbt_extractor-0.4.1-cp36-abi3-win_amd64.whl", hash = "sha256:35265a0ae0a250623b0c2e3308b2738dc8212e40e0aa88407849e9ea090bb312"},
+ {file = "dbt_extractor-0.4.1.tar.gz", hash = "sha256:75b1c665699ec0f1ffce1ba3d776f7dfce802156f22e70a7b9c8f0b4d7e80f42"},
+]
+
+[[package]]
+name = "dbt-semantic-interfaces"
+version = "0.2.3"
+description = "The shared semantic layer definitions that dbt-core and MetricFlow use"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "dbt_semantic_interfaces-0.2.3-py3-none-any.whl", hash = "sha256:69235a6b261c45d1501c29a9b2a9496935b0c764e7fd25a2f83471ef53dae15f"},
+ {file = "dbt_semantic_interfaces-0.2.3.tar.gz", hash = "sha256:628fd65ce01bbfbf9115866d2bb11616d0e1987b65fb4fabfdf9bb807177c2ee"},
+]
+
+[package.dependencies]
+click = ">=7.0,<9.0"
+importlib-metadata = ">=6.0,<7.0"
+jinja2 = ">=3.0,<4.0"
+jsonschema = ">=4.0,<5.0"
+more-itertools = ">=8.0,<9.0"
+pydantic = ">=1.10,<2.0"
+python-dateutil = ">=2.0,<3.0"
+pyyaml = ">=6.0,<7.0"
+typing-extensions = ">=4.4,<5.0"

[[package]]
name = "deprecated"
version = "1.2.13"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"},
+ {file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"},
+]

[package.dependencies]
wrapt = ">=1.10,<2"
@@ -507,24 +717,16 @@
[package.extras]
dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version (<1)", "configparser (<5)", "importlib-metadata (<3)", "importlib-resources (<4)", "sphinx (<2)", "sphinxcontrib-websupport (<2)", "tox", "zipp (<2)"]

-[[package]]
-name = "deprecation"
-version = "2.1.0"
-description = "A library to handle automated deprecations"
-category = "main"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-packaging = "*"
-
[[package]]
name = "dill"
version = "0.3.7"
description = "serialize all of Python"
-category = "main"
optional = false
python-versions = ">=3.7"
+files = [
+ {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"},
+ {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"},
+]

[package.extras]
graph = ["objgraph (>=1.7.2)"]
@@ -533,37 +735,23 @@
name = "distlib"
version = "0.3.6"
description = "Distribution utilities"
-category = "main"
optional = false
python-versions = "*"
-
-[[package]]
-name = "duckdb"
-version = "0.8.1"
-description = "DuckDB embedded database"
-category = "main"
-optional = true
-python-versions = "*"
-
-[[package]]
-name = "duckdb-engine"
-version = "0.1.11"
-description = ""
-category = "main"
-optional = true
-python-versions = ">=3.6.1"
-
-[package.dependencies]
-duckdb = ">=0.2.8"
-sqlalchemy = ">=1.3.19,<2.0.0"
+files = [
+ {file = "distlib-0.3.6-py2.py3-none-any.whl", hash =
"sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"}, + {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"}, +] [[package]] name = "exceptiongroup" version = "1.1.3" description = "Backport of PEP 654 (exception groups)" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"}, + {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"}, +] [package.extras] test = ["pytest (>=6)"] @@ -572,9 +760,12 @@ test = ["pytest (>=6)"] name = "fal" version = "0.10.0" description = "fal is an easy-to-use Serverless Python Framework" -category = "main" optional = false python-versions = ">=3.8,<4.0" +files = [ + {file = "fal-0.10.0-py3-none-any.whl", hash = "sha256:3b1d4275504c235c91ccd54a4b40d61a20730960ee6369b1999ed3192e41533f"}, + {file = "fal-0.10.0.tar.gz", hash = "sha256:c8aad6bfb7406d1740917d4337f7fbfe356ae2b358af9a52883d1ca1dfdb28cb"}, +] [package.dependencies] attrs = ">=21.3.0" @@ -606,9 +797,12 @@ typing-extensions = ">=4.7.1,<5.0.0" name = "filelock" version = "3.9.0" description = "A platform independent file lock." -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "filelock-3.9.0-py3-none-any.whl", hash = "sha256:f58d535af89bb9ad5cd4df046f741f8553a418c01a7856bf0d173bbc9f6bd16d"}, + {file = "filelock-3.9.0.tar.gz", hash = "sha256:7b319f24340b51f55a2bf7a12ac0755a9b03e718311dac567a0f4f7fabd2f5de"}, +] [package.extras] docs = ["furo (>=2022.12.7)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.5)"] @@ -618,9 +812,12 @@ testing = ["covdefaults (>=2.2.2)", "coverage (>=7.0.1)", "pytest (>=7.2)", "pyt name = "fonttools" version = "4.38.0" description = "Tools to manipulate font files" -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "fonttools-4.38.0-py3-none-any.whl", hash = "sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb"}, + {file = "fonttools-4.38.0.zip", hash = "sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1"}, +] [package.extras] all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=14.0.0)", "xattr", "zopfli (>=0.1.4)"] @@ -640,17 +837,95 @@ woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] name = "frozenlist" version = "1.3.3" description = "A list-like structure which implements collections.abc.MutableSequence" -category = "main" optional = true python-versions = ">=3.7" +files = [ + {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"}, + {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"}, + {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"}, + {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"}, + {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"}, + {file = 
"frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"}, + {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"}, + {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"}, + {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"}, + {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"}, + {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"}, + {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"}, + {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"}, + {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"}, + {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"}, + {file = "frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"}, + {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"}, + {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"}, + {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"}, +] [[package]] name = "fsspec" version = "2023.1.0" description = "File-system specification" -category = "main" optional = true python-versions = ">=3.7" +files = [ + {file = "fsspec-2023.1.0-py3-none-any.whl", hash = "sha256:b833e2e541e9e8cde0ab549414187871243177feb3d344f9d27b25a93f5d8139"}, + {file = "fsspec-2023.1.0.tar.gz", hash = "sha256:fbae7f20ff801eb5f7d0bedf81f25c787c0dfac5e982d98fa3884a9cde2b5411"}, +] [package.extras] abfs = ["adlfs"] @@ -679,167 +954,99 @@ tqdm = ["tqdm"] name = "future" version = "0.18.3" description = "Clean single-source support for Python 3 and 2" -category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" - -[[package]] -name = "google-api-core" -version = "2.11.0" -description = "Google API client core library" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -google-auth = ">=2.14.1,<3.0dev" -googleapis-common-protos = ">=1.56.2,<2.0dev" -grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, - {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\""}, -] -grpcio-status = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, - {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\""}, -] -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" -requests = ">=2.18.0,<3.0.0dev" - -[package.extras] -grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0dev)", "grpcio-status (>=1.49.1,<2.0dev)"] -grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0dev)"] -grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0dev)"] - -[[package]] -name = "google-auth" -version = "2.16.0" -description = "Google Authentication Library" -category = "main" -optional = true -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" - -[package.dependencies] -cachetools = ">=2.0.0,<6.0" -pyasn1-modules = ">=0.2.1" -rsa = {version = ">=3.1.4,<5", markers = "python_version >= \"3.6\""} -six = ">=1.9.0" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "requests (>=2.20.0,<3.0.0dev)"] -enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] -pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] -reauth = ["pyu2f (>=0.1.5)"] -requests = ["requests (>=2.20.0,<3.0.0dev)"] - -[[package]] -name = "google-cloud-bigquery" -version = "3.5.0" -description = "Google BigQuery API client library" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -db-dtypes = {version = ">=0.3.0,<2.0.0dev", optional = true, markers = "extra == \"pandas\""} -google-api-core = {version = ">=1.31.5,<2.0.0 || >2.3.0,<3.0.0dev", extras = ["grpc"]} -google-cloud-core = ">=1.4.1,<3.0.0dev" -google-resumable-media = ">=0.6.0,<3.0dev" -grpcio = [ - {version = ">=1.47.0,<2.0dev", markers = "python_version < \"3.11\""}, - {version = 
">=1.49.1,<2.0dev", markers = "python_version >= \"3.11\""}, +files = [ + {file = "future-0.18.3.tar.gz", hash = "sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307"}, ] -packaging = ">=20.0.0" -pandas = {version = ">=1.1.0", optional = true, markers = "extra == \"pandas\""} -proto-plus = ">=1.15.0,<2.0.0dev" -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" -pyarrow = {version = ">=3.0.0", optional = true, markers = "extra == \"pandas\""} -python-dateutil = ">=2.7.2,<3.0dev" -requests = ">=2.21.0,<3.0.0dev" - -[package.extras] -all = ["Shapely (>=1.8.4,<2.0dev)", "db-dtypes (>=0.3.0,<2.0.0dev)", "geopandas (>=0.9.0,<1.0dev)", "google-cloud-bigquery-storage (>=2.0.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "ipython (>=7.0.1,!=8.1.0)", "ipywidgets (==7.7.1)", "opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)", "pandas (>=1.1.0)", "pyarrow (>=3.0.0)", "tqdm (>=4.7.4,<5.0.0dev)"] -bqstorage = ["google-cloud-bigquery-storage (>=2.0.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "pyarrow (>=3.0.0)"] -geopandas = ["Shapely (>=1.8.4,<2.0dev)", "geopandas (>=0.9.0,<1.0dev)"] -ipython = ["ipython (>=7.0.1,!=8.1.0)"] -ipywidgets = ["ipywidgets (==7.7.1)"] -opentelemetry = ["opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)"] -pandas = ["db-dtypes (>=0.3.0,<2.0.0dev)", "pandas (>=1.1.0)", "pyarrow (>=3.0.0)"] -tqdm = ["tqdm (>=4.7.4,<5.0.0dev)"] - -[[package]] -name = "google-cloud-core" -version = "2.3.2" -description = "Google Cloud API client core library" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -google-api-core = ">=1.31.6,<2.0.0 || >2.3.0,<3.0.0dev" -google-auth = ">=1.25.0,<3.0dev" - -[package.extras] -grpc = ["grpcio (>=1.38.0,<2.0dev)"] - -[[package]] -name = "google-crc32c" -version = "1.5.0" -description = "A python wrapper of the C library 'Google CRC32C'" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.extras] -testing = ["pytest"] - -[[package]] -name = "google-resumable-media" -version = "2.4.1" -description = "Utilities for Google Media Downloads and Resumable Uploads" -category = "main" -optional = true -python-versions = ">= 3.7" - -[package.dependencies] -google-crc32c = ">=1.0,<2.0dev" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)"] -requests = ["requests (>=2.18.0,<3.0.0dev)"] - -[[package]] -name = "googleapis-common-protos" -version = "1.58.0" -description = "Common protobufs used in Google APIs" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" - -[package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0dev)"] [[package]] name = "greenlet" version = "2.0.2" description = "Lightweight in-process concurrent programming" -category = "main" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" - -[package.extras] -docs = ["Sphinx", "docutils (<0.18)"] -test = ["objgraph", "psutil"] - -[[package]] -name = "grpc-interceptor" -version = "0.15.0" +files = [ + {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = 
"sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, + {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, + {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, + {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, + {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, + {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, + {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, + {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, + {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, + {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, + {file 
= "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, + {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, + {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, + {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, + {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, + {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, + {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, + {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, + {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, + {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, + {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, + {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, +] + +[package.extras] +docs = ["Sphinx", "docutils (<0.18)"] +test = ["objgraph", "psutil"] + +[[package]] +name = "grpc-interceptor" +version = "0.15.0" description = "Simplifies gRPC interceptors" -category = "main" optional = false python-versions = ">=3.6.1,<4.0.0" +files = [ + {file = "grpc-interceptor-0.15.0.tar.gz", hash = "sha256:5c1aa9680b1d7e12259960c38057b121826860b05ebbc1001c74343b7ad1455e"}, + {file = "grpc_interceptor-0.15.0-py3-none-any.whl", hash = "sha256:63e390162e64df96c39c40508eb697def76a7cafac32a7eaf9272093eec1109e"}, +] [package.dependencies] grpcio = ">=1.32.0,<2.0.0" @@ -851,71 +1058,116 @@ testing = ["protobuf (>=3.6.0)"] name = "grpcio" version = "1.51.1" description = "HTTP/2-based RPC framework" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "grpcio-1.51.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:cc2bece1737b44d878cc1510ea04469a8073dbbcdd762175168937ae4742dfb3"}, + {file = "grpcio-1.51.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:e223a9793522680beae44671b9ed8f6d25bbe5ddf8887e66aebad5e0686049ef"}, + {file = 
"grpcio-1.51.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:24ac1154c4b2ab4a0c5326a76161547e70664cd2c39ba75f00fc8a2170964ea2"}, + {file = "grpcio-1.51.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4ef09f8997c4be5f3504cefa6b5c6cc3cf648274ce3cede84d4342a35d76db6"}, + {file = "grpcio-1.51.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8a0b77e992c64880e6efbe0086fe54dfc0bbd56f72a92d9e48264dcd2a3db98"}, + {file = "grpcio-1.51.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:eacad297ea60c72dd280d3353d93fb1dcca952ec11de6bb3c49d12a572ba31dd"}, + {file = "grpcio-1.51.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:16c71740640ba3a882f50b01bf58154681d44b51f09a5728180a8fdc66c67bd5"}, + {file = "grpcio-1.51.1-cp310-cp310-win32.whl", hash = "sha256:29cb97d41a4ead83b7bcad23bdb25bdd170b1e2cba16db6d3acbb090bc2de43c"}, + {file = "grpcio-1.51.1-cp310-cp310-win_amd64.whl", hash = "sha256:9ff42c5620b4e4530609e11afefa4a62ca91fa0abb045a8957e509ef84e54d30"}, + {file = "grpcio-1.51.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:bc59f7ba87972ab236f8669d8ca7400f02a0eadf273ca00e02af64d588046f02"}, + {file = "grpcio-1.51.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3c2b3842dcf870912da31a503454a33a697392f60c5e2697c91d133130c2c85d"}, + {file = "grpcio-1.51.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b011674090594f1f3245960ced7386f6af35485a38901f8afee8ad01541dbd"}, + {file = "grpcio-1.51.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d680356a975d9c66a678eb2dde192d5dc427a7994fb977363634e781614f7c"}, + {file = "grpcio-1.51.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:094e64236253590d9d4075665c77b329d707b6fca864dd62b144255e199b4f87"}, + {file = "grpcio-1.51.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:257478300735ce3c98d65a930bbda3db172bd4e00968ba743e6a1154ea6edf10"}, + {file = "grpcio-1.51.1-cp311-cp311-win32.whl", hash = "sha256:5a6ebcdef0ef12005d56d38be30f5156d1cb3373b52e96f147f4a24b0ddb3a9d"}, + {file = "grpcio-1.51.1-cp311-cp311-win_amd64.whl", hash = "sha256:3f9b0023c2c92bebd1be72cdfca23004ea748be1813a66d684d49d67d836adde"}, + {file = "grpcio-1.51.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:cd3baccea2bc5c38aeb14e5b00167bd4e2373a373a5e4d8d850bd193edad150c"}, + {file = "grpcio-1.51.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:17ec9b13cec4a286b9e606b48191e560ca2f3bbdf3986f91e480a95d1582e1a7"}, + {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:fbdbe9a849854fe484c00823f45b7baab159bdd4a46075302281998cb8719df5"}, + {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31bb6bc7ff145e2771c9baf612f4b9ebbc9605ccdc5f3ff3d5553de7fc0e0d79"}, + {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e473525c28251558337b5c1ad3fa969511e42304524a4e404065e165b084c9e4"}, + {file = "grpcio-1.51.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6f0b89967ee11f2b654c23b27086d88ad7bf08c0b3c2a280362f28c3698b2896"}, + {file = "grpcio-1.51.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7942b32a291421460d6a07883033e392167d30724aa84987e6956cd15f1a21b9"}, + {file = "grpcio-1.51.1-cp37-cp37m-win32.whl", hash = "sha256:f96ace1540223f26fbe7c4ebbf8a98e3929a6aa0290c8033d12526847b291c0f"}, + {file = "grpcio-1.51.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f1fec3abaf274cdb85bf3878167cfde5ad4a4d97c68421afda95174de85ba813"}, + {file = 
"grpcio-1.51.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:0e1a9e1b4a23808f1132aa35f968cd8e659f60af3ffd6fb00bcf9a65e7db279f"}, + {file = "grpcio-1.51.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:6df3b63538c362312bc5fa95fb965069c65c3ea91d7ce78ad9c47cab57226f54"}, + {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:172405ca6bdfedd6054c74c62085946e45ad4d9cec9f3c42b4c9a02546c4c7e9"}, + {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:506b9b7a4cede87d7219bfb31014d7b471cfc77157da9e820a737ec1ea4b0663"}, + {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fb93051331acbb75b49a2a0fd9239c6ba9528f6bdc1dd400ad1cb66cf864292"}, + {file = "grpcio-1.51.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5dca372268c6ab6372d37d6b9f9343e7e5b4bc09779f819f9470cd88b2ece3c3"}, + {file = "grpcio-1.51.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:471d39d3370ca923a316d49c8aac66356cea708a11e647e3bdc3d0b5de4f0a40"}, + {file = "grpcio-1.51.1-cp38-cp38-win32.whl", hash = "sha256:75e29a90dc319f0ad4d87ba6d20083615a00d8276b51512e04ad7452b5c23b04"}, + {file = "grpcio-1.51.1-cp38-cp38-win_amd64.whl", hash = "sha256:f1158bccbb919da42544a4d3af5d9296a3358539ffa01018307337365a9a0c64"}, + {file = "grpcio-1.51.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:59dffade859f157bcc55243714d57b286da6ae16469bf1ac0614d281b5f49b67"}, + {file = "grpcio-1.51.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:dad6533411d033b77f5369eafe87af8583178efd4039c41d7515d3336c53b4f1"}, + {file = "grpcio-1.51.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:4c4423ea38a7825b8fed8934d6d9aeebdf646c97e3c608c3b0bcf23616f33877"}, + {file = "grpcio-1.51.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0dc5354e38e5adf2498312f7241b14c7ce3484eefa0082db4297189dcbe272e6"}, + {file = "grpcio-1.51.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97d67983189e2e45550eac194d6234fc38b8c3b5396c153821f2d906ed46e0ce"}, + {file = "grpcio-1.51.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:538d981818e49b6ed1e9c8d5e5adf29f71c4e334e7d459bf47e9b7abb3c30e09"}, + {file = "grpcio-1.51.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9235dcd5144a83f9ca6f431bd0eccc46b90e2c22fe27b7f7d77cabb2fb515595"}, + {file = "grpcio-1.51.1-cp39-cp39-win32.whl", hash = "sha256:aacb54f7789ede5cbf1d007637f792d3e87f1c9841f57dd51abf89337d1b8472"}, + {file = "grpcio-1.51.1-cp39-cp39-win_amd64.whl", hash = "sha256:2b170eaf51518275c9b6b22ccb59450537c5a8555326fd96ff7391b5dd75303c"}, + {file = "grpcio-1.51.1.tar.gz", hash = "sha256:e6dfc2b6567b1c261739b43d9c59d201c1b89e017afd9e684d85aa7a186c9f7a"}, +] [package.extras] protobuf = ["grpcio-tools (>=1.51.1)"] -[[package]] -name = "grpcio-status" -version = "1.51.1" -description = "Status proto mapping for gRPC" -category = "main" -optional = true -python-versions = ">=3.6" - -[package.dependencies] -googleapis-common-protos = ">=1.5.5" -grpcio = ">=1.51.1" -protobuf = ">=4.21.6" - [[package]] name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] [[package]] name = "hologram" -version = "0.0.15" 
+version = "0.0.16" description = "JSON schema generation from dataclasses" -category = "main" optional = false python-versions = "*" +files = [ + {file = "hologram-0.0.16-py3-none-any.whl", hash = "sha256:4e56bd525336bb64a18916f871977a4125b64be8aaa750233583003333cda361"}, + {file = "hologram-0.0.16.tar.gz", hash = "sha256:1c2c921b4e575361623ea0e0d0aa5aee377b1a333cc6c6a879e213ed34583e55"}, +] [package.dependencies] -jsonschema = ">=3.0,<4.0" +jsonschema = ">=3.0" python-dateutil = ">=2.8,<2.9" [[package]] name = "httpcore" version = "0.17.3" description = "A minimal low-level HTTP client." -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "httpcore-0.17.3-py3-none-any.whl", hash = "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"}, + {file = "httpcore-0.17.3.tar.gz", hash = "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888"}, +] [package.dependencies] anyio = ">=3.0,<5.0" certifi = "*" h11 = ">=0.13,<0.15" -sniffio = ">=1.0.0,<2.0.0" +sniffio = "==1.*" [package.extras] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] +socks = ["socksio (==1.*)"] [[package]] name = "httpx" version = "0.24.1" description = "The next generation HTTP client." -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"}, + {file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"}, +] [package.dependencies] certifi = "*" @@ -925,41 +1177,68 @@ sniffio = "*" [package.extras] brotli = ["brotli", "brotlicffi"] -cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] +socks = ["socksio (==1.*)"] [[package]] name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] [[package]] name = "importlib-metadata" -version = "6.0.0" +version = "6.11.0" description = "Read metadata from Python packages" -category = "main" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, + {file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"}, +] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", 
"pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "importlib-resources" +version = "6.4.0" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.4.0-py3-none-any.whl", hash = "sha256:50d10f043df931902d4194ea07ec57960f66a80449ff867bfe782b4c486ba78c"}, + {file = "importlib_resources-6.4.0.tar.gz", hash = "sha256:cdb2b453b8046ca4e3798eb1d84f3cce1446a0e8e7b5ef4efb600f19fc398145"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["jaraco.test (>=5.4)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] [[package]] name = "isodate" version = "0.6.1" description = "An ISO 8601 date/time/duration parser and formatter" -category = "main" optional = false python-versions = "*" +files = [ + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, +] [package.dependencies] six = "*" @@ -968,9 +1247,12 @@ six = "*" name = "isolate" version = "0.12.2" description = "Managed isolated environments for Python" -category = "main" optional = false python-versions = ">=3.7,<4.0" +files = [ + {file = "isolate-0.12.2-py3-none-any.whl", hash = "sha256:222e85a37c6b10bc13780a5ae38b2ab73a2c69ef09d9f99aae305c9e14bd7324"}, + {file = "isolate-0.12.2.tar.gz", hash = "sha256:dddffa589a0894e0dcc6722241e892cad009888366da9dc50f9253d98fb28e8b"}, +] [package.dependencies] grpcio = ">=1.49" @@ -988,9 +1270,12 @@ build = ["PyYAML (>=6.0)", "virtualenv (>=20.4)"] name = "isolate-proto" version = "0.0.37" description = "(internal) gRPC definitions for Isolate Cloud" -category = "main" optional = false python-versions = ">=3.7,<4" +files = [ + {file = "isolate_proto-0.0.37-py3-none-any.whl", hash = "sha256:7da70d57b5c62f6106aceeb20a0afcd1962d6eb83f59897db5f7a89f9dfc37d0"}, + {file = "isolate_proto-0.0.37.tar.gz", hash = "sha256:c6277f8791241ec82619359cbd7e57d0695b099980c2e6122aa576ea741b5b02"}, +] [package.dependencies] grpcio = ">=1.49" @@ -1001,9 +1286,12 @@ protobuf = "*" name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." 
-category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] [package.dependencies] MarkupSafe = ">=2.0" @@ -1012,1926 +1300,61 @@ MarkupSafe = ">=2.0" i18n = ["Babel (>=2.7)"] [[package]] -name = "jmespath" -version = "1.0.1" -description = "JSON Matching Expressions" -category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "jsonschema" -version = "3.2.0" -description = "An implementation of JSON Schema validation for Python" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -attrs = ">=17.4.0" -pyrsistent = ">=0.14.0" -setuptools = "*" -six = ">=1.11.0" - -[package.extras] -format = ["idna", "jsonpointer (>1.13)", "rfc3987", "strict-rfc3339", "webcolors"] -format-nongpl = ["idna", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "webcolors"] - -[[package]] -name = "kiwisolver" -version = "1.4.4" -description = "A fast implementation of the Cassowary constraint solver" -category = "dev" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "leather" -version = "0.3.4" -description = "Python charting for 80% of humans." -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -six = ">=1.6.1" - -[[package]] -name = "logbook" -version = "1.5.3" -description = "A logging replacement for Python" -category = "main" -optional = false -python-versions = "*" - -[package.extras] -all = ["Jinja2", "brotli", "cython", "execnet (>=1.0.9)", "pytest (>4.0)", "pytest-cov (>=2.6)", "pyzmq", "redis", "sqlalchemy"] -compression = ["brotli"] -dev = ["cython", "pytest (>4.0)", "pytest-cov (>=2.6)"] -execnet = ["execnet (>=1.0.9)"] -jinja = ["Jinja2"] -redis = ["redis"] -sqlalchemy = ["sqlalchemy"] -test = ["pytest (>4.0)", "pytest-cov (>=2.6)"] -zmq = ["pyzmq"] - -[[package]] -name = "lxml" -version = "4.9.2" -description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." -category = "main" -optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" - -[package.extras] -cssselect = ["cssselect (>=0.7)"] -html5 = ["html5lib"] -htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=0.29.7)"] - -[[package]] -name = "markdown-it-py" -version = "2.2.0" -description = "Python port of markdown-it. Markdown parsing, done right!" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "markupsafe" -version = "2.1.2" -description = "Safely add untrusted strings to HTML/XML markup." 
-category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "mashumaro" -version = "3.6" -description = "Fast serialization library on top of dataclasses" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -msgpack = {version = ">=0.5.6", optional = true, markers = "extra == \"msgpack\""} -typing-extensions = ">=4.1.0" - -[package.extras] -msgpack = ["msgpack (>=0.5.6)"] -orjson = ["orjson"] -toml = ["tomli (>=1.1.0)", "tomli-w (>=1.0)"] -yaml = ["pyyaml (>=3.13)"] - -[[package]] -name = "matplotlib" -version = "3.5.3" -description = "Python plotting package" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -cycler = ">=0.10" -fonttools = ">=4.22.0" -kiwisolver = ">=1.0.1" -numpy = ">=1.17" -packaging = ">=20.0" -pillow = ">=6.2.0" -pyparsing = ">=2.2.1" -python-dateutil = ">=2.7" -setuptools_scm = ">=4,<7" - -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "minimal-snowplow-tracker" -version = "0.0.2" -description = "A minimal snowplow event tracker for Python. Add analytics to your Python and Django apps, webapps and games" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -requests = ">=2.2.1,<3.0" -six = ">=1.9.0,<2.0" - -[[package]] -name = "mock" -version = "4.0.3" -description = "Rolling backport of unittest.mock for all Pythons" -category = "dev" -optional = false -python-versions = ">=3.6" - -[package.extras] -build = ["blurb", "twine", "wheel"] -docs = ["sphinx"] -test = ["pytest (<5.4)", "pytest-cov"] - -[[package]] -name = "monotonic" -version = "1.6" -description = "An implementation of time.monotonic() for Python 2 & < 3.3" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "more-itertools" -version = "9.0.0" -description = "More routines for operating on iterables, beyond itertools" -category = "dev" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "msgpack" -version = "1.0.4" -description = "MessagePack serializer" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "multidict" -version = "6.0.4" -description = "multidict implementation" -category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -category = "dev" -optional = false -python-versions = ">=3.5" - -[[package]] -name = "networkx" -version = "2.8.8" -description = "Python package for creating and manipulating graphs and networks" -category = "main" -optional = false -python-versions = ">=3.8" - -[package.extras] -default = ["matplotlib (>=3.4)", "numpy (>=1.19)", "pandas (>=1.3)", "scipy (>=1.8)"] -developer = ["mypy (>=0.982)", "pre-commit (>=2.20)"] -doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.2)", "pydata-sphinx-theme (>=0.11)", "sphinx (>=5.2)", "sphinx-gallery (>=0.11)", "texext (>=0.6.6)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.9)", "sympy (>=1.10)"] -test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] - -[[package]] -name = "numpy" -version = "1.21.1" -description = "NumPy is the fundamental package for array computing with Python." 
-category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "opentelemetry-api" -version = "1.17.0" -description = "OpenTelemetry Python API" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -deprecated = ">=1.2.6" -importlib-metadata = ">=6.0.0,<6.1.0" -setuptools = ">=16.0" - -[[package]] -name = "opentelemetry-sdk" -version = "1.17.0" -description = "OpenTelemetry Python SDK" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -opentelemetry-api = "1.17.0" -opentelemetry-semantic-conventions = "0.38b0" -setuptools = ">=16.0" -typing-extensions = ">=3.7.4" - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.38b0" -description = "OpenTelemetry Semantic Conventions" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "oscrypto" -version = "1.3.0" -description = "TLS (SSL) sockets, key generation, encryption, decryption, signing, verification and KDFs using the OS crypto libraries. Does not require a compiler, and relies on the OS for patching. Works on Windows, OS X and Linux/BSD." -category = "main" -optional = true -python-versions = "*" - -[package.dependencies] -asn1crypto = ">=1.5.1" - -[[package]] -name = "packaging" -version = "23.1" -description = "Core utilities for Python packages" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "pandas" -version = "1.3.5" -description = "Powerful data structures for data analysis, time series, and statistics" -category = "main" -optional = false -python-versions = ">=3.7.1" - -[package.dependencies] -numpy = [ - {version = ">=1.17.3", markers = "platform_machine != \"aarch64\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, - {version = ">=1.19.2", markers = "platform_machine == \"aarch64\" and python_version < \"3.10\""}, - {version = ">=1.20.0", markers = "platform_machine == \"arm64\" and python_version < \"3.10\""}, - {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, -] -python-dateutil = ">=2.7.3" -pytz = ">=2017.3" - -[package.extras] -test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"] - -[[package]] -name = "parse" -version = "1.19.1" -description = "parse() is the opposite of format()" -category = "dev" -optional = false -python-versions = "*" - -[[package]] -name = "parse-type" -version = "0.6.2" -description = "Simplifies to build parse types based on the parse module" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*" - -[package.dependencies] -parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""} -six = ">=1.15" - -[package.extras] -develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)"] -docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"] -testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"] - -[[package]] -name = "parsedatetime" -version = "2.4" -description = "Parse human-readable date/time text." -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -future = "*" - -[[package]] -name = "pathspec" -version = "0.11.1" -description = "Utility library for gitignore style pattern matching of file paths." 
-category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "pillow" -version = "9.5.0" -description = "Python Imaging Library (Fork)" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "platformdirs" -version = "2.6.2" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "main" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.5)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] - -[[package]] -name = "pluggy" -version = "0.13.1" -description = "plugin and hook calling mechanisms for python" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[package.extras] -dev = ["pre-commit", "tox"] - -[[package]] -name = "portalocker" -version = "2.7.0" -description = "Wraps the portalocker recipe for easy usage" -category = "main" -optional = false -python-versions = ">=3.5" - -[package.dependencies] -pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""} - -[package.extras] -docs = ["sphinx (>=1.7.1)"] -redis = ["redis"] -tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)"] - -[[package]] -name = "posthog" -version = "1.4.9" -description = "Integrate PostHog into any python application." -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -backoff = ">=1.10.0,<2.0.0" -monotonic = ">=1.5" -python-dateutil = ">2.1" -requests = ">=2.7,<3.0" -six = ">=1.5" - -[package.extras] -dev = ["black", "isort", "pre-commit"] -sentry = ["django", "sentry-sdk"] -test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest"] - -[[package]] -name = "proto-plus" -version = "1.22.2" -description = "Beautiful, Pythonic protocol buffers." -category = "main" -optional = true -python-versions = ">=3.6" - -[package.dependencies] -protobuf = ">=3.19.0,<5.0.0dev" - -[package.extras] -testing = ["google-api-core[grpc] (>=1.31.5)"] - -[[package]] -name = "protobuf" -version = "4.21.12" -description = "" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" - -[[package]] -name = "pyarrow" -version = "10.0.1" -description = "Python library for Apache Arrow" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -numpy = ">=1.16.6" - -[[package]] -name = "pyasn1" -version = "0.4.8" -description = "ASN.1 types and codecs" -category = "main" -optional = true -python-versions = "*" - -[[package]] -name = "pyasn1-modules" -version = "0.2.8" -description = "A collection of ASN.1-based protocols modules." 
-category = "main" -optional = true -python-versions = "*" - -[package.dependencies] -pyasn1 = ">=0.4.6,<0.5.0" - -[[package]] -name = "pycparser" -version = "2.21" -description = "C parser in Python" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[[package]] -name = "pycryptodomex" -version = "3.17" -description = "Cryptographic library for Python" -category = "main" -optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" - -[[package]] -name = "pydantic" -version = "1.10.12" -description = "Data validation and settings management using python type hints" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -typing-extensions = ">=4.2.0" - -[package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] - -[[package]] -name = "pygments" -version = "2.14.0" -description = "Pygments is a syntax highlighting package written in Python." -category = "main" -optional = false -python-versions = ">=3.6" - -[package.extras] -plugins = ["importlib-metadata"] - -[[package]] -name = "pyjwt" -version = "2.6.0" -description = "JSON Web Token implementation in Python" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""} - -[package.extras] -crypto = ["cryptography (>=3.4.0)"] -dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] - -[[package]] -name = "pyopenssl" -version = "22.1.0" -description = "Python wrapper module around the OpenSSL library" -category = "main" -optional = true -python-versions = ">=3.6" - -[package.dependencies] -cryptography = ">=38.0.0,<39" - -[package.extras] -docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"] -test = ["flaky", "pretend", "pytest (>=3.0.1)"] - -[[package]] -name = "pyparsing" -version = "3.1.0" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -category = "dev" -optional = false -python-versions = ">=3.6.8" - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - -[[package]] -name = "pyrsistent" -version = "0.19.3" -description = "Persistent/Functional/Immutable data structures" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "pytest" -version = "5.4.3" -description = "pytest: simple powerful testing with Python" -category = "dev" -optional = false -python-versions = ">=3.5" - -[package.dependencies] -atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} -attrs = ">=17.4.0" -colorama = {version = "*", markers = "sys_platform == \"win32\""} -more-itertools = ">=4.0.0" -packaging = "*" -pluggy = ">=0.12,<1.0" -py = ">=1.5.0" -wcwidth = "*" - -[package.extras] -checkqa-mypy = ["mypy (==v0.761)"] -testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] - -[[package]] -name = "pytest-mock" -version = "3.11.1" -description = "Thin-wrapper around the mock package for easier use with pytest" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -pytest = ">=5.0" - -[package.extras] -dev = ["pre-commit", "pytest-asyncio", "tox"] - -[[package]] -name = 
"python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -category = "main" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-slugify" -version = "8.0.0" -description = "A Python slugify application that also handles Unicode" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -text-unidecode = ">=1.3" - -[package.extras] -unidecode = ["Unidecode (>=1.1.1)"] - -[[package]] -name = "pytimeparse" -version = "1.1.8" -description = "Time expression parser" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "pytz" -version = "2022.7.1" -description = "World timezone definitions, modern and historical" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "pytz-deprecation-shim" -version = "0.1.0.post0" -description = "Shims to make deprecation of pytz easier" -category = "main" -optional = true -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" - -[package.dependencies] -"backports.zoneinfo" = {version = "*", markers = "python_version >= \"3.6\" and python_version < \"3.9\""} -tzdata = {version = "*", markers = "python_version >= \"3.6\""} - -[[package]] -name = "pywin32" -version = "306" -description = "Python for Window Extensions" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "pyyaml" -version = "6.0" -description = "YAML parser and emitter for Python" -category = "main" -optional = false -python-versions = ">=3.6" - -[[package]] -name = "redshift-connector" -version = "2.0.910" -description = "Redshift interface library" -category = "main" -optional = true -python-versions = ">=3.6" - -[package.dependencies] -beautifulsoup4 = ">=4.7.0,<5.0.0" -boto3 = ">=1.9.201,<2.0.0" -botocore = ">=1.12.201,<2.0.0" -lxml = ">=4.6.5" -packaging = "*" -pytz = ">=2020.1" -requests = ">=2.23.0,<3.0.0" -scramp = ">=1.2.0,<1.5.0" -setuptools = "*" - -[package.extras] -full = ["numpy", "pandas"] - -[[package]] -name = "requests" -version = "2.28.2" -description = "Python HTTP for Humans." 
-category = "main" -optional = false -python-versions = ">=3.7, <4" - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rich" -version = "13.3.3" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -category = "main" -optional = false -python-versions = ">=3.7.0" - -[package.dependencies] -markdown-it-py = ">=2.2.0,<3.0.0" -pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} - -[package.extras] -jupyter = ["ipywidgets (>=7.5.1,<9)"] - -[[package]] -name = "rsa" -version = "4.9" -description = "Pure-Python RSA implementation" -category = "main" -optional = true -python-versions = ">=3.6,<4" - -[package.dependencies] -pyasn1 = ">=0.1.3" - -[[package]] -name = "s3fs" -version = "2023.1.0" -description = "Convenient Filesystem interface over S3" -category = "main" -optional = true -python-versions = ">= 3.7" - -[package.dependencies] -aiobotocore = ">=2.4.2,<2.5.0" -aiohttp = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1" -fsspec = "2023.1.0" - -[package.extras] -awscli = ["aiobotocore[awscli] (>=2.4.2,<2.5.0)"] -boto3 = ["aiobotocore[boto3] (>=2.4.2,<2.5.0)"] - -[[package]] -name = "s3transfer" -version = "0.6.1" -description = "An Amazon S3 Transfer Manager" -category = "main" -optional = true -python-versions = ">= 3.7" - -[package.dependencies] -botocore = ">=1.12.36,<2.0a.0" - -[package.extras] -crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] - -[[package]] -name = "scramp" -version = "1.4.4" -description = "An implementation of the SCRAM protocol." 
-category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -asn1crypto = ">=1.5.1" - -[[package]] -name = "setuptools" -version = "67.3.2" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - -[[package]] -name = "setuptools-scm" -version = "6.4.2" -description = "the blessed package to manage your versions by scm tags" -category = "dev" -optional = false -python-versions = ">=3.6" - -[package.dependencies] -packaging = ">=20.0" -setuptools = "*" -tomli = ">=1.0.0" - -[package.extras] -test = ["pytest (>=6.2)", "virtualenv (>20)"] -toml = ["setuptools (>=42)"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" - -[[package]] -name = "sniffio" -version = "1.3.0" -description = "Sniff out which async library your code is running under" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "snowflake-connector-python" -version = "3.0.3" -description = "Snowflake Connector for Python" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -asn1crypto = ">0.24.0,<2.0.0" -certifi = ">=2017.4.17" -cffi = ">=1.9,<2.0.0" -charset-normalizer = ">=2,<3" -cryptography = ">=3.1.0,<41.0.0" -filelock = ">=3.5,<4" -idna = ">=2.5,<4" -oscrypto = "<2.0.0" -packaging = "*" -pandas = {version = ">=1.0.0,<1.6.0", optional = true, markers = "extra == \"pandas\""} -pyarrow = {version = ">=10.0.1,<10.1.0", optional = true, markers = "extra == \"pandas\""} -pycryptodomex = ">=3.2,<3.5.0 || >3.5.0,<4.0.0" -pyjwt = "<3.0.0" -pyOpenSSL = ">=16.2.0,<24.0.0" -pytz = "*" -requests = "<3.0.0" -typing-extensions = ">=4.3,<5" -urllib3 = ">=1.21.1,<1.27" - -[package.extras] -development = ["Cython", "coverage", "more-itertools", "numpy (<1.25.0)", "pendulum (!=2.1.1)", "pexpect", "pytest (<7.3.0)", "pytest-cov", "pytest-rerunfailures", "pytest-timeout", "pytest-xdist", "pytzdata"] -pandas = ["pandas (>=1.0.0,<1.6.0)", "pyarrow (>=10.0.1,<10.1.0)"] -secure-local-storage = ["keyring (!=16.1.0,<24.0.0)"] - -[[package]] -name = "soupsieve" -version = "2.4.1" -description = "A modern CSS selector implementation for Beautiful Soup." 
-category = "main" -optional = true -python-versions = ">=3.7" - -[[package]] -name = "sqlalchemy" -version = "1.4.46" -description = "Database Abstraction Library" -category = "main" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" - -[package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} - -[package.extras] -aiomysql = ["aiomysql", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"] -mssql = ["pyodbc"] -mssql-pymssql = ["pymssql"] -mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] -mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] -mysql-connector = ["mysql-connector-python"] -oracle = ["cx_oracle (>=7)", "cx_oracle (>=7,<8)"] -postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] -postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] -postgresql-psycopg2binary = ["psycopg2-binary"] -postgresql-psycopg2cffi = ["psycopg2cffi"] -pymysql = ["pymysql", "pymysql (<1)"] -sqlcipher = ["sqlcipher3_binary"] - -[[package]] -name = "sqlalchemy-redshift" -version = "0.8.14" -description = "Amazon Redshift Dialect for sqlalchemy" -category = "main" -optional = true -python-versions = ">=3.4" - -[package.dependencies] -packaging = "*" -SQLAlchemy = ">=0.9.2,<2.0.0" - -[[package]] -name = "sqlparse" -version = "0.4.3" -description = "A non-validating SQL parser." -category = "main" -optional = false -python-versions = ">=3.5" - -[[package]] -name = "structlog" -version = "22.3.0" -description = "Structured Logging for Python" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.extras] -dev = ["structlog[docs,tests,typing]"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "twisted"] -tests = ["coverage[toml]", "freezegun (>=0.2.8)", "pretend", "pytest (>=6.0)", "pytest-asyncio (>=0.17)", "simplejson"] -typing = ["mypy", "rich", "twisted"] - -[[package]] -name = "tblib" -version = "1.7.0" -description = "Traceback serialization library." 
-category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" - -[[package]] -name = "text-unidecode" -version = "1.3" -description = "The most basic Text::Unidecode port" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -category = "dev" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "trino" -version = "0.321.0" -description = "Client for the Trino distributed SQL Engine" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -pytz = "*" -requests = "*" -sqlalchemy = {version = ">=1.3", optional = true, markers = "extra == \"sqlalchemy\""} -tzlocal = "*" - -[package.extras] -all = ["requests-kerberos", "sqlalchemy (>=1.3)"] -external-authentication-token-cache = ["keyring"] -kerberos = ["requests-kerberos"] -sqlalchemy = ["sqlalchemy (>=1.3)"] -tests = ["black", "click", "httpretty (<1.1)", "isort", "pre-commit", "pytest", "pytest-runner", "requests-kerberos", "sqlalchemy (>=1.3)"] - -[[package]] -name = "types-python-dateutil" -version = "2.8.19.14" -description = "Typing stubs for python-dateutil" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "typing-extensions" -version = "4.7.1" -description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "tzdata" -version = "2022.7" -description = "Provider of IANA time zone data" -category = "main" -optional = true -python-versions = ">=2" - -[[package]] -name = "tzlocal" -version = "4.2" -description = "tzinfo object for the local timezone" -category = "main" -optional = true -python-versions = ">=3.6" - -[package.dependencies] -"backports.zoneinfo" = {version = "*", markers = "python_version < \"3.9\""} -pytz-deprecation-shim = "*" -tzdata = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -devenv = ["black", "pyroma", "pytest-cov", "zest.releaser"] -test = ["pytest (>=4.3)", "pytest-mock (>=3.3)"] - -[[package]] -name = "urllib3" -version = "1.26.14" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - -[[package]] -name = "virtualenv" -version = "20.19.0" -description = "Virtual Python Environment builder" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -distlib = ">=0.3.6,<1" -filelock = ">=3.4.1,<4" -platformdirs = ">=2.4,<4" - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"] -test = ["covdefaults (>=2.2.2)", "coverage (>=7.1)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23)", "pytest (>=7.2.1)", "pytest-env (>=0.8.1)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.10)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)"] - -[[package]] -name = "wcwidth" -version = "0.2.6" -description = "Measures the displayed width of unicode strings in a terminal" -category = "dev" -optional = false -python-versions = "*" - -[[package]] -name = "werkzeug" -version = "2.2.3" -description = "The comprehensive WSGI web application library." -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -MarkupSafe = ">=2.1.1" - -[package.extras] -watchdog = ["watchdog"] - -[[package]] -name = "wrapt" -version = "1.14.1" -description = "Module for decorators, wrappers and monkey patching." -category = "main" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" - -[[package]] -name = "yarl" -version = "1.8.2" -description = "Yet another URL library" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - -[[package]] -name = "zipp" -version = "3.13.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[extras] -athena = [] -bigquery = ["google-cloud-bigquery"] -duckdb = ["duckdb-engine"] -postgres = [] -redshift = ["awswrangler", "sqlalchemy-redshift"] -snowflake = ["snowflake-connector-python"] -teleport = ["s3fs"] -trino = ["trino"] - -[metadata] -lock-version = "1.1" -python-versions = "^3.8" -content-hash = "2639f76cd5c27da63d07c2bcf5ade6bd5163a9afbaa60d3aed5ae672fb68fc94" - -[metadata.files] -agate = [ - {file = "agate-1.7.0-py2.py3-none-any.whl", hash = "sha256:ad529c80fe6943906ab3d3bc59c12307e1181d35993e6055db59fa72dc79a6bd"}, - {file = "agate-1.7.0.tar.gz", hash = "sha256:a835a1069247b39b0c340e31eb56e1a95e79f679ad37512192118a5ea3336020"}, -] -agate-sql = [ - {file = "agate-sql-0.5.9.tar.gz", hash = "sha256:30b642a32a5b671161abef9e8ec36352f2e78a24ce849054edac6946d8b9ddc6"}, - {file = "agate_sql-0.5.9-py2.py3-none-any.whl", hash = "sha256:de199dc5863775fe813b139ec7357f309aab8fd785ae8ee456ad07ee4e24a0e0"}, -] 
-aiobotocore = [
-    {file = "aiobotocore-2.4.2-py3-none-any.whl", hash = "sha256:4acd1ebe2e44be4b100aa553910bda899f6dc090b3da2bc1cf3d5de2146ed208"},
-    {file = "aiobotocore-2.4.2.tar.gz", hash = "sha256:0603b74a582dffa7511ce7548d07dc9b10ec87bc5fb657eb0b34f9bd490958bf"},
-]
-aiohttp = [
-    {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1"},
-    {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a"},
-    {file = "aiohttp-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b"},
-    {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3"},
-    {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc"},
-    {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd"},
-    {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5"},
-    {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e"},
-    {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd"},
-    {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6"},
-    {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9"},
-    {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949"},
-    {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea"},
-    {file = "aiohttp-3.8.4-cp310-cp310-win32.whl", hash = "sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1"},
-    {file = "aiohttp-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f"},
-    {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4"},
-    {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4"},
-    {file = "aiohttp-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef"},
-    {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f"},
-    {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e"},
-    {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f"},
-    {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05"},
-    {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654"},
-    {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a"},
-    {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb"},
-    {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531"},
-    {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b"},
-    {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24"},
-    {file = "aiohttp-3.8.4-cp311-cp311-win32.whl", hash = "sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d"},
-    {file = "aiohttp-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-win32.whl", hash = "sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777"},
-    {file = "aiohttp-3.8.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-win32.whl", hash = "sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a"},
-    {file = "aiohttp-3.8.4-cp37-cp37m-win_amd64.whl", hash = "sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480"},
-    {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f"},
-    {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15"},
-    {file = "aiohttp-3.8.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945"},
-    {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da"},
-    {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd"},
-    {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10"},
-    {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8"},
-    {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a"},
-    {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074"},
-    {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52"},
-    {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71"},
-    {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275"},
-    {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d"},
-    {file = "aiohttp-3.8.4-cp38-cp38-win32.whl", hash = "sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54"},
-    {file = "aiohttp-3.8.4-cp38-cp38-win_amd64.whl", hash = "sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f"},
-    {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed"},
-    {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567"},
-    {file = "aiohttp-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643"},
-    {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a"},
-    {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf"},
-    {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719"},
-    {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2"},
-    {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e"},
-    {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57"},
-    {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391"},
-    {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2"},
-    {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14"},
-    {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4"},
-    {file = "aiohttp-3.8.4-cp39-cp39-win32.whl", hash = "sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a"},
-    {file = "aiohttp-3.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04"},
-    {file = "aiohttp-3.8.4.tar.gz", hash = "sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c"},
-]
-aioitertools = [
-    {file = "aioitertools-0.11.0-py3-none-any.whl", hash = "sha256:04b95e3dab25b449def24d7df809411c10e62aab0cbe31a50ca4e68748c43394"},
-    {file = "aioitertools-0.11.0.tar.gz", hash = "sha256:42c68b8dd3a69c2bf7f2233bf7df4bb58b557bca5252ac02ed5187bbc67d6831"},
-]
-aiosignal = [
-    {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
-    {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
-]
-anyio = [
-    {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"},
-    {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"},
-]
-asn1crypto = [
-    {file = "asn1crypto-1.5.1-py2.py3-none-any.whl", hash = "sha256:db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67"},
-    {file = "asn1crypto-1.5.1.tar.gz", hash = "sha256:13ae38502be632115abf8a24cbe5f4da52e3b5231990aff31123c805306ccb9c"},
-]
-astor = [
-    {file = "astor-0.8.1-py2.py3-none-any.whl", hash = "sha256:070a54e890cefb5b3739d19f30f5a5ec840ffc9c50ffa7d23cc9fc1a38ebbfc5"},
-    {file = "astor-0.8.1.tar.gz", hash = "sha256:6a6effda93f4e1ce9f618779b2dd1d9d84f1e32812c23a29b3fff6fd7f63fa5e"},
-]
-async-timeout = [
-    {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"},
-    {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"},
-]
-atomicwrites = [
-    {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"},
-]
-attrs = [
-    {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"},
-    {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"},
-]
-auth0-python = [
-    {file = "auth0-python-4.1.0.tar.gz", hash = "sha256:457009bbbd660b7244247b5e853506d147ecf4cf7dc72a0766330e3057f1180c"},
-    {file = "auth0_python-4.1.0-py2.py3-none-any.whl", hash = "sha256:efe5def4d5829dde85d24dbacb5f793468b15dc2ef3cd2032dfdac20096e099a"},
-]
-awswrangler = [
-    {file = "awswrangler-3.0.0-py3-none-any.whl", hash = "sha256:cccab37dc37dfe0f4a19e00e836ba734601ccb73a114fabdff53cb60e70f8c2c"},
-    {file = "awswrangler-3.0.0.tar.gz", hash = "sha256:9a2384ec4dbfd83ac8cef859d127924f89b3b3f1b6242da89606a296af3d68ec"},
-]
-babel = [
-    {file = "Babel-2.11.0-py3-none-any.whl", hash = "sha256:1ad3eca1c885218f6dce2ab67291178944f810a10a9b5f3cb8382a5a232b64fe"},
-    {file = "Babel-2.11.0.tar.gz", hash = "sha256:5ef4b3226b0180dedded4229651c8b0e1a3a6a2837d45a073272f313e4cf97f6"},
-]
-backoff = [
-    {file = "backoff-1.11.1-py2.py3-none-any.whl", hash = "sha256:61928f8fa48d52e4faa81875eecf308eccfb1016b018bb6bd21e05b5d90a96c5"},
-    {file = "backoff-1.11.1.tar.gz", hash = "sha256:ccb962a2378418c667b3c979b504fdeb7d9e0d29c0579e3b13b86467177728cb"},
-]
-backports-functools-lru-cache = [
-    {file = "backports.functools_lru_cache-1.6.4-py2.py3-none-any.whl", hash = "sha256:dbead04b9daa817909ec64e8d2855fb78feafe0b901d4568758e3a60559d8978"},
-    {file = "backports.functools_lru_cache-1.6.4.tar.gz", hash = "sha256:d5ed2169378b67d3c545e5600d363a923b09c456dab1593914935a68ad478271"},
-]
-backports-zoneinfo = [
-    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:da6013fd84a690242c310d77ddb8441a559e9cb3d3d59ebac9aca1a57b2e18bc"},
-    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:89a48c0d158a3cc3f654da4c2de1ceba85263fafb861b98b59040a5086259722"},
-    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:1c5742112073a563c81f786e77514969acb58649bcdf6cdf0b4ed31a348d4546"},
-    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-win32.whl", hash = "sha256:e8236383a20872c0cdf5a62b554b27538db7fa1bbec52429d8d106effbaeca08"},
-    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:8439c030a11780786a2002261569bdf362264f605dfa4d65090b64b05c9f79a7"},
-    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:f04e857b59d9d1ccc39ce2da1021d196e47234873820cbeaad210724b1ee28ac"},
-    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:17746bd546106fa389c51dbea67c8b7c8f0d14b5526a579ca6ccf5ed72c526cf"},
-    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5c144945a7752ca544b4b78c8c41544cdfaf9786f25fe5ffb10e838e19a27570"},
-    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-win32.whl", hash = "sha256:e55b384612d93be96506932a786bbcde5a2db7a9e6a4bb4bffe8b733f5b9036b"},
-    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a76b38c52400b762e48131494ba26be363491ac4f9a04c1b7e92483d169f6582"},
-    {file = "backports.zoneinfo-0.2.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:8961c0f32cd0336fb8e8ead11a1f8cd99ec07145ec2931122faaac1c8f7fd987"},
-    {file = "backports.zoneinfo-0.2.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e81b76cace8eda1fca50e345242ba977f9be6ae3945af8d46326d776b4cf78d1"},
-    {file = "backports.zoneinfo-0.2.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7b0a64cda4145548fed9efc10322770f929b944ce5cee6c0dfe0c87bf4c0c8c9"},
-    {file = "backports.zoneinfo-0.2.1-cp38-cp38-win32.whl", hash = "sha256:1b13e654a55cd45672cb54ed12148cd33628f672548f373963b0bff67b217328"},
-    {file = "backports.zoneinfo-0.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:4a0f800587060bf8880f954dbef70de6c11bbe59c673c3d818921f042f9954a6"},
-    {file = "backports.zoneinfo-0.2.1.tar.gz", hash = "sha256:fadbfe37f74051d024037f223b8e001611eac868b5c5b06144ef4d8b799862f2"},
-]
-beautifulsoup4 = [
-    {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"},
-    {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"},
-]
-behave = [
-    {file = "behave-1.2.6-py2.py3-none-any.whl", hash = "sha256:ebda1a6c9e5bfe95c5f9f0a2794e01c7098b3dde86c10a95d8621c5907ff6f1c"},
-    {file = "behave-1.2.6.tar.gz", hash = "sha256:b9662327aa53294c1351b0a9c369093ccec1d21026f050c3bd9b3e5cccf81a86"},
-]
-black = [
-    {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"},
-    {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"},
-    {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"},
-    {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"},
-    {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"},
-    {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"},
-    {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"},
-    {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"},
-    {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"},
-    {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"},
-    {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"},
-    {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"},
-]
-boto3 = [
-    {file = "boto3-1.24.59-py3-none-any.whl", hash = "sha256:34ab44146a2c4e7f4e72737f4b27e6eb5e0a7855c2f4599e3d9199b6a0a2d575"},
-    {file = "boto3-1.24.59.tar.gz", hash = "sha256:a50b4323f9579cfe22fcf5531fbd40b567d4d74c1adce06aeb5c95fce2a6fb40"},
-]
-botocore = [
-    {file = "botocore-1.27.59-py3-none-any.whl", hash = "sha256:69d756791fc024bda54f6c53f71ae34e695ee41bbbc1743d9179c4837a4929da"},
-    {file = "botocore-1.27.59.tar.gz", hash = "sha256:eda4aed6ee719a745d1288eaf1beb12f6f6448ad1fa12f159405db14ba9c92cf"},
-]
-cachetools = [
-    {file = "cachetools-5.3.0-py3-none-any.whl", hash = "sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4"},
-    {file = "cachetools-5.3.0.tar.gz", hash = "sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14"},
-]
-certifi = [
-    {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
-    {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
-]
-cffi = [
-    {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
-    {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
-    {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
-    {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
-    {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
-    {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
-    {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
-    {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
-    {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
-    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
-    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
-    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
-    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
-    {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
-    {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
-    {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
-    {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
-    {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
-    {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
-    {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
-    {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
-    {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
-    {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
-    {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
-    {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
-    {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
-    {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
-    {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
-    {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
-    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
-    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
-    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
-    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
-    {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
-    {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
-    {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
-    {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
-    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
-    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
-    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
-    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
-    {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
-    {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
-    {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
-    {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
-    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
-    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
-    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
-    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
-    {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
-    {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
-    {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
-    {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
-    {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
-    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
-    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
-    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
-    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
-    {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
-    {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
-    {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
-    {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
-    {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
-    {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
-]
-charset-normalizer = [
-    {file = "charset-normalizer-2.1.1.tar.gz", hash = "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845"},
-    {file = "charset_normalizer-2.1.1-py3-none-any.whl", hash = "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"},
-]
-click = [
-    {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
-    {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
-]
-colorama = [
-    {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
-    {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
-]
-cryptography = [
-    {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"},
-    {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"},
-    {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"},
-    {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"},
-    {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"},
-    {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"},
-    {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"},
-    {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"},
-    {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"},
-    {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"},
-    {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"},
-    {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"},
-    {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"},
-    {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"},
-    {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"},
-    {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"},
-    {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"},
-    {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"},
-    {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"},
-    {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"},
-    {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"},
-    {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"},
-    {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"},
-    {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"},
-    {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"},
-    {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"},
-]
-cycler = [
-    {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"},
-    {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"},
-]
-datadog-api-client = [
-    {file = "datadog-api-client-2.12.0.tar.gz", hash = "sha256:8d139a535249b8bea516537b8ada7c746eadd59187f9e02de8c2ae85c3ae0b31"},
-    {file = "datadog_api_client-2.12.0-py3-none-any.whl", hash = "sha256:28b3bd1f4dfe27d44c71ae13f8906fce8483f69c950a078c330778de49f10746"},
-]
-db-dtypes = [
-    {file = "db-dtypes-1.0.5.tar.gz", hash = "sha256:ee68f30cbccf343124ef0abebc7f8cc9a74ef8ed7ee4ff61f586117e8040a9d6"},
-    {file = "db_dtypes-1.0.5-py2.py3-none-any.whl", hash = "sha256:ab6782bf7a414dd7289ce4ba8ddea5ec44c1339d189c7738d7098efdfd148266"},
-]
-dbt-core = [
-    {file = "dbt-core-1.5.0.tar.gz", hash = "sha256:d6d43b7ff0f12765e7e85559c1a109eaa23a7536b26b3f963d0c19263f69b3c4"},
-    {file = "dbt_core-1.5.0-py3-none-any.whl", hash = "sha256:0761fb8287830d7c8990517340238c62c0085e8b8351952440fe03fdd30ee817"},
-]
-dbt-extractor = [
-    {file = "dbt_extractor-0.4.1-cp36-abi3-macosx_10_7_x86_64.whl", hash = "sha256:4dc715bd740e418d8dc1dd418fea508e79208a24cf5ab110b0092a3cbe96bf71"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:bc9e0050e3a2f4ea9fe58e8794bc808e6709a0c688ed710fc7c5b6ef3e5623ec"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76872cdee659075d6ce2df92dc62e59a74ba571be62acab2e297ca478b49d766"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:81435841610be1b07806d72cd89b1956c6e2a84c360b9ceb3f949c62a546d569"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7c291f9f483eae4f60dd5859097d7ba51d5cb6c4725f08973ebd18cdea89d758"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:822b1e911db230e1b9701c99896578e711232001027b518c44c32f79a46fa3f9"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:554d27741a54599c39e5c0b7dbcab77400d83f908caba284a3e960db812e5814"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a805d51a25317f53cbff951c79b9cf75421cf48e4b3e1dfb3e9e8de6d824b76c"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cad90ddc708cb4182dc16fe2c87b1f088a1679877b93e641af068eb68a25d582"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:34783d788b133f223844e280e37b3f5244f2fb60acc457aa75c2667e418d5442"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:9da211869a1220ea55c5552c1567a3ea5233a6c52fa89ca87a22465481c37bc9"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-musllinux_1_2_i686.whl", hash = "sha256:7d7c47774dc051b8c18690281a55e2e3d3320e823b17e04b06bc3ff81b1874ba"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:037907a7c7ae0391045d81338ca77ddaef899a91d80f09958f09fe374594e19b"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-win32.whl", hash = "sha256:3fe8d8e28a7bd3e0884896147269ca0202ca432d8733113386bdc84c824561bf"},
-    {file = "dbt_extractor-0.4.1-cp36-abi3-win_amd64.whl", hash = "sha256:35265a0ae0a250623b0c2e3308b2738dc8212e40e0aa88407849e9ea090bb312"},
-    {file = "dbt_extractor-0.4.1.tar.gz", hash = "sha256:75b1c665699ec0f1ffce1ba3d776f7dfce802156f22e70a7b9c8f0b4d7e80f42"},
-]
-deprecated = [
-    {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"},
-    {file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"},
-]
-deprecation = [
-    {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"},
-    {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"},
-]
-dill = [
-    {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"},
-    {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"},
-]
-distlib = [
-    {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"},
-    {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"},
-]
-duckdb = [
-    {file = "duckdb-0.8.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:14781d21580ee72aba1f5dcae7734674c9b6c078dd60470a08b2b420d15b996d"},
-    {file = "duckdb-0.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f13bf7ab0e56ddd2014ef762ae4ee5ea4df5a69545ce1191b8d7df8118ba3167"},
-    {file = "duckdb-0.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4032042d8363e55365bbca3faafc6dc336ed2aad088f10ae1a534ebc5bcc181"},
-    {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a71bd8f0b0ca77c27fa89b99349ef22599ffefe1e7684ae2e1aa2904a08684"},
-    {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24568d6e48f3dbbf4a933109e323507a46b9399ed24c5d4388c4987ddc694fd0"},
-    {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297226c0dadaa07f7c5ae7cbdb9adba9567db7b16693dbd1b406b739ce0d7924"},
-    {file = "duckdb-0.8.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5792cf777ece2c0591194006b4d3e531f720186102492872cb32ddb9363919cf"},
-    {file = "duckdb-0.8.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:12803f9f41582b68921d6b21f95ba7a51e1d8f36832b7d8006186f58c3d1b344"},
-    {file = "duckdb-0.8.1-cp310-cp310-win32.whl", hash = "sha256:d0953d5a2355ddc49095e7aef1392b7f59c5be5cec8cdc98b9d9dc1f01e7ce2b"},
-    {file = "duckdb-0.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:6e6583c98a7d6637e83bcadfbd86e1f183917ea539f23b6b41178f32f813a5eb"},
-    {file = "duckdb-0.8.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fad7ed0d4415f633d955ac24717fa13a500012b600751d4edb050b75fb940c25"},
-    {file = "duckdb-0.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81ae602f34d38d9c48dd60f94b89f28df3ef346830978441b83c5b4eae131d08"},
-    {file = "duckdb-0.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7d75cfe563aaa058d3b4ccaaa371c6271e00e3070df5de72361fd161b2fe6780"},
-    {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dbb55e7a3336f2462e5e916fc128c47fe1c03b6208d6bd413ac11ed95132aa0"},
-    {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6df53efd63b6fdf04657385a791a4e3c4fb94bfd5db181c4843e2c46b04fef5"},
-    {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b188b80b70d1159b17c9baaf541c1799c1ce8b2af4add179a9eed8e2616be96"},
-    {file = "duckdb-0.8.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5ad481ee353f31250b45d64b4a104e53b21415577943aa8f84d0af266dc9af85"},
-    {file = "duckdb-0.8.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1d1b1729993611b1892509d21c21628917625cdbe824a61ce891baadf684b32"},
-    {file = "duckdb-0.8.1-cp311-cp311-win32.whl", hash = "sha256:2d8f9cc301e8455a4f89aa1088b8a2d628f0c1f158d4cf9bc78971ed88d82eea"},
-    {file = "duckdb-0.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:07457a43605223f62d93d2a5a66b3f97731f79bbbe81fdd5b79954306122f612"},
-    {file = "duckdb-0.8.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d2c8062c3e978dbcd80d712ca3e307de8a06bd4f343aa457d7dd7294692a3842"},
-    {file = "duckdb-0.8.1-cp36-cp36m-win32.whl", hash = "sha256:fad486c65ae944eae2de0d590a0a4fb91a9893df98411d66cab03359f9cba39b"},
-    {file = "duckdb-0.8.1-cp36-cp36m-win_amd64.whl", hash = "sha256:86fa4506622c52d2df93089c8e7075f1c4d0ba56f4bf27faebde8725355edf32"},
-    {file = "duckdb-0.8.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:60e07a62782f88420046e30cc0e3de842d0901c4fd5b8e4d28b73826ec0c3f5e"},
-    {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f18563675977f8cbf03748efee0165b4c8ef64e0cbe48366f78e2914d82138bb"},
-    {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16e179443832bea8439ae4dff93cf1e42c545144ead7a4ef5f473e373eea925a"},
-    {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a413d5267cb41a1afe69d30dd6d4842c588256a6fed7554c7e07dad251ede095"},
-    {file = "duckdb-0.8.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3784680df59eadd683b0a4c2375d451a64470ca54bd171c01e36951962b1d332"},
-    {file = "duckdb-0.8.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:67a1725c2b01f9b53571ecf3f92959b652f60156c1c48fb35798302e39b3c1a2"},
-    {file = "duckdb-0.8.1-cp37-cp37m-win32.whl", hash = "sha256:197d37e2588c5ad063e79819054eedb7550d43bf1a557d03ba8f8f67f71acc42"},
-    {file = "duckdb-0.8.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3843feb79edf100800f5037c32d5d5a5474fb94b32ace66c707b96605e7c16b2"},
-    {file = "duckdb-0.8.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:624c889b0f2d656794757b3cc4fc58030d5e285f5ad2ef9fba1ea34a01dab7fb"},
-    {file = "duckdb-0.8.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fcbe3742d77eb5add2d617d487266d825e663270ef90253366137a47eaab9448"},
-    {file = "duckdb-0.8.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47516c9299d09e9dbba097b9fb339b389313c4941da5c54109df01df0f05e78c"},
-    {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf1ba718b7522d34399446ebd5d4b9fcac0b56b6ac07bfebf618fd190ec37c1d"},
-    {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e36e35d38a9ae798fe8cf6a839e81494d5b634af89f4ec9483f4d0a313fc6bdb"},
-    {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23493313f88ce6e708a512daacad13e83e6d1ea0be204b175df1348f7fc78671"},
-    {file = "duckdb-0.8.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1fb9bf0b6f63616c8a4b9a6a32789045e98c108df100e6bac783dc1e36073737"},
-    {file = "duckdb-0.8.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:12fc13ecd5eddd28b203b9e3999040d3a7374a8f4b833b04bd26b8c5685c2635"},
-    {file = "duckdb-0.8.1-cp38-cp38-win32.whl", hash = "sha256:a12bf4b18306c9cb2c9ba50520317e6cf2de861f121d6f0678505fa83468c627"},
-    {file = "duckdb-0.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:e4e809358b9559c00caac4233e0e2014f3f55cd753a31c4bcbbd1b55ad0d35e4"},
-    {file = "duckdb-0.8.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7acedfc00d97fbdb8c3d120418c41ef3cb86ef59367f3a9a30dff24470d38680"},
-    {file = "duckdb-0.8.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:99bfe264059cdc1e318769103f656f98e819cd4e231cd76c1d1a0327f3e5cef8"},
-    {file = "duckdb-0.8.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:538b225f361066231bc6cd66c04a5561de3eea56115a5dd773e99e5d47eb1b89"},
-    {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae0be3f71a18cd8492d05d0fc1bc67d01d5a9457b04822d025b0fc8ee6efe32e"},
-    {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd82ba63b58672e46c8ec60bc9946aa4dd7b77f21c1ba09633d8847ad9eb0d7b"},
-    {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:780a34559aaec8354e83aa4b7b31b3555f1b2cf75728bf5ce11b89a950f5cdd9"},
-    {file = "duckdb-0.8.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:01f0d4e9f7103523672bda8d3f77f440b3e0155dd3b2f24997bc0c77f8deb460"},
-    {file = "duckdb-0.8.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:31f692decb98c2d57891da27180201d9e93bb470a3051fcf413e8da65bca37a5"},
-    {file = "duckdb-0.8.1-cp39-cp39-win32.whl", hash = "sha256:e7fe93449cd309bbc67d1bf6f6392a6118e94a9a4479ab8a80518742e855370a"},
-    {file = "duckdb-0.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:81d670bc6807672f038332d9bf587037aabdd741b0810de191984325ed307abd"},
-    {file = "duckdb-0.8.1.tar.gz", hash = "sha256:a54d37f4abc2afc4f92314aaa56ecf215a411f40af4bffe1e86bd25e62aceee9"},
-]
-duckdb-engine = [
-    {file = "duckdb_engine-0.1.11-py3-none-any.whl", hash = "sha256:8721775c71300b0b01350429e8727940e03a09e530c91b18861eb6b1d2d8cb1b"},
-    {file = "duckdb_engine-0.1.11.tar.gz", hash = "sha256:d958fa32b2f239616416431d474d7a2135fd444b5001339cbe99b868b79ccf50"},
-]
-exceptiongroup = [
-    {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"},
-    {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"},
-]
-fal = [
-    {file = "fal-0.10.0-py3-none-any.whl", hash = "sha256:3b1d4275504c235c91ccd54a4b40d61a20730960ee6369b1999ed3192e41533f"},
-    {file = "fal-0.10.0.tar.gz", hash = "sha256:c8aad6bfb7406d1740917d4337f7fbfe356ae2b358af9a52883d1ca1dfdb28cb"},
-]
-filelock = [
-    {file = "filelock-3.9.0-py3-none-any.whl", hash = "sha256:f58d535af89bb9ad5cd4df046f741f8553a418c01a7856bf0d173bbc9f6bd16d"},
-    {file = "filelock-3.9.0.tar.gz", hash = "sha256:7b319f24340b51f55a2bf7a12ac0755a9b03e718311dac567a0f4f7fabd2f5de"},
-]
-fonttools = [
-    {file = "fonttools-4.38.0-py3-none-any.whl", hash = "sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb"},
-    {file = "fonttools-4.38.0.zip", hash = "sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1"},
-]
-frozenlist = [
-    {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"},
-    {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"},
-    {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"},
-    {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"},
-    {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"},
-    {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"},
-    {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"},
-    {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"},
-    {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"},
-    {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"},
-    {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"},
-    {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"},
-    {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"},
-    {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"},
-    {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"},
-    {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"},
-    {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"},
-    {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"},
-    {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"},
-    {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"},
-    {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"},
-    {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"},
-    {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"},
-    {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"},
-    {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"},
-    {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"},
-    {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"},
-    {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"},
-    {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"},
-    {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"},
-    {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"},
-    {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"},
-    {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"},
-    {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"},
-    {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"},
-    {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"},
-    {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"},
-    {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"},
-    {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"},
-    {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"},
-    {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"},
-    {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"},
-    {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"},
-    {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"},
-    {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"},
-    {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"},
-    {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"},
-    {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"},
-    {file = "frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"},
-    {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"},
-    {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"},
-    {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"},
-    {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"},
-    {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"},
-    {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"},
-    {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"},
-    {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"},
-    {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"},
-    {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"},
-    {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"},
-    {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"},
-    {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"},
-]
-fsspec = [
-    {file = "fsspec-2023.1.0-py3-none-any.whl", hash = "sha256:b833e2e541e9e8cde0ab549414187871243177feb3d344f9d27b25a93f5d8139"},
-    {file = "fsspec-2023.1.0.tar.gz", hash = "sha256:fbae7f20ff801eb5f7d0bedf81f25c787c0dfac5e982d98fa3884a9cde2b5411"},
-]
-future = [
-    {file = "future-0.18.3.tar.gz", hash = "sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307"},
-]
-google-api-core = [
-
{file = "google-api-core-2.11.0.tar.gz", hash = "sha256:4b9bb5d5a380a0befa0573b302651b8a9a89262c1730e37bf423cec511804c22"}, - {file = "google_api_core-2.11.0-py3-none-any.whl", hash = "sha256:ce222e27b0de0d7bc63eb043b956996d6dccab14cc3b690aaea91c9cc99dc16e"}, -] -google-auth = [ - {file = "google-auth-2.16.0.tar.gz", hash = "sha256:ed7057a101af1146f0554a769930ac9de506aeca4fd5af6543ebe791851a9fbd"}, - {file = "google_auth-2.16.0-py2.py3-none-any.whl", hash = "sha256:5045648c821fb72384cdc0e82cc326df195f113a33049d9b62b74589243d2acc"}, -] -google-cloud-bigquery = [ - {file = "google-cloud-bigquery-3.5.0.tar.gz", hash = "sha256:dd3ca84e5be6fa9e0570fb21665a902cc5651cbd045842fb714164c99a2639c4"}, - {file = "google_cloud_bigquery-3.5.0-py2.py3-none-any.whl", hash = "sha256:358f54c473938b2d022335118b4e56cdcdaf22a5a112fa0cfeb888fd8814ba62"}, -] -google-cloud-core = [ - {file = "google-cloud-core-2.3.2.tar.gz", hash = "sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a"}, - {file = "google_cloud_core-2.3.2-py2.py3-none-any.whl", hash = "sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe"}, -] -google-crc32c = [ - {file = "google-crc32c-1.5.0.tar.gz", hash = "sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7"}, - {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13"}, - {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346"}, - {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65"}, - {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b"}, - {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02"}, - {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4"}, - {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e"}, - {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c"}, - {file = "google_crc32c-1.5.0-cp310-cp310-win32.whl", hash = "sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee"}, - {file = "google_crc32c-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289"}, - {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273"}, - {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298"}, - {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57"}, - {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438"}, - {file = 
"google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906"}, - {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183"}, - {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd"}, - {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c"}, - {file = "google_crc32c-1.5.0-cp311-cp311-win32.whl", hash = "sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709"}, - {file = "google_crc32c-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-win32.whl", hash = "sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94"}, - {file = "google_crc32c-1.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740"}, - {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8"}, - {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a"}, - {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946"}, - {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a"}, - {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d"}, - {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a"}, - {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37"}, - {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894"}, - {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a"}, - {file = "google_crc32c-1.5.0-cp38-cp38-win32.whl", hash = "sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4"}, - {file = "google_crc32c-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c"}, - {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7"}, - {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d"}, - {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100"}, - {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9"}, - {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57"}, - {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210"}, - {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd"}, - {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96"}, - {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61"}, - {file = "google_crc32c-1.5.0-cp39-cp39-win32.whl", hash = "sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c"}, - {file = "google_crc32c-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541"}, - {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325"}, - {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd"}, - {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091"}, - {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178"}, - {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2"}, - {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d"}, - {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2"}, - {file 
= "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5"}, - {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462"}, - {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314"}, - {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728"}, - {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88"}, - {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb"}, - {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31"}, - {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93"}, -] -google-resumable-media = [ - {file = "google-resumable-media-2.4.1.tar.gz", hash = "sha256:15b8a2e75df42dc6502d1306db0bce2647ba6013f9cd03b6e17368c0886ee90a"}, - {file = "google_resumable_media-2.4.1-py2.py3-none-any.whl", hash = "sha256:831e86fd78d302c1a034730a0c6e5369dd11d37bad73fa69ca8998460d5bae8d"}, -] -googleapis-common-protos = [ - {file = "googleapis-common-protos-1.58.0.tar.gz", hash = "sha256:c727251ec025947d545184ba17e3578840fc3a24a0516a020479edab660457df"}, - {file = "googleapis_common_protos-1.58.0-py2.py3-none-any.whl", hash = "sha256:ca3befcd4580dab6ad49356b46bf165bb68ff4b32389f028f1abd7c10ab9519a"}, -] -greenlet = [ - {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, - {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, - {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, - {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, - {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, - {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, - {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, - {file = 
"greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, - {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, - {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, - {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, - {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, - {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, - {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, - {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, - {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, - {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, - {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, - {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, - {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, - {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, - {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, - {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, - {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = 
"sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, - {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, - {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, - {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, - {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, - {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, - {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, - {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, - {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, - {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, - {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, - {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, - {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, - {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, - {file 
= "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, - {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, - {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, - {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, - {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, - {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, -] -grpc-interceptor = [ - {file = "grpc-interceptor-0.15.0.tar.gz", hash = "sha256:5c1aa9680b1d7e12259960c38057b121826860b05ebbc1001c74343b7ad1455e"}, - {file = "grpc_interceptor-0.15.0-py3-none-any.whl", hash = "sha256:63e390162e64df96c39c40508eb697def76a7cafac32a7eaf9272093eec1109e"}, -] -grpcio = [ - {file = "grpcio-1.51.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:cc2bece1737b44d878cc1510ea04469a8073dbbcdd762175168937ae4742dfb3"}, - {file = "grpcio-1.51.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:e223a9793522680beae44671b9ed8f6d25bbe5ddf8887e66aebad5e0686049ef"}, - {file = "grpcio-1.51.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:24ac1154c4b2ab4a0c5326a76161547e70664cd2c39ba75f00fc8a2170964ea2"}, - {file = "grpcio-1.51.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4ef09f8997c4be5f3504cefa6b5c6cc3cf648274ce3cede84d4342a35d76db6"}, - {file = "grpcio-1.51.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8a0b77e992c64880e6efbe0086fe54dfc0bbd56f72a92d9e48264dcd2a3db98"}, - {file = "grpcio-1.51.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:eacad297ea60c72dd280d3353d93fb1dcca952ec11de6bb3c49d12a572ba31dd"}, - {file = "grpcio-1.51.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:16c71740640ba3a882f50b01bf58154681d44b51f09a5728180a8fdc66c67bd5"}, - {file = "grpcio-1.51.1-cp310-cp310-win32.whl", hash = "sha256:29cb97d41a4ead83b7bcad23bdb25bdd170b1e2cba16db6d3acbb090bc2de43c"}, - {file = "grpcio-1.51.1-cp310-cp310-win_amd64.whl", hash = "sha256:9ff42c5620b4e4530609e11afefa4a62ca91fa0abb045a8957e509ef84e54d30"}, - {file = "grpcio-1.51.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:bc59f7ba87972ab236f8669d8ca7400f02a0eadf273ca00e02af64d588046f02"}, - {file = "grpcio-1.51.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3c2b3842dcf870912da31a503454a33a697392f60c5e2697c91d133130c2c85d"}, - {file = "grpcio-1.51.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b011674090594f1f3245960ced7386f6af35485a38901f8afee8ad01541dbd"}, - {file = "grpcio-1.51.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d680356a975d9c66a678eb2dde192d5dc427a7994fb977363634e781614f7c"}, - {file = "grpcio-1.51.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:094e64236253590d9d4075665c77b329d707b6fca864dd62b144255e199b4f87"}, - {file = "grpcio-1.51.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:257478300735ce3c98d65a930bbda3db172bd4e00968ba743e6a1154ea6edf10"}, - 
{file = "grpcio-1.51.1-cp311-cp311-win32.whl", hash = "sha256:5a6ebcdef0ef12005d56d38be30f5156d1cb3373b52e96f147f4a24b0ddb3a9d"}, - {file = "grpcio-1.51.1-cp311-cp311-win_amd64.whl", hash = "sha256:3f9b0023c2c92bebd1be72cdfca23004ea748be1813a66d684d49d67d836adde"}, - {file = "grpcio-1.51.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:cd3baccea2bc5c38aeb14e5b00167bd4e2373a373a5e4d8d850bd193edad150c"}, - {file = "grpcio-1.51.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:17ec9b13cec4a286b9e606b48191e560ca2f3bbdf3986f91e480a95d1582e1a7"}, - {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:fbdbe9a849854fe484c00823f45b7baab159bdd4a46075302281998cb8719df5"}, - {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31bb6bc7ff145e2771c9baf612f4b9ebbc9605ccdc5f3ff3d5553de7fc0e0d79"}, - {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e473525c28251558337b5c1ad3fa969511e42304524a4e404065e165b084c9e4"}, - {file = "grpcio-1.51.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6f0b89967ee11f2b654c23b27086d88ad7bf08c0b3c2a280362f28c3698b2896"}, - {file = "grpcio-1.51.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7942b32a291421460d6a07883033e392167d30724aa84987e6956cd15f1a21b9"}, - {file = "grpcio-1.51.1-cp37-cp37m-win32.whl", hash = "sha256:f96ace1540223f26fbe7c4ebbf8a98e3929a6aa0290c8033d12526847b291c0f"}, - {file = "grpcio-1.51.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f1fec3abaf274cdb85bf3878167cfde5ad4a4d97c68421afda95174de85ba813"}, - {file = "grpcio-1.51.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:0e1a9e1b4a23808f1132aa35f968cd8e659f60af3ffd6fb00bcf9a65e7db279f"}, - {file = "grpcio-1.51.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:6df3b63538c362312bc5fa95fb965069c65c3ea91d7ce78ad9c47cab57226f54"}, - {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:172405ca6bdfedd6054c74c62085946e45ad4d9cec9f3c42b4c9a02546c4c7e9"}, - {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:506b9b7a4cede87d7219bfb31014d7b471cfc77157da9e820a737ec1ea4b0663"}, - {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fb93051331acbb75b49a2a0fd9239c6ba9528f6bdc1dd400ad1cb66cf864292"}, - {file = "grpcio-1.51.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5dca372268c6ab6372d37d6b9f9343e7e5b4bc09779f819f9470cd88b2ece3c3"}, - {file = "grpcio-1.51.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:471d39d3370ca923a316d49c8aac66356cea708a11e647e3bdc3d0b5de4f0a40"}, - {file = "grpcio-1.51.1-cp38-cp38-win32.whl", hash = "sha256:75e29a90dc319f0ad4d87ba6d20083615a00d8276b51512e04ad7452b5c23b04"}, - {file = "grpcio-1.51.1-cp38-cp38-win_amd64.whl", hash = "sha256:f1158bccbb919da42544a4d3af5d9296a3358539ffa01018307337365a9a0c64"}, - {file = "grpcio-1.51.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:59dffade859f157bcc55243714d57b286da6ae16469bf1ac0614d281b5f49b67"}, - {file = "grpcio-1.51.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:dad6533411d033b77f5369eafe87af8583178efd4039c41d7515d3336c53b4f1"}, - {file = "grpcio-1.51.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:4c4423ea38a7825b8fed8934d6d9aeebdf646c97e3c608c3b0bcf23616f33877"}, - {file = "grpcio-1.51.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0dc5354e38e5adf2498312f7241b14c7ce3484eefa0082db4297189dcbe272e6"}, - {file = 
"grpcio-1.51.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97d67983189e2e45550eac194d6234fc38b8c3b5396c153821f2d906ed46e0ce"}, - {file = "grpcio-1.51.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:538d981818e49b6ed1e9c8d5e5adf29f71c4e334e7d459bf47e9b7abb3c30e09"}, - {file = "grpcio-1.51.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9235dcd5144a83f9ca6f431bd0eccc46b90e2c22fe27b7f7d77cabb2fb515595"}, - {file = "grpcio-1.51.1-cp39-cp39-win32.whl", hash = "sha256:aacb54f7789ede5cbf1d007637f792d3e87f1c9841f57dd51abf89337d1b8472"}, - {file = "grpcio-1.51.1-cp39-cp39-win_amd64.whl", hash = "sha256:2b170eaf51518275c9b6b22ccb59450537c5a8555326fd96ff7391b5dd75303c"}, - {file = "grpcio-1.51.1.tar.gz", hash = "sha256:e6dfc2b6567b1c261739b43d9c59d201c1b89e017afd9e684d85aa7a186c9f7a"}, -] -grpcio-status = [ - {file = "grpcio-status-1.51.1.tar.gz", hash = "sha256:ac2617a3095935ebd785e2228958f24b10a0d527a0c9eb5a0863c784f648a816"}, - {file = "grpcio_status-1.51.1-py3-none-any.whl", hash = "sha256:a52cbdc4b18f325bfc13d319ae7c7ae7a0fee07f3d9a005504d6097896d7a495"}, -] -h11 = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] -hologram = [ - {file = "hologram-0.0.15-py3-none-any.whl", hash = "sha256:48ca81ed47da1c604b2d3b951424b600eb8a5785b00513e3b8e3ae8101f90145"}, - {file = "hologram-0.0.15.tar.gz", hash = "sha256:79b3d04df84d5a9d09c2e669ec5bcc50b1713ec79f4683cfdea85583b41e46f0"}, -] -httpcore = [ - {file = "httpcore-0.17.3-py3-none-any.whl", hash = "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"}, - {file = "httpcore-0.17.3.tar.gz", hash = "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888"}, -] -httpx = [ - {file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"}, - {file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"}, -] -idna = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] -importlib-metadata = [ - {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, - {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, -] -isodate = [ - {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, - {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, -] -isolate = [ - {file = "isolate-0.12.2-py3-none-any.whl", hash = "sha256:222e85a37c6b10bc13780a5ae38b2ab73a2c69ef09d9f99aae305c9e14bd7324"}, - {file = "isolate-0.12.2.tar.gz", hash = "sha256:dddffa589a0894e0dcc6722241e892cad009888366da9dc50f9253d98fb28e8b"}, -] -isolate-proto = [ - {file = "isolate_proto-0.0.37-py3-none-any.whl", hash = "sha256:7da70d57b5c62f6106aceeb20a0afcd1962d6eb83f59897db5f7a89f9dfc37d0"}, - {file = "isolate_proto-0.0.37.tar.gz", hash = "sha256:c6277f8791241ec82619359cbd7e57d0695b099980c2e6122aa576ea741b5b02"}, -] -jinja2 = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = 
"sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, -] -jmespath = [ +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = true +python-versions = ">=3.7" +files = [ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, ] -jsonschema = [ - {file = "jsonschema-3.2.0-py2.py3-none-any.whl", hash = "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163"}, - {file = "jsonschema-3.2.0.tar.gz", hash = "sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a"}, + +[[package]] +name = "jsonschema" +version = "4.22.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"}, + {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +jsonschema-specifications = ">=2023.03.6" +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, ] -kiwisolver = [ + +[package.dependencies] +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +referencing = ">=0.31.0" + +[[package]] +name = "kiwisolver" +version = "1.4.4" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.7" +files = [ {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6"}, {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c"}, {file = "kiwisolver-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3"}, @@ -3001,11 +1424,28 @@ kiwisolver = [ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b"}, {file = "kiwisolver-1.4.4.tar.gz", hash = "sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955"}, ] -leather = [ + +[[package]] +name = "leather" 
+version = "0.3.4" +description = "Python charting for 80% of humans." +optional = false +python-versions = "*" +files = [ {file = "leather-0.3.4-py2.py3-none-any.whl", hash = "sha256:5e741daee96e9f1e9e06081b8c8a10c4ac199301a0564cdd99b09df15b4603d2"}, {file = "leather-0.3.4.tar.gz", hash = "sha256:b43e21c8fa46b2679de8449f4d953c06418666dc058ce41055ee8a8d3bb40918"}, ] -logbook = [ + +[package.dependencies] +six = ">=1.6.1" + +[[package]] +name = "logbook" +version = "1.5.3" +description = "A logging replacement for Python" +optional = false +python-versions = "*" +files = [ {file = "Logbook-1.5.3-cp27-cp27m-win32.whl", hash = "sha256:56ee54c11df3377314cedcd6507638f015b4b88c0238c2e01b5eb44fd3a6ad1b"}, {file = "Logbook-1.5.3-cp27-cp27m-win_amd64.whl", hash = "sha256:2dc85f1510533fddb481e97677bb7bca913560862734c0b3b289bfed04f78c92"}, {file = "Logbook-1.5.3-cp35-cp35m-win32.whl", hash = "sha256:94e2e11ff3c2304b0d09a36c6208e5ae756eb948b210e5cbd63cd8d27f911542"}, @@ -3016,7 +1456,25 @@ logbook = [ {file = "Logbook-1.5.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0cf2cdbfb65a03b5987d19109dacad13417809dcf697f66e1a7084fb21744ea9"}, {file = "Logbook-1.5.3.tar.gz", hash = "sha256:66f454ada0f56eae43066f604a222b09893f98c1adc18df169710761b8f32fe8"}, ] -lxml = [ + +[package.extras] +all = ["Jinja2", "brotli", "cython", "execnet (>=1.0.9)", "mock", "pytest", "pytest-cov (<2.6)", "pyzmq", "redis", "sqlalchemy"] +compression = ["brotli"] +dev = ["cython", "mock", "pytest", "pytest-cov (<2.6)"] +execnet = ["execnet (>=1.0.9)"] +jinja = ["Jinja2"] +redis = ["redis"] +sqlalchemy = ["sqlalchemy"] +test = ["mock", "pytest", "pytest-cov (<2.6)"] +zmq = ["pyzmq"] + +[[package]] +name = "lxml" +version = "4.9.2" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" +files = [ {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"}, {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"}, {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"}, @@ -3095,11 +1553,44 @@ lxml = [ {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"}, {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"}, ] -markdown-it-py = [ + +[package.extras] +cssselect = ["cssselect (>=0.7)"] +html5 = ["html5lib"] +htmlsoup = ["BeautifulSoup4"] +source = ["Cython (>=0.29.7)"] + +[[package]] +name = "markdown-it-py" +version = "2.2.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+optional = false +python-versions = ">=3.7" +files = [ {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, ] -markupsafe = [ + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "2.1.2" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.7" +files = [ {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, @@ -3151,11 +1642,35 @@ markupsafe = [ {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, ] -mashumaro = [ - {file = "mashumaro-3.6-py3-none-any.whl", hash = "sha256:77403e3e2ecd0a7d0e22d472c08e33282460e48726eabe356c5163efbdf9c7ee"}, - {file = "mashumaro-3.6.tar.gz", hash = "sha256:ceb3de53029219bbbb0385ca600b59348dcd14e0c68523986c6d51889ad338f5"}, + +[[package]] +name = "mashumaro" +version = "3.8.1" +description = "Fast serialization library on top of dataclasses" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mashumaro-3.8.1-py3-none-any.whl", hash = "sha256:e060469a4bec1c86f8145ea27ecd99027ea3e343075a4efcb5e8a969a45b9fb9"}, + {file = "mashumaro-3.8.1.tar.gz", hash = "sha256:8bae8b25e2287b75094655b8ba8635f34016c09ca16498188f2f3b198d88b7ef"}, ] -matplotlib = [ + +[package.dependencies] +msgpack = {version = ">=0.5.6", optional = true, markers = "extra == \"msgpack\""} +typing-extensions = ">=4.1.0" + +[package.extras] +msgpack = ["msgpack (>=0.5.6)"] +orjson = ["orjson"] +toml = ["tomli (>=1.1.0)", "tomli-w (>=1.0)"] +yaml = ["pyyaml (>=3.13)"] + +[[package]] +name = "matplotlib" +version = "3.5.3" +description = "Python plotting package" +optional = false +python-versions = ">=3.7" +files = [ {file = "matplotlib-3.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a206a1b762b39398efea838f528b3a6d60cdb26fe9d58b48265787e29cd1d693"}, {file = "matplotlib-3.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd45a6f3e93a780185f70f05cf2a383daed13c3489233faad83e81720f7ede24"}, {file = "matplotlib-3.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d62880e1f60e5a30a2a8484432bcb3a5056969dc97258d7326ad465feb7ae069"}, @@ -3192,26 +1707,71 @@ matplotlib = [ {file = "matplotlib-3.5.3-pp37-pypy37_pp73-win_amd64.whl", hash = 
"sha256:d2484b350bf3d32cae43f85dcfc89b3ed7bd2bcd781ef351f93eb6fb2cc483f9"}, {file = "matplotlib-3.5.3.tar.gz", hash = "sha256:339cac48b80ddbc8bfd05daae0a3a73414651a8596904c2a881cfd1edb65f26c"}, ] -mdurl = [ + +[package.dependencies] +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.0.1" +numpy = ">=1.17" +packaging = ">=20.0" +pillow = ">=6.2.0" +pyparsing = ">=2.2.1" +python-dateutil = ">=2.7" + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] -minimal-snowplow-tracker = [ + +[[package]] +name = "minimal-snowplow-tracker" +version = "0.0.2" +description = "A minimal snowplow event tracker for Python. Add analytics to your Python and Django apps, webapps and games" +optional = false +python-versions = "*" +files = [ {file = "minimal-snowplow-tracker-0.0.2.tar.gz", hash = "sha256:acabf7572db0e7f5cbf6983d495eef54081f71be392330eb3aadb9ccb39daaa4"}, ] -mock = [ - {file = "mock-4.0.3-py3-none-any.whl", hash = "sha256:122fcb64ee37cfad5b3f48d7a7d51875d7031aaf3d8be7c42e2bee25044eee62"}, - {file = "mock-4.0.3.tar.gz", hash = "sha256:7d3fbbde18228f4ff2f1f119a45cdffa458b4c0dee32eb4d2bb2f82554bac7bc"}, -] -monotonic = [ + +[package.dependencies] +requests = ">=2.2.1,<3.0" +six = ">=1.9.0,<2.0" + +[[package]] +name = "monotonic" +version = "1.6" +description = "An implementation of time.monotonic() for Python 2 & < 3.3" +optional = false +python-versions = "*" +files = [ {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, ] -more-itertools = [ - {file = "more-itertools-9.0.0.tar.gz", hash = "sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab"}, - {file = "more_itertools-9.0.0-py3-none-any.whl", hash = "sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41"}, + +[[package]] +name = "more-itertools" +version = "8.14.0" +description = "More routines for operating on iterables, beyond itertools" +optional = false +python-versions = ">=3.5" +files = [ + {file = "more-itertools-8.14.0.tar.gz", hash = "sha256:c09443cd3d5438b8dafccd867a6bc1cb0894389e90cb53d227456b0b0bccb750"}, + {file = "more_itertools-8.14.0-py3-none-any.whl", hash = "sha256:1bc4f91ee5b1b31ac7ceacc17c09befe6a40a503907baf9c839c229b5095cfd2"}, ] -msgpack = [ + +[[package]] +name = "msgpack" +version = "1.0.4" +description = "MessagePack serializer" +optional = false +python-versions = "*" +files = [ {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"}, {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"}, {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"}, @@ -3265,7 +1825,14 @@ msgpack = [ {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"}, {file = "msgpack-1.0.4.tar.gz", hash = 
"sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"}, ] -multidict = [ + +[[package]] +name = "multidict" +version = "6.0.4" +description = "multidict implementation" +optional = true +python-versions = ">=3.7" +files = [ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, @@ -3341,108 +1908,229 @@ multidict = [ {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, ] -mypy-extensions = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] -networkx = [ + +[[package]] +name = "networkx" +version = "2.8.8" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.8" +files = [ {file = "networkx-2.8.8-py3-none-any.whl", hash = "sha256:e435dfa75b1d7195c7b8378c3859f0445cd88c6b0375c181ed66823a9ceb7524"}, {file = "networkx-2.8.8.tar.gz", hash = "sha256:230d388117af870fce5647a3c52401fcf753e94720e6ea6b4197a5355648885e"}, ] -numpy = [ - {file = "numpy-1.21.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38e8648f9449a549a7dfe8d8755a5979b45b3538520d1e735637ef28e8c2dc50"}, - {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fd7d7409fa643a91d0a05c7554dd68aa9c9bb16e186f6ccfe40d6e003156e33a"}, - {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a75b4498b1e93d8b700282dc8e655b8bd559c0904b3910b144646dbbbc03e062"}, - {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1412aa0aec3e00bc23fbb8664d76552b4efde98fb71f60737c83efbac24112f1"}, - {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e46ceaff65609b5399163de5893d8f2a82d3c77d5e56d976c8b5fb01faa6b671"}, - {file = "numpy-1.21.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c6a2324085dd52f96498419ba95b5777e40b6bcbc20088fddb9e8cbb58885e8e"}, - {file = "numpy-1.21.1-cp37-cp37m-win32.whl", hash = "sha256:73101b2a1fef16602696d133db402a7e7586654682244344b8329cdcbbb82172"}, - {file = "numpy-1.21.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7a708a79c9a9d26904d1cca8d383bf869edf6f8e7650d85dbc77b041e8c5a0f8"}, - {file = "numpy-1.21.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95b995d0c413f5d0428b3f880e8fe1660ff9396dcd1f9eedbc311f37b5652e16"}, - {file = "numpy-1.21.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:635e6bd31c9fb3d475c8f44a089569070d10a9ef18ed13738b03049280281267"}, - {file = "numpy-1.21.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a3d5fb89bfe21be2ef47c0614b9c9c707b7362386c9a3ff1feae63e0267ccb6"}, - {file = "numpy-1.21.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8a326af80e86d0e9ce92bcc1e65c8ff88297de4fa14ee936cb2293d414c9ec63"}, - {file = 
"numpy-1.21.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:791492091744b0fe390a6ce85cc1bf5149968ac7d5f0477288f78c89b385d9af"}, - {file = "numpy-1.21.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0318c465786c1f63ac05d7c4dbcecd4d2d7e13f0959b01b534ea1e92202235c5"}, - {file = "numpy-1.21.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a513bd9c1551894ee3d31369f9b07460ef223694098cf27d399513415855b68"}, - {file = "numpy-1.21.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:91c6f5fc58df1e0a3cc0c3a717bb3308ff850abdaa6d2d802573ee2b11f674a8"}, - {file = "numpy-1.21.1-cp38-cp38-win32.whl", hash = "sha256:978010b68e17150db8765355d1ccdd450f9fc916824e8c4e35ee620590e234cd"}, - {file = "numpy-1.21.1-cp38-cp38-win_amd64.whl", hash = "sha256:9749a40a5b22333467f02fe11edc98f022133ee1bfa8ab99bda5e5437b831214"}, - {file = "numpy-1.21.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d7a4aeac3b94af92a9373d6e77b37691b86411f9745190d2c351f410ab3a791f"}, - {file = "numpy-1.21.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d9e7912a56108aba9b31df688a4c4f5cb0d9d3787386b87d504762b6754fbb1b"}, - {file = "numpy-1.21.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:25b40b98ebdd272bc3020935427a4530b7d60dfbe1ab9381a39147834e985eac"}, - {file = "numpy-1.21.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8a92c5aea763d14ba9d6475803fc7904bda7decc2a0a68153f587ad82941fec1"}, - {file = "numpy-1.21.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05a0f648eb28bae4bcb204e6fd14603de2908de982e761a2fc78efe0f19e96e1"}, - {file = "numpy-1.21.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f01f28075a92eede918b965e86e8f0ba7b7797a95aa8d35e1cc8821f5fc3ad6a"}, - {file = "numpy-1.21.1-cp39-cp39-win32.whl", hash = "sha256:88c0b89ad1cc24a5efbb99ff9ab5db0f9a86e9cc50240177a571fbe9c2860ac2"}, - {file = "numpy-1.21.1-cp39-cp39-win_amd64.whl", hash = "sha256:01721eefe70544d548425a07c80be8377096a54118070b8a62476866d5208e33"}, - {file = "numpy-1.21.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2d4d1de6e6fb3d28781c73fbde702ac97f03d79e4ffd6598b880b2d95d62ead4"}, - {file = "numpy-1.21.1.zip", hash = "sha256:dff4af63638afcc57a3dfb9e4b26d434a7a602d225b42d746ea7fe2edf1342fd"}, -] -opentelemetry-api = [ - {file = "opentelemetry_api-1.17.0-py3-none-any.whl", hash = "sha256:b41d9b2a979607b75d2683b9bbf97062a683d190bc696969fb2122fa60aeaabc"}, - {file = "opentelemetry_api-1.17.0.tar.gz", hash = "sha256:3480fcf6b783be5d440a226a51db979ccd7c49a2e98d1c747c991031348dcf04"}, + +[package.extras] +default = ["matplotlib (>=3.4)", "numpy (>=1.19)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=0.982)", "pre-commit (>=2.20)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.2)", "pydata-sphinx-theme (>=0.11)", "sphinx (>=5.2)", "sphinx-gallery (>=0.11)", "texext (>=0.6.6)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.9)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "numpy" +version = "1.24.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = 
"numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, ] -opentelemetry-sdk = [ - {file = "opentelemetry_sdk-1.17.0-py3-none-any.whl", hash = "sha256:07424cbcc8c012bc120ed573d5443e7322f3fb393512e72866c30111070a8c37"}, - {file = "opentelemetry_sdk-1.17.0.tar.gz", hash = "sha256:99bb9a787006774f865a4b24f8179900347d03a214c362a6cb70191f77dd6132"}, + +[[package]] +name = "opentelemetry-api" +version = "1.25.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_api-1.25.0-py3-none-any.whl", hash = "sha256:757fa1aa020a0f8fa139f8959e53dec2051cc26b832e76fa839a6d76ecefd737"}, + {file = "opentelemetry_api-1.25.0.tar.gz", hash = "sha256:77c4985f62f2614e42ce77ee4c9da5fa5f0bc1e1821085e9a47533a9323ae869"}, ] -opentelemetry-semantic-conventions = [ - {file = "opentelemetry_semantic_conventions-0.38b0-py3-none-any.whl", hash = "sha256:b0ba36e8b70bfaab16ee5a553d809309cc11ff58aec3d2550d451e79d45243a7"}, - {file = "opentelemetry_semantic_conventions-0.38b0.tar.gz", hash = "sha256:37f09e47dd5fc316658bf9ee9f37f9389b21e708faffa4a65d6a3de484d22309"}, + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<=7.1" + +[[package]] +name = "opentelemetry-sdk" +version = "1.25.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_sdk-1.25.0-py3-none-any.whl", hash = "sha256:d97ff7ec4b351692e9d5a15af570c693b8715ad78b8aafbec5c7100fe966b4c9"}, + {file = "opentelemetry_sdk-1.25.0.tar.gz", hash = "sha256:ce7fc319c57707ef5bf8b74fb9f8ebdb8bfafbe11898410e0d2a761d08a98ec7"}, ] -oscrypto = [ - {file = "oscrypto-1.3.0-py2.py3-none-any.whl", hash = "sha256:2b2f1d2d42ec152ca90ccb5682f3e051fb55986e1b170ebde472b133713e7085"}, - {file = "oscrypto-1.3.0.tar.gz", hash = "sha256:6f5fef59cb5b3708321db7cca56aed8ad7e662853351e7991fcf60ec606d47a4"}, + +[package.dependencies] +opentelemetry-api = "1.25.0" +opentelemetry-semantic-conventions = "0.46b0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.46b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_semantic_conventions-0.46b0-py3-none-any.whl", hash = "sha256:6daef4ef9fa51d51855d9f8e0ccd3a1bd59e0e545abe99ac6203804e36ab3e07"}, + {file = "opentelemetry_semantic_conventions-0.46b0.tar.gz", hash = "sha256:fbc982ecbb6a6e90869b15c1673be90bd18c8a56ff1cffc0864e38e2edffaefa"}, ] -packaging = [ + +[package.dependencies] +opentelemetry-api = "1.25.0" + +[[package]] +name = "packaging" +version = "23.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, ] -pandas = [ - {file = "pandas-1.3.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:62d5b5ce965bae78f12c1c0df0d387899dd4211ec0bdc52822373f13a3a022b9"}, - {file = 
"pandas-1.3.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adfeb11be2d54f275142c8ba9bf67acee771b7186a5745249c7d5a06c670136b"}, - {file = "pandas-1.3.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a8c055d58873ad81cae290d974d13dd479b82cbb975c3e1fa2cf1920715296"}, - {file = "pandas-1.3.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd541ab09e1f80a2a1760032d665f6e032d8e44055d602d65eeea6e6e85498cb"}, - {file = "pandas-1.3.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2651d75b9a167cc8cc572cf787ab512d16e316ae00ba81874b560586fa1325e0"}, - {file = "pandas-1.3.5-cp310-cp310-win_amd64.whl", hash = "sha256:aaf183a615ad790801fa3cf2fa450e5b6d23a54684fe386f7e3208f8b9bfbef6"}, - {file = "pandas-1.3.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:344295811e67f8200de2390093aeb3c8309f5648951b684d8db7eee7d1c81fb7"}, - {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:552020bf83b7f9033b57cbae65589c01e7ef1544416122da0c79140c93288f56"}, - {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cce0c6bbeb266b0e39e35176ee615ce3585233092f685b6a82362523e59e5b4"}, - {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d28a3c65463fd0d0ba8bbb7696b23073efee0510783340a44b08f5e96ffce0c"}, - {file = "pandas-1.3.5-cp37-cp37m-win32.whl", hash = "sha256:a62949c626dd0ef7de11de34b44c6475db76995c2064e2d99c6498c3dba7fe58"}, - {file = "pandas-1.3.5-cp37-cp37m-win_amd64.whl", hash = "sha256:8025750767e138320b15ca16d70d5cdc1886e8f9cc56652d89735c016cd8aea6"}, - {file = "pandas-1.3.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fe95bae4e2d579812865db2212bb733144e34d0c6785c0685329e5b60fcb85dd"}, - {file = "pandas-1.3.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f261553a1e9c65b7a310302b9dbac31cf0049a51695c14ebe04e4bfd4a96f02"}, - {file = "pandas-1.3.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b6dbec5f3e6d5dc80dcfee250e0a2a652b3f28663492f7dab9a24416a48ac39"}, - {file = "pandas-1.3.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3bc49af96cd6285030a64779de5b3688633a07eb75c124b0747134a63f4c05f"}, - {file = "pandas-1.3.5-cp38-cp38-win32.whl", hash = "sha256:b6b87b2fb39e6383ca28e2829cddef1d9fc9e27e55ad91ca9c435572cdba51bf"}, - {file = "pandas-1.3.5-cp38-cp38-win_amd64.whl", hash = "sha256:a395692046fd8ce1edb4c6295c35184ae0c2bbe787ecbe384251da609e27edcb"}, - {file = "pandas-1.3.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd971a3f08b745a75a86c00b97f3007c2ea175951286cdda6abe543e687e5f2f"}, - {file = "pandas-1.3.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37f06b59e5bc05711a518aa10beaec10942188dccb48918bb5ae602ccbc9f1a0"}, - {file = "pandas-1.3.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c21778a688d3712d35710501f8001cdbf96eb70a7c587a3d5613573299fdca6"}, - {file = "pandas-1.3.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3345343206546545bc26a05b4602b6a24385b5ec7c75cb6059599e3d56831da2"}, - {file = "pandas-1.3.5-cp39-cp39-win32.whl", hash = "sha256:c69406a2808ba6cf580c2255bcf260b3f214d2664a3a4197d0e640f573b46fd3"}, - {file = "pandas-1.3.5-cp39-cp39-win_amd64.whl", hash = 
"sha256:32e1a26d5ade11b547721a72f9bfc4bd113396947606e00d5b4a5b79b3dcb006"}, - {file = "pandas-1.3.5.tar.gz", hash = "sha256:1e4285f5de1012de20ca46b188ccf33521bff61ba5c5ebd78b4fb28e5416a9f1"}, + +[[package]] +name = "pandas" +version = "1.5.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"}, + {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"}, + {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"}, + {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"}, + {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"}, + {file = 
"pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"}, + {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"}, + {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"}, + {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, ] -parse = [ +python-dateutil = ">=2.8.1" +pytz = ">=2020.1" + +[package.extras] +test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] + +[[package]] +name = "parse" +version = "1.19.1" +description = "parse() is the opposite of format()" +optional = false +python-versions = "*" +files = [ {file = "parse-1.19.1-py2.py3-none-any.whl", hash = "sha256:371ed3800dc63983832159cc9373156613947707bc448b5215473a219dbd4362"}, {file = "parse-1.19.1.tar.gz", hash = "sha256:cc3a47236ff05da377617ddefa867b7ba983819c664e1afe46249e5b469be464"}, ] -parse-type = [ + +[[package]] +name = "parse-type" +version = "0.6.2" +description = "Simplifies to build parse types based on the parse module" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*" +files = [ {file = "parse_type-0.6.2-py2.py3-none-any.whl", hash = "sha256:06d39a8b70fde873eb2a131141a0e79bb34a432941fb3d66fad247abafc9766c"}, {file = "parse_type-0.6.2.tar.gz", hash = "sha256:79b1f2497060d0928bc46016793f1fca1057c4aacdf15ef876aa48d75a73a355"}, ] -parsedatetime = [ + +[package.dependencies] +parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""} +six = ">=1.15" + +[package.extras] +develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)"] +docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"] +testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"] + +[[package]] +name = "parsedatetime" +version = "2.4" +description = "Parse human-readable date/time text." +optional = false +python-versions = "*" +files = [ {file = "parsedatetime-2.4-py2-none-any.whl", hash = "sha256:9ee3529454bf35c40a77115f5a596771e59e1aee8c53306f346c461b8e913094"}, {file = "parsedatetime-2.4.tar.gz", hash = "sha256:3d817c58fb9570d1eec1dd46fa9448cd644eeed4fb612684b02dfda3a79cb84b"}, ] -pathspec = [ + +[package.dependencies] +future = "*" + +[[package]] +name = "pathspec" +version = "0.11.1" +description = "Utility library for gitignore style pattern matching of file paths." 
+optional = false
+python-versions = ">=3.7"
+files = [
     {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"},
     {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"},
 ]
-pillow = [
+
+[[package]]
+name = "pillow"
+version = "9.5.0"
+description = "Python Imaging Library (Fork)"
+optional = false
+python-versions = ">=3.7"
+files = [
     {file = "Pillow-9.5.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16"},
     {file = "Pillow-9.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa"},
     {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38"},
@@ -3510,27 +2198,86 @@ pillow = [
     {file = "Pillow-9.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799"},
     {file = "Pillow-9.5.0.tar.gz", hash = "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1"},
 ]
-platformdirs = [
+
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
+[[package]]
+name = "pkgutil-resolve-name"
+version = "1.3.10"
+description = "Resolve a name to an object."
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
+    {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
+]
+
+[[package]]
+name = "platformdirs"
+version = "2.6.2"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+optional = false
+python-versions = ">=3.7"
+files = [
     {file = "platformdirs-2.6.2-py3-none-any.whl", hash = "sha256:83c8f6d04389165de7c9b6f0c682439697887bca0aa2f1c87ef1826be3584490"},
     {file = "platformdirs-2.6.2.tar.gz", hash = "sha256:e1fea1fe471b9ff8332e229df3cb7de4f53eeea4998d3b6bfff542115e998bd2"},
 ]
-pluggy = [
-    {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"},
-    {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"},
-]
-portalocker = [
+
+[package.extras]
+docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.5)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
+
+[[package]]
+name = "portalocker"
+version = "2.7.0"
+description = "Wraps the portalocker recipe for easy usage"
+optional = false
+python-versions = ">=3.5"
+files = [
     {file = "portalocker-2.7.0-py2.py3-none-any.whl", hash = "sha256:a07c5b4f3985c3cf4798369631fb7011adb498e2a46d8440efc75a8f29a0f983"},
     {file = "portalocker-2.7.0.tar.gz", hash = "sha256:032e81d534a88ec1736d03f780ba073f047a06c478b06e2937486f334e955c51"},
 ]
-posthog = [
+
+[package.dependencies]
+pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+docs = ["sphinx (>=1.7.1)"]
+redis = ["redis"]
+tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)"]
+
+[[package]]
+name = "posthog"
+version = "1.4.9"
+description = "Integrate PostHog into any python application."
+optional = false
+python-versions = "*"
+files = [
     {file = "posthog-1.4.9-py2.py3-none-any.whl", hash = "sha256:f2dde13313193a735fc968acfca8298afb78efb5fe67780c496d533211a8f280"},
     {file = "posthog-1.4.9.tar.gz", hash = "sha256:6f99ca20da4e343be80b7535a719a2652b77cf06c10406e66e3340a9c278f928"},
 ]
-proto-plus = [
-    {file = "proto-plus-1.22.2.tar.gz", hash = "sha256:0e8cda3d5a634d9895b75c573c9352c16486cb75deb0e078b5fda34db4243165"},
-    {file = "proto_plus-1.22.2-py3-none-any.whl", hash = "sha256:de34e52d6c9c6fcd704192f09767cb561bb4ee64e70eede20b0834d841f0be4d"},
-]
-protobuf = [
+
+[package.dependencies]
+backoff = ">=1.10.0,<2.0.0"
+monotonic = ">=1.5"
+python-dateutil = ">2.1"
+requests = ">=2.7,<3.0"
+six = ">=1.5"
+
+[package.extras]
+dev = ["black", "isort", "pre-commit"]
+sentry = ["django", "sentry-sdk"]
+test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest"]
+
+[[package]]
+name = "protobuf"
+version = "4.21.12"
+description = ""
+optional = false
+python-versions = ">=3.7"
+files = [
     {file = "protobuf-4.21.12-cp310-abi3-win32.whl", hash = "sha256:b135410244ebe777db80298297a97fbb4c862c881b4403b71bac9d4107d61fd1"},
     {file = "protobuf-4.21.12-cp310-abi3-win_amd64.whl", hash = "sha256:89f9149e4a0169cddfc44c74f230d7743002e3aa0b9472d8c28f0388102fc4c2"},
     {file = "protobuf-4.21.12-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:299ea899484ee6f44604deb71f424234f654606b983cb496ea2a53e3c63ab791"},
@@ -3546,11 +2293,14 @@ protobuf = [
     {file = "protobuf-4.21.12-py3-none-any.whl", hash = "sha256:b98d0148f84e3a3c569e19f52103ca1feacdac0d2df8d6533cf983d1fda28462"},
     {file = "protobuf-4.21.12.tar.gz", hash = "sha256:7cd532c4566d0e6feafecc1059d04c7915aec8e182d1cf7adee8b24ef1e2e6ab"},
 ]
-py = [
-    {file = "py-1.11.0-py2.py3-none-any.whl", hash = 
"sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] -pyarrow = [ + +[[package]] +name = "pyarrow" +version = "10.0.1" +description = "Python library for Apache Arrow" +optional = true +python-versions = ">=3.7" +files = [ {file = "pyarrow-10.0.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:e00174764a8b4e9d8d5909b6d19ee0c217a6cf0232c5682e31fdfbd5a9f0ae52"}, {file = "pyarrow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f7a7dbe2f7f65ac1d0bd3163f756deb478a9e9afc2269557ed75b1b25ab3610"}, {file = "pyarrow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb627673cb98708ef00864e2e243f51ba7b4c1b9f07a1d821f98043eccd3f585"}, @@ -3577,54 +2327,28 @@ pyarrow = [ {file = "pyarrow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0ec7587d759153f452d5263dbc8b1af318c4609b607be2bd5127dcda6708cdb1"}, {file = "pyarrow-10.0.1.tar.gz", hash = "sha256:1a14f57a5f472ce8234f2964cd5184cccaa8df7e04568c64edc33b23eb285dd5"}, ] -pyasn1 = [ - {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, - {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, -] -pyasn1-modules = [ - {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, - {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, -] -pycparser = [ + +[package.dependencies] +numpy = ">=1.16.6" + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, ] -pycryptodomex = [ - {file = "pycryptodomex-3.17-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:12056c38e49d972f9c553a3d598425f8a1c1d35b2e4330f89d5ff1ffb70de041"}, - {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab33c2d9f275e05e235dbca1063753b5346af4a5cac34a51fa0da0d4edfb21d7"}, - {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:caa937ff29d07a665dfcfd7a84f0d4207b2ebf483362fa9054041d67fdfacc20"}, - {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2014_aarch64.whl", hash = "sha256:db23d7341e21b273d2440ec6faf6c8b1ca95c8894da612e165be0b89a8688340"}, - {file = "pycryptodomex-3.17-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:f854c8476512cebe6a8681cc4789e4fcff6019c17baa0fd72b459155dc605ab4"}, - {file = "pycryptodomex-3.17-cp27-cp27m-win32.whl", hash = "sha256:a57e3257bacd719769110f1f70dd901c5b6955e9596ad403af11a3e6e7e3311c"}, - {file = "pycryptodomex-3.17-cp27-cp27m-win_amd64.whl", hash = "sha256:d38ab9e53b1c09608ba2d9b8b888f1e75d6f66e2787e437adb1fecbffec6b112"}, - {file = "pycryptodomex-3.17-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:3c2516b42437ae6c7a29ef3ddc73c8d4714e7b6df995b76be4695bbe4b3b5cd2"}, - {file = "pycryptodomex-3.17-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:5c23482860302d0d9883404eaaa54b0615eefa5274f70529703e2c43cc571827"}, - {file = 
"pycryptodomex-3.17-cp27-cp27mu-manylinux2014_aarch64.whl", hash = "sha256:7a8dc3ee7a99aae202a4db52de5a08aa4d01831eb403c4d21da04ec2f79810db"}, - {file = "pycryptodomex-3.17-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:7cc28dd33f1f3662d6da28ead4f9891035f63f49d30267d3b41194c8778997c8"}, - {file = "pycryptodomex-3.17-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:2d4d395f109faba34067a08de36304e846c791808524614c731431ee048fe70a"}, - {file = "pycryptodomex-3.17-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:55eed98b4150a744920597c81b3965b632038781bab8a08a12ea1d004213c600"}, - {file = "pycryptodomex-3.17-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:7fa0b52df90343fafe319257b31d909be1d2e8852277fb0376ba89d26d2921db"}, - {file = "pycryptodomex-3.17-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78f0ddd4adc64baa39b416f3637aaf99f45acb0bcdc16706f0cc7ebfc6f10109"}, - {file = "pycryptodomex-3.17-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4fa037078e92c7cc49f6789a8bac3de06856740bb2038d05f2d9a2e4b165d59"}, - {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:88b0d5bb87eaf2a31e8a759302b89cf30c97f2f8ca7d83b8c9208abe8acb447a"}, - {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:6feedf4b0e36b395329b4186a805f60f900129cdf0170e120ecabbfcb763995d"}, - {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:7a6651a07f67c28b6e978d63aa3a3fccea0feefed9a8453af3f7421a758461b7"}, - {file = "pycryptodomex-3.17-cp35-abi3-win32.whl", hash = "sha256:32e764322e902bbfac49ca1446604d2839381bbbdd5a57920c9daaf2e0b778df"}, - {file = "pycryptodomex-3.17-cp35-abi3-win_amd64.whl", hash = "sha256:4b51e826f0a04d832eda0790bbd0665d9bfe73e5a4d8ea93b6a9b38beeebe935"}, - {file = "pycryptodomex-3.17-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:d4cf0128da167562c49b0e034f09e9cedd733997354f2314837c2fa461c87bb1"}, - {file = "pycryptodomex-3.17-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:c92537b596bd5bffb82f8964cabb9fef1bca8a28a9e0a69ffd3ec92a4a7ad41b"}, - {file = "pycryptodomex-3.17-pp27-pypy_73-win32.whl", hash = "sha256:599bb4ae4bbd614ca05f49bd4e672b7a250b80b13ae1238f05fd0f09d87ed80a"}, - {file = "pycryptodomex-3.17-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4c4674f4b040321055c596aac926d12f7f6859dfe98cd12f4d9453b43ab6adc8"}, - {file = "pycryptodomex-3.17-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a3648025e4ddb72d43addab764336ba2e670c8377dba5dd752e42285440d31"}, - {file = "pycryptodomex-3.17-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40e8a11f578bd0851b02719c862d55d3ee18d906c8b68a9c09f8c564d6bb5b92"}, - {file = "pycryptodomex-3.17-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:23d83b610bd97704f0cd3acc48d99b76a15c8c1540d8665c94d514a49905bad7"}, - {file = "pycryptodomex-3.17-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd29d35ac80755e5c0a99d96b44fb9abbd7e871849581ea6a4cb826d24267537"}, - {file = "pycryptodomex-3.17-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64b876d57cb894b31056ad8dd6a6ae1099b117ae07a3d39707221133490e5715"}, - {file = "pycryptodomex-3.17-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee8bf4fdcad7d66beb744957db8717afc12d176e3fd9c5d106835133881a049b"}, - {file = 
"pycryptodomex-3.17-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c84689c73358dfc23f9fdcff2cb9e7856e65e2ce3b5ed8ff630d4c9bdeb1867b"}, - {file = "pycryptodomex-3.17.tar.gz", hash = "sha256:0af93aad8d62e810247beedef0261c148790c52f3cd33643791cc6396dd217c1"}, -] -pydantic = [ + +[[package]] +name = "pydantic" +version = "1.10.12" +description = "Data validation and settings management using python type hints" +optional = false +python-versions = ">=3.7" +files = [ {file = "pydantic-1.10.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718"}, {file = "pydantic-1.10.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe"}, {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b"}, @@ -3662,80 +2386,122 @@ pydantic = [ {file = "pydantic-1.10.12-py3-none-any.whl", hash = "sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942"}, {file = "pydantic-1.10.12.tar.gz", hash = "sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303"}, ] -pygments = [ + +[package.dependencies] +typing-extensions = ">=4.2.0" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + +[[package]] +name = "pygments" +version = "2.14.0" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.6" +files = [ {file = "Pygments-2.14.0-py3-none-any.whl", hash = "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"}, {file = "Pygments-2.14.0.tar.gz", hash = "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297"}, ] -pyjwt = [ + +[package.extras] +plugins = ["importlib-metadata"] + +[[package]] +name = "pyjwt" +version = "2.6.0" +description = "JSON Web Token implementation in Python" +optional = false +python-versions = ">=3.7" +files = [ {file = "PyJWT-2.6.0-py3-none-any.whl", hash = "sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14"}, {file = "PyJWT-2.6.0.tar.gz", hash = "sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd"}, ] -pyopenssl = [ - {file = "pyOpenSSL-22.1.0-py3-none-any.whl", hash = "sha256:b28437c9773bb6c6958628cf9c3bebe585de661dba6f63df17111966363dd15e"}, - {file = "pyOpenSSL-22.1.0.tar.gz", hash = "sha256:7a83b7b272dd595222d672f5ce29aa030f1fb837630ef229f62e72e395ce8968"}, -] -pyparsing = [ + +[package.dependencies] +cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""} + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pyparsing" +version = "3.1.0" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ {file = "pyparsing-3.1.0-py3-none-any.whl", hash = "sha256:d554a96d1a7d3ddaf7183104485bc19fd80543ad6ac5bdb6426719d766fb06c1"}, {file = "pyparsing-3.1.0.tar.gz", hash = "sha256:edb662d6fe322d6e990b1594b5feaeadf806803359e3d4d42f11e295e588f0ea"}, ] -pyrsistent = [ - {file = 
"pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, - {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, - {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, - {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, - {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, - {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, - {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, - {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, - {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, - {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, - {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, - {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, - {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, - {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, - {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, - {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, - {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, - {file = 
"pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, - {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, - {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, - {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, - {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, -] -pytest = [ - {file = "pytest-5.4.3-py3-none-any.whl", hash = "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1"}, - {file = "pytest-5.4.3.tar.gz", hash = "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8"}, -] -pytest-mock = [ - {file = "pytest-mock-3.11.1.tar.gz", hash = "sha256:7f6b125602ac6d743e523ae0bfa71e1a697a2f5534064528c6ff84c2f7c2fc7f"}, - {file = "pytest_mock-3.11.1-py3-none-any.whl", hash = "sha256:21c279fff83d70763b05f8874cc9cfb3fcacd6d354247a976f9529d19f9acf39"}, -] -python-dateutil = [ + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] -python-slugify = [ + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-slugify" +version = "8.0.0" +description = "A Python slugify application that also handles Unicode" +optional = false +python-versions = ">=3.7" +files = [ {file = "python-slugify-8.0.0.tar.gz", hash = "sha256:f1da83f3c7ab839b3f84543470cd95bdb5a81f1a0b80fed502f78b7dca256062"}, {file = "python_slugify-8.0.0-py2.py3-none-any.whl", hash = "sha256:51f217508df20a6c166c7821683384b998560adcf8f19a6c2ca8b460528ccd9c"}, ] -pytimeparse = [ + +[package.dependencies] +text-unidecode = ">=1.3" + +[package.extras] +unidecode = ["Unidecode (>=1.1.1)"] + +[[package]] +name = "pytimeparse" +version = "1.1.8" +description = "Time expression parser" +optional = false +python-versions = "*" +files = [ {file = "pytimeparse-1.1.8-py2.py3-none-any.whl", hash = "sha256:04b7be6cc8bd9f5647a6325444926c3ac34ee6bc7e69da4367ba282f076036bd"}, {file = "pytimeparse-1.1.8.tar.gz", hash = "sha256:e86136477be924d7e670646a98561957e8ca7308d44841e21f5ddea757556a0a"}, ] -pytz = [ + +[[package]] +name = "pytz" +version = "2022.7.1" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, ] -pytz-deprecation-shim = [ - {file = "pytz_deprecation_shim-0.1.0.post0-py2.py3-none-any.whl", hash = "sha256:8314c9692a636c8eb3bda879b9f119e350e93223ae83e70e80c31675a0fdc1a6"}, - {file = "pytz_deprecation_shim-0.1.0.post0.tar.gz", hash = 
"sha256:af097bae1b616dde5c5744441e2ddc69e74dfdcb0c263129610d85b87445a59d"}, -] -pywin32 = [ + +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, @@ -3751,7 +2517,14 @@ pywin32 = [ {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, ] -pyyaml = [ + +[[package]] +name = "pyyaml" +version = "6.0" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, @@ -3793,183 +2566,503 @@ pyyaml = [ {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, ] -redshift-connector = [ + +[[package]] +name = "redshift-connector" +version = "2.0.910" +description = "Redshift interface library" +optional = true +python-versions = ">=3.6" +files = [ {file = "redshift_connector-2.0.910-py3-none-any.whl", hash = "sha256:8f5ca07f2bec4a97e7a601ac7f0de9b47495298973c8e4d7f219f995b5682e23"}, ] -requests = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, + +[package.dependencies] +beautifulsoup4 = ">=4.7.0,<5.0.0" +boto3 = ">=1.9.201,<2.0.0" +botocore = ">=1.12.201,<2.0.0" +lxml = ">=4.6.5" +packaging = "*" +pytz = ">=2020.1" +requests = ">=2.23.0,<3.0.0" +scramp = ">=1.2.0,<1.5.0" +setuptools = "*" + +[package.extras] +full = ["numpy", "pandas"] + +[[package]] +name = "referencing" +version = "0.35.1" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, + {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
+    {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
 ]
-rich = [
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "rich"
+version = "13.3.3"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+optional = false
+python-versions = ">=3.7.0"
+files = [
     {file = "rich-13.3.3-py3-none-any.whl", hash = "sha256:540c7d6d26a1178e8e8b37e9ba44573a3cd1464ff6348b99ee7061b95d1c6333"},
     {file = "rich-13.3.3.tar.gz", hash = "sha256:dc84400a9d842b3a9c5ff74addd8eb798d155f36c1c91303888e0a66850d2a15"},
 ]
-rsa = [
-    {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"},
-    {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"},
+
+[package.dependencies]
+markdown-it-py = ">=2.2.0,<3.0.0"
+pygments = ">=2.13.0,<3.0.0"
+typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<9)"]
+
+[[package]]
+name = "rpds-py"
+version = "0.18.1"
+description = "Python bindings to Rust's persistent data structures (rpds)"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"},
+    {file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"},
+    {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a98a1f0552b5f227a3d6422dbd61bc6f30db170939bd87ed14f3c339aa6c7c9"},
+    {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f1944ce16401aad1e3f7d312247b3d5de7981f634dc9dfe90da72b87d37887d"},
+    {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38e14fb4e370885c4ecd734f093a2225ee52dc384b86fa55fe3f74638b2cfb09"},
+    {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d74b184f9ab6289b87b19fe6a6d1a97fbfea84b8a3e745e87a5de3029bf944"},
+    {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d70129cef4a8d979caa37e7fe957202e7eee8ea02c5e16455bc9808a59c6b2f0"},
+    {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0bb20e3a11bd04461324a6a798af34d503f8d6f1aa3d2aa8901ceaf039176d"},
+    {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81c5196a790032e0fc2464c0b4ab95f8610f96f1f2fa3d4deacce6a79852da60"},
+    {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f3027be483868c99b4985fda802a57a67fdf30c5d9a50338d9db646d590198da"},
+    {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d44607f98caa2961bab4fa3c4309724b185b464cdc3ba6f3d7340bac3ec97cc1"},
+    {file = "rpds_py-0.18.1-cp310-none-win32.whl", hash = "sha256:c273e795e7a0f1fddd46e1e3cb8be15634c29ae8ff31c196debb620e1edb9333"},
+    
{file = "rpds_py-0.18.1-cp310-none-win_amd64.whl", hash = "sha256:8352f48d511de5f973e4f2f9412736d7dea76c69faa6d36bcf885b50c758ab9a"}, + {file = "rpds_py-0.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6b5ff7e1d63a8281654b5e2896d7f08799378e594f09cf3674e832ecaf396ce8"}, + {file = "rpds_py-0.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8927638a4d4137a289e41d0fd631551e89fa346d6dbcfc31ad627557d03ceb6d"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:154bf5c93d79558b44e5b50cc354aa0459e518e83677791e6adb0b039b7aa6a7"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07f2139741e5deb2c5154a7b9629bc5aa48c766b643c1a6750d16f865a82c5fc"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c7672e9fba7425f79019db9945b16e308ed8bc89348c23d955c8c0540da0a07"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:489bdfe1abd0406eba6b3bb4fdc87c7fa40f1031de073d0cfb744634cc8fa261"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c20f05e8e3d4fc76875fc9cb8cf24b90a63f5a1b4c5b9273f0e8225e169b100"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:967342e045564cef76dfcf1edb700b1e20838d83b1aa02ab313e6a497cf923b8"}, + {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cc7c1a47f3a63282ab0f422d90ddac4aa3034e39fc66a559ab93041e6505da7"}, + {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f7afbfee1157e0f9376c00bb232e80a60e59ed716e3211a80cb8506550671e6e"}, + {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e6934d70dc50f9f8ea47081ceafdec09245fd9f6032669c3b45705dea096b88"}, + {file = "rpds_py-0.18.1-cp311-none-win32.whl", hash = "sha256:c69882964516dc143083d3795cb508e806b09fc3800fd0d4cddc1df6c36e76bb"}, + {file = "rpds_py-0.18.1-cp311-none-win_amd64.whl", hash = "sha256:70a838f7754483bcdc830444952fd89645569e7452e3226de4a613a4c1793fb2"}, + {file = "rpds_py-0.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3dd3cd86e1db5aadd334e011eba4e29d37a104b403e8ca24dcd6703c68ca55b3"}, + {file = "rpds_py-0.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05f3d615099bd9b13ecf2fc9cf2d839ad3f20239c678f461c753e93755d629ee"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b2b771b13eee8729a5049c976197ff58a27a3829c018a04341bcf1ae409b2b"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee17cd26b97d537af8f33635ef38be873073d516fd425e80559f4585a7b90c43"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b646bf655b135ccf4522ed43d6902af37d3f5dbcf0da66c769a2b3938b9d8184"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19ba472b9606c36716062c023afa2484d1e4220548751bda14f725a7de17b4f6"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e30ac5e329098903262dc5bdd7e2086e0256aa762cc8b744f9e7bf2a427d3f8"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d58ad6317d188c43750cb76e9deacf6051d0f884d87dc6518e0280438648a9ac"}, + {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:e1735502458621921cee039c47318cb90b51d532c2766593be6207eec53e5c4c"}, + {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f5bab211605d91db0e2995a17b5c6ee5edec1270e46223e513eaa20da20076ac"}, + {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2fc24a329a717f9e2448f8cd1f960f9dac4e45b6224d60734edeb67499bab03a"}, + {file = "rpds_py-0.18.1-cp312-none-win32.whl", hash = "sha256:1805d5901779662d599d0e2e4159d8a82c0b05faa86ef9222bf974572286b2b6"}, + {file = "rpds_py-0.18.1-cp312-none-win_amd64.whl", hash = "sha256:720edcb916df872d80f80a1cc5ea9058300b97721efda8651efcd938a9c70a72"}, + {file = "rpds_py-0.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c827576e2fa017a081346dce87d532a5310241648eb3700af9a571a6e9fc7e74"}, + {file = "rpds_py-0.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa3679e751408d75a0b4d8d26d6647b6d9326f5e35c00a7ccd82b78ef64f65f8"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0abeee75434e2ee2d142d650d1e54ac1f8b01e6e6abdde8ffd6eeac6e9c38e20"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed402d6153c5d519a0faf1bb69898e97fb31613b49da27a84a13935ea9164dfc"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:338dee44b0cef8b70fd2ef54b4e09bb1b97fc6c3a58fea5db6cc083fd9fc2724"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7750569d9526199c5b97e5a9f8d96a13300950d910cf04a861d96f4273d5b104"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607345bd5912aacc0c5a63d45a1f73fef29e697884f7e861094e443187c02be5"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:207c82978115baa1fd8d706d720b4a4d2b0913df1c78c85ba73fe6c5804505f0"}, + {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6d1e42d2735d437e7e80bab4d78eb2e459af48c0a46e686ea35f690b93db792d"}, + {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5463c47c08630007dc0fe99fb480ea4f34a89712410592380425a9b4e1611d8e"}, + {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:06d218939e1bf2ca50e6b0ec700ffe755e5216a8230ab3e87c059ebb4ea06afc"}, + {file = "rpds_py-0.18.1-cp38-none-win32.whl", hash = "sha256:312fe69b4fe1ffbe76520a7676b1e5ac06ddf7826d764cc10265c3b53f96dbe9"}, + {file = "rpds_py-0.18.1-cp38-none-win_amd64.whl", hash = "sha256:9437ca26784120a279f3137ee080b0e717012c42921eb07861b412340f85bae2"}, + {file = "rpds_py-0.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:19e515b78c3fc1039dd7da0a33c28c3154458f947f4dc198d3c72db2b6b5dc93"}, + {file = "rpds_py-0.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7b28c5b066bca9a4eb4e2f2663012debe680f097979d880657f00e1c30875a0"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:673fdbbf668dd958eff750e500495ef3f611e2ecc209464f661bc82e9838991e"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d960de62227635d2e61068f42a6cb6aae91a7fe00fca0e3aeed17667c8a34611"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352a88dc7892f1da66b6027af06a2e7e5d53fe05924cc2cfc56495b586a10b72"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:4e0ee01ad8260184db21468a6e1c37afa0529acc12c3a697ee498d3c2c4dcaf3"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c39ad2f512b4041343ea3c7894339e4ca7839ac38ca83d68a832fc8b3748ab"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aaa71ee43a703c321906813bb252f69524f02aa05bf4eec85f0c41d5d62d0f4c"}, + {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6cd8098517c64a85e790657e7b1e509b9fe07487fd358e19431cb120f7d96338"}, + {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4adec039b8e2928983f885c53b7cc4cda8965b62b6596501a0308d2703f8af1b"}, + {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32b7daaa3e9389db3695964ce8e566e3413b0c43e3394c05e4b243a4cd7bef26"}, + {file = "rpds_py-0.18.1-cp39-none-win32.whl", hash = "sha256:2625f03b105328729f9450c8badda34d5243231eef6535f80064d57035738360"}, + {file = "rpds_py-0.18.1-cp39-none-win_amd64.whl", hash = "sha256:bf18932d0003c8c4d51a39f244231986ab23ee057d235a12b2684ea26a353590"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cbfbea39ba64f5e53ae2915de36f130588bba71245b418060ec3330ebf85678e"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3d456ff2a6a4d2adcdf3c1c960a36f4fd2fec6e3b4902a42a384d17cf4e7a65"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7700936ef9d006b7ef605dc53aa364da2de5a3aa65516a1f3ce73bf82ecfc7ae"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51584acc5916212e1bf45edd17f3a6b05fe0cbb40482d25e619f824dccb679de"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:942695a206a58d2575033ff1e42b12b2aece98d6003c6bc739fbf33d1773b12f"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b906b5f58892813e5ba5c6056d6a5ad08f358ba49f046d910ad992196ea61397"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f8e3fecca256fefc91bb6765a693d96692459d7d4c644660a9fff32e517843"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7732770412bab81c5a9f6d20aeb60ae943a9b36dcd990d876a773526468e7163"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bd1105b50ede37461c1d51b9698c4f4be6e13e69a908ab7751e3807985fc0346"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:618916f5535784960f3ecf8111581f4ad31d347c3de66d02e728de460a46303c"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:17c6d2155e2423f7e79e3bb18151c686d40db42d8645e7977442170c360194d4"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c4c4c3f878df21faf5fac86eda32671c27889e13570645a9eea0a1abdd50922"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:fab6ce90574645a0d6c58890e9bcaac8d94dff54fb51c69e5522a7358b80ab64"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531796fb842b53f2695e94dc338929e9f9dbf473b64710c28af5a160b2a8927d"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:740884bc62a5e2bbb31e584f5d23b32320fd75d79f916f15a788d527a5e83644"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:998125738de0158f088aef3cb264a34251908dd2e5d9966774fdab7402edfab7"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2be6e9dd4111d5b31ba3b74d17da54a8319d8168890fbaea4b9e5c3de630ae5"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cee71bc618cd93716f3c1bf56653740d2d13ddbd47673efa8bf41435a60daa"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c3caec4ec5cd1d18e5dd6ae5194d24ed12785212a90b37f5f7f06b8bedd7139"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:27bba383e8c5231cd559affe169ca0b96ec78d39909ffd817f28b166d7ddd4d8"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:a888e8bdb45916234b99da2d859566f1e8a1d2275a801bb8e4a9644e3c7e7909"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6031b25fb1b06327b43d841f33842b383beba399884f8228a6bb3df3088485ff"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48c2faaa8adfacefcbfdb5f2e2e7bdad081e5ace8d182e5f4ade971f128e6bb3"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d85164315bd68c0806768dc6bb0429c6f95c354f87485ee3593c4f6b14def2bd"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afd80f6c79893cfc0574956f78a0add8c76e3696f2d6a15bca2c66c415cf2d4"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa242ac1ff583e4ec7771141606aafc92b361cd90a05c30d93e343a0c2d82a89"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21be4770ff4e08698e1e8e0bce06edb6ea0626e7c8f560bc08222880aca6a6f"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c45a639e93a0c5d4b788b2613bd637468edd62f8f95ebc6fcc303d58ab3f0a8"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910e71711d1055b2768181efa0a17537b2622afeb0424116619817007f8a2b10"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9bb1f182a97880f6078283b3505a707057c42bf55d8fca604f70dedfdc0772a"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d54f74f40b1f7aaa595a02ff42ef38ca654b1469bef7d52867da474243cc633"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:8d2e182c9ee01135e11e9676e9a62dfad791a7a467738f06726872374a83db49"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:636a15acc588f70fda1661234761f9ed9ad79ebed3f2125d44be0862708b666e"}, + {file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"}, ] -s3fs = [ + +[[package]] +name = "s3fs" +version = "2023.1.0" +description = "Convenient Filesystem interface over S3" +optional = true +python-versions = ">= 3.7" +files = [ {file = "s3fs-2023.1.0-py3-none-any.whl", hash = "sha256:a549ae518fff4388bd6fca5248575c29f521e7e39efcd2bfab476651701fd114"}, {file = "s3fs-2023.1.0.tar.gz", hash = 
"sha256:8b2e28372423e93f26312208a9272e22a962ddd0a79d63f9a68693b6af5ff187"}, ] -s3transfer = [ + +[package.dependencies] +aiobotocore = ">=2.4.2,<2.5.0" +aiohttp = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1" +fsspec = "2023.1.0" + +[package.extras] +awscli = ["aiobotocore[awscli] (>=2.4.2,<2.5.0)"] +boto3 = ["aiobotocore[boto3] (>=2.4.2,<2.5.0)"] + +[[package]] +name = "s3transfer" +version = "0.6.1" +description = "An Amazon S3 Transfer Manager" +optional = true +python-versions = ">= 3.7" +files = [ {file = "s3transfer-0.6.1-py3-none-any.whl", hash = "sha256:3c0da2d074bf35d6870ef157158641178a4204a6e689e82546083e31e0311346"}, {file = "s3transfer-0.6.1.tar.gz", hash = "sha256:640bb492711f4c0c0905e1f62b6aaeb771881935ad27884852411f8e9cacbca9"}, ] -scramp = [ + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] + +[[package]] +name = "scramp" +version = "1.4.4" +description = "An implementation of the SCRAM protocol." +optional = true +python-versions = ">=3.7" +files = [ {file = "scramp-1.4.4-py3-none-any.whl", hash = "sha256:b142312df7c2977241d951318b7ee923d6b7a4f75ba0f05b621ece1ed616faa3"}, {file = "scramp-1.4.4.tar.gz", hash = "sha256:b7022a140040f33cf863ab2657917ed05287a807b917950489b89b9f685d59bc"}, ] -setuptools = [ + +[package.dependencies] +asn1crypto = ">=1.5.1" + +[[package]] +name = "setuptools" +version = "67.3.2" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = true +python-versions = ">=3.7" +files = [ {file = "setuptools-67.3.2-py3-none-any.whl", hash = "sha256:bb6d8e508de562768f2027902929f8523932fcd1fb784e6d573d2cafac995a48"}, {file = "setuptools-67.3.2.tar.gz", hash = "sha256:95f00380ef2ffa41d9bba85d95b27689d923c93dfbafed4aecd7cf988a25e012"}, ] -setuptools-scm = [ - {file = "setuptools_scm-6.4.2-py3-none-any.whl", hash = "sha256:acea13255093849de7ccb11af9e1fb8bde7067783450cee9ef7a93139bddf6d4"}, - {file = "setuptools_scm-6.4.2.tar.gz", hash = "sha256:6833ac65c6ed9711a4d5d2266f8024cfa07c533a0e55f4c12f6eff280a5a9e30"}, -] -six = [ + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] -sniffio = [ + +[[package]] +name = "sniffio" 
+version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, ] -snowflake-connector-python = [ - {file = "snowflake-connector-python-3.0.3.tar.gz", hash = "sha256:5da1f24edfff63e8b5f27720117c058714b6dc8d26c1c4de4d0d0c55188db966"}, - {file = "snowflake_connector_python-3.0.3-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:feefde16658b8e5a86536bd7a92b26414664b1fada11c9e4f09ba348b6cc9d14"}, - {file = "snowflake_connector_python-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b390deb323a6300c881740a82a6cea6dd8b6a4d3eba4735b429c3d1df7460cb"}, - {file = "snowflake_connector_python-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:400b0ca919b810df9f0a60d82f77643562625c9fc784e2e539e6af5c999ed2b5"}, - {file = "snowflake_connector_python-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49ec09762e1d418fdeffa1c607237538b09dcd99fec148ab999549055a798438"}, - {file = "snowflake_connector_python-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:a18dc9661abe4ea513c0b3375dff0995726b2ff89e1a6ac5994e8ac8baa11e6a"}, - {file = "snowflake_connector_python-3.0.3-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:1507d6f8c774e2a0a4554f2c99d43316a2eef73943e271638d8cdad0b38e805a"}, - {file = "snowflake_connector_python-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bace1c760f5129b6b0636f0cf1630db0e5f27cc75405768d33d1c2d5ff772a80"}, - {file = "snowflake_connector_python-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c627b81c8602f3a1280bb0cc8ee6c70dab8cbe629336e793fc3bf761e193a4b"}, - {file = "snowflake_connector_python-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee523963fd77aeb7223c9d09df19923b517970c5432279bbe564e5c815ec8613"}, - {file = "snowflake_connector_python-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:4b13d704c2752c13955875b4132ef63499f1b4e13f65357ee642e4dbf17bb718"}, - {file = "snowflake_connector_python-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:090c28319c975c7af13e6932133475401064a118a1c09fc1fb2bd8a9e10e0f56"}, - {file = "snowflake_connector_python-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3556cc1a838b0a6fd5abaf8a9550cea7f78c0203b6197202cd0b94920836c64"}, - {file = "snowflake_connector_python-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:588981518c385bc755fefb5c804b66c68d929e54e366820c11a4c80a1b50dc08"}, - {file = "snowflake_connector_python-3.0.3-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:50828eeae7e7cb6ce32c6aecdd29bb6cb89ca3e0990f0d4fc97d28b06d290730"}, - {file = "snowflake_connector_python-3.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:39f5751f181b6fb8e2cfd48cbe7ac780d55c436a7d91f2ab2240a02362f81090"}, - {file = "snowflake_connector_python-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6b99a63221c8f662cb7b257a607d624d6d7b3393186eba0ce34b1a4303526d7"}, - {file = "snowflake_connector_python-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859571c633b74b46a7d40ac2f24005a874621bc229916cc2229042e59b34b292"}, - {file = 
"snowflake_connector_python-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:a8eb91f649410884c6f34a8a2514ef6a688f69adf0e287ecf4133477220f42c3"}, - {file = "snowflake_connector_python-3.0.3-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:2409ccebc9a02fb3c38502d937fc6a63d8f3f3f49a79de60a96977463e637259"}, - {file = "snowflake_connector_python-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dbab583afb1958d765d01c1f453d9358ecae5c119c8dbf3777dbfb86e2b3dd4c"}, - {file = "snowflake_connector_python-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1236e8ece702b8a0e739c08a6c2bfbbdc4845df48f75ee6e330d6e75b0eb050"}, - {file = "snowflake_connector_python-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1c4746a5c2d7fc6c5f3a30e06c697137d4a4e878d65d28c748ee3a0db52ef8a"}, - {file = "snowflake_connector_python-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:51ef9e7deadeb0058d32bb4c8ec9e5b063678c62a0b8cb9d3057ec6b4dad80d7"}, -] -soupsieve = [ + +[[package]] +name = "soupsieve" +version = "2.4.1" +description = "A modern CSS selector implementation for Beautiful Soup." +optional = true +python-versions = ">=3.7" +files = [ {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, ] -sqlalchemy = [ - {file = "SQLAlchemy-1.4.46-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:7001f16a9a8e06488c3c7154827c48455d1c1507d7228d43e781afbc8ceccf6d"}, - {file = "SQLAlchemy-1.4.46-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c7a46639ba058d320c9f53a81db38119a74b8a7a1884df44d09fbe807d028aaf"}, - {file = "SQLAlchemy-1.4.46-cp27-cp27m-win32.whl", hash = "sha256:c04144a24103135ea0315d459431ac196fe96f55d3213bfd6d39d0247775c854"}, - {file = "SQLAlchemy-1.4.46-cp27-cp27m-win_amd64.whl", hash = "sha256:7b81b1030c42b003fc10ddd17825571603117f848814a344d305262d370e7c34"}, - {file = "SQLAlchemy-1.4.46-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:939f9a018d2ad04036746e15d119c0428b1e557470361aa798e6e7d7f5875be0"}, - {file = "SQLAlchemy-1.4.46-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b7f4b6aa6e87991ec7ce0e769689a977776db6704947e562102431474799a857"}, - {file = "SQLAlchemy-1.4.46-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dbf17ac9a61e7a3f1c7ca47237aac93cabd7f08ad92ac5b96d6f8dea4287fc1"}, - {file = "SQLAlchemy-1.4.46-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7f8267682eb41a0584cf66d8a697fef64b53281d01c93a503e1344197f2e01fe"}, - {file = "SQLAlchemy-1.4.46-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cb0ad8a190bc22d2112001cfecdec45baffdf41871de777239da6a28ed74b6"}, - {file = "SQLAlchemy-1.4.46-cp310-cp310-win32.whl", hash = "sha256:5f752676fc126edc1c4af0ec2e4d2adca48ddfae5de46bb40adbd3f903eb2120"}, - {file = "SQLAlchemy-1.4.46-cp310-cp310-win_amd64.whl", hash = "sha256:31de1e2c45e67a5ec1ecca6ec26aefc299dd5151e355eb5199cd9516b57340be"}, - {file = "SQLAlchemy-1.4.46-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d68e1762997bfebf9e5cf2a9fd0bcf9ca2fdd8136ce7b24bbd3bbfa4328f3e4a"}, - {file = "SQLAlchemy-1.4.46-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4d112b0f3c1bc5ff70554a97344625ef621c1bfe02a73c5d97cac91f8cd7a41e"}, - {file = "SQLAlchemy-1.4.46-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69fac0a7054d86b997af12dc23f581cf0b25fb1c7d1fed43257dee3af32d3d6d"}, - {file = "SQLAlchemy-1.4.46-cp311-cp311-win32.whl", hash = "sha256:887865924c3d6e9a473dc82b70977395301533b3030d0f020c38fd9eba5419f2"}, - {file = "SQLAlchemy-1.4.46-cp311-cp311-win_amd64.whl", hash = "sha256:984ee13543a346324319a1fb72b698e521506f6f22dc37d7752a329e9cd00a32"}, - {file = "SQLAlchemy-1.4.46-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:9167d4227b56591a4cc5524f1b79ccd7ea994f36e4c648ab42ca995d28ebbb96"}, - {file = "SQLAlchemy-1.4.46-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d61e9ecc849d8d44d7f80894ecff4abe347136e9d926560b818f6243409f3c86"}, - {file = "SQLAlchemy-1.4.46-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3ec187acf85984263299a3f15c34a6c0671f83565d86d10f43ace49881a82718"}, - {file = "SQLAlchemy-1.4.46-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9883f5fae4fd8e3f875adc2add69f8b945625811689a6c65866a35ee9c0aea23"}, - {file = "SQLAlchemy-1.4.46-cp36-cp36m-win32.whl", hash = "sha256:535377e9b10aff5a045e3d9ada8a62d02058b422c0504ebdcf07930599890eb0"}, - {file = "SQLAlchemy-1.4.46-cp36-cp36m-win_amd64.whl", hash = "sha256:18cafdb27834fa03569d29f571df7115812a0e59fd6a3a03ccb0d33678ec8420"}, - {file = "SQLAlchemy-1.4.46-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:a1ad90c97029cc3ab4ffd57443a20fac21d2ec3c89532b084b073b3feb5abff3"}, - {file = "SQLAlchemy-1.4.46-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4847f4b1d822754e35707db913396a29d874ee77b9c3c3ef3f04d5a9a6209618"}, - {file = "SQLAlchemy-1.4.46-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c5a99282848b6cae0056b85da17392a26b2d39178394fc25700bcf967e06e97a"}, - {file = "SQLAlchemy-1.4.46-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4b1cc7835b39835c75cf7c20c926b42e97d074147c902a9ebb7cf2c840dc4e2"}, - {file = "SQLAlchemy-1.4.46-cp37-cp37m-win32.whl", hash = "sha256:c522e496f9b9b70296a7675272ec21937ccfc15da664b74b9f58d98a641ce1b6"}, - {file = "SQLAlchemy-1.4.46-cp37-cp37m-win_amd64.whl", hash = "sha256:ae067ab639fa499f67ded52f5bc8e084f045d10b5ac7bb928ae4ca2b6c0429a5"}, - {file = "SQLAlchemy-1.4.46-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:e3c1808008124850115a3f7e793a975cfa5c8a26ceeeb9ff9cbb4485cac556df"}, - {file = "SQLAlchemy-1.4.46-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d164df3d83d204c69f840da30b292ac7dc54285096c6171245b8d7807185aa"}, - {file = "SQLAlchemy-1.4.46-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b33ffbdbbf5446cf36cd4cc530c9d9905d3c2fe56ed09e25c22c850cdb9fac92"}, - {file = "SQLAlchemy-1.4.46-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d94682732d1a0def5672471ba42a29ff5e21bb0aae0afa00bb10796fc1e28dd"}, - {file = "SQLAlchemy-1.4.46-cp38-cp38-win32.whl", hash = "sha256:f8cb80fe8d14307e4124f6fad64dfd87ab749c9d275f82b8b4ec84c84ecebdbe"}, - {file = "SQLAlchemy-1.4.46-cp38-cp38-win_amd64.whl", hash = 
"sha256:07e48cbcdda6b8bc7a59d6728bd3f5f574ffe03f2c9fb384239f3789c2d95c2e"}, - {file = "SQLAlchemy-1.4.46-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:1b1e5e96e2789d89f023d080bee432e2fef64d95857969e70d3cadec80bd26f0"}, - {file = "SQLAlchemy-1.4.46-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3714e5b33226131ac0da60d18995a102a17dddd42368b7bdd206737297823ad"}, - {file = "SQLAlchemy-1.4.46-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:955162ad1a931fe416eded6bb144ba891ccbf9b2e49dc7ded39274dd9c5affc5"}, - {file = "SQLAlchemy-1.4.46-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6e4cb5c63f705c9d546a054c60d326cbde7421421e2d2565ce3e2eee4e1a01f"}, - {file = "SQLAlchemy-1.4.46-cp39-cp39-win32.whl", hash = "sha256:51e1ba2884c6a2b8e19109dc08c71c49530006c1084156ecadfaadf5f9b8b053"}, - {file = "SQLAlchemy-1.4.46-cp39-cp39-win_amd64.whl", hash = "sha256:315676344e3558f1f80d02535f410e80ea4e8fddba31ec78fe390eff5fb8f466"}, - {file = "SQLAlchemy-1.4.46.tar.gz", hash = "sha256:6913b8247d8a292ef8315162a51931e2b40ce91681f1b6f18f697045200c4a30"}, -] -sqlalchemy-redshift = [ + +[[package]] +name = "sqlalchemy" +version = "1.4.52" +description = "Database Abstraction Library" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "SQLAlchemy-1.4.52-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:f68016f9a5713684c1507cc37133c28035f29925c75c0df2f9d0f7571e23720a"}, + {file = "SQLAlchemy-1.4.52-cp310-cp310-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24bb0f81fbbb13d737b7f76d1821ec0b117ce8cbb8ee5e8641ad2de41aa916d3"}, + {file = "SQLAlchemy-1.4.52-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e93983cc0d2edae253b3f2141b0a3fb07e41c76cd79c2ad743fc27eb79c3f6db"}, + {file = "SQLAlchemy-1.4.52-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:84e10772cfc333eb08d0b7ef808cd76e4a9a30a725fb62a0495877a57ee41d81"}, + {file = "SQLAlchemy-1.4.52-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:427988398d2902de042093d17f2b9619a5ebc605bf6372f7d70e29bde6736842"}, + {file = "SQLAlchemy-1.4.52-cp310-cp310-win32.whl", hash = "sha256:1296f2cdd6db09b98ceb3c93025f0da4835303b8ac46c15c2136e27ee4d18d94"}, + {file = "SQLAlchemy-1.4.52-cp310-cp310-win_amd64.whl", hash = "sha256:80e7f697bccc56ac6eac9e2df5c98b47de57e7006d2e46e1a3c17c546254f6ef"}, + {file = "SQLAlchemy-1.4.52-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2f251af4c75a675ea42766880ff430ac33291c8d0057acca79710f9e5a77383d"}, + {file = "SQLAlchemy-1.4.52-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8f9e4c4718f111d7b530c4e6fb4d28f9f110eb82e7961412955b3875b66de0"}, + {file = "SQLAlchemy-1.4.52-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afb1672b57f58c0318ad2cff80b384e816735ffc7e848d8aa51e0b0fc2f4b7bb"}, + {file = "SQLAlchemy-1.4.52-cp311-cp311-win32.whl", hash = "sha256:6e41cb5cda641f3754568d2ed8962f772a7f2b59403b95c60c89f3e0bd25f15e"}, + {file = "SQLAlchemy-1.4.52-cp311-cp311-win_amd64.whl", hash = "sha256:5bed4f8c3b69779de9d99eb03fd9ab67a850d74ab0243d1be9d4080e77b6af12"}, + {file = 
"SQLAlchemy-1.4.52-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:49e3772eb3380ac88d35495843daf3c03f094b713e66c7d017e322144a5c6b7c"}, + {file = "SQLAlchemy-1.4.52-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:618827c1a1c243d2540314c6e100aee7af09a709bd005bae971686fab6723554"}, + {file = "SQLAlchemy-1.4.52-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de9acf369aaadb71a725b7e83a5ef40ca3de1cf4cdc93fa847df6b12d3cd924b"}, + {file = "SQLAlchemy-1.4.52-cp312-cp312-win32.whl", hash = "sha256:763bd97c4ebc74136ecf3526b34808c58945023a59927b416acebcd68d1fc126"}, + {file = "SQLAlchemy-1.4.52-cp312-cp312-win_amd64.whl", hash = "sha256:f12aaf94f4d9679ca475975578739e12cc5b461172e04d66f7a3c39dd14ffc64"}, + {file = "SQLAlchemy-1.4.52-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:853fcfd1f54224ea7aabcf34b227d2b64a08cbac116ecf376907968b29b8e763"}, + {file = "SQLAlchemy-1.4.52-cp36-cp36m-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f98dbb8fcc6d1c03ae8ec735d3c62110949a3b8bc6e215053aa27096857afb45"}, + {file = "SQLAlchemy-1.4.52-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e135fff2e84103bc15c07edd8569612ce317d64bdb391f49ce57124a73f45c5"}, + {file = "SQLAlchemy-1.4.52-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5b5de6af8852500d01398f5047d62ca3431d1e29a331d0b56c3e14cb03f8094c"}, + {file = "SQLAlchemy-1.4.52-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3491c85df263a5c2157c594f54a1a9c72265b75d3777e61ee13c556d9e43ffc9"}, + {file = "SQLAlchemy-1.4.52-cp36-cp36m-win32.whl", hash = "sha256:427c282dd0deba1f07bcbf499cbcc9fe9a626743f5d4989bfdfd3ed3513003dd"}, + {file = "SQLAlchemy-1.4.52-cp36-cp36m-win_amd64.whl", hash = "sha256:ca5ce82b11731492204cff8845c5e8ca1a4bd1ade85e3b8fcf86e7601bfc6a39"}, + {file = "SQLAlchemy-1.4.52-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:29d4247313abb2015f8979137fe65f4eaceead5247d39603cc4b4a610936cd2b"}, + {file = "SQLAlchemy-1.4.52-cp37-cp37m-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a752bff4796bf22803d052d4841ebc3c55c26fb65551f2c96e90ac7c62be763a"}, + {file = "SQLAlchemy-1.4.52-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7ea11727feb2861deaa293c7971a4df57ef1c90e42cb53f0da40c3468388000"}, + {file = "SQLAlchemy-1.4.52-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d913f8953e098ca931ad7f58797f91deed26b435ec3756478b75c608aa80d139"}, + {file = "SQLAlchemy-1.4.52-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a251146b921725547ea1735b060a11e1be705017b568c9f8067ca61e6ef85f20"}, + {file = "SQLAlchemy-1.4.52-cp37-cp37m-win32.whl", hash = "sha256:1f8e1c6a6b7f8e9407ad9afc0ea41c1f65225ce505b79bc0342159de9c890782"}, + {file = "SQLAlchemy-1.4.52-cp37-cp37m-win_amd64.whl", hash = "sha256:346ed50cb2c30f5d7a03d888e25744154ceac6f0e6e1ab3bc7b5b77138d37710"}, + {file = "SQLAlchemy-1.4.52-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:4dae6001457d4497736e3bc422165f107ecdd70b0d651fab7f731276e8b9e12d"}, + {file = 
"SQLAlchemy-1.4.52-cp38-cp38-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5d2e08d79f5bf250afb4a61426b41026e448da446b55e4770c2afdc1e200fce"}, + {file = "SQLAlchemy-1.4.52-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bbce5dd7c7735e01d24f5a60177f3e589078f83c8a29e124a6521b76d825b85"}, + {file = "SQLAlchemy-1.4.52-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bdb7b4d889631a3b2a81a3347c4c3f031812eb4adeaa3ee4e6b0d028ad1852b5"}, + {file = "SQLAlchemy-1.4.52-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c294ae4e6bbd060dd79e2bd5bba8b6274d08ffd65b58d106394cb6abbf35cf45"}, + {file = "SQLAlchemy-1.4.52-cp38-cp38-win32.whl", hash = "sha256:bcdfb4b47fe04967669874fb1ce782a006756fdbebe7263f6a000e1db969120e"}, + {file = "SQLAlchemy-1.4.52-cp38-cp38-win_amd64.whl", hash = "sha256:7d0dbc56cb6af5088f3658982d3d8c1d6a82691f31f7b0da682c7b98fa914e91"}, + {file = "SQLAlchemy-1.4.52-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:a551d5f3dc63f096ed41775ceec72fdf91462bb95abdc179010dc95a93957800"}, + {file = "SQLAlchemy-1.4.52-cp39-cp39-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ab773f9ad848118df7a9bbabca53e3f1002387cdbb6ee81693db808b82aaab0"}, + {file = "SQLAlchemy-1.4.52-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2de46f5d5396d5331127cfa71f837cca945f9a2b04f7cb5a01949cf676db7d1"}, + {file = "SQLAlchemy-1.4.52-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7027be7930a90d18a386b25ee8af30514c61f3852c7268899f23fdfbd3107181"}, + {file = "SQLAlchemy-1.4.52-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99224d621affbb3c1a4f72b631f8393045f4ce647dd3262f12fe3576918f8bf3"}, + {file = "SQLAlchemy-1.4.52-cp39-cp39-win32.whl", hash = "sha256:c124912fd4e1bb9d1e7dc193ed482a9f812769cb1e69363ab68e01801e859821"}, + {file = "SQLAlchemy-1.4.52-cp39-cp39-win_amd64.whl", hash = "sha256:2c286fab42e49db23c46ab02479f328b8bdb837d3e281cae546cc4085c83b680"}, + {file = "SQLAlchemy-1.4.52.tar.gz", hash = "sha256:80e63bbdc5217dad3485059bdf6f65a7d43f33c8bde619df5c220edf03d87296"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\")"} + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] +mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=7)", "cx_oracle (>=7,<8)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 
(>=1.16.6,!=1.29.0)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +pymysql = ["pymysql", "pymysql (<1)"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "sqlalchemy-redshift" +version = "0.8.14" +description = "Amazon Redshift Dialect for sqlalchemy" +optional = true +python-versions = ">=3.4" +files = [ {file = "sqlalchemy-redshift-0.8.14.tar.gz", hash = "sha256:1f1f78d8ef7febaf0553d64fe247fc08acbfec84d8e1d2dc8264c656cc623622"}, {file = "sqlalchemy_redshift-0.8.14-py2.py3-none-any.whl", hash = "sha256:cfdfae2c8fb3043c181e2baedd4ee425c1ca7efed20f3c5ae274838d1015f919"}, ] -sqlparse = [ + +[package.dependencies] +packaging = "*" +SQLAlchemy = ">=0.9.2,<2.0.0" + +[[package]] +name = "sqlparse" +version = "0.4.3" +description = "A non-validating SQL parser." +optional = false +python-versions = ">=3.5" +files = [ {file = "sqlparse-0.4.3-py3-none-any.whl", hash = "sha256:0323c0ec29cd52bceabc1b4d9d579e311f3e4961b98d174201d5622a23b85e34"}, {file = "sqlparse-0.4.3.tar.gz", hash = "sha256:69ca804846bb114d2ec380e4360a8a340db83f0ccf3afceeb1404df028f57268"}, ] -structlog = [ + +[[package]] +name = "structlog" +version = "22.3.0" +description = "Structured Logging for Python" +optional = false +python-versions = ">=3.7" +files = [ {file = "structlog-22.3.0-py3-none-any.whl", hash = "sha256:b403f344f902b220648fa9f286a23c0cc5439a5844d271fec40562dbadbc70ad"}, {file = "structlog-22.3.0.tar.gz", hash = "sha256:e7509391f215e4afb88b1b80fa3ea074be57a5a17d794bd436a5c949da023333"}, ] -tblib = [ + +[package.extras] +dev = ["structlog[docs,tests,typing]"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "twisted"] +tests = ["coverage[toml]", "freezegun (>=0.2.8)", "pretend", "pytest (>=6.0)", "pytest-asyncio (>=0.17)", "simplejson"] +typing = ["mypy", "rich", "twisted"] + +[[package]] +name = "tblib" +version = "1.7.0" +description = "Traceback serialization library." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ {file = "tblib-1.7.0-py2.py3-none-any.whl", hash = "sha256:289fa7359e580950e7d9743eab36b0691f0310fce64dee7d9c31065b8f723e23"}, {file = "tblib-1.7.0.tar.gz", hash = "sha256:059bd77306ea7b419d4f76016aef6d7027cc8a0785579b5aad198803435f882c"}, ] -text-unidecode = [ + +[[package]] +name = "text-unidecode" +version = "1.3" +description = "The most basic Text::Unidecode port" +optional = false +python-versions = "*" +files = [ {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, ] -tomli = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] -trino = [ - {file = "trino-0.321.0-py3-none-any.whl", hash = "sha256:d713e0d37015860d428bf6601dc430b5cd80c574081ab28d634e38250a050fb4"}, - {file = "trino-0.321.0.tar.gz", hash = "sha256:6642a57dff253f8c13787e5ec832de43304ab6f1d8109bc51a17b1150a529dfa"}, -] -types-python-dateutil = [ + +[[package]] +name = "types-python-dateutil" +version = "2.8.19.14" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = "*" +files = [ {file = "types-python-dateutil-2.8.19.14.tar.gz", hash = "sha256:1f4f10ac98bb8b16ade9dbee3518d9ace017821d94b057a425b069f834737f4b"}, {file = "types_python_dateutil-2.8.19.14-py3-none-any.whl", hash = "sha256:f977b8de27787639986b4e28963263fd0e5158942b3ecef91b9335c130cb1ce9"}, ] -typing-extensions = [ + +[[package]] +name = "typing-extensions" +version = "4.7.1" +description = "Backported and Experimental Type Hints for Python 3.7+" +optional = false +python-versions = ">=3.7" +files = [ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, ] -tzdata = [ - {file = "tzdata-2022.7-py2.py3-none-any.whl", hash = "sha256:2b88858b0e3120792a3c0635c23daf36a7d7eeeca657c323da299d2094402a0d"}, - {file = "tzdata-2022.7.tar.gz", hash = "sha256:fe5f866eddd8b96e9fcba978f8e503c909b19ea7efda11e52e39494bad3a7bfa"}, -] -tzlocal = [ - {file = "tzlocal-4.2-py3-none-any.whl", hash = "sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745"}, - {file = "tzlocal-4.2.tar.gz", hash = "sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7"}, -] -urllib3 = [ + +[[package]] +name = "urllib3" +version = "1.26.14" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ {file = "urllib3-1.26.14-py2.py3-none-any.whl", hash = "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"}, {file = "urllib3-1.26.14.tar.gz", hash = "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72"}, ] -virtualenv = [ - {file = "virtualenv-20.19.0-py3-none-any.whl", hash = "sha256:54eb59e7352b573aa04d53f80fc9736ed0ad5143af445a1e539aada6eb947dd1"}, - {file = "virtualenv-20.19.0.tar.gz", hash = "sha256:37a640ba82ed40b226599c522d411e4be5edb339a0c0de030c0dc7b646d61590"}, -] -wcwidth = [ - {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"}, - {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"}, -] -werkzeug = [ - {file = "Werkzeug-2.2.3-py3-none-any.whl", hash = "sha256:56433961bc1f12533306c624f3be5e744389ac61d722175d543e1751285da612"}, - {file = "Werkzeug-2.2.3.tar.gz", hash = "sha256:2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe"}, + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "virtualenv" +version = "20.21.1" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.21.1-py3-none-any.whl", hash = "sha256:09ddbe1af0c8ed2bb4d6ed226b9e6415718ad18aef9fa0ba023d96b7a8356049"}, + {file = "virtualenv-20.21.1.tar.gz", hash = "sha256:4c104ccde994f8b108163cf9ba58f3d11511d9403de87fb9b4f52bf33dbc8668"}, ] -wrapt = [ + +[package.dependencies] +distlib = ">=0.3.6,<1" +filelock = ">=3.4.1,<4" +platformdirs = ">=2.4,<4" + +[package.extras] +docs = ["furo (>=2023.3.27)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.3)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.3.1)", "pytest-env (>=0.8.1)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.10)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)"] + +[[package]] +name = "wrapt" +version = "1.14.1" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, @@ -4045,7 +3138,14 @@ wrapt = [ {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, ] -yarl = [ + +[[package]] +name = "yarl" +version = "1.8.2" +description = "Yet another URL library" +optional = true +python-versions = ">=3.7" +files = [ {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bb81f753c815f6b8e2ddd2eef3c855cf7da193b82396ac013c661aaa6cc6b0a5"}, {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:47d49ac96156f0928f002e2424299b2c91d9db73e08c4cd6742923a086f1c863"}, {file = "yarl-1.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3fc056e35fa6fba63248d93ff6e672c096f95f7836938241ebc8260e062832fe"}, @@ -4121,7 +3221,32 @@ yarl = [ {file = "yarl-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:6604711362f2dbf7160df21c416f81fac0de6dbcf0b5445a2ef25478ecc4c778"}, {file = "yarl-1.8.2.tar.gz", hash = "sha256:49d43402c6e3013ad0978602bf6bf5328535c48d192304b91b97a3c6790b1562"}, ] -zipp = [ + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[[package]] +name = "zipp" +version = "3.13.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.7" +files = [ {file = "zipp-3.13.0-py3-none-any.whl", hash = "sha256:e8b2a36ea17df80ffe9e2c4fda3f693c3dad6df1697d3cd3af232db680950b0b"}, {file = "zipp-3.13.0.tar.gz", hash = "sha256:23f70e964bc11a34cef175bc90ba2914e1e4545ea1e3e2f67c079671883f9cb6"}, ] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[extras] +postgres = [] +redshift = ["awswrangler", "sqlalchemy-redshift"] +teleport = ["s3fs"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.8" +content-hash = "0746008591d98efd2c7a39cf76ca90917588078e593f431d479c37a1c6560639" diff --git a/projects/adapter/pyproject.toml b/projects/adapter/pyproject.toml index 5cc789a3..91356594 100644 --- a/projects/adapter/pyproject.toml +++ b/projects/adapter/pyproject.toml @@ -1,8 +1,6 @@ [tool.poetry] name = "dbt-postgres-python" -version = "1.5.9a1" -# name = "fal" -# version = "0.9.4a0" +version = "1.6.16a0" description = "Run python scripts from any dbt project. This project is based on the project https://github.com/fal-ai/fal initially authored by FAL.AI." 
 readme = "README.md"
 homepage = "https://github.com/kudryk/dbt-postgres-python"
@@ -10,9 +8,6 @@ repository = "https://github.com/kudryk/dbt-postgres-python"
 authors = [ "Features & Labels ", "Mark Kudryk " ]
 packages = [
     { include = "dbt", from = "src" },
-    # cli package
-    { include = "fal", from = "src" },
-    { include = "_fal_testing", from = "tests" },
 ]
 keywords = [ "dbt", "pandas", "fal", "runtime" ]
@@ -22,78 +17,46 @@ classifiers = [
 [tool.poetry.dependencies]
 python = "^3.8"
-dbt-core = ">=1.5,<=1.5.9"
-pandas = "^1.3.4"
-posthog = "^1.4.5"
-"backports.functools_lru_cache" = "^1.6.4"
-
-# cli
-PyYAML = "^6.0"
-agate-sql = "^0.5.8"
-astor = "^0.8.1"
-deprecation = "^2.1.0"
+dbt-core = ">=1.6,<=1.6.16"
+pandas = "^1.5.3"
+posthog = "^1.4.9"
+"backports.functools_lru_cache" = "^1.6.6"
 
 ## Environment management related dependencies
-## TODO use fal-serverless instead
-platformdirs = "^2.5.2"
-virtualenv = "^20.16.2"
+platformdirs = "^2.6.2"
+virtualenv = "^20.21.1"
 dill = "0.3.7"
 
 # dbt-fal
-sqlalchemy = "^1.4.41"
+sqlalchemy = "^1.4.52"
 
 # Adapters
-## snowflake
-snowflake-connector-python = { version = "~=3.0", extras = ["pandas"], optional = true }
-
-## bigquery
-### version defined by dbt-bigquery, installs pyarrow<8
-google-cloud-bigquery = { version = "~3.5.0", extras = ["pandas"], optional = true }
-
 ## redshift
 awswrangler = { version = ">=3.0.0", extras = ["redshift"], optional = true, python = ">=3.8" }
 sqlalchemy-redshift = { version = "^0.8.9", optional = true }
 
-## duckdb
-duckdb-engine = { version = "^0.1.8", optional = true }
-
-## trino
-trino = { version = "~0.321.0", extras = ["sqlalchemy"], optional = true }
-
 # teleport
 s3fs = { version = ">=2022.8.2", optional = true }
 
 # fal cloud
 packaging = ">=23"
 fal = "^0.10.0"
-importlib-metadata = "^6.0.0"
+importlib-metadata = "^6.11.0"
 
 [tool.poetry.extras]
 postgres = []
-snowflake = ["snowflake-connector-python"]
-bigquery = ["google-cloud-bigquery"]
 redshift = ["awswrangler", "sqlalchemy-redshift"]
-duckdb = ["duckdb-engine"]
-athena = []
-trino = ["trino"]
 teleport = ["s3fs"]
 
 [tool.poetry.group.dev]
 optional = true
 
 [tool.poetry.group.dev.dependencies]
-pytest = "^5.2"
-black = "^22.3"
 behave = "^1.2.6"
-mock = "^4.0.3"
-pytest-mock = "^3.7.0"
 matplotlib = "^3.5.2"
-requests = "^2.27.1"
+requests = "^2.32.3"
 
 [build-system]
-requires = ["poetry-core>=1.0.0"]
+requires = ["poetry-core>=1.5.0"]
 build-backend = "poetry.core.masonry.api"
-
-[tool.poetry.scripts]
-dbt-postgres-python = "fal.dbt.cli:cli"
diff --git a/projects/adapter/src/dbt/adapters/fal/load_db_profile.py b/projects/adapter/src/dbt/adapters/fal/load_db_profile.py
index e79bd0be..8d46796d 100644
--- a/projects/adapter/src/dbt/adapters/fal/load_db_profile.py
+++ b/projects/adapter/src/dbt/adapters/fal/load_db_profile.py
@@ -64,8 +64,6 @@ def load_profiles_info_1_5() -> Tuple[Profile, Dict[str, Any]]:
             raw_profile=raw_profile,
             profile_name=profile_name,
             renderer=profile_renderer,
-            # TODO: should we load the user_config?
-            user_config={},
             target_override=db_profile_target_name,
         )
     except RecursionError as error:
diff --git a/projects/adapter/src/fal/dbt/__init__.py b/projects/adapter/src/fal/dbt/__init__.py
deleted file mode 100644
index 0cb9e151..00000000
--- a/projects/adapter/src/fal/dbt/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from dbt.contracts.results import NodeStatus
-
-from fal.dbt.integration.project import (
-    FalDbt,
-    DbtModel,
-    DbtSource,
-    DbtTest,
-    DbtGenericTest,
-    DbtSingularTest,
-)
-from fal.dbt.fal_script import Context, CurrentModel
diff --git a/projects/adapter/src/fal/dbt/cli/__init__.py b/projects/adapter/src/fal/dbt/cli/__init__.py
deleted file mode 100644
index 4b7029b4..00000000
--- a/projects/adapter/src/fal/dbt/cli/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .cli import cli
diff --git a/projects/adapter/src/fal/dbt/cli/args.py b/projects/adapter/src/fal/dbt/cli/args.py
deleted file mode 100644
index cac25312..00000000
--- a/projects/adapter/src/fal/dbt/cli/args.py
+++ /dev/null
@@ -1,275 +0,0 @@
-from uuid import uuid4
-from typing import List, Any
-import os
-import argparse
-import pkg_resources
-
-
-LEVEL_FLAGS = {}
-
-
-class FalArgsError(Exception):
-    pass
-
-
-class _LevelFlag:
-    levels: List[str]
-    default: Any
-
-    def __init__(self, default):
-        self.levels = []
-        self.default = default
-
-
-# This is to handle offering the same flag at several parser levels
-# e.g. fal --profiles-dir ~/somewhere run --profiles-dir ~/other_place
-# we should get profiles_dir=~/other_place (right-most wins)
-def _flag_level(name: str, default=None):
-    level = uuid4()
-    # Store initial _LevelFlag value in case there is none yet
-    LEVEL_FLAGS[name] = LEVEL_FLAGS.get(name, _LevelFlag(default))
-    level_flag = LEVEL_FLAGS[name]
-    # Add current level, these are meant to read in order (right-most wins)
-    level_flag.levels.append(level)
-
-    if default != level_flag.default:
-        raise FalArgsError(
-            f"Different defaults '{default}' and '{level_flag.default}' for flag '{name}'"
-        )
-
-    return f"{name}_{level}"
-
-
-# Use right after creating the parser, before adding subparsers to it
-def _build_fal_common_options(parser: argparse.ArgumentParser):
-    parser.add_argument(
-        "--disable-logging",
-        action="store_true",
-        help="Disable logging.",
-        dest=_flag_level("disable_logging"),
-    )
-
-
-# Use right after creating the parser, before adding subparsers to it
-def _build_dbt_common_options(parser: argparse.ArgumentParser):
-    parser.add_argument(
-        "--project-dir",
-        metavar="PROJECT_DIR",
-        help="Directory to look for dbt_project.yml.",
-        dest=_flag_level("project_dir", os.getcwd()),
-    )
-
-    parser.add_argument(
-        "--profiles-dir",
-        metavar="PROFILES_DIR",
-        help="Directory to look for profiles.yml.",
-        dest=_flag_level("profiles_dir"),
-    )
-
-    parser.add_argument(
-        "--defer",
-        action="store_true",
-        help="If set, defer to the state variable for resolving unselected nodes.",
-        dest=_flag_level("defer"),
-    )
-
-
-def _add_threads_option(parser: argparse.ArgumentParser):
-    parser.add_argument(
-        "--threads",
-        type=int,
-        help="Specify number of threads to use while executing Python scripts and dbt models. Overrides settings in profiles.yml.",
-    )
-
-
-def _add_target_option(parser: argparse.ArgumentParser):
-    parser.add_argument(
-        "--target",
-        type=str,
-        default=None,
-        help="Specify a custom target from profiles.yml.",
-    )
-
-
-def _add_full_refresh_option(parser: argparse.ArgumentParser):
-    parser.add_argument(
-        "--full-refresh",
-        action="store_true",
-        default=False,
-        help="If specified, fal will pass dbt calls the --full-refresh flag, which will drop incremental models and fully-recalculate the incremental table from the model definition.",
-    )
-
-
-def _add_state_option(parser: argparse.ArgumentParser):
-    parser.add_argument("--state", type=str, help="Specify dbt state artifact path")
-
-
-def _add_vars_option(parser: argparse.ArgumentParser):
-    parser.add_argument(
-        "--vars",
-        type=str,
-        default="{}",
-        help="""
-            Supply variables to the project. This argument overrides variables
-            defined in your dbt_project.yml file. This argument should be a YAML
-            string, eg. '{my_variable: my_value}'
-            """,
-    )
-
-
-def _build_dbt_selectors(sub: argparse.ArgumentParser):
-    # fmt: off
-    sub.add_argument(
-        "-s", "--select",
-        nargs="+",
-        dest="select",
-        help="Specify the nodes to include.",
-    )
-    sub.add_argument(
-        "-m", "--models",
-        nargs="+",
-        dest="select",
-        help="Specify the nodes to include.",
-    )
-    sub.add_argument(
-        "--selector",
-        help="The selector name to use, as defined in selectors.yml",
-    )
-    sub.add_argument(
-        "--exclude",
-        nargs="+",
-        help="Specify the nodes to exclude.",
-    )
-    # fmt: on
-
-
-def _build_run_parser(sub: argparse.ArgumentParser):
-    # fmt: off
-    _build_dbt_selectors(sub)
-    _build_dbt_common_options(sub)
-    _build_fal_common_options(sub)
-    _add_threads_option(sub)
-    _add_target_option(sub)
-    _add_vars_option(sub)
-
-    sub.add_argument(
-        "--all",
-        action="store_true",
-        help="Run scripts for all models. By default, fal runs scripts for models that ran in the last dbt run.",
-    )
-    sub.add_argument(
-        "--scripts",
-        nargs="+",
-        help="Specify scripts to run, overrides schema.yml",
-    )
-
-    sub.add_argument(
-        "--before",
-        action="store_true",
-        help="Run scripts specified in model `before` tag",
-    )
-
-    sub.add_argument(
-        "--globals",
-        action="store_true",
-        default=False,
-        help="Run global scripts along with selected scripts",
-    )
-    # fmt: on
-
-
-def _build_flow_parser(sub: argparse.ArgumentParser):
-    flow_command_parsers = sub.add_subparsers(
-        title="flow commands",
-        dest="flow_command",
-        metavar="COMMAND",
-        required=True,
-    )
-    _build_dbt_common_options(sub)
-    _build_fal_common_options(sub)
-
-    flow_run_parser = flow_command_parsers.add_parser(
-        name="run",
-        help="Execute fal and dbt run in correct order",
-    )
-    _build_dbt_selectors(flow_run_parser)
-    _build_dbt_common_options(flow_run_parser)
-    _build_fal_common_options(flow_run_parser)
-    _add_threads_option(flow_run_parser)
-    _add_state_option(flow_run_parser)
-    _add_vars_option(flow_run_parser)
-    _add_target_option(flow_run_parser)
-    _add_full_refresh_option(flow_run_parser)
-
-
-def _build_cli_parser():
-    parser = argparse.ArgumentParser(
-        prog="dbt-postgres-python",
-        description="Run Python scripts on dbt models",
-    )
-
-    try:
-        version = pkg_resources.get_distribution("dbt-postgres-python").version
-    except pkg_resources.DistributionNotFound:
-        # TODO: remove once `fal` is no longer a supported package
-        version = pkg_resources.get_distribution("fal").version
-
-    parser.add_argument(
-        "-v",
-        "--version",
-        action="version",
-        version=f"fal {version}",
-        help="show fal version",
-    )
-
-    parser.add_argument(
-        "--debug",
-        action="store_true",
-        help="Display debug logging during execution.",
-    )
-
-    _build_dbt_common_options(parser)
-    _build_fal_common_options(parser)
-
-    # Handle commands
-    command_parsers = parser.add_subparsers(
-        title="commands",
-        dest="command",
-        metavar="COMMAND",
-        required=True,
-    )
-
-    run_parser = command_parsers.add_parser(
-        name="run",
-        help="Run Python scripts as post-hook nodes",
-    )
-    _build_run_parser(run_parser)
-
-    flow_parser = command_parsers.add_parser(
-        name="flow",
-        help="Execute fal and dbt commands in correct order",
-    )
-
-    _build_flow_parser(flow_parser)
-
-    return parser
-
-
-cli_parser = _build_cli_parser()
-
-
-def parse_args(argv: List[str]) -> argparse.Namespace:
-    args = cli_parser.parse_args(argv)
-    args_dict = vars(args)
-
-    # Reduce level flags into a single one with the value to use
-    for name, level_flag in LEVEL_FLAGS.items():
-        args_dict[name] = level_flag.default
-        for level in level_flag.levels:
-            # Read and delete the level flag to keep only the main one
-            current = args_dict.pop(f"{name}_{level}", None)
-            if current is not None:
-                args_dict[name] = current
-
-    # Build new argparse.Namespace with the correct flags
-    return argparse.Namespace(**args_dict)
diff --git a/projects/adapter/src/fal/dbt/cli/cli.py b/projects/adapter/src/fal/dbt/cli/cli.py
deleted file mode 100644
index aeb5b361..00000000
--- a/projects/adapter/src/fal/dbt/cli/cli.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from typing import List
-import sys
-
-from click.exceptions import ClickException
-
-from fal.dbt.cli.flow_runner import fal_flow_run
-from .args import parse_args
-from .fal_runner import fal_run
-from fal.dbt.telemetry import telemetry
-
-from fal.dbt.integration.logger import log_manager
-
-
-def cli(argv: List[str] = sys.argv):
-    # Wrapper to be able to shutdown telemetry afterwards
-    try:
-        _cli(argv)
-    finally:
-        telemetry.shutdown()
-
-
-@telemetry.log_call("cli")
-def _cli(argv: List[str]):
-    parsed = parse_args(argv[1:])
-
-    # TODO: do we still need this?
-    with log_manager.applicationbound():
-        if parsed.debug:
-            log_manager.set_debug()
-
-        if parsed.command == "flow":
-            if parsed.flow_command == "run":
-                exit_code = fal_flow_run(parsed)
-                if exit_code:
-                    raise SystemExit(exit_code)
-
-        elif parsed.command == "run":
-            fal_run(parsed)
-
-        else:
-            raise ClickException(f"Unknown command {parsed.command}")
diff --git a/projects/adapter/src/fal/dbt/cli/dbt_runner.py b/projects/adapter/src/fal/dbt/cli/dbt_runner.py
deleted file mode 100644
index 1b827bf4..00000000
--- a/projects/adapter/src/fal/dbt/cli/dbt_runner.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import multiprocessing
-from multiprocessing.connection import Connection
-from typing import Any, Dict, Optional, List
-import warnings
-import json
-from fal.dbt.integration.logger import LOGGER
-import os
-import argparse
-
-
-class DbtCliOutput:
-    def __init__(
-        self,
-        command: str,
-        return_code: int,
-        raw_output: Optional[str],
-        logs: Optional[List[Dict[str, Any]]],
-        run_results: Dict[str, Any],
-    ):
-        self._command = command
-        self._return_code = return_code
-        self._raw_output = raw_output
-        self._logs = logs
-        self._run_results = run_results
-
-    @property
-    def docs_url(self) -> Optional[str]:
-        return None
-
-    @property
-    def command(self) -> str:
-        return self._command
-
-    @property
-    def return_code(self) -> int:
-        return self._return_code
-
-    @property
-    def raw_output(self) -> Optional[str]:
-        return self._raw_output
-
-    @property
-    def logs(self) -> Optional[List[Dict[str, Any]]]:
-        return self._logs
-
-    @property
-    def run_results(self) -> Dict[str, Any]:
-        return self._run_results
-
-
-def get_dbt_command_list(args: argparse.Namespace, models_list: List[str]) -> List[str]:
-    command_list = []
-
-    if args.debug:
-        command_list += ["--debug"]
-
-    command_list += ["run"]
-
-    # NOTE: Safety measure because we do threading on fal
-    command_list += ["--threads", str(1)]
-
-    if args.project_dir:
-        command_list += ["--project-dir", args.project_dir]
-    if args.profiles_dir:
-        command_list += ["--profiles-dir", args.profiles_dir]
-
-    if args.defer:
-        command_list += ["--defer"]
-
-    if args.state:
-        command_list += ["--state", args.state]
-
-    if args.full_refresh:
-        command_list += ["--full-refresh"]
-
-    if args.target:
-        command_list += ["--target", args.target]
-
-    if args.vars is not None and args.vars != "{}":
-        command_list += ["--vars", args.vars]
-
-    if len(models_list) > 0:
-        command_list += ["--select"] + models_list
-
-    # Assure all command parts are str
-    return list(map(str, command_list))
-
-
-# This is the Python implementation of the `dbt_run()` function, in which
-# we directly use dbt-core as a Python library. We don't run it directly
-# but rather use 'multiprocessing' to run it in a real system Process to
-# imitate the existing behavior of `dbt_run()` (in terms of performance).
-
-
-def _dbt_run_through_python(
-    args: List[str], target_path: str, run_index: int, connection: Connection
-):
-    # logbook is currently using deprecated APIs internally, which is causing
-    # a crash. We'll mirror the solution from DBT, until it is fixed on
-    # upstream.
-    #
-    # PR from dbt-core: https://github.com/dbt-labs/dbt-core/pull/4866
-
-    warnings.filterwarnings("ignore", category=DeprecationWarning, module="logbook")
-
-    from dbt.cli.main import dbtRunner
-    from dbt.contracts.results import RunExecutionResult
-
-    runner = dbtRunner()
-
-    run_results: Optional[RunExecutionResult] = None
-    exc = None
-    try:
-        runner_run_results = runner.invoke(args)
-        run_results = runner_run_results.result
-    except BaseException as _exc:
-        return_code = getattr(_exc, "code", 1)
-        exc = _exc
-    else:
-        return_code = 0 if runner_run_results.success else 1
-
-    LOGGER.debug(f"dbt exited with return code {return_code}")
-
-    # The 'run_results' object has a 'write()' method which is basically json.dump().
-    # We'll dump it directly to the fal results file (instead of first dumping it to
-    # run results and then copying it over).
-    if run_results is not None:
-        run_results_path = os.path.join(target_path, f"fal_results_{run_index}.json")
-        run_results.write(run_results_path)
-    else:
-        connection.send(exc)
-        return
-
-    connection.send(return_code)
-
-
-def dbt_run_through_python(
-    args: argparse.Namespace, models_list: List[str], target_path: str, run_index: int
-) -> DbtCliOutput:
-    """Run DBT from the Python entry point in a subprocess."""
-    # dbt-core is currently using the spawn as its mulitprocessing context
-    # so we'll mirror it.
-    if multiprocessing.get_start_method() != "spawn":
-        multiprocessing.set_start_method("spawn", force=True)
-
-    args_list = get_dbt_command_list(args, models_list)
-
-    cmd_str = " ".join(["dbt", *args_list])
-    LOGGER.info("Running command: {}", cmd_str)
-
-    # We will be using a multiprocessing.Pipe to communicate
-    # from subprocess to main process about the return code
-    # as well as the exceptions that might arise.
- p_connection, c_connection = multiprocessing.Pipe() - process = multiprocessing.Process( - target=_dbt_run_through_python, - args=(args_list, target_path, run_index, c_connection), - ) - - process.start() - result = p_connection.recv() - if not isinstance(result, int): - raise RuntimeError("Error running dbt run") from result - process.join() - - run_results = _get_index_run_results(target_path, run_index) - return DbtCliOutput( - command=cmd_str, - return_code=result, - raw_output=None, - logs=None, - run_results=run_results, - ) - - -def _get_index_run_results(target_path: str, run_index: int) -> Dict[Any, Any]: - """Get run results for a given run index.""" - with open( - os.path.join(target_path, f"fal_results_{run_index}.json") - ) as raw_results: - return json.load(raw_results) diff --git a/projects/adapter/src/fal/dbt/cli/fal_runner.py b/projects/adapter/src/fal/dbt/cli/fal_runner.py deleted file mode 100644 index 63365249..00000000 --- a/projects/adapter/src/fal/dbt/cli/fal_runner.py +++ /dev/null @@ -1,178 +0,0 @@ -import argparse -from pathlib import Path -from typing import Any, Dict, List - -from fal.dbt.planner.executor import parallel_executor -from fal.dbt.planner.schedule import Scheduler -from fal.dbt.planner.tasks import FalLocalHookTask, Status, TaskGroup - -from fal.dbt.fal_script import FalScript, TimingType -from fal.dbt.integration.project import FAL, DbtModel, FalDbt, FalGeneralException - - -def create_fal_dbt( - args: argparse.Namespace, generated_models: Dict[str, Path] = {} -) -> FalDbt: - real_state = None - if hasattr(args, "state") and args.state is not None: - real_state = args.state - - return FalDbt( - args.project_dir, - args.profiles_dir, - args.select, - args.exclude, - args.selector, - args.threads, - real_state, - args.target, - args.vars, - generated_models, - ) - - -def fal_run(args: argparse.Namespace): - "Runs the fal run command in a subprocess" - - selector_flags = args.select or args.exclude or args.selector - if args.all and selector_flags: - raise FalGeneralException( - "Cannot pass --all flag alongside selection flags (--select/--models, --exclude, --selector)" - ) - - faldbt = create_fal_dbt(args) - models = _get_filtered_models(faldbt, args.all, selector_flags, args.before) - - scripts = _select_scripts(args, models, faldbt) - global_scripts = _get_global_scripts(faldbt, args) - - if args.before: - _handle_global_scripts(args, global_scripts, faldbt, selector_flags) - - pre_hook_scripts = _get_hooks_for_model(models, faldbt, TimingType.PRE) - - _run_scripts(args, pre_hook_scripts, faldbt) - - _run_scripts(args, scripts, faldbt) - - else: - _run_scripts(args, scripts, faldbt) - - post_hook_scripts = _get_hooks_for_model(models, faldbt, TimingType.POST) - _run_scripts(args, post_hook_scripts, faldbt) - _handle_global_scripts(args, global_scripts, faldbt, selector_flags) - - -def _handle_global_scripts( - args: argparse.Namespace, - global_scripts: List[FalScript], - faldbt: FalDbt, - selector_flags: Any, -) -> None: - scripts_flag = _scripts_flag(args) - if not scripts_flag and not selector_flags: - # run globals when no --script is passed and no selector is passed - _run_scripts(args, global_scripts, faldbt) - if (scripts_flag or selector_flags) and args.globals: - _run_scripts(args, global_scripts, faldbt) - - -def _run_scripts(args: argparse.Namespace, scripts: List[FalScript], faldbt: FalDbt): - scheduler = Scheduler( - [TaskGroup(FalLocalHookTask.from_fal_script(script)) for script in scripts] - ) - parallel_executor(args, faldbt, 
scheduler) - - failed_tasks: List[FalLocalHookTask] = [ - group.task for group in scheduler.filter_groups(Status.FAILURE) - ] # type: ignore - failed_script_ids = [task.build_fal_script(faldbt).id for task in failed_tasks] - if failed_script_ids: - raise RuntimeError(f"Error in scripts {str.join(', ',failed_script_ids)}") - - -def _scripts_flag(args: argparse.Namespace) -> bool: - return bool(args.scripts) - - -def _get_hooks_for_model( - models: List[DbtModel], faldbt: FalDbt, hook_type: TimingType -) -> List[FalScript]: - return [ - FalScript.from_hook(faldbt, model, hook, hook_type) - for model in models - for hook in model.get_hooks(hook_type=hook_type) - ] - - -def _select_scripts( - args: argparse.Namespace, models: List[DbtModel], faldbt: FalDbt -) -> List[FalScript]: - scripts = [] - scripts_flag = _scripts_flag(args) - is_before = bool(args.before) - timing_type = TimingType.PRE if is_before else TimingType.POST - - for model in models: - model_scripts = model.get_scripts(before=is_before) - for path in model_scripts: - if not scripts_flag: - # run all scripts when no --script is passed - scripts.append(FalScript(faldbt, model, path, timing_type=timing_type)) - elif path in args.scripts: - # if --script selector is there only run selected scripts - scripts.append(FalScript(faldbt, model, path, timing_type=timing_type)) - - return scripts - - -def _get_global_scripts(faldbt: FalDbt, args: argparse.Namespace): - scripts_flag = _scripts_flag(args) - is_before = bool(args.before) - timing_type = TimingType.PRE if is_before else TimingType.POST - return [ - FalScript(faldbt, None, path, timing_type=timing_type) - for path in faldbt._global_script_paths["before" if is_before else "after"] - if not scripts_flag or path in args.scripts - ] - - -def _get_models_with_keyword(faldbt: FalDbt) -> List[DbtModel]: - return list(filter(lambda model: FAL in model.meta, faldbt.list_models())) - - -def _get_filtered_models(faldbt: FalDbt, all, selected, before) -> List[DbtModel]: - selected_ids = _models_ids(faldbt._compile_task._flattened_nodes) - filtered_models: List[DbtModel] = [] - - if ( - not all - and not selected - and not before - and faldbt._run_results.native_run_result is None - ): - from fal.dbt.integration.parse import FalParseError - - raise FalParseError( - "Cannot define models to run without selection flags or dbt run_results artifact or --before flag" - ) - - models = _get_models_with_keyword(faldbt) - - for node in models: - if selected: - if node.unique_id in selected_ids: - filtered_models.append(node) - elif before: - if node.get_scripts(before=before) != []: - filtered_models.append(node) - elif all: - filtered_models.append(node) - elif node.status != "skipped": - filtered_models.append(node) - - return filtered_models - - -def _models_ids(models): - return list(map(lambda r: r.unique_id, models)) diff --git a/projects/adapter/src/fal/dbt/cli/flow_runner.py b/projects/adapter/src/fal/dbt/cli/flow_runner.py deleted file mode 100644 index 52768e8d..00000000 --- a/projects/adapter/src/fal/dbt/cli/flow_runner.py +++ /dev/null @@ -1,133 +0,0 @@ -import json -import copy -from pathlib import Path -from typing import Any, Dict, Optional, cast, Union - -from fal.dbt.cli.fal_runner import create_fal_dbt -from fal.dbt.cli.selectors import ExecutionPlan -from fal.dbt.cli.model_generator import generate_python_dbt_models -from fal.dbt.fal_script import FalScript -from fal.dbt.node_graph import DbtModelNode, FalFlowNode, NodeGraph, ScriptNode -from fal.dbt.integration.project import 
FalDbt, NodeStatus -import argparse - - -DBT_RUN_RESULTS_FILENAME = "run_results.json" -FAL_RUN_RESULTS_FILENAME = "fal_results.json" -RUN_RESULTS_KEY = "results" -ELAPSED_TIME_KEY = "elapsed_time" - - -def run_threaded( - fal_dbt: FalDbt, - parsed: argparse.Namespace, - node_graph: NodeGraph, -) -> int: - from fal.dbt.planner.plan import ( - OriginGraph, - FilteredGraph, - PlannedGraph, - ScriptConnectedGraph, - ) - from fal.dbt.planner.schedule import schedule_graph - from fal.dbt.planner.executor import parallel_executor - - execution_plan = ExecutionPlan.create_plan_from_graph(parsed, node_graph, fal_dbt) - - origin_graph = OriginGraph(node_graph.graph) - filtered_graph = FilteredGraph.from_execution_plan( - origin_graph, execution_plan=execution_plan - ) - connected_graph = ScriptConnectedGraph.from_filtered_graph(filtered_graph) - planned_graph = PlannedGraph.from_script_connected_graph( - connected_graph, enable_chunking=False - ) - scheduler = schedule_graph(planned_graph.graph, node_graph) - return parallel_executor(parsed, fal_dbt, scheduler) - - -def fal_flow_run(parsed: argparse.Namespace) -> int: - # fal-format Python models - generated_models = generate_python_dbt_models(parsed.project_dir, parsed.vars) - - fal_dbt = create_fal_dbt(parsed, generated_models) - _mark_dbt_nodes_status(fal_dbt, NodeStatus.Skipped) - - node_graph = NodeGraph.from_fal_dbt(fal_dbt) - exit_code = run_threaded(fal_dbt=fal_dbt, parsed=parsed, node_graph=node_graph) - - # each dbt run creates its own run_results file, here we are combining - # these files in a single run_results file that fits dbt file format - _combine_fal_run_results(fal_dbt.target_path) - return exit_code - - -def _mark_dbt_nodes_status( - fal_dbt: FalDbt, status: NodeStatus, dbt_node: Optional[str] = None -): - for model in fal_dbt.models: - if dbt_node is not None: - if model.unique_id == dbt_node: - model.status = status - else: - model.status = status - - -def node_to_script(node: Union[FalFlowNode, None], fal_dbt: FalDbt) -> FalScript: - """Convert dbt node into a FalScript.""" - if node is not None and isinstance(node, ScriptNode): - return cast(ScriptNode, node).script - elif node is not None and isinstance(node, DbtModelNode): - return FalScript.model_script(fal_dbt, node.model) - else: - raise Exception(f"Cannot convert node to script. Node: {node}") - - -def _combine_fal_run_results(target_path: str) -> None: - target_path = Path(target_path) - dbt_run_results, fal_run_results = [], [] - for path in target_path.glob("fal_results_*.json"): - assert path.is_file() - - results = _get_all_result_content(path) - - if "dbt_schema_version" in results.get("metadata", {}): - dbt_run_results.append(results) - fal_run_results.append(results) - - # Clear out files as we go. - path.unlink() - - # Use the last DBT result as the framework for putting - # the rest of the run results. 
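The merge step that follows can be read as a small standalone function (an illustrative sketch under assumed names, not from the diff): sum the elapsed times and concatenate the per-run result lists into a copy of the last payload.

import copy
from typing import Any, Dict, List

def combine_run_results(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    # Assumes at least one payload; the last one serves as the framework.
    combined = copy.deepcopy(results[-1])
    combined["results"] = []
    combined["elapsed_time"] = 0.0
    for result in results:
        combined["elapsed_time"] += result.get("elapsed_time", 0)
        combined["results"].extend(result["results"])
    return combined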
- if dbt_run_results: - result_framework = dbt_run_results[-1] - else: - result_framework = { - "metadata": {}, - "args": {}, - ELAPSED_TIME_KEY: float("nan"), - } - - for file, results in [ - (DBT_RUN_RESULTS_FILENAME, dbt_run_results), - (FAL_RUN_RESULTS_FILENAME, fal_run_results), - ]: - if not results: - continue - - combined_results = copy.deepcopy(result_framework) - combined_results[RUN_RESULTS_KEY] = [] - combined_results[ELAPSED_TIME_KEY] = 0.0 - - for result in results: - combined_results[ELAPSED_TIME_KEY] += result.get(ELAPSED_TIME_KEY, 0) - combined_results[RUN_RESULTS_KEY].extend(result[RUN_RESULTS_KEY]) - - with open(target_path / file, "w") as stream: - json.dump(combined_results, stream) - - -def _get_all_result_content(file) -> Dict[str, Any]: - with open(file) as content: - return json.load(content) diff --git a/projects/adapter/src/fal/dbt/cli/model_generator/__init__.py b/projects/adapter/src/fal/dbt/cli/model_generator/__init__.py deleted file mode 100644 index 3b6a3828..00000000 --- a/projects/adapter/src/fal/dbt/cli/model_generator/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .model_generator import generate_python_dbt_models diff --git a/projects/adapter/src/fal/dbt/cli/model_generator/model_generator.py b/projects/adapter/src/fal/dbt/cli/model_generator/model_generator.py deleted file mode 100644 index 470e4ee5..00000000 --- a/projects/adapter/src/fal/dbt/cli/model_generator/model_generator.py +++ /dev/null @@ -1,178 +0,0 @@ -import ast -from functools import partial -import re -from typing import Callable, Iterable, List, TypeVar -from pathlib import Path -from fal.dbt.fal_script import python_from_file - -from fal.dbt.integration.parse import get_fal_models_dirs, load_dbt_project_contract -from fal.dbt.cli.model_generator.module_check import ( - generate_dbt_dependencies, - write_to_model_check, -) - -from fal.dbt.integration.logger import LOGGER - -from fal.dbt.telemetry import telemetry - -SQL_MODEL_TEMPLATE = """ -{{ config(materialized='ephemeral') }} -/* -FAL_GENERATED __checksum__ - -Script dependencies: - -__deps__ - -*/ - -SELECT * FROM {{ this }} -""" - -GENERATED_DIR = Path("fal") - -CHECKSUM_REGEX = re.compile(r"FAL_GENERATED ([_\d\w]+)") - - -def generate_python_dbt_models(project_dir: str, args_vars: str): - fal_models_paths = _get_fal_models_paths(project_dir, args_vars) - dbt_models_paths = list( - map(Path, load_dbt_project_contract(project_dir).model_paths or []) - ) - - base_dbt_models_dir = Path(project_dir, dbt_models_paths[0]) - - old_generated_sqls = list( - _process_models_paths(dbt_models_paths, _find_fal_generated_models) - ) - - fal_python_models_and_sqls = list( - _process_models_paths( - fal_models_paths, - partial(_find_fal_python_models_and_sql_target, base_dbt_models_dir), - ) - ) - - python_paths: List[Path] = [] - fal_target_sqls: List[Path] = [] - if fal_python_models_and_sqls: - python_paths, fal_target_sqls, _ = map(list, zip(*fal_python_models_and_sqls)) - - _delete_old_generated_sqls(old_generated_sqls, fal_target_sqls) - - for py_path, sql_path, old_checksum in fal_python_models_and_sqls: - new_checksum = _generate_sql_for_fal_python_model(py_path, sql_path) - if not old_checksum or new_checksum != old_checksum: - LOGGER.warn( - f"File '{sql_path.relative_to(project_dir)}' was generated from '{py_path.relative_to(project_dir)}'.\n" - "Please do not modify it directly. We recommend committing it to your repository." 
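The FAL_GENERATED marker that this warning refers to acts as a self-describing checksum: the digest is computed with the marker's value blanked out, so a generated file can embed its own checksum. A hedged standalone sketch of that scheme (mirroring the `_checksum` helper defined below; the sample SQL is invented):

import hashlib
import re

CHECKSUM_REGEX = re.compile(r"FAL_GENERATED ([_\d\w]+)")

def checksum(contents: str):
    # The digest is taken over the contents with the marker value removed,
    # so the stored digest does not influence its own computation.
    found = CHECKSUM_REGEX.search(contents)
    to_check = CHECKSUM_REGEX.sub("FAL_GENERATED", contents.strip())
    return (
        hashlib.md5(to_check.encode("utf-8")).hexdigest(),
        found.group(1) if found else None,
    )

digest, _ = checksum("FAL_GENERATED __checksum__\nSELECT 1")
stamped = f"FAL_GENERATED {digest}\nSELECT 1"
# Re-checking the stamped file yields the same digest it already carries.
assert checksum(stamped) == (digest, digest)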
- ) - - if python_paths: - telemetry.log_api( - action="python_models_generated", - additional_props={"models": len(python_paths)}, - ) - - return {path.stem: path for path in python_paths} - - -def _get_fal_models_paths(project_dir: str, args_vars: str): - models_paths = get_fal_models_dirs(project_dir, args_vars) - project_path = Path(project_dir) - return list(map(project_path.joinpath, models_paths)) - - -def _generate_sql_for_fal_python_model(py_path: Path, sql_path: Path): - source_code = python_from_file(py_path) - module = ast.parse(source_code, str(py_path), "exec") - - # Fails if it does not have write_to_model - write_to_model_check(module) - - dbt_deps = generate_dbt_dependencies(module) - - sql_contents = SQL_MODEL_TEMPLATE.replace("__deps__", dbt_deps) - checksum, _ = _checksum(sql_contents) - sql_contents = sql_contents.replace("__checksum__", checksum) - - sql_path.parent.mkdir(parents=True, exist_ok=True) - with open(sql_path, "w") as file: - file.write(sql_contents) - - return checksum - - -# TODO: unit tests -def _check_path_safe_to_write(sql_path: Path, py_path: Path): - if sql_path.exists(): - with open(sql_path, "r") as file: - contents = file.read() - checksum, found = _checksum(contents) - if not found or checksum != found: - LOGGER.debug( - f"Existing file calculated checksum: {checksum}\nFound checksum: {found}" - ) - raise RuntimeError( - f"File '{sql_path}' not generated by fal would be " - f"overwritten by generated model of '{py_path}'. Please rename or remove." - ) - return checksum - - -T = TypeVar("T") - - -def _process_models_paths( - models_paths: Iterable[Path], func: Callable[[Path], Iterable[T]] -) -> Iterable[T]: - for models_path in models_paths: - yield from func(models_path) - - -def _find_fal_python_models_and_sql_target(base_sql_path: Path, models_path: Path): - for py_path in _find_python_files(models_path): - sql_path = _sql_path_from_python_path( - base_sql_path, py_path.relative_to(models_path) - ) - old_checksum = _check_path_safe_to_write(sql_path, py_path) - yield py_path, sql_path, old_checksum - - -def _sql_path_from_python_path(base_sql_path: Path, relative_py_path: Path): - return base_sql_path / GENERATED_DIR / relative_py_path.with_suffix(".sql") - - -def _find_fal_generated_models(models_path: Path): - fal_path = models_path / GENERATED_DIR - return (file for file in fal_path.glob("**/*.sql") if _is_fal_generated(file)) - - -def _delete_old_generated_sqls(old_models: Iterable[Path], new_models: Iterable[Path]): - for sql_path in old_models: - if sql_path not in new_models: - sql_path.unlink() - - -def _is_fal_generated(file_path): - with open(file_path) as file: - return CHECKSUM_REGEX.search(file.read()) - - -def _checksum(contents: str): - import hashlib - - found = CHECKSUM_REGEX.search(contents) - to_check = CHECKSUM_REGEX.sub("FAL_GENERATED", contents.strip()) - return ( - hashlib.md5(to_check.encode("utf-8")).hexdigest(), - found.group(1) if found else None, - ) - - -def _find_python_files(models_path: Path) -> List[Path]: - files = [] - files.extend(models_path.rglob("*.py")) - files.extend(models_path.rglob("*.ipynb")) - - return [p for p in files if p.is_file()] diff --git a/projects/adapter/src/fal/dbt/cli/model_generator/module_check.py b/projects/adapter/src/fal/dbt/cli/model_generator/module_check.py deleted file mode 100644 index fe67cd13..00000000 --- a/projects/adapter/src/fal/dbt/cli/model_generator/module_check.py +++ /dev/null @@ -1,109 +0,0 @@ -import ast -from typing import Iterator, List -import astor -import 
re
-
-
-def generate_dbt_dependencies(module: ast.Module) -> str:
-    """
-    Search for dbt function calls and return them, as found, wrapped in Jinja braces.
-    We do not modify them, letting dbt decide whether they make sense.
-    """
-
-    function_calls = _find_function_calls(ast.walk(module))
-    ref_calls = _filter_function_calls_by_name(function_calls, "ref")
-    source_calls = _filter_function_calls_by_name(function_calls, "source")
-
-    dbt_ast_calls = _filter_constant_calls(ref_calls + source_calls)
-
-    # Convert ast.Calls back to source code
-    dbt_function_calls = list(map(astor.to_source, dbt_ast_calls))
-    docstring_dbt_functions = _find_docstring_dbt_functions(module)
-
-    lines: List[str] = docstring_dbt_functions + dbt_function_calls
-
-    # Jinja-fy the calls
-    return "\n".join(map(lambda s: "{{ " + s.strip() + " }}", lines))
-
-
-def write_to_model_check(module: ast.Module):
-    """
-    Make sure there is at least one write_to_model function call in the module
-    """
-
-    all_function_calls = _find_function_calls(ast.walk(module))
-    all_wtm_calls = _filter_function_calls_by_name(all_function_calls, "write_to_model")
-
-    assert (
-        len(all_wtm_calls) > 0
-    ), "There must be at least one write_to_model call in the Python Model"
-
-
-def _find_function_calls(nodes: Iterator[ast.AST]) -> List[ast.Call]:
-    return [node for node in nodes if isinstance(node, ast.Call)]
-
-
-def _filter_function_calls_by_name(calls: List[ast.Call], func_name: str):
-    """
-    Analyze all function calls passed to find the ones that call `func_name`.
-    """
-    return [
-        call
-        for call in calls
-        if isinstance(call.func, ast.Name) and call.func.id == func_name
-    ]
-
-
-def _filter_constant_calls(calls: List[ast.Call]) -> List[ast.Call]:
-    """
-    Analyze all function calls passed to find the ones with all literal arguments.
-    We ignore `_func(var)` but accept `_func('model_name')`.
-    """
-
-    def _is_constant(arg: ast.expr):
-        import sys
-
-        if sys.version_info < (3, 8):
-            return isinstance(arg, ast.Str)
-        else:
-            return isinstance(arg, ast.Constant)
-
-    return [call for call in calls if all(map(_is_constant, call.args))]
-
-
-def _print_node(node: ast.AST):
-    """
-    For temporary usage during debugging.
-    """
-    print(
-        node,
-        *((f, getattr(node, f)) for f in node._fields),
-        *((f, getattr(node, f)) for f in node._attributes),
-    )
-
-
-REF_RE = re.compile("ref\\([^)]*\\)")
-SOURCE_RE = re.compile("source\\([^)]*\\)")
-
-
-def _find_docstring_dbt_functions(module: ast.Module) -> List[str]:
-    '''
-    Simple regex analysis of the docstring at the top of the file. Users can list
-    dependencies one per line, but not across multiple lines.
-    Example:
-
-    """
-    A Python model with some docstring introduction.
-
-    Dependencies:
-    - ref('model')
-    - source('some', 'table')
-    """
-    '''
-    docstring = ast.get_docstring(module, True) or ""
-
-    calls = []
-    for line in docstring.splitlines():
-        calls.extend(REF_RE.findall(line))
-        calls.extend(SOURCE_RE.findall(line))
-
-    return calls
diff --git a/projects/adapter/src/fal/dbt/cli/selectors.py b/projects/adapter/src/fal/dbt/cli/selectors.py
deleted file mode 100644
index 2e24e819..00000000
--- a/projects/adapter/src/fal/dbt/cli/selectors.py
+++ /dev/null
@@ -1,311 +0,0 @@
-import itertools
-import re
-from dataclasses import dataclass
-from typing import List, Optional, Union, Iterator
-from fal.dbt.node_graph import NodeGraph
-from fal.dbt.integration.project import CompileArgs, FalDbt, FalGeneralException
-from dbt.task.compile import CompileTask
-from enum import Enum
-from functools import reduce
-import networkx as nx
-
-
-class ExecutionPlan:
-    """
-    Represents a fal flow execution
-    """
-
-    before_scripts: List[str]
-    dbt_models: List[str]
-    after_scripts: List[str]
-    project_name: str
-
-    def __init__(self, unique_ids: List[str], project_name):
-        self.before_scripts = []
-        self.dbt_models = []
-        self.after_scripts = []
-        self.project_name = project_name
-        for id in unique_ids:
-            if _is_before_script(id):
-                self.before_scripts.append(id)
-            elif _is_after_script(id):
-                self.after_scripts.append(id)
-            else:
-                self.dbt_models.append(id)
-
-    @property
-    def nodes(self) -> List[str]:
-        return self.before_scripts + self.after_scripts + self.dbt_models
-
-    @classmethod
-    def create_plan_from_graph(cls, parsed, nodeGraph: NodeGraph, fal_dbt: FalDbt):
-        """
-        Creates an ExecutionPlan from the CLI arguments
-        """
-        unique_ids = list(nodeGraph.graph.nodes.keys())
-
-        ids_to_execute = unique_ids
-
-        if parsed.select:
-            ids_to_execute = _filter_node_ids(
-                unique_ids, fal_dbt, list(parsed.select), nodeGraph
-            )
-
-        ids_to_exclude = []
-        if "exclude" in parsed and parsed.exclude:
-            ids_to_exclude = _filter_node_ids(
-                unique_ids, fal_dbt, list(parsed.exclude), nodeGraph
-            )
-
-        ids_to_execute = [i for i in ids_to_execute if i not in ids_to_exclude]
-
-        # Remove non-model nodes (sources, maybe more?) by making sure they are in the node_lookup dict
-        ids_to_execute = [i for i in ids_to_execute if i in nodeGraph.node_lookup]
-
-        return cls(list(set(ids_to_execute)), fal_dbt.project_name)
-
-
-@dataclass
-class SelectionUnion:
-    components: List[str]
-
-
-@dataclass
-class SelectionIntersection:
-    components: List[str]
-
-
-def parse_union(
-    components: List[str],
-) -> SelectionUnion:
-    # Based on the original implementation at dbt-core.
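For illustration, the same rule in a hypothetical self-contained form (invented names, not from the diff): space-separated specs form a union, while comma-separated specs within a single token form an intersection.

import itertools
from dataclasses import dataclass
from typing import List

OP_SET_UNION = " "
OP_SET_INTERSECTION = ","

@dataclass
class Intersection:
    components: List[str]

def parse(components: List[str]) -> List[Intersection]:
    # ['a b', 'c,d'] -> ['a', 'b', 'c,d'] -> union('a', 'b', intersection('c', 'd'))
    raw_specs = itertools.chain.from_iterable(r.split(OP_SET_UNION) for r in components)
    return [Intersection(raw.split(OP_SET_INTERSECTION)) for raw in raw_specs]

assert parse(["a b", "c,d"]) == [
    Intersection(["a"]),
    Intersection(["b"]),
    Intersection(["c", "d"]),
]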
-
-    # turn ['a b', 'c'] -> ['a', 'b', 'c']
-    raw_specs = itertools.chain.from_iterable(r.split(OP_SET_UNION) for r in components)
-    union_components = []
-
-    # ['a', 'b', 'c,d'] -> union('a', 'b', intersection('c', 'd'))
-    for raw_spec in raw_specs:
-        union_components.append(
-            SelectionIntersection(
-                raw_spec.split(OP_SET_INTERSECTION),
-            )
-        )
-    return SelectionUnion(
-        union_components,
-    )
-
-
-def _filter_node_ids(
-    unique_ids: List[str],
-    fal_dbt: FalDbt,
-    selectors: List[str],
-    nodeGraph: NodeGraph,
-) -> List[str]:
-    """Filter the list of unique_ids according to the given selectors."""
-    output = set()
-
-    union = parse_union(selectors)
-
-    for intersection in union.components:
-        try:
-            plan_outputs = [
-                set(SelectorPlan(selector, unique_ids, fal_dbt).execute(nodeGraph))
-                for selector in intersection.components
-                if selector
-            ]
-        except nx.NetworkXError:
-            # When the user selects a non-existent node, don't fail immediately
-            # but rather just continue processing the rest of the selectors.
-            plan_outputs = []
-
-        if plan_outputs:
-            output |= set.intersection(*plan_outputs)
-
-    return list(output)
-
-
-def _get_children_with_parents(node_id: str, nodeGraph: NodeGraph) -> List[str]:
-    children = nodeGraph.get_descendants(node_id)
-    output = reduce(lambda l, ch: l + nodeGraph.get_ancestors(ch), children, children)
-
-    output = list(set(output))
-
-    return output
-
-
-def _expand_script(script_name: str, unique_ids: List[str]) -> List[str]:
-    """
-    Expands the selected script name to the unique id format.
-    For example, [scripta.py] expands to [script.modelB.AFTER.scripta.py, script.modelA.BEFORE.scripta.py]
-    """
-
-    def contains_script_name(id: str):
-        return script_name in id
-
-    return list(filter(contains_script_name, unique_ids))
-
-
-class SelectType(Enum):
-    MODEL = 1
-    SCRIPT = 2
-    COMPLEX = 3
-    TAG = 4
-
-
-@dataclass(init=False)
-class SelectorPlan:
-    """
-    Represents a single selector, for example in the command
-
-    fal flow run --select script.py+
-
-    script.py+ is the SelectorPlan with the children attribute set to true
-    """
-
-    unique_ids: List[str]
-    children: bool
-    children_levels: Optional[int]
-    children_with_parents: bool
-    parents: bool
-    parents_levels: Optional[int]
-    type: SelectType
-    raw: str
-
-    def __init__(self, selector: str, unique_ids: List[str], fal_dbt: FalDbt):
-        self.raw = selector
-        self.children_with_parents = OP_CHILDREN_WITH_PARENTS.match(selector)
-        selector = OP_CHILDREN_WITH_PARENTS.rest(selector)
-
-        self.parents = OP_PARENTS.match(selector)
-        self.parents_levels = OP_PARENTS.depth(selector)
-        selector = OP_PARENTS.rest(selector)
-
-        self.children = OP_CHILDREN.match(selector)
-        self.children_levels = OP_CHILDREN.depth(selector)
-        selector = OP_CHILDREN.rest(selector)
-
-        self.type = _to_select_type(selector)
-
-        if self.type == SelectType.MODEL:
-            self.unique_ids = [f"model.{fal_dbt.project_name}.{selector}"]
-        elif self.type == SelectType.SCRIPT:
-            self.unique_ids = _expand_script(selector, unique_ids)
-        elif self.type == SelectType.TAG:
-            self.unique_ids = unique_ids_from_tag_selector(selector, fal_dbt)
-        elif self.type == SelectType.COMPLEX:
-            self.unique_ids = unique_ids_from_complex_selector(selector, fal_dbt)
-
-    def __post_init__(self):
-        if self.children and self.children_with_parents:
-            raise RuntimeError(
-                f'Invalid node spec {self.raw} - "@" prefix and "+" suffix are incompatible'
-            )
-
-    def execute(self, nodeGraph: NodeGraph) -> Iterator[str]:
-        for id in self.unique_ids:
-            yield id
-
-            if self.children:
-                if self.children_levels is None:
-
children = nodeGraph.get_descendants(id) - else: - children = nodeGraph.get_successors(id, self.children_levels) - yield from children - - if self.parents: - if self.parents_levels is None: - parents = nodeGraph.get_ancestors(id) - else: - parents = nodeGraph.get_predecessors(id, self.parents_levels) - yield from parents - - if self.children_with_parents: - ids = _get_children_with_parents(id, nodeGraph) - yield from ids - - -def unique_ids_from_complex_selector(select, fal_dbt: FalDbt) -> List[str]: - args = CompileArgs(None, [select], [select], tuple(), fal_dbt._state, None) - compile_task = CompileTask(args, fal_dbt._config, fal_dbt._manifest.native_manifest) - compile_task._runtime_initialize() - spec = compile_task.get_selection_spec() - graph = compile_task.get_node_selector().get_graph_queue(spec) - return list(graph.queued) - -def unique_ids_from_tag_selector(selector: str, fal_dbt: FalDbt) -> List[str]: - parts = selector.split(":") - if len(parts) != 2: - raise FalGeneralException(f"Expected selector tag:tag_value, got: {selector}") - tag = parts[1] - ids = [model.unique_id for model in fal_dbt.models if tag in model.node.tags] - return ids - - -def _to_select_type(selector: str) -> SelectType: - if ":" in selector: - parts = selector.split(':') - if parts[0] == "tag": - return SelectType.TAG - return SelectType.COMPLEX - else: - if _is_script_node(selector): - return SelectType.SCRIPT - else: - return SelectType.MODEL - - -def _is_script_node(node_name: str) -> bool: - return node_name.endswith(".py") or node_name.endswith(".ipynb") - - -class SelectorGraphOp: - _regex: re.Pattern - - def __init__(self, regex: re.Pattern): - self._regex = regex - assert ( - "rest" in regex.groupindex - ), 'rest must be in regex. Use `re.compile("something(?P.*)")`' - - def _select(self, selector: str, group: Union[str, int]) -> Optional[str]: - match = self._regex.match(selector) - if match: - return match.group(group) - - def match(self, selector: str) -> bool: - return self._select(selector, 0) is not None - - def rest(self, selector: str) -> str: - rest = self._select(selector, "rest") - if rest is not None: - return rest - return selector - - -class SelectorGraphOpDepth(SelectorGraphOp): - def depth(self, selector: str) -> Optional[int]: - depth = self._select(selector, "depth") - if depth: - return int(depth) - - -# Graph operators from their regex Patterns -OP_CHILDREN_WITH_PARENTS = SelectorGraphOp(re.compile("^\\@(?P.*)")) -OP_PARENTS = SelectorGraphOpDepth(re.compile("^(?P\\d*)\\+(?P.*)")) -OP_CHILDREN = SelectorGraphOpDepth(re.compile("(?P.*)\\+(?P\\d*)$")) - -# Logic based set operators -OP_SET_UNION = " " -OP_SET_INTERSECTION = "," - -IS_BEFORE_SCRIPT_REGEX = re.compile("^script\\..+\\.BEFORE\\..+\\.(py|ipynb)$") -IS_AFTER_SCRIPT_REGEX = re.compile("^script\\..+\\.AFTER\\..+\\.(py|ipynb)") - - -def _is_before_script(id: str) -> bool: - return bool(IS_BEFORE_SCRIPT_REGEX.match(id)) - - -def _is_after_script(id: str) -> bool: - return bool(IS_AFTER_SCRIPT_REGEX.match(id)) diff --git a/projects/adapter/src/fal/dbt/fal_script.py b/projects/adapter/src/fal/dbt/fal_script.py deleted file mode 100644 index 654e97a0..00000000 --- a/projects/adapter/src/fal/dbt/fal_script.py +++ /dev/null @@ -1,428 +0,0 @@ -import os -import json -from enum import Enum -from typing import Dict, Any, List, Optional, Union, Callable -from pathlib import Path -from functools import partial -from dataclasses import dataclass, field -from deprecation import deprecated - -import hashlib - -from 
fal.dbt.integration.parse import normalize_path -from fal.dbt.integration.project import DbtModel, FalDbt, FAL - -from dbt.contracts.results import RunStatus -from dbt.config.runtime import RuntimeConfig -from fal.dbt.integration.logger import LOGGER - -from fal.dbt.telemetry import telemetry - -from dbt.contracts.graph.nodes import ColumnInfo - - -class TimingType(Enum): - PRE = "pre" - POST = "post" - - def for_script(self): - if self == TimingType.PRE: - return "before" - elif self == TimingType.POST: - return "after" - else: - raise ValueError(f"Unknown timing type: {self}") - - def for_hook(self): - return self.value - - def __str__(self): - return self.for_hook() - - -class Hook: - path: str - arguments: Dict[str, Any] - - -@dataclass -class LocalHook(Hook): - path: str - arguments: Dict[str, Any] = field(default_factory=dict) - - -@dataclass -class IsolatedHook(Hook): - path: str - environment_name: str - arguments: Dict[str, Any] = field(default_factory=dict) - - -def _is_local_environment(environment_name: str) -> None: - return environment_name == "local" - - -def create_hook(raw_hook: Any, default_environment_name: Optional[str] = None) -> Hook: - if isinstance(raw_hook, str): - raw_hook = {"path": raw_hook} - - if not isinstance(raw_hook, dict): - raise ValueError(f"Unrecognized hook value: {raw_hook}") - - if "path" not in raw_hook: - raise ValueError(f"A hook must specify path.") - - environment_name = raw_hook.get("environment", default_environment_name) - if environment_name and not _is_local_environment(environment_name): - return IsolatedHook( - raw_hook["path"], - environment_name, - raw_hook.get("with", {}), - ) - else: - return LocalHook(raw_hook["path"], raw_hook.get("with", {})) - - -@dataclass -class CurrentAdapterResponse: - message: str - code: Optional[str] - rows_affected: Optional[int] - - -@dataclass -class CurrentModel: - name: str - alias: str - status: RunStatus - columns: Dict[str, ColumnInfo] - tests: List[Any] - meta: Dict[Any, Any] - is_incremental: str - adapter_response: Optional[CurrentAdapterResponse] - - -@dataclass -class CurrentTest: - name: str - model_name: str - column: str - status: str - - @property - @deprecated(details="Use 'model_name' instead") - def modelname(self): - return self.model_name - - -@dataclass -class ContextConfig: - target_path: Path - - def __init__(self, config: RuntimeConfig): - self.target_path = Path( - os.path.realpath(os.path.join(config.project_root, config.target_path)) - ) - - -@dataclass -class ContextTarget: - def __init__(self, config: RuntimeConfig): - self.profile_name = config.profile_name - self.name = config.target_name - self.threads = config.threads - self.type = config.credentials.type - self.database = config.credentials.database - self.schema = config.credentials.schema - - -@dataclass -class Context: - current_model: Union[CurrentModel, None] - config: ContextConfig - target: ContextTarget - _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None) - - @property - def arguments(self) -> Dict[str, Any]: - if self._arguments is None: - raise ValueError( - "'context.arguments' is only accessible from hooks, " - "not from scripts/models" - ) - return self._arguments - - -@dataclass(frozen=True, init=False) -class FalScript: - model: Optional[DbtModel] - path: Path - faldbt: FalDbt - hook_arguments: Optional[Dict[str, Any]] - is_hook: bool - timing_type: Optional[TimingType] - - def __init__( - self, - faldbt: FalDbt, - model: Optional[DbtModel], - path: Union[str, Path], - hook_arguments: 
Optional[Dict[str, Any]] = None, - is_hook: bool = False, - is_model: bool = False, - timing_type: Optional[TimingType] = None, - ): - # Necessary because of frozen=True - object.__setattr__(self, "model", model) - object.__setattr__( - self, "path", path if is_model else normalize_path(faldbt.scripts_dir, path) - ) - object.__setattr__(self, "faldbt", faldbt) - object.__setattr__(self, "hook_arguments", hook_arguments) - object.__setattr__(self, "is_hook", is_hook) - object.__setattr__(self, "timing_type", timing_type) - - self._telemetry() - - def _telemetry(self): - try: - _is_global = self.model is None - _is_hook = self.is_hook - _timing_type = str(self.timing_type) if self.timing_type else None - _is_model = self.model and self.model.python_model == self.path - _path_hash = hashlib.md5(str(self.path).encode()).hexdigest() - - _script_timing_desc = ( - self.timing_type.for_script() if self.timing_type else "error" - ) - _hook_timing_desc = ( - self.timing_type.for_hook() if self.timing_type else "error" - ) - - if _is_global: - _script_desc = f"{_script_timing_desc}-global" - # globals are not hooks nor scripts - _is_hook = None - - elif _is_model: - _script_desc = "fal-model" - _is_global = None - # models are not hooks nor scripts - _is_hook = None - - else: - if _is_hook: - _script_desc = f"{_hook_timing_desc}-hook" - else: - _script_desc = f"{_script_timing_desc}-script" - - telemetry.log_api( - action="falscript_initialized", - additional_props={ - "is_global": _is_global, - "is_hook": _is_hook, - "is_model": _is_model, - "script_timing_type": _timing_type, - "script_desc": _script_desc, - "script_path": _path_hash, - }, - ) - except: - # Ignore telemetry errors - pass - - @classmethod - def from_hook( - cls, faldbt: FalDbt, model: DbtModel, hook: Hook, timing_type: TimingType - ): - """ - Creates a FalScript from a hook - """ - assert isinstance(hook, LocalHook) - return cls( - faldbt=faldbt, - model=model, - path=hook.path, - hook_arguments=hook.arguments, - is_hook=True, - timing_type=timing_type, - ) - - @classmethod - def model_script(cls, faldbt: FalDbt, model: DbtModel): - assert model.python_model, "path for Python models must be set" - return FalScript( - faldbt=faldbt, model=model, path=model.python_model, is_model=True - ) - - def exec(self): - """ - Executes the script - """ - # Enable local imports - try: - source_code = python_from_file(self.path) - program = compile(source_code, self.path, "exec") - - exec_globals = { - "__name__": "__main__", - "context": self._build_script_context(), - "ref": self.faldbt.ref, - "source": self.faldbt.source, - "list_models": self.faldbt.list_models, - "list_models_ids": self.faldbt.list_models_ids, - "list_sources": self.faldbt.list_sources, - "list_features": self.faldbt.list_features, - "execute_sql": self.faldbt.execute_sql, - } - - if not self.is_hook: - exec_globals["write_to_source"] = self.faldbt.write_to_source - - if self.model is not None: - # Hard-wire the model - exec_globals["write_to_model"] = partial( - self.faldbt.write_to_model, - target_1=self.model.name, - target_2=None, - ) - - else: - exec_globals["write_to_source"] = _not_allowed_function_maker( - "write_to_source" - ) - exec_globals["write_to_model"] = _not_allowed_function_maker( - "write_to_model" - ) - exec(program, exec_globals) - finally: - pass - - @property - def relative_path(self): - if self.is_model: - return self.path.relative_to(self.faldbt.project_dir) - else: - return self.path.relative_to(self.faldbt.scripts_dir) - - @property - def id(self): 
- if self.is_model: - return f"(model: {self.relative_path})" - else: - return f"({self.model_name}, {self.relative_path})" - - @property - def is_global(self): - return self.model is None - - @property - def is_model(self): - if self.model is not None and self.model.python_model is not None: - return self.model.python_model == self.path - - @property - def model_name(self): - return "" if self.is_global else self.model.name # type: ignore - - def _build_script_context(self) -> Context: - config: RuntimeConfig = self.faldbt._config - context_config = ContextConfig(config) - target = ContextTarget(config) - - if self.is_global: - return Context(current_model=None, target=target, config=context_config) - - model: DbtModel = self.model # type: ignore - - meta = model.meta or {} - _del_key(meta, FAL) - - tests = _process_tests(model.tests) - - current_adapter_response = None - if model.adapter_response: - current_adapter_response = CurrentAdapterResponse( - message=str(model.adapter_response), - code=model.adapter_response.code, - rows_affected=model.adapter_response.rows_affected, - ) - - current_model = CurrentModel( - name=model.name, - alias=model.alias, - status=model.status, - columns=model.columns, - tests=tests, - meta=meta, - is_incremental=model.is_incremental, - adapter_response=current_adapter_response, - ) - - return Context( - current_model=current_model, - target=target, - config=context_config, - _arguments=self.hook_arguments, - ) - - -def _del_key(dict: Dict[str, Any], key: str): - try: - del dict[key] - except KeyError: - pass - - -def _process_tests(tests: List[Any]): - return list( - map( - lambda test: CurrentTest( - name=test.name, - column=test.column, - status=test.status, - model_name=test.model, - ), - tests, - ) - ) - - -def python_from_file(path: Path) -> str: - with open(path) as file: - raw_source_code = file.read() - if path.suffix == ".ipynb": - raw_source_code = _process_ipynb(raw_source_code) - return raw_source_code - - -def _process_ipynb(raw_source_code: str) -> str: - def strip_magic(source: List[str]) -> List[str]: - NOTEBOOK_LIB = "faldbt.magics" - return [item for item in source if item[0] != "%" and NOTEBOOK_LIB not in item] - - ipynb_struct = json.loads(raw_source_code) - - script_list = [] - for cell in ipynb_struct["cells"]: - if cell["cell_type"] == "code": - source = strip_magic(cell["source"]) - script_list.append("".join(source)) - - joined_script = "\n #cell \n".join(script_list) - - LOGGER.debug(f"Joined .ipynb cells to:\n{joined_script}") - - return joined_script - - -def _not_allowed_function_maker(function_name: str) -> Callable[[Any], None]: - def not_allowed_function(*args, **kwargs): - raise Exception( - ( - f"{function_name} is not allowed in hooks." - " Consider using a Python model." 
- ) - ) - - return not_allowed_function diff --git a/projects/adapter/src/fal/dbt/feature_store/__init__.py b/projects/adapter/src/fal/dbt/feature_store/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/src/fal/dbt/feature_store/feature.py b/projects/adapter/src/fal/dbt/feature_store/feature.py deleted file mode 100644 index 8aa5e572..00000000 --- a/projects/adapter/src/fal/dbt/feature_store/feature.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Classes and functions for managing features.""" -from dataclasses import dataclass - - -@dataclass -class Feature: - """Feature is a column in a dbt model.""" - - model: str - column: str - entity_column: str - timestamp_column: str - description: str - - def get_name(self) -> str: - """Return a generated unique name for this feature.""" - return f"{self.model}.{self.column}" diff --git a/projects/adapter/src/fal/dbt/integration/__init__.py b/projects/adapter/src/fal/dbt/integration/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/src/fal/dbt/integration/lib.py b/projects/adapter/src/fal/dbt/integration/lib.py deleted file mode 100644 index 7aeac7b5..00000000 --- a/projects/adapter/src/fal/dbt/integration/lib.py +++ /dev/null @@ -1,778 +0,0 @@ -# NOTE: INSPIRED IN https://github.com/dbt-labs/dbt-core/blob/43edc887f97e359b02b6317a9f91898d3d66652b/core/dbt/lib.py -from contextlib import contextmanager -import six -from enum import Enum -from dataclasses import dataclass -from uuid import uuid4 -from typing import Dict, Iterator, List, Optional, Tuple -from urllib.parse import quote_plus -import threading - -import dbt.flags as flags -import dbt.adapters.factory as adapters_factory - -from dbt.contracts.connection import AdapterResponse -from dbt.adapters.sql import SQLAdapter -from dbt.adapters.base import BaseRelation, BaseAdapter, BaseConnectionManager -from dbt.config import RuntimeConfig - -from dbt.contracts.graph.nodes import ResultNode - -import pandas as pd -from pandas.io import sql as pdsql - -import agate -import sqlalchemy -from sqlalchemy.sql.ddl import CreateTable -from sqlalchemy.sql import Insert - -from dbt.contracts.sql import RemoteRunResult - - -from fal.dbt.integration import parse -from fal.dbt.integration.logger import LOGGER - - -class WriteModeEnum(Enum): - APPEND = "append" - OVERWRITE = "overwrite" - - -@dataclass -class FlagsArgs: - project_dir: str - profiles_dir: str - threads: Optional[int] - profile: Optional[str] - target: Optional[str] - vars: Optional[Dict[str, str]] - use_colors: Optional[bool] - - -def initialize_dbt_flags( - profiles_dir: str, - project_dir: str, - profile_target: Optional[str], - vars: Optional[dict], - threads: Optional[int], -): - """ - Initializes the flags module from dbt, since it's accessed from around their code. 
- """ - args = FlagsArgs( - use_colors=None, - project_dir=project_dir, - profiles_dir=profiles_dir, - # TODO: accept profile flag too - profile=None, - target=profile_target, - vars=vars, - threads=threads, - ) - - flags.set_from_args(args, None) - - # Set invocation id - import dbt.events.functions as events_functions - - events_functions.set_invocation_id() - - return flags.get_flags() - - -def register_adapters(config: RuntimeConfig): - # HACK: to avoid 'Node package named not found' - adapters_factory.reset_adapters() - adapters_factory.register_adapter(config) - - -# NOTE: Once we get an adapter, we must call `connection_for` or `connection_named` to use it -def _get_adapter( - project_dir: str, - profiles_dir: str, - profile_target: str, - *, - config: Optional[RuntimeConfig] = None, -) -> SQLAdapter: - if config is None: - config = parse.get_dbt_config( - project_dir=project_dir, - profiles_dir=profiles_dir, - profile_target=profile_target, - ) - adapter: SQLAdapter = adapters_factory.get_adapter(config) # type: ignore - - return adapter - - -# HACK: to avoid https://github.com/uqfoundation/dill/issues/321 -# When the lock is unpickled in the other thread, it is initialized as locked -if hasattr(threading, "_PyRLock"): - _lock: threading.RLock = threading._PyRLock() # type: ignore -else: - _lock = threading.RLock() - -@contextmanager -def _cache_lock(info: str = ""): - global _lock - - operationId = uuid4() - LOGGER.debug("Locking {} {}", operationId, info) - - with _lock: - yield - - -def _connection_name(prefix: str, obj, _hash: bool = True): - # HACK: we need to include uniqueness (UUID4) to avoid clashes - return f"{prefix}:{hash(str(obj)) if _hash else obj}:{uuid4()}" - - -def _execute_sql( - adapter: SQLAdapter, - sql: str, - *, - new_conn=True, -) -> Tuple[AdapterResponse, pd.DataFrame]: - if adapter.type() == "bigquery": - return _bigquery_execute_sql(adapter, sql, new_conn) - - if adapter.type() == "snowflake": - return _snowflake_execute_sql(adapter, sql, new_conn) - - with _existing_or_new_connection( - adapter, _connection_name("execute_sql", sql), new_conn - ) as is_new: - exec_response: Tuple[AdapterResponse, agate.Table] = adapter.execute( - sql, auto_begin=is_new, fetch=True - ) - response, agate_table = exec_response - - if is_new: - adapter.commit_if_has_connection() - - return response, _agate_table_to_df(agate_table) - - -def _clear_relations_cache(adapter: BaseAdapter): - # HACK: Sometimes we cache an incomplete cache or create stuff without the cache noticing. - # Some adapters work without this. We should separate adapter solutions like dbt. 
- manifest = parse.get_dbt_manifest(adapter.config) - adapter.set_relations_cache(manifest, True) - - -def _get_target_relation( - adapter: SQLAdapter, target: ResultNode -) -> Optional[BaseRelation]: - with adapter.connection_named(_connection_name("relation", target)): - with _cache_lock("_get_target_relation"): - _clear_relations_cache(adapter) - - # This ROLLBACKs so it has to be a new connection - return adapter.get_relation( - target.database, target.schema, target.identifier - ) - - -def compile_sql( - project_dir: str, - profiles_dir: str, - profile_target: str, - sql: str, - *, - config: Optional[RuntimeConfig] = None, - adapter: Optional[SQLAdapter] = None, -): - from dbt.parser.manifest import process_node - from dbt.task.sql import SqlCompileRunner - from dbt.parser.sql import SqlBlockParser - - if config is None: - config = parse.get_dbt_config( - project_dir=project_dir, - profiles_dir=profiles_dir, - profile_target=profile_target, - ) - - if adapter is None: - adapter = _get_adapter(project_dir, profiles_dir, profile_target, config=config) - - manifest = parse.get_dbt_manifest(config) - - block_parser = SqlBlockParser( - project=config, - manifest=manifest, - root_project=config, - ) - - sql_node = block_parser.parse_remote(sql, _connection_name("compile_sql", sql)) - process_node(config, manifest, sql_node) - runner = SqlCompileRunner(config, adapter, sql_node, 1, 1) - result: RemoteRunResult = runner.safe_run(manifest) - return result - - -def execute_sql( - project_dir: str, - profiles_dir: str, - profile_target: str, - sql: str, - *, - config: Optional[RuntimeConfig] = None, - adapter: Optional[SQLAdapter] = None, -) -> pd.DataFrame: - if adapter is None: - adapter = _get_adapter(project_dir, profiles_dir, profile_target, config=config) - - return _execute_sql(adapter, sql)[1] - - -def _agate_table_to_df(table: agate.Table) -> pd.DataFrame: - column_names = list(table.column_names) - rows = [list(row) for row in table] - - # TODO: better type matching? 
- return pd.DataFrame.from_records(data=rows, columns=column_names, coerce_float=True) - - -def fetch_target( - project_dir: str, - profiles_dir: str, - target: ResultNode, - profile_target: str, - *, - config: Optional[RuntimeConfig] = None, - adapter: Optional[SQLAdapter] = None, -) -> pd.DataFrame: - if adapter is None: - adapter = _get_adapter(project_dir, profiles_dir, profile_target, config=config) - - relation = _get_target_relation(adapter, target) - - if relation is None: - raise Exception(f"Could not get relation for '{target.unique_id}'") - - return _fetch_relation(adapter, relation) - - -def _fetch_relation(adapter: SQLAdapter, relation: BaseRelation) -> pd.DataFrame: - if adapter.type() == "postgres": - return _sqlalchemy_engine_fetch_relation(adapter, relation) - - query = f"SELECT * FROM {relation}" - return _execute_sql(adapter, query)[1] - - -def _build_table_from_parts( - adapter: SQLAdapter, - database: Optional[str], - schema: Optional[str], - identifier: Optional[str], -) -> BaseRelation: - from dbt.contracts.relation import Path, RelationType - - path = Path(database, schema, identifier) - - # NOTE: assuming we want TABLE relation if not found - return adapter.Relation(path, type=RelationType.Table) - - -def _build_table_from_target(adapter: SQLAdapter, target: ResultNode): - return _build_table_from_parts( - adapter, target.database, target.schema, target.identifier - ) - - -def overwrite_target( - data: pd.DataFrame, - project_dir: str, - profiles_dir: str, - profile_target: str, - target: ResultNode, - *, - dtype=None, - config: Optional[RuntimeConfig] = None, - adapter: Optional[SQLAdapter] = None, -) -> AdapterResponse: - if not adapter: - adapter = _get_adapter(project_dir, profiles_dir, profile_target, config=config) - - relation = _build_table_from_target(adapter, target) - - if adapter.type() == "bigquery": - return _bigquery_write_relation( - adapter, - data, - relation, - mode=WriteModeEnum.OVERWRITE, - fields_schema=dtype, - ) - - # With some writing functions, it could be called twice at the same time for the same identifier - # so we avoid overwriting temporal tables by attaching uniqueness to the name - unique_str = str(uuid4())[0:8] - temporal_relation = _build_table_from_parts( - adapter, - relation.database, - relation.schema, - f"{relation.identifier}__f__{unique_str}", - ) - - results = _write_relation(adapter, data, temporal_relation, dtype=dtype) - - try: - _replace_relation(adapter, relation, temporal_relation) - - return results - except: - _drop_relation(adapter, temporal_relation) - raise - - -def write_target( - data: pd.DataFrame, - project_dir: str, - profiles_dir: str, - profile_target: str, - target: ResultNode, - *, - dtype=None, - config: Optional[RuntimeConfig] = None, - adapter: Optional[SQLAdapter] = None, -) -> AdapterResponse: - if adapter is None: - adapter = _get_adapter(project_dir, profiles_dir, profile_target, config=config) - - relation = _build_table_from_target(adapter, target) - - return _write_relation(adapter, data, relation, dtype=dtype) - - -def _write_relation( - adapter: SQLAdapter, - data: pd.DataFrame, - relation: BaseRelation, - *, - dtype=None, -) -> AdapterResponse: - if adapter.type() == "fal": - adapter = adapter._db_adapter - - if adapter.type() == "bigquery": - return _bigquery_write_relation( - adapter, - data, - relation, - mode=WriteModeEnum.APPEND, - fields_schema=dtype, - ) - - if adapter.type() == "snowflake": - return _snowflake_write_relation( - adapter, - data, - relation, - ) - - if adapter.type() 
== "postgres": - return _sqlalchemy_engine_write_relation(adapter, data, relation, dtype=dtype) - - database, schema, identifier = ( - relation.database, - relation.schema, - relation.identifier, - ) - - engine = _alchemy_mock_engine(adapter) - pddb = pdsql.SQLDatabase(engine, schema=schema) - pdtable = pdsql.SQLTable(identifier, pddb, data, index=False, dtype=dtype) - - alchemy_table: sqlalchemy.Table = pdtable.table.to_metadata(pdtable.pd_sql.meta) - - # HACK: athena needs "location" property that is not passed by mock adapter - if adapter.type() == "athena": - s3_dir = adapter.config.credentials.s3_staging_dir - alchemy_table.dialect_options["awsathena"] = { - "location": f"{s3_dir}{alchemy_table.schema}/{alchemy_table.name}/", - "tblproperties": None, - "compression": None, - "bucket_count": None, - "row_format": None, - "serdeproperties": None, - "file_format": None, - "partition": None, - "cluster": None, - } - - column_names: List[str] = list(data.columns) - - rows = data.to_records(index=False) - row_dicts = list(map(lambda row: dict(zip(column_names, row)), rows)) - - create_stmt = CreateTable(alchemy_table, if_not_exists=True).compile( - bind=engine, compile_kwargs={"literal_binds": True} - ) - _execute_sql(adapter, six.text_type(create_stmt).strip()) - - insert_stmt = Insert(alchemy_table, values=row_dicts).compile( - bind=engine, compile_kwargs={"literal_binds": True} - ) - response, _ = _execute_sql(adapter, six.text_type(insert_stmt).strip()) - return response - - -def _replace_relation( - adapter: SQLAdapter, - original_relation: BaseRelation, - new_relation: BaseRelation, -): - with adapter.connection_named( - _connection_name("replace_relation", original_relation, _hash=False) - ): - with _cache_lock("_replace_relation"): - adapter.connections.begin() - - _clear_relations_cache(adapter) - - if adapter.type() not in ("bigquery", "snowflake"): - # This is a 'DROP ... 
IF EXISTS', so it always works - adapter.drop_relation(original_relation) - - if adapter.type() == "athena": - # HACK: athena doesn't support renaming tables, we do it manually - create_stmt = f"create table {original_relation} as select * from {new_relation} with data" - _execute_sql( - adapter, - six.text_type(create_stmt).strip(), - new_conn=False, - ) - adapter.drop_relation(new_relation) - elif adapter.type() == "bigquery": - create_stmt = f"create or replace table {original_relation} as select * from {new_relation}" - _bigquery_execute_sql( - adapter, - six.text_type(create_stmt).strip(), - new_conn=False, - ) - adapter.drop_relation(new_relation) - elif adapter.type() == "snowflake": - create_stmt = ( - f"create or replace table {original_relation} clone {new_relation}" - ) - _snowflake_execute_sql( - adapter=adapter, - sql=six.text_type(create_stmt).strip(), - new_conn=False, - fetch=False, # Avoid trying to fetch as pandas - ) - adapter.drop_relation(new_relation) - else: - adapter.rename_relation(new_relation, original_relation) - - adapter.commit_if_has_connection() - - -def _drop_relation(adapter: SQLAdapter, relation: BaseRelation): - with adapter.connection_named(_connection_name("drop_relation", relation)): - with _cache_lock("_drop_relation"): - adapter.connections.begin() - - _clear_relations_cache(adapter) - - adapter.drop_relation(relation) - - adapter.commit_if_has_connection() - - -def _alchemy_mock_engine(adapter: SQLAdapter): - url_string = f"{adapter.type()}://" - if adapter.type() == "athena": - SCHEMA_NAME = adapter.config.credentials.schema - S3_STAGING_DIR = adapter.config.credentials.s3_staging_dir - AWS_REGION = adapter.config.credentials.region_name - - conn_str = ( - "awsathena+rest://athena.{region_name}.amazonaws.com:443/" - "{schema_name}?s3_staging_dir={s3_staging_dir}&work_group=primary" - ) - - url_string = conn_str.format( - region_name=AWS_REGION, - schema_name=SCHEMA_NAME, - s3_staging_dir=quote_plus(S3_STAGING_DIR), - ) - - # TODO: add special cases as needed - - def null_dump(sql, *multiparams, **params): - pass - - return sqlalchemy.create_mock_engine(url_string, executor=null_dump) - - -def _create_engine_from_connection(adapter: SQLAdapter): - if adapter.type() == "postgres": - url_string = "postgresql+psycopg2://" - else: - # TODO: add special cases as needed - LOGGER.warn("No explicit url string for adapter {}", adapter.type()) - url_string = f"{adapter.type()}://" - - connection = adapter.connections.get_thread_connection() - return sqlalchemy.create_engine(url_string, creator=lambda: connection.handle) - - -@contextmanager -def _existing_or_new_connection( - adapter: BaseAdapter, - name: str, - new_conn: bool, # TODO: new_conn solution feels hacky -) -> Iterator[bool]: - if new_conn: - with adapter.connection_named(name): - yield True - else: - yield False - - -# Adapter: salalchemy connection -def _sqlalchemy_engine_fetch_relation(adapter: SQLAdapter, relation: BaseRelation): - # TODO: use database, just using schema and identifier - database, schema, identifier = ( - relation.database, - relation.schema, - relation.identifier, - ) - - assert identifier - - with _existing_or_new_connection( - adapter, _connection_name("write_target", relation, _hash=False), True - ): - engine = _create_engine_from_connection(adapter) - # TODO: use database, just using schema and identifier - return pd.read_sql_table( - table_name=identifier, - schema=schema, - con=engine, - ) - - -def _sqlalchemy_engine_write_relation( - adapter: SQLAdapter, - data: 
pd.DataFrame, - relation: BaseRelation, - *, - dtype=None, -): - # TODO: use database, just using schema and identifier - database, schema, identifier = ( - relation.database, - relation.schema, - relation.identifier, - ) - - assert identifier - - with _existing_or_new_connection( - adapter, _connection_name("write_target", relation, _hash=False), True - ): - engine = _create_engine_from_connection(adapter) - - rows_affected = data.to_sql( - name=identifier, - con=engine, - schema=schema, - if_exists="append", - index=False, - dtype=dtype, - ) - - return AdapterResponse("OK", rows_affected=rows_affected) - - -# Adapter: BigQuery -def _bigquery_execute_sql( - adapter: BaseAdapter, sql: str, new_conn: bool -) -> Tuple[AdapterResponse, pd.DataFrame]: - assert adapter.type() == "bigquery" - - import google.cloud.bigquery as bigquery - - with _existing_or_new_connection( - adapter, _connection_name("bigquery:execute_sql", sql), new_conn - ): - connection_manager: BaseConnectionManager = adapter.connections # type: ignore - client: bigquery.Client = connection_manager.get_thread_connection().handle # type: ignore - - job = client.query(sql) - df = job.to_dataframe() - if job.destination: - query_table = client.get_table(job.destination) - num_rows = query_table.num_rows - else: - num_rows = df.size - - # TODO: better AdapterResponse - return AdapterResponse("OK", rows_affected=num_rows), df - - -def _bigquery_write_relation( - adapter: SQLAdapter, - data: pd.DataFrame, - relation: BaseRelation, - *, - mode: WriteModeEnum, - fields_schema: Optional[List[dict]] = None, -) -> AdapterResponse: - import google.cloud.bigquery as bigquery - from google.cloud.bigquery.job import WriteDisposition - from dbt.adapters.bigquery import BigQueryAdapter, BigQueryConnectionManager - from dbt.semver import VersionSpecifier - - assert adapter.type() == "bigquery" - - _adapter: BigQueryAdapter = adapter # type: ignore - - disposition = ( - WriteDisposition.WRITE_TRUNCATE - if WriteModeEnum.OVERWRITE == mode - else WriteDisposition.WRITE_APPEND - ) - - project: str = relation.database # type: ignore - dataset: str = relation.schema # type: ignore - table: str = relation.identifier # type: ignore - - with _adapter.connection_named( - _connection_name("bigquery:write_relation", relation, _hash=False) - ): - connection_manager: BigQueryConnectionManager = _adapter.connections - conn = connection_manager.get_thread_connection() - client: bigquery.Client = conn.handle # type: ignore - - table_ref = bigquery.TableReference( - bigquery.DatasetReference(project, dataset), table - ) - - job_config = bigquery.LoadJobConfig( - # Specify a (partial) schema. All columns are always written to the - # table. The schema is used to assist in data type definitions. 
- schema=[ - # field_types is a list of API-representation of BigQuery.FieldSchema - bigquery.SchemaField.from_api_repr(field) - for field in (fields_schema or []) - ], - source_format="PARQUET", - write_disposition=disposition, - ) - - with connection_manager.exception_handler("START JOB"): - job = client.load_table_from_dataframe( - data, table_ref, job_config=job_config - ) - - from dbt.adapters.bigquery.__version__ import version as bigquery_version - - ADAPTER_VCURRENT = VersionSpecifier.from_version_string(bigquery_version) - # https://github.com/dbt-labs/dbt-bigquery/commit/141b86749df813cf3a3a90a90e7a7dfc401ba9b0 - if ADAPTER_VCURRENT.compare(VersionSpecifier.from_version_string("1.1.0")) >= 0: - timeout = connection_manager.get_job_execution_timeout_seconds(conn) or 300 - else: - timeout = connection_manager.get_timeout(conn) or 300 - - with connection_manager.exception_handler("LOAD TABLE"): - _adapter.poll_until_job_completes(job, timeout) - - query_table = client.get_table(job.destination) - num_rows = query_table.num_rows - - # TODO: better AdapterResponse - return AdapterResponse("OK", rows_affected=num_rows) - - -# Adapter: Snowflake -def _snowflake_execute_sql( - adapter: BaseAdapter, - sql: str, - new_conn: bool, - *, - fetch: bool = True, -) -> Tuple[AdapterResponse, pd.DataFrame]: - assert adapter.type() == "snowflake" - - import snowflake.connector as snowflake - from dbt.adapters.snowflake import SnowflakeConnectionManager - - with _existing_or_new_connection( - adapter, _connection_name("snowflake:execute_sql", sql), new_conn - ): - connection_manager: SnowflakeConnectionManager = adapter.connections # type: ignore - conn: snowflake.SnowflakeConnection = connection_manager.get_thread_connection().handle # type: ignore - - with connection_manager.exception_handler("EXECUTE SQL"): - cur = conn.cursor() - - cur.execute(sql) - - # Use snowflake-dbt function directly - res = connection_manager.get_response(cur) - - df = pd.DataFrame({}) - if fetch: - df: pd.DataFrame = cur.fetch_pandas_all() - - # HACK: manually parse ARRAY and VARIANT since they are returned as strings right now - # Related issue: https://github.com/snowflakedb/snowflake-connector-python/issues/544 - for desc in cur.description: - # 5=VARIANT, 10=ARRAY -- https://docs.snowflake.com/en/user-guide/python-connector-api.html#type-codes - if desc.type_code in [5, 10]: - import json - - df[desc.name] = df[desc.name].map(lambda v: json.loads(v)) - - return res, df - - -def _snowflake_write_relation( - adapter: SQLAdapter, - data: pd.DataFrame, - relation: BaseRelation, -) -> AdapterResponse: - from dbt.adapters.snowflake import SnowflakeAdapter, SnowflakeConnectionManager - import snowflake.connector as snowflake - import snowflake.connector.pandas_tools as snowflake_pandas - - assert adapter.type() == "snowflake" - - _adapter: SnowflakeAdapter = adapter # type: ignore - - database: str = relation.database # type: ignore - schema: str = relation.schema # type: ignore - table: str = relation.identifier # type: ignore - - with _adapter.connection_named( - _connection_name("snowflake:write_relation", relation, _hash=False) - ): - connection_manager: SnowflakeConnectionManager = _adapter.connections # type: ignore - conn: snowflake.SnowflakeConnection = connection_manager.get_thread_connection().handle # type: ignore - - with connection_manager.exception_handler("LOAD TABLE"): - success, chunks, num_rows, output = snowflake_pandas.write_pandas( - conn, - data, - table_name=table, - database=database, - 
schema=schema, - auto_create_table=True, - quote_identifiers=False, - ) - if not success: - # In case the failure does not raise by itself - # I have not been able to reproduce such a case - from dbt.exceptions import DatabaseException - - raise DatabaseException(output) - - # TODO: better AdapterResponse - return AdapterResponse(str(output), rows_affected=num_rows) diff --git a/projects/adapter/src/fal/dbt/integration/logger.py b/projects/adapter/src/fal/dbt/integration/logger.py deleted file mode 100644 index 705481df..00000000 --- a/projects/adapter/src/fal/dbt/integration/logger.py +++ /dev/null @@ -1,102 +0,0 @@ -from contextlib import contextmanager -import logging -import datetime -import sys - -import dbt.ui as ui -from dbt.logger import log_manager as dbt_log_manager - -TRACE = logging.DEBUG - 5 -logging.addLevelName(TRACE, "TRACE") - -class FalLogger: - def __init__(self): - self._stdout_handler = logging.StreamHandler(sys.stdout) - self._stdout_handler.setLevel(logging.INFO) - - self._logger = logging.Logger("fal", logging.INFO) - self._logger.addHandler(self._stdout_handler) - - def __getstate__(self): - # Don't pickle the logger - d = self.__dict__.copy() - d['_logger'] = d['_logger'].name - return d - - def __setstate__(self, d): - # Set logger when unpickling - d['_logger'] = logging.Logger( - d['_logger'] if isinstance(d['_logger'], str) else "fal", logging.INFO) - self.__dict__.update(d) - - def set_level(self, level: int): - self._logger.setLevel(level) - - @property - def level(self): - return self._logger.level - - def log(self, level: int, msg: str, *args, **kwargs): - now = datetime.datetime.now(datetime.timezone.utc) - if self.level <= logging.DEBUG: - prefix = now.strftime(r"%H:%M:%S.%f") - - prefix += f" [{logging.getLevelName(level).lower()[0:5].ljust(5)}]" - - # Spaces to match the spacing of dbt's debug logs, like the following - # - # 21:32:31.816189 [debug] [MainThread]: Flushing usage events - # 21:32:32.530385 [error] [fal ]: Error in script (...): - # 21:32:34.554038 [debug] [Thread-1 ]: Opening a new connection, currently in state closed - prefix += " [fal ]:" - else: - prefix = now.strftime("%H:%M:%S") - prefix += " [fal]:" - - self._logger.log(level, f"{prefix} {_prepare_msg(msg, *args, **kwargs)}") - - def trace(self, msg: str, *args, **kwargs): - self.log(TRACE, msg, *args, **kwargs) - - def debug(self, msg: str, *args, **kwargs): - self.log(logging.DEBUG, msg, *args, **kwargs) - - def info(self, msg: str, *args, **kwargs): - self.log(logging.INFO, msg, *args, **kwargs) - - def warning(self, msg: str, *args, **kwargs): - self.log(logging.WARNING, ui.warning_tag(msg), *args, **kwargs) - - def warn(self, msg: str, *args, **kwargs): - # Alias to warning - return self.warning(msg, *args, **kwargs) - - def error(self, msg: str, *args, **kwargs): - self.log(logging.ERROR, msg, *args, **kwargs) - -class LogManager(): - def __init__(self, dbt_log_manager): - self._dbt_log_manager = dbt_log_manager - - @contextmanager - def applicationbound(self): - # TODO: probably where we can add threadding information if we want it for debug logs - with self._dbt_log_manager.applicationbound(): - yield - - def set_debug(self): - self._dbt_log_manager.set_debug() - LOGGER.set_level(logging.DEBUG) - - def set_trace(self): - self._dbt_log_manager.set_debug() - LOGGER.set_level(TRACE) - -def _prepare_msg(msg: str, *args, **kwargs): - if args or kwargs: - return msg.format(*args, **kwargs) - else: - return msg - -LOGGER = FalLogger() -log_manager = LogManager(dbt_log_manager) 
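A minimal sketch of how the logger module deleted above was driven. applicationbound(), set_debug(), and the str.format-style placeholders all correspond to the removed code; the messages and the sample output line are illustrative.

    from fal.dbt.integration.logger import LOGGER, log_manager

    with log_manager.applicationbound():
        log_manager.set_debug()               # also switches dbt's log manager to debug
        LOGGER.info("Running script for {}", "my_model")  # interpolated via str.format
        LOGGER.warning("No models selected")  # wrapped in dbt's ui.warning_tag
    # At DEBUG level the prefix mimics dbt's own debug lines, roughly:
    # 21:32:32.530385 [info ] [fal       ]: Running script for my_model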
diff --git a/projects/adapter/src/fal/dbt/integration/magics.py b/projects/adapter/src/fal/dbt/integration/magics.py deleted file mode 100644 index bbd50ae5..00000000 --- a/projects/adapter/src/fal/dbt/integration/magics.py +++ /dev/null @@ -1,62 +0,0 @@ -from IPython.core.magic import register_line_magic, needs_local_scope -from functools import partial -from fal.dbt import FalDbt - - -@register_line_magic -@needs_local_scope -def init_fal(line="", local_ns={}): - ''' - Init fal magic variables. Must provide project_dir and profiles_dir. - - Example: - """ - from fal.dbt.integration.magics import init_fal - - %init_fal project_dir=/my_project_dir profiles_dir=/my_profiles_dir default_model_name=my_model - """ - ''' - args = dict([arg.split("=") for arg in line.split()]) - if not args.get("project_dir") or not args.get("profiles_dir"): - raise Exception( - """ - Both project_dir and profiles_dir need to be provided: - Example: %init_fal project_dir=/my_project_dir profiles_dir=/my_profiles_dir - """ - ) - - faldbt = FalDbt(args["project_dir"], args["profiles_dir"]) - - fal_globals = { - "ref": faldbt.ref, - "source": faldbt.source, - "write_to_source": faldbt.write_to_source, - "list_models": faldbt.list_models, - "list_models_ids": faldbt.list_models_ids, - "list_sources": faldbt.list_sources, - "list_features": faldbt.list_features, - "execute_sql": faldbt.execute_sql, - } - - if args.get("default_model_name"): - fal_globals["write_to_model"] = partial( - faldbt.write_to_model, - target_1=args.get("default_model_name"), - target_2=None, - ) - - else: - fal_globals["write_to_model"] = _raise_no_model_exception - - local_ns.update(fal_globals) - - -def _raise_no_model_exception(): - raise Exception( - ''' - Model not found. Please provide a default model name. 
Example: - """ - %init_fal project_dir=/my_project_dir profiles_dir=/my_profiles_dir default_model_name=my_model - """ - ''' - ) diff --git a/projects/adapter/src/fal/dbt/integration/parse.py b/projects/adapter/src/fal/dbt/integration/parse.py deleted file mode 100644 index ba211bb0..00000000 --- a/projects/adapter/src/fal/dbt/integration/parse.py +++ /dev/null @@ -1,261 +0,0 @@ -import os -from dataclasses import dataclass -import glob -from pathlib import Path -from typing import Any, List, Dict, Optional, Union, TYPE_CHECKING - -from dbt.contracts.project import Project as ProjectContract -from dbt.config import RuntimeConfig, Project -from dbt.config.utils import parse_cli_vars as dbt_parse_cli_vars -from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.results import RunResultsArtifact, FreshnessExecutionResultArtifact -from dbt.contracts.project import UserConfig -from dbt.config.profile import read_user_config - -from dbt.exceptions import IncompatibleSchemaError, DbtRuntimeError - -from fal.dbt.utils import cache_static - -from fal.dbt.integration.logger import LOGGER -from fal.dbt.integration.utils.yaml_helper import load_yaml -from fal.dbt.telemetry import telemetry - -if TYPE_CHECKING: - from fal.dbt.packages.environments import BaseEnvironment - -FAL_SCRIPTS_PATH = "fal-scripts-path" -FAL_MODELS_PATHS = "fal-models-paths" - - -class FalParseError(Exception): - pass - - -def get_dbt_user_config(profiles_dir: str) -> UserConfig: - return read_user_config(profiles_dir) - - -@dataclass -class RuntimeArgs: - project_dir: str - profiles_dir: str - threads: Optional[int] - single_threaded: bool - profile: Optional[str] - target: Optional[str] - vars: Dict[str, str] - - -def load_dbt_project_contract(project_dir: str) -> ProjectContract: - partial_project = Project.partial_load(project_dir) - contract = ProjectContract.from_dict(partial_project.project_dict) - if not hasattr(contract, "model_paths") or contract.model_paths is None: - setattr(contract, "model_paths", contract.source_paths) - if not hasattr(contract, "seed_paths") or contract.seed_paths is None: - setattr(contract, "seed_paths", contract.data_paths) - return contract - - -def get_dbt_config( - *, - project_dir: str, - profiles_dir: str, - profile_target: Optional[str] = None, - threads: Optional[int] = None, - profile: Optional[str] = None, - args_vars: str = "{}", -) -> RuntimeConfig: - # Construct a phony config - import os - - vars = get_vars_dict(project_dir, args_vars) - args = RuntimeArgs( - project_dir=project_dir, - profiles_dir=profiles_dir, - threads=threads, - single_threaded=False, - profile=profile, - target=profile_target, - vars=vars, - ) - - if project_dir and not "PYTEST_CURRENT_TEST" in os.environ: - # HACK: initializing dbt-fal requires cwd to be project_dir - # TODO: this doesn't work in pytest + Github Actions - owd = os.getcwd() - os.chdir(project_dir) - config = RuntimeConfig.from_args(args) - os.chdir(owd) - else: - config = RuntimeConfig.from_args(args) - - # HACK: issue in dbt-core 1.5.0 https://github.com/dbt-labs/dbt-core/issues/7465 - env_target_path = os.getenv("DBT_TARGET_PATH") - if env_target_path: - config.target_path = env_target_path - # TODO: should we check flags too? 
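The working-directory dance inside get_dbt_config above can be written as a small context manager. This is a sketch under the same constraint (RuntimeConfig.from_args resolving paths against the current working directory), with the exception safety the inline version lacked:

    import os
    from contextlib import contextmanager

    @contextmanager
    def chdir(path: str):
        owd = os.getcwd()
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(owd)  # restore the old cwd even if from_args raises

    # with chdir(project_dir):
    #     config = RuntimeConfig.from_args(args)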
- - return config - - -def get_vars_dict(project_dir: str, args_vars: str) -> Dict[str, Any]: - project_contract = load_dbt_project_contract(project_dir) - - # NOTE: This happens usually inside unit tests - vars = (project_contract is not None and project_contract.vars) or {} - cli_vars = parse_cli_vars(args_vars) - - # cli_vars have higher priority - return {**vars, **cli_vars} - -def parse_cli_vars(args_vars: str) -> Dict[str, Any]: - if not args_vars: - return {} - - try: - return dbt_parse_cli_vars(args_vars) - except DbtRuntimeError as exc: - raise FalParseError(exc) - - -@cache_static -def get_fal_models_dirs(project_dir: str, args_vars: str) -> List[str]: - vars = get_vars_dict(project_dir, args_vars) - model_paths = vars.get(FAL_MODELS_PATHS) or [] - if not model_paths: - # None or empty list - LOGGER.warn( - f"Variable '{FAL_MODELS_PATHS}' not defined. Locate fal-format " - "Python models in a separate model directory and set it as the variable. " - "e.g. {FAL_MODELS_PATHS}: ['fal_models']" - ) - - telemetry.log_api(action="fal_models_paths_not_set") - - if not isinstance(model_paths, list): - raise FalParseError( - f"Error parsing '{FAL_MODELS_PATHS}'. Expected list of strings and got '{type(model_paths)}'" - ) - - return model_paths - - -def get_scripts_dir(project_dir: str, args_vars: str) -> str: - vars = get_vars_dict(project_dir, args_vars) - scripts_dir = vars.get(FAL_SCRIPTS_PATH, project_dir) - - if not isinstance(scripts_dir, str): - raise FalParseError( - f"Error parsing '{FAL_SCRIPTS_PATH}'. Expected string and got '{type(scripts_dir)}'" - ) - - return os.path.join(project_dir, scripts_dir) - - -def get_dbt_manifest(config) -> Manifest: - from dbt.parser.manifest import ManifestLoader - - return ManifestLoader.get_full_manifest(config) - - -def get_dbt_sources_artifact(project_dir: str, config: RuntimeConfig): - # Changed in dbt 1.5.0 to use path relative to CWD instead of path relative to project_dir - sources_path = os.path.join(config.target_path, "sources.json") - try: - return FreshnessExecutionResultArtifact.read_and_check_versions(sources_path) - - except IncompatibleSchemaError as exc: - # TODO: add test for this case - exc.add_filename(sources_path) - raise - except DbtRuntimeError as exc: - LOGGER.warn("Could not read dbt sources artifact") - return None - - -def get_dbt_results(project_dir: str, config: RuntimeConfig) -> Optional[RunResultsArtifact]: - # Changed in dbt 1.5.0 to use path relative to CWD instead of path relative to project_dir - results_path = os.path.join(config.target_path, "run_results.json") - try: - return RunResultsArtifact.read_and_check_versions(results_path) - - except IncompatibleSchemaError as exc: - # TODO: add test for this case - exc.add_filename(results_path) - raise - except DbtRuntimeError as exc: - LOGGER.warn("Could not read dbt run_results artifact") - return None - - -def get_scripts_list(scripts_dir: str) -> List[str]: - scripts_path = Path(scripts_dir) - return list(map(str, [*scripts_path.rglob("*.py"), *scripts_path.rglob("*.ipynb")])) - - -def get_global_script_configs(source_dirs: List[Path]) -> Dict[str, List[str]]: - global_scripts = {"before": [], "after": []} - for source_dir in source_dirs: - # Scan directories for .yml files - schema_files = glob.glob(os.path.join(source_dir, "**.yml"), recursive=True) - # Scan directories for .yaml files - schema_files += glob.glob(os.path.join(source_dir, "**.yaml"), recursive=True) - for file in schema_files: - schema_yml = load_yaml(file) - if schema_yml is not None: - 
fal_config = schema_yml.get("fal", None) - if fal_config is not None: - # sometimes `scripts` can *be* there and still be None - script_paths = fal_config.get("scripts") or [] - if isinstance(script_paths, list): - global_scripts["after"] += script_paths - else: - global_scripts["before"] += script_paths.get("before") or [] - global_scripts["after"] += script_paths.get("after") or [] - else: - raise FalParseError("Error parsing the schema file " + file) - - return global_scripts - - -def _get_required_key(data: Dict[str, Any], name: str) -> Any: - if name not in data: - raise FalParseError("Missing required key: " + name) - return data[name] - - -def load_environments(base_dir: str) -> Dict[str, "BaseEnvironment"]: - from fal.dbt.packages.environments import create_environment - from fal.dbt.fal_script import _is_local_environment - - try: - fal_project_path = os.path.join(base_dir, "fal_project.yml") - if not os.path.exists(fal_project_path): - raise FalParseError(f"{fal_project_path} must exist to define environments") - - fal_project = load_yaml(fal_project_path) - - environments = {} - for environment in fal_project.get("environments", []): - env_name = _get_required_key(environment, "name") - if _is_local_environment(env_name): - raise FalParseError( - f"Environment name conflicts with a reserved name: {env_name}." - ) - - env_kind = _get_required_key(environment, "type") - environments[env_name] = create_environment(env_name, env_kind, environment) - return environments - except FalParseError as e: - raise RuntimeError("Error loading environments from fal_project.yml") from e - - -def normalize_path(base: str, path: Union[Path, str]): - real_base = os.path.realpath(os.path.normpath(base)) - return Path(os.path.realpath(os.path.join(real_base, path))) - - -def normalize_paths( - base: str, paths: Union[List[Path], List[str], List[Union[Path, str]]] -): - return list(map(lambda path: normalize_path(base, path), paths)) diff --git a/projects/adapter/src/fal/dbt/integration/project.py b/projects/adapter/src/fal/dbt/integration/project.py deleted file mode 100644 index 71d4228c..00000000 --- a/projects/adapter/src/fal/dbt/integration/project.py +++ /dev/null @@ -1,902 +0,0 @@ -from collections import defaultdict -import os.path -from dataclasses import dataclass, field -from typing import ( - Dict, - Iterable, - List, - Any, - Optional, - Tuple, - Sequence, - TYPE_CHECKING, -) -from pathlib import Path -from deprecation import deprecated - -import fal.dbt.integration.version as version - -from dbt.cli.resolvers import default_profiles_dir -from dbt.cli.main import dbtRunner, dbtRunnerResult - -from dbt.contracts.graph.nodes import ( - SourceDefinition, - TestMetadata, - GenericTestNode, - SingularTestNode, -) -from dbt.contracts.graph.nodes import ManifestNode - -from dbt.contracts.graph.manifest import ( - Manifest, - MaybeNonSource, - MaybeParsedSource, - Disabled, -) - -from dbt.node_types import NodeType -from dbt.contracts.connection import AdapterResponse -from dbt.contracts.results import ( - RunResultsArtifact, - RunResultOutput, - NodeStatus, - FreshnessExecutionResultArtifact, - FreshnessNodeOutput, -) -from dbt.task.compile import CompileTask - -from . import parse -from . import lib -from . 
import version - -from fal.dbt.feature_store.feature import Feature - -import pandas as pd - -from fal.dbt.telemetry import telemetry -from fal.dbt.utils import has_side_effects - -if TYPE_CHECKING: - from fal.dbt.fal_script import Hook, TimingType - from fal.dbt.packages.environments import BaseEnvironment - - -class FalGeneralException(Exception): - pass - - -FAL = "fal" - - -@dataclass -class _DbtNode: - node: Any = field(repr=False) - _status: str = field(default=NodeStatus.Skipped.value) - - @property - def name(self) -> str: - return self.node.name - - @property - def unique_id(self) -> str: - return self.node.unique_id - - def _get_status(self): - return self._status - - def _set_status(self, status: str): - self._status = status - - status = property(_get_status, _set_status) - - -@dataclass -class DbtTest(_DbtNode): - model_ids: List[str] = field(init=False, default_factory=list) - source_ids: List[str] = field(init=False, default_factory=list) - - @classmethod - def init(cls, node): - if node.resource_type == NodeType.Test: - if isinstance(node, GenericTestNode): - test = DbtGenericTest(node=node) - elif isinstance(node, SingularTestNode): - test = DbtSingularTest(node=node) - else: - raise ValueError(f"Unexpected test class {node.__class__.__name__}") - - for dep in test.node.depends_on.nodes: - if dep.startswith("model."): - test.model_ids.append(dep) - if dep.startswith("source."): - test.source_ids.append(dep) - - return test - else: - raise TypeError( - f"Initialized DbtTest with node of type {node.resource_type}" - ) - - -@dataclass -class DbtGenericTest(DbtTest): - column: Optional[str] = field(init=False) - - def __repr__(self): - attrs = ["name", "_status", "model_ids", "source_ids", "column"] - props = ", ".join([f"{item}={repr(getattr(self, item))}" for item in attrs]) - return f"DbtGenericTest({props})" - - def __post_init__(self): - assert isinstance(self.node, GenericTestNode) - self.column = self.node.column_name - - # Column name might be stored in test_metadata - if not self.column and self.node.test_metadata.kwargs.get("column_name"): - self.column = self.node.test_metadata.kwargs.get("column_name") - - @property - def source_id(self): - if self.source_ids: - return self.source_ids[0] - - @property - def model_id(self): - if self.model_ids: - return self.model_ids[0] - - # TODO: Deprecate? - @property - def source(self): - if self.source_id: - parts = self.source_id.split(".") - return parts[-2], parts[-1] - - # TODO: Deprecate? - @property - def model(self): - if self.model_id: - # TODO: handle package models - parts = self.model_id.split(".") - return parts[-1] - - @property - def name(self) -> str: - metadata: TestMetadata = self.node.test_metadata - return metadata.name - - -@dataclass -class DbtSingularTest(DbtTest): - def __post_init__(self): - assert isinstance(self.node, SingularTestNode) - - -@dataclass -class _DbtTestableNode(_DbtNode): - # TODO: should this include singular tests that ref to this node? 
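How the test wrappers above fit together, as a sketch: DbtTest.init dispatches on the manifest node's class, and the generic variant exposes the first model/source dependency plus the tested column. manifest_test_node is a hypothetical dbt manifest test node.

    test = DbtTest.init(node=manifest_test_node)  # DbtGenericTest or DbtSingularTest
    if isinstance(test, DbtGenericTest):
        print(test.model_id)   # first "model.*" entry of depends_on, or None
        print(test.source_id)  # first "source.*" entry of depends_on, or None
        print(test.column)     # tested column, possibly recovered from test_metadata kwargs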
- tests: List[DbtGenericTest] = field(default_factory=list) - - def _get_status(self): - if self._status == NodeStatus.Skipped and any( - test.status != NodeStatus.Skipped for test in self.tests - ): - return "tested" - else: - return self._status - - status = property(_get_status, _DbtNode._set_status) - - -@dataclass -class DbtSource(_DbtTestableNode): - freshness: Optional[FreshnessNodeOutput] = field(default=None) - - def __repr__(self): - attrs = ["name", "table_name", "tests", "status"] - props = ", ".join([f"{item}={repr(getattr(self, item))}" for item in attrs]) - return f"DbtSource({props})" - - @property - def meta(self): - return self.node.meta - - @property - def table_name(self) -> str: - return self.node.name - - @property - def name(self) -> str: - return self.node.source_name - - -@dataclass -class DbtModel(_DbtTestableNode): - python_model: Optional[Path] = field(default=None) - - _adapter_response: Optional[AdapterResponse] = field(default=None) - - def __repr__(self): - attrs = ["name", "alias", "unique_id", "columns", "tests", "status"] - props = ", ".join([f"{item}={repr(getattr(self, item))}" for item in attrs]) - return f"DbtModel({props})" - - @property - def columns(self): - return self.node.columns - - @property - def alias(self): - return self.node.alias - - @property - def meta(self): - return self.node.meta - - @property - def is_incremental(self): - return self.node.config.materialized == "incremental" - - def _get_adapter_response(self): - return self._adapter_response - - def _set_adapter_response(self, adapter_response: Optional[dict]): - self._adapter_response = ( - AdapterResponse.from_dict(adapter_response) if adapter_response else None - ) - - adapter_response = property(_get_adapter_response, _set_adapter_response) - - def __hash__(self) -> int: - return self.unique_id.__hash__() - - def get_depends_on_nodes(self) -> List[str]: - return self.node.depends_on_nodes - - def get_hooks( - self, - hook_type: "TimingType", - ) -> List["Hook"]: - from fal.dbt.fal_script import create_hook, TimingType - - meta = self.meta or {} - - keyword_dict = meta.get(FAL) or {} - if not isinstance(keyword_dict, dict): - return [] - - if hook_type == TimingType.PRE: - hook_key = "pre-hook" - elif hook_type == TimingType.POST: - hook_key = "post-hook" - else: - raise ValueError(f"Unexpected hook type {hook_type}") - - raw_hooks = keyword_dict.get(hook_key) or [] - if not isinstance(raw_hooks, list): - return [] - - return [ - create_hook(raw_hook, default_environment_name=self.environment_name) - for raw_hook in raw_hooks - ] - - def get_scripts(self, *, before: bool) -> List[str]: - # sometimes properties can *be* there and still be None - meta = self.meta or {} - - keyword_dict = meta.get(FAL) or {} - if not isinstance(keyword_dict, dict): - return [] - - scripts_node = keyword_dict.get("scripts") or [] - if not scripts_node: - return [] - - if isinstance(scripts_node, list): - if before: - return [] - else: - return scripts_node - - if not isinstance(scripts_node, dict): - return [] - - if before: - return scripts_node.get("before") or [] - else: - return scripts_node.get("after") or [] - - @property - def environment_name(self) -> Optional[str]: - meta = self.meta or {} - fal = meta.get("fal") or {} - return fal.get("environment") - - -@dataclass -class DbtRunResult: - native_run_result: Optional[RunResultsArtifact] - - @property - @deprecated(details="Use native_run_result instead") - def nativeRunResult(self): - return self.native_run_result - - @property - def 
results(self) -> Sequence[RunResultOutput]: - if self.native_run_result: - return self.native_run_result.results - else: - return [] - - -@dataclass -class DbtFreshnessExecutionResult: - _artifact: Optional[FreshnessExecutionResultArtifact] - - @property - def results(self) -> Sequence[FreshnessNodeOutput]: - if self._artifact: - return self._artifact.results - else: - return [] - - -@dataclass -class DbtManifest: - native_manifest: Manifest - - @property - @deprecated(details="Use native_manifest instead") - def nativeManifest(self): - return self.native_manifest - - def get_model_nodes(self) -> Iterable[ManifestNode]: - return ( - node - for node in self.native_manifest.nodes.values() - if node.resource_type == NodeType.Model - ) - - def get_test_nodes(self) -> Iterable[ManifestNode]: - return ( - node - for node in self.native_manifest.nodes.values() - if node.resource_type == NodeType.Test - ) - - def get_source_nodes(self) -> Iterable[SourceDefinition]: - return self.native_manifest.sources.values() - - def _map_nodes( - self, - run_results: DbtRunResult, - freshness_results: DbtFreshnessExecutionResult, - generated_models: Dict[str, Path], - ) -> Tuple[List[DbtModel], List[DbtSource], List[DbtTest]]: - results_map = {r.unique_id: r for r in run_results.results} - - tests: List[DbtTest] = [] - - tests_dict: Dict[str, List[DbtGenericTest]] = defaultdict(list) - for node in self.get_test_nodes(): - test: DbtTest = DbtTest.init(node=node) - - result = results_map.get(node.unique_id) - if result: - test.status = result.status.value - - tests.append(test) - - if isinstance(test, DbtGenericTest): - if test.model_id: - tests_dict[test.model_id].append(test) - if test.source_id: - tests_dict[test.source_id].append(test) - - models: List[DbtModel] = [] - for node in self.get_model_nodes(): - model = DbtModel( - node=node, - tests=tests_dict[node.unique_id], - python_model=generated_models.get(node.name), - ) - - result = results_map.get(node.unique_id) - if result: - model.status = result.status.value - model.adapter_response = result.adapter_response - - models.append(model) - - source_freshness_map = {r.unique_id: r for r in freshness_results.results} - - sources: List[DbtSource] = [] - for node in self.get_source_nodes(): - source = DbtSource( - node=node, - tests=tests_dict[node.unique_id], - freshness=source_freshness_map.get(node.unique_id), - ) - - result = results_map.get(node.unique_id) - if result: - source.status = result.status.value - - sources.append(source) - - return models, sources, tests - - -@dataclass -class CompileArgs: - selector: Optional[str] - select: List[str] - models: List[str] - exclude: Tuple[str] - state: Optional[Path] - single_threaded: Optional[bool] - - -@has_side_effects -class FalDbt: - """Holds the entire dbt project information.""" - - # TODO: figure out a meaningful __repr__ for this class - def __init__( - self, - project_dir: Optional[str] = None, - profiles_dir: Optional[str] = None, - select: List[str] = [], - exclude: Tuple[str] = tuple(), - selector: Optional[str] = None, - threads: Optional[int] = None, - state: Optional[str] = None, - profile_target: Optional[str] = None, - args_vars: str = "{}", - generated_models: Dict[str, Path] = {}, - ): - if not version.is_version_plus("1.0.0"): - raise NotImplementedError( - f"dbt version {version.DBT_VCURRENT} is no longer supported, please upgrade to dbt 1.0.0 or above" - ) - - if project_dir is None: - project_dir = os.getcwd() - - if profiles_dir is None: - profiles_dir = str(default_profiles_dir()) - 
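For orientation, a sketch of how downstream code constructed the class whose __init__ begins above; the paths and model name are placeholders, and ref/list_models are the accessors defined further below.

    from fal.dbt import FalDbt

    faldbt = FalDbt(project_dir="my_dbt_project", profiles_dir="~/.dbt")
    for model in faldbt.list_models():
        print(model.name, model.status)
    df = faldbt.ref("orders")  # downloads the model as a pandas.DataFrame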
- project_dir = os.path.realpath(os.path.expanduser(project_dir)) - profiles_dir = os.path.realpath(os.path.expanduser(profiles_dir)) - - vars = parse.parse_cli_vars(args_vars) - - flags = lib.initialize_dbt_flags( - profiles_dir=profiles_dir, - project_dir=project_dir, - threads=threads, - profile_target=profile_target, - vars=vars, - ) - - self.project_dir = flags.PROJECT_DIR - self.profiles_dir = flags.PROFILES_DIR - - self._state = None - if state is not None: - self._state = Path(os.path.realpath(os.path.expanduser(state))) - - self.scripts_dir = parse.get_scripts_dir(self.project_dir, args_vars) - - - # Can be overwritten if profile_target is not None - self._config = parse.get_dbt_config( - project_dir=self.project_dir, - profiles_dir=self.profiles_dir, - profile_target=profile_target, - threads=threads, - args_vars=args_vars, - ) - - self._run_results = DbtRunResult( - parse.get_dbt_results(self.project_dir, self._config) - ) - - if self._run_results.native_run_result: - if profile_target is None: - profile_target = _get_custom_target(self._run_results) - - if profile_target is not None: - self._config = parse.get_dbt_config( - project_dir=self.project_dir, - profiles_dir=self.profiles_dir, - threads=threads, - profile_target=profile_target, - args_vars=args_vars, - ) - - lib.register_adapters(self._config) - - parse_result = self._dbt_invoke("parse") - native_manifest: Manifest = parse_result.result # type: ignore - - # Necessary for manifest loading to not fail - # dbt.tracking.initialize_tracking(self.profiles_dir) - - args = CompileArgs(selector, select, select, exclude, self._state, None) - self._compile_task = CompileTask(args, self._config, native_manifest) - - self._compile_task._runtime_initialize() - - self._manifest = DbtManifest(native_manifest) - - freshness_execution_results = DbtFreshnessExecutionResult( - parse.get_dbt_sources_artifact(self.project_dir, self._config) - ) - - self.models, self.sources, self.tests = self._manifest._map_nodes( - self._run_results, - freshness_execution_results, - generated_models, - ) - - normalized_model_paths = parse.normalize_paths( - self.project_dir, self.source_paths - ) - - self._global_script_paths = parse.get_global_script_configs( - normalized_model_paths - ) - - self.features = self._find_features() - self._environments = None - - telemetry.log_api( - action="faldbt_initialized", - dbt_config=self._config, - ) - - def _dbt_invoke( - self, cmd: str, args: Optional[List[str]] = None - ) -> dbtRunnerResult: - runner = dbtRunner() - - if args is None: - args = [] - - project_args = [ - "--project-dir", - self.project_dir, - "--profiles-dir", - self.profiles_dir, - "--target", - self._profile_target, - ] - - # TODO: Intervene the dbt logs and capture them to avoid printing them to the console - return runner.invoke([cmd] + project_args + args) - - @property - def model_paths(self) -> List[str]: - return self._config.model_paths - - @property - @deprecated(details="Use model_paths instead") - def source_paths(self) -> List[str]: - return self.model_paths - - @property - def _profile_target(self): - return self._config.target_name - - @property - def threads(self): - return self._config.threads - - @property - def target_path(self): - return self._config.target_path - - @property - def project_name(self): - return self._config.project_name - - def list_sources(self) -> List[DbtSource]: - """ - List tables available for `source` usage - """ - with telemetry.log_time("list_sources", dbt_config=self._config): - return self.sources - - 
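The _dbt_invoke helper above wraps dbt's programmatic entry point; the same pattern works standalone with dbt-core 1.5+, where dbtRunner is available (paths and target here are placeholders):

    from dbt.cli.main import dbtRunner, dbtRunnerResult

    runner = dbtRunner()
    res: dbtRunnerResult = runner.invoke([
        "parse",
        "--project-dir", "my_dbt_project",
        "--profiles-dir", "~/.dbt",
        "--target", "dev",
    ])
    if res.success:
        manifest = res.result  # for "parse" the result payload is the dbt Manifest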
def list_models_ids(self) -> Dict[str, str]: - """ - List model ids available for `ref` usage, formatting like `[ref_name, ...]` - """ - with telemetry.log_time("list_models_ids", dbt_config=self._config): - res = {} - for model in self.models: - res[model.unique_id] = model.status - - return res - - def list_models(self) -> List[DbtModel]: - """ - List models - """ - with telemetry.log_time("list_models", dbt_config=self._config): - return self.models - - def list_tests(self) -> List[DbtTest]: - """ - List tests - """ - with telemetry.log_time("list_tests", dbt_config=self._config): - return self.tests - - def list_features(self) -> List[Feature]: - with telemetry.log_time("list_features", dbt_config=self._config): - return self.features - - def _find_features(self) -> List[Feature]: - """List features defined in schema.yml files.""" - models = self.models - models = list( - filter( - # Find models that have both feature store and column defs - lambda model: FAL in model.meta - and isinstance(model.meta[FAL], dict) - and "feature_store" in model.meta[FAL] - and len(list(model.columns.keys())) > 0, - models, - ) - ) - features = [] - for model in models: - for column_name in model.columns.keys(): - if column_name == model.meta[FAL]["feature_store"]["entity_column"]: - continue - if column_name == model.meta[FAL]["feature_store"]["timestamp_column"]: - continue - features.append( - Feature( - model=model.name, - column=column_name, - description=model.columns[column_name].description, - entity_column=model.meta[FAL]["feature_store"]["entity_column"], - timestamp_column=model.meta[FAL]["feature_store"][ - "timestamp_column" - ], - ) - ) - return features - - def _model( - self, target_model_name: str, target_package_name: Optional[str] - ) -> ManifestNode: - # HACK: always setting node package as self.project_dir - target_model: MaybeNonSource = self._manifest.native_manifest.resolve_ref( - target_model_name, - target_package_name, - None, - self.project_dir, - self.project_dir, - ) - package_str = f"'{target_package_name}'." if target_package_name else "" - model_str = f"{package_str}'{target_model_name}'" - if target_model is None: - raise Exception(f"Could not find model {model_str}") - - if isinstance(target_model, Disabled): - raise RuntimeError(f"Model {model_str} is disabled") - - return target_model - - def ref(self, target_1: str, target_2: Optional[str] = None) -> pd.DataFrame: - """ - Download a dbt model as a pandas.DataFrame automagically. 
- """ - with telemetry.log_time("ref", dbt_config=self._config): - target_model_name = target_1 - target_package_name = None - if target_2 is not None: - target_package_name = target_1 - target_model_name = target_2 - - target_model = self._model(target_model_name, target_package_name) - - return lib.fetch_target( - self.project_dir, - self.profiles_dir, - target_model, - self._profile_target, - config=self._config, - ) - - def _source( - self, target_source_name: str, target_table_name: str - ) -> SourceDefinition: - # HACK: always setting node package as self.project_dir - target_source: MaybeParsedSource = ( - self._manifest.native_manifest.resolve_source( - target_source_name, - target_table_name, - self.project_dir, - self.project_dir, - ) - ) - - if target_source is None: - raise RuntimeError( - f"Could not find source '{target_source_name}'.'{target_table_name}'" - ) - - if isinstance(target_source, Disabled): - raise RuntimeError( - f"Source '{target_source_name}'.'{target_table_name}' is disabled" - ) - - return target_source - - def source(self, target_source_name: str, target_table_name: str) -> pd.DataFrame: - """ - Download a dbt source as a pandas.DataFrame automagically. - """ - with telemetry.log_time("source", dbt_config=self._config): - target_source = self._source(target_source_name, target_table_name) - - return lib.fetch_target( - self.project_dir, - self.profiles_dir, - target_source, - self._profile_target, - config=self._config, - ) - - def write_to_source( - self, - data: pd.DataFrame, - target_source_name: str, - target_table_name: str, - *, - dtype: Any = None, - mode: str = "append", - ): - """ - Write a pandas.DataFrame to a dbt source automagically. - """ - - with telemetry.log_time( - "write_to_source", - dbt_config=self._config, - additional_props={"args": {"mode": mode}}, - ): - target_source = self._source(target_source_name, target_table_name) - - write_mode = lib.WriteModeEnum(mode.lower().strip()) - if write_mode == lib.WriteModeEnum.APPEND: - lib.write_target( - data, - self.project_dir, - self.profiles_dir, - self._profile_target, - target_source, - dtype=dtype, - config=self._config, - ) - - elif write_mode == lib.WriteModeEnum.OVERWRITE: - lib.overwrite_target( - data, - self.project_dir, - self.profiles_dir, - self._profile_target, - target_source, - dtype=dtype, - config=self._config, - ) - - else: - raise Exception(f"write_to_source mode `{mode}` not supported") - - def write_to_model( - self, - data: pd.DataFrame, - target_1: str, - target_2: Optional[str] = None, - *, - dtype: Any = None, - mode: str = "overwrite", - ): - """ - Write a pandas.DataFrame to a dbt model automagically. 
- """ - - with telemetry.log_time( - "write_to_model", - dbt_config=self._config, - additional_props={"args": {"mode": mode}}, - ): - target_model_name = target_1 - target_package_name = None - if target_2 is not None: - target_package_name = target_1 - target_model_name = target_2 - - target_model = self._model(target_model_name, target_package_name) - - write_mode = lib.WriteModeEnum(mode.lower().strip()) - if write_mode == lib.WriteModeEnum.APPEND: - lib.write_target( - data, - self.project_dir, - self.profiles_dir, - self._profile_target, - target_model, - dtype=dtype, - config=self._config, - ) - - elif write_mode == lib.WriteModeEnum.OVERWRITE: - lib.overwrite_target( - data, - self.project_dir, - self.profiles_dir, - self._profile_target, - target_model, - dtype=dtype, - config=self._config, - ) - - else: - raise Exception(f"write_to_model mode `{mode}` not supported") - - def execute_sql(self, sql: str) -> pd.DataFrame: - """Execute a sql query.""" - - with telemetry.log_time("execute_sql", dbt_config=self._config): - # HACK: we need to pass config in because of weird behavior of execute_sql when - # ran from GitHub Actions. For some reason, it can not find the right profile. - # Haven't been able to reproduce this behavior locally and therefore developed - # this workaround. - compiled_result = lib.compile_sql( - self.project_dir, - self.profiles_dir, - self._profile_target, - sql, - config=self._config, - ) - - # HACK: we need to pass config in because of weird behavior of execute_sql when - # ran from GitHub Actions. For some reason, it can not find the right profile. - # Haven't been able to reproduce this behavior locally and therefore developed - # this workaround. - - # NOTE: changed in version 1.3.0 to `compiled_code` - if hasattr(compiled_result, "compiled_code"): - sql = compiled_result.compiled_code - else: - sql = compiled_result.compiled_sql - return lib.execute_sql( - self.project_dir, - self.profiles_dir, - self._profile_target, - sql, - config=self._config, - ) - - def _load_environment(self, name: str) -> "BaseEnvironment": - """ - Return the environment for the given ``name``. - If the environment does not exist, it raises an exception. 
- """ - if self._environments is None: - self._environments = parse.load_environments(self.project_dir) - return self._environments[name] - - -def _get_custom_target(run_results: DbtRunResult): - if "target" in run_results.native_run_result.args: - return run_results.native_run_result.args["target"] - return None diff --git a/projects/adapter/src/fal/dbt/integration/utils/__init__.py b/projects/adapter/src/fal/dbt/integration/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/src/fal/dbt/integration/utils/yaml_helper.py b/projects/adapter/src/fal/dbt/integration/utils/yaml_helper.py deleted file mode 100644 index 6ef96978..00000000 --- a/projects/adapter/src/fal/dbt/integration/utils/yaml_helper.py +++ /dev/null @@ -1,80 +0,0 @@ -from typing import Any, Dict, Optional -import yaml -import yaml.scanner - -# the C version is faster, but it doesn't always exist -try: - from yaml import CLoader as Loader, CSafeLoader as SafeLoader, CDumper as Dumper -except ImportError: - from yaml import Loader, SafeLoader, Dumper # type: ignore # noqa: F401 - - -YAML_ERROR_MESSAGE = """ -Syntax error near line {line_number} ------------------------------- -{nice_error} - -Raw Error: ------------------------------- -{raw_error} -""".strip() - - -def line_no(i, line, width=3): - line_number = str(i).ljust(width) - return "{}| {}".format(line_number, line) - - -def prefix_with_line_numbers(string, no_start, no_end): - line_list = string.split("\n") - - numbers = range(no_start, no_end) - relevant_lines = line_list[no_start:no_end] - - return "\n".join( - [line_no(i + 1, line) for (i, line) in zip(numbers, relevant_lines)] - ) - - -def contextualized_yaml_error(raw_contents, error): - mark = error.problem_mark - - min_line = max(mark.line - 3, 0) - max_line = mark.line + 4 - - nice_error = prefix_with_line_numbers(raw_contents, min_line, max_line) - - return YAML_ERROR_MESSAGE.format( - line_number=mark.line + 1, nice_error=nice_error, raw_error=error - ) - - -def safe_load(contents) -> Dict[str, Any]: - return yaml.load(contents, Loader=SafeLoader) - - -def load_yaml_text(contents): - try: - return safe_load(contents) - except (yaml.scanner.ScannerError, yaml.YAMLError) as e: - if hasattr(e, "problem_mark"): - error = contextualized_yaml_error(contents, e) - else: - error = str(e) - - raise Exception(error) - - -def _load_file_contents(path: str, strip: bool = True) -> str: - with open(path, "rb") as handle: - to_return = handle.read().decode("utf-8") - - if strip: - to_return = to_return.strip() - - return to_return - - -def load_yaml(path): - contents = _load_file_contents(path) - return load_yaml_text(contents) diff --git a/projects/adapter/src/fal/dbt/integration/version.py b/projects/adapter/src/fal/dbt/integration/version.py deleted file mode 100644 index 4b9cf6b5..00000000 --- a/projects/adapter/src/fal/dbt/integration/version.py +++ /dev/null @@ -1,10 +0,0 @@ -import dbt.version -from dbt.semver import VersionSpecifier - -def version_compare(version_string: str): - return DBT_VCURRENT.compare(VersionSpecifier.from_version_string(version_string)) - -def is_version_plus(version_string: str): - return version_compare(version_string) >= 0 - -DBT_VCURRENT = dbt.version.installed diff --git a/projects/adapter/src/fal/dbt/new/project.py b/projects/adapter/src/fal/dbt/new/project.py deleted file mode 100644 index 51cb6183..00000000 --- a/projects/adapter/src/fal/dbt/new/project.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import Optional, List, cast -from 
dbt.cli.main import dbtRunner, dbtRunnerResult - -class falProject: - """ - Represents a dbt project and access to its resources and utility functions. - """ - - def _dbt_invoke( - self, cmd: str, args: Optional[List[str]] = None - ) -> dbtRunnerResult: - if args is None: - args = [] - - project_args = [ - "--project-dir", - self.project_dir, - "--profiles-dir", - self.profiles_dir, - ] - if self.target_name: - project_args.extend(["--target", self.target_name]) - - # TODO: Intervene the dbt logs and capture them to avoid printing them to the console - return self._runner.invoke([cmd] + project_args + args) - - def __init__( - self, project_dir: str, profiles_dir: str, target_name: Optional[str] = None - ): - # TODO: Make project_dir and profiles_dir optional and use the current working directory and default profiles dir? - self.project_dir = project_dir - self.profiles_dir = profiles_dir - self.target_name = target_name - - # Load the manifest information - self._runner = dbtRunner() - parse_result = self._dbt_invoke("parse") - native_manifest = cast(Manifest, parse_result.result) # type: ignore - self._manifest = native_manifest - # self._manifest = DbtManifest(native_manifest) - - # TODO: Do we need the manifest in there? - self._runner = dbtRunner(manifest=native_manifest) diff --git a/projects/adapter/src/fal/dbt/node_graph.py b/projects/adapter/src/fal/dbt/node_graph.py deleted file mode 100644 index 3ca18e9d..00000000 --- a/projects/adapter/src/fal/dbt/node_graph.py +++ /dev/null @@ -1,253 +0,0 @@ -from __future__ import annotations -from dataclasses import dataclass -from typing import List, Tuple, Dict, cast - -from fal.dbt.fal_script import FalScript, TimingType -from fal.dbt.integration.project import DbtModel, FalDbt -from pathlib import Path -import networkx as nx -import os as os -from functools import reduce -from enum import Enum - - -class NodeKind(str, Enum): - DBT_MODEL = "dbt model" - FAL_MODEL = "fal model" - FAL_SCRIPT = "fal script" - - -@dataclass -class FalFlowNode: - "Represents a Node that can be invoked by fal flow command" - unique_id: str - - -@dataclass -class ScriptNode(FalFlowNode): - "Represents a python script node" - script: FalScript - - -@dataclass -class DbtModelNode(FalFlowNode): - "Represents a dbt node" - model: DbtModel - - -def _add_after_scripts( - model: DbtModel, - upstream_fal_node_unique_id: str, - faldbt: FalDbt, - graph: nx.DiGraph, - nodeLookup: Dict[str, FalFlowNode], -) -> Tuple[nx.DiGraph, Dict[str, FalFlowNode]]: - "Add dbt node to after scripts edges to the graph" - after_scripts = model.get_scripts(before=False) - after_fal_scripts = map( - lambda script_path: FalScript(faldbt, model, script_path, timing_type=TimingType.POST), after_scripts - ) - after_fal_script_nodes = list( - map( - lambda fal_script: ScriptNode( - _script_id_from_path(fal_script.path, model.name, "AFTER"), fal_script - ), - after_fal_scripts, - ) - ) - for fal_script_node in after_fal_script_nodes: - graph.add_node(fal_script_node.unique_id, kind=NodeKind.FAL_SCRIPT) - nodeLookup[fal_script_node.unique_id] = fal_script_node - # model_fal_node depends on fal_script_node - graph.add_edge(upstream_fal_node_unique_id, fal_script_node.unique_id) - - return graph, nodeLookup - - -def _add_before_scripts( - model: DbtModel, - downstream_fal_node_unique_id: str, - faldbt: FalDbt, - graph: nx.DiGraph, - nodeLookup: Dict[str, FalFlowNode], -) -> Tuple[nx.DiGraph, Dict[str, FalFlowNode]]: - "Add before scripts to dbt node edges to the graph" - before_scripts = 
model.get_scripts(before=True) - before_fal_scripts = map( - lambda script_path: FalScript(faldbt, model, script_path, timing_type=TimingType.PRE), before_scripts - ) - before_fal_script_node = map( - lambda fal_script: ScriptNode( - _script_id_from_path(fal_script.path, model.name, "BEFORE"), fal_script - ), - before_fal_scripts, - ) - - for fal_script_node in before_fal_script_node: - graph.add_node(fal_script_node.unique_id, kind=NodeKind.FAL_SCRIPT) - nodeLookup[fal_script_node.unique_id] = fal_script_node - # fal_script_node depends on model_fal_node - graph.add_edge(fal_script_node.unique_id, downstream_fal_node_unique_id) - - return graph, nodeLookup - - -def _script_id_from_path(scriptPath: Path, modelName: str, order: str): - script_file_name = os.path.basename(scriptPath) - return f"script.{modelName}.{order}.{script_file_name}" - - -@dataclass -class NodeGraph: - "Wrapper around networkx graph" - - @classmethod - def from_fal_dbt(cls, fal_dbt: FalDbt): - graph = nx.DiGraph() - node_lookup: Dict[str, FalFlowNode] = {} - for model in cast(List[DbtModel], fal_dbt.list_models()): - model_fal_node = DbtModelNode(model.unique_id, model) - node_lookup[model_fal_node.unique_id] = model_fal_node - graph.add_node( - model_fal_node.unique_id, - kind=NodeKind.FAL_MODEL if model.python_model else NodeKind.DBT_MODEL, - pre_hook=model.get_hooks(TimingType.PRE), - post_hook=model.get_hooks(TimingType.POST), - environment=model.environment_name, - ) - - # Add dbt model dependencies - for dbt_dependency_unique_id in model_fal_node.model.get_depends_on_nodes(): - if dbt_dependency_unique_id not in node_lookup: - graph.add_node(dbt_dependency_unique_id, kind=NodeKind.DBT_MODEL) - graph.add_edge(dbt_dependency_unique_id, model_fal_node.unique_id) - - _add_after_scripts( - model, - model_fal_node.unique_id, - fal_dbt, - graph, - node_lookup, - ) - - _add_before_scripts( - model, - model_fal_node.unique_id, - fal_dbt, - graph, - node_lookup, - ) - - return cls(graph, node_lookup) - - def __init__(self, graph: nx.DiGraph, node_lookup: Dict[str, FalFlowNode]): - self.graph = graph - self.node_lookup = node_lookup - - def get_successors(self, id: str, levels: int) -> List[str]: - assert levels >= 0 - if levels == 0: - return [] - else: - current: List[str] = list(self.graph.successors(id)) - return reduce( - lambda acc, id: acc + self.get_successors(id, levels - 1), - current, - current, - ) - - def get_descendants(self, id: str) -> List[str]: - return list(nx.descendants(self.graph, id)) - - def get_predecessors(self, id: str, levels: int) -> List[str]: - assert levels >= 0 - if levels == 0: - return [] - else: - current: List[str] = list(self.graph.predecessors(id)) - return reduce( - lambda acc, id: acc + self.get_predecessors(id, levels - 1), - current, - current, - ) - - def get_ancestors(self, id: str) -> List[str]: - return list(nx.ancestors(self.graph, id)) - - def get_node(self, id: str) -> FalFlowNode | None: - return self.node_lookup.get(id) - - def _is_script_node(self, node_name: str) -> bool: - return _is_script(node_name) - - def _is_critical_node(self, node): - successors = list(self.graph.successors(node)) - if node in successors: - successors.remove(node) - - def is_python_model(id: str): - inode = self.get_node(id) - if isinstance(inode, DbtModelNode): - return inode.model.python_model - return False - - def has_post_hooks(node_id: str): - inode = self.get_node(node_id) - if isinstance(inode, DbtModelNode): - return bool(inode.model.get_hooks(TimingType.POST)) - return False - - 
is_model_pred = lambda node_name: node_name.split(".")[0] == "model" - # fmt:off - return ( - (any(_is_script(i) for i in successors) and - any(is_model_pred(i) for i in successors)) or - is_python_model(node) or - has_post_hooks(node) - ) - # fmt:on - - def sort_nodes(self): - return nx.topological_sort(self.graph) - - def _group_nodes(self) -> List[List[str]]: - nodes = list(self.sort_nodes()) - buckets = [] - local_bucket = [] - seen_nodes = [] - for node in nodes: - if node not in seen_nodes: - local_bucket.append(node) - seen_nodes.append(node) - if self._is_critical_node(node): - script_successors = list( - filter( - _is_script, - self.graph.successors(node), - ) - ) - seen_nodes.extend(script_successors) - local_bucket.extend(script_successors) - buckets.append(local_bucket) - local_bucket = [] - buckets.append(local_bucket) - return buckets - - def generate_sub_graphs(self) -> List[NodeGraph]: - "Generates subgraphs that are seperated by `critical nodes`" - sub_graphs = [] - for bucket in self._group_nodes(): - sub_graph = self.graph.subgraph(bucket) - sub_graph_nodes = list(sub_graph.nodes()) - local_lookup = dict( - filter( - lambda node: node[0] in sub_graph_nodes, self.node_lookup.items() - ) - ) - node_graph = NodeGraph(sub_graph, local_lookup) - sub_graphs.append(node_graph) - return sub_graphs - - -def _is_script(name: str) -> bool: - return name.endswith(".py") or name.endswith(".ipynb") diff --git a/projects/adapter/src/fal/dbt/packages/__init__.py b/projects/adapter/src/fal/dbt/packages/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/src/fal/dbt/packages/bridge.py b/projects/adapter/src/fal/dbt/packages/bridge.py deleted file mode 100644 index 523018e9..00000000 --- a/projects/adapter/src/fal/dbt/packages/bridge.py +++ /dev/null @@ -1,53 +0,0 @@ -# A Python native two-way bridge implementation between the controller -# (fal-cli) process and the child process(es) (isolated scripts). -# -# controller both children -# /--------------\ /--------\ /------------\ -# object -> dill -> socket -> dill -> object -# -# -# The controller starts the listener (server) and then passes the raw -# socket address to the children. Each child then connects to the server -# and from that point onward, all exchanges (via send/recv) are done via -# regular Python objects (using dill). -# - - -from __future__ import annotations - -import base64 -from contextlib import closing -from functools import partial -from multiprocessing.connection import Client, ConnectionWrapper, Listener -from typing import Union - -import dill - -dill_wrapper = partial(ConnectionWrapper, dumps=dill.dumps, loads=dill.loads) - - -class _DillListener(Listener): - def accept(self) -> ConnectionWrapper: - return closing(dill_wrapper(super().accept())) - - -def controller_connection() -> _DillListener: - # The controller here assumes that there will be at most one - # client. This restriction might change in the future as an - # optimization. 
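To make the diagram above concrete, a sketch of one full round-trip over this bridge: the controller listens and sends a callable, the child executes it and sends back a (result, exception) pair, which is exactly the protocol polled by IsolatedProcessConnection further below. The child side is compressed into comments and process spawning is elided.

    from fal.dbt.packages.bridge import (
        controller_connection,
        encode_service_address,
    )

    with controller_connection() as listener:
        address = encode_service_address(listener.address)
        # ... spawn the child with `address`; the child then runs:
        #     with child_connection(decode_service_address(address)) as conn:
        #         fn = conn.recv()
        #         conn.send((fn(), None))
        with listener.accept() as conn:
            conn.send(lambda: 40 + 2)        # dill, unlike pickle, handles the lambda
            result, exception = conn.recv()  # (42, None) on success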
- return _DillListener() - - -def child_connection(address: str) -> ConnectionWrapper: - return closing(dill_wrapper(Client(address))) - - -def encode_service_address(address: Union[bytes, str]) -> str: - if isinstance(address, bytes): - address = address.decode() - - return base64.b64encode(address.encode()).decode("utf-8") - - -def decode_service_address(address: str) -> str: - return base64.b64decode(address).decode("utf-8") diff --git a/projects/adapter/src/fal/dbt/packages/dependency_analysis.py b/projects/adapter/src/fal/dbt/packages/dependency_analysis.py deleted file mode 100644 index efa9a408..00000000 --- a/projects/adapter/src/fal/dbt/packages/dependency_analysis.py +++ /dev/null @@ -1,138 +0,0 @@ -from __future__ import annotations - -from pathlib import Path -from typing import Iterator, List, Optional, Tuple - -import importlib_metadata - -from fal.dbt.utils import cache_static - -import importlib_metadata - - -def _get_project_root_path(pacakge: str) -> Path: - import fal.dbt as fal - - # If this is a development version, we'll install - # the current fal itself. - path = Path(fal.__file__) - while path is not None: - if (path.parent / ".git").exists(): - break - path = path.parent - return path / pacakge - - -def _get_dbt_packages() -> Iterator[Tuple[str, Optional[str]]]: - # package_distributions will return a mapping of top-level package names to a list of distribution names ( - # the PyPI names instead of the import names). An example distribution info is the following, which - # contains both the main exporter of the top-level name (dbt-core) as well as all the packages that - # export anything to that namespace: - # {"dbt": ["dbt-core", "dbt-postgres", "dbt-athena-adapter"]} - # - # This won't only include dbt.adapters.xxx, but anything that might export anything to the dbt namespace - # (e.g. a hypothetical plugin that only exports stuff to dbt.includes.xxx) which in theory would allow us - # to replicate the exact environment. - package_distributions = importlib_metadata.packages_distributions() - for dbt_plugin_name in package_distributions.get("dbt", []): - distribution = importlib_metadata.distribution(dbt_plugin_name) - - # Skip dbt-core since it will be determined by other packages being installed - if dbt_plugin_name == "dbt-core": - continue - - # Skip dbt-fal since it is already handled by _get_dbt_fal_package_name - if dbt_plugin_name == "dbt-postgres-python": - continue - - yield dbt_plugin_name, distribution.version - - -def _find_fal_extras(package: str) -> set[str]: - # Return a possible set of extras that might be required when installing - # fal in the new environment. The original form which the user has installed - # is not present to us (it is not saved anywhere during the package installation - # process, so there is no way for us to know how a user initially installed fal). - # We'll therefore be as generous as possible and install all the extras for all - # the dbt.adapters that the user currently has (so this will still be a subset - # of dependencies, e.g. if there is no dbt.adapter.duckdb then we won't include - # duckdb as an extra). - - import pkgutil - - import dbt.adapters - - fal_dist = importlib_metadata.distribution(package) - all_extras = fal_dist.metadata.get_all("Provides-Extra", []) - - # This list is different from the one we obtain in _get_dbt_packages - # since the names here are the actual import names, not the PyPI names - # (e.g. this one will say athena, and the other one will say dbt-athena-adapter). 
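The distribution discovery described in _get_dbt_packages above, as a minimal standalone sketch; the pinned version in the trailing comment is illustrative.

    import importlib_metadata

    # Map the top-level `dbt` namespace back to the PyPI distributions that
    # populate it, then pin each plugin at its installed version.
    dbt_plugins = importlib_metadata.packages_distributions().get("dbt", [])
    pins = {
        name: importlib_metadata.version(name)
        for name in dbt_plugins
        if name != "dbt-core"  # determined transitively, as in _get_dbt_packages
    }
    # e.g. {"dbt-postgres": "1.5.9"}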
-    available_dbt_adapters = {
-        module_info.name
-        for module_info in pkgutil.iter_modules(dbt.adapters.__path__)
-        if module_info.ispkg
-    }
-
-    # There will be adapters which we won't have an extra for (e.g. oracledb)
-    # and there will be extras which the user did not install the adapter for
-    # (e.g. dbt-redshift). We want to take the intersection of all the adapters
-    # that the user has installed and all the extras that fal provides and find
-    # the smallest possible subset of extras that we can install.
-    return available_dbt_adapters.intersection(all_extras)
-
-
-def _running_pre_release() -> bool:
-    raw_fal_version = importlib_metadata.version("dbt-postgres-python")
-    return _version_is_prerelease(raw_fal_version)
-
-
-def _version_is_prerelease(raw_version: str) -> bool:
-    from packaging.version import Version
-
-    package_version = Version(raw_version)
-    return package_version.is_prerelease
-
-
-def _get_dbt_fal_package() -> Tuple[str, Optional[str]]:
-    if _running_pre_release():
-        proj_path = _get_project_root_path("adapter")
-        if proj_path.exists():
-            # We are going to install it from the local path.
-            dbt_fal_dep = str(proj_path)
-            dbt_fal_version = None
-        else:
-            # We are going to install it from PyPI.
-            dbt_fal_dep = "dbt-postgres-python"
-            try:
-                dbt_fal_version = importlib_metadata.version("dbt-postgres-python")
-            except importlib_metadata.PackageNotFoundError:
-                # TODO: remove once `fal` is no longer a supported package
-                dbt_fal_version = importlib_metadata.version("fal")
-    else:
-        dbt_fal_dep = "dbt-postgres-python"
-        try:
-            dbt_fal_version = importlib_metadata.version("dbt-postgres-python")
-        except importlib_metadata.PackageNotFoundError:
-            # TODO: remove once `fal` is no longer a supported package
-            dbt_fal_version = importlib_metadata.version("fal")
-
-    try:
-        dbt_fal_extras = _find_fal_extras("dbt-postgres-python")
-    except importlib_metadata.PackageNotFoundError:
-        # TODO: remove once `fal` is no longer a supported package
-        dbt_fal_extras = _find_fal_extras("fal")
-
-    if dbt_fal_extras:
-        dbt_fal_dep += f"[{','.join(dbt_fal_extras)}]"
-
-    return dbt_fal_dep, dbt_fal_version
-
-
-def get_default_requirements() -> Iterator[Tuple[str, Optional[str]]]:
-    yield _get_dbt_fal_package()
-    yield from _get_dbt_packages()
-
-
-@cache_static
-def get_default_pip_dependencies() -> List[str]:
-    return [
-        f"{package}=={version}" if version else package
-        for package, version in get_default_requirements()
-    ]
diff --git a/projects/adapter/src/fal/dbt/packages/environments/__init__.py b/projects/adapter/src/fal/dbt/packages/environments/__init__.py
deleted file mode 100644
index 8f095daf..00000000
--- a/projects/adapter/src/fal/dbt/packages/environments/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from pathlib import Path
-from typing import List, Optional, Dict, Any, Type
-
-from fal.dbt.packages.environments.base import BASE_CACHE_DIR, BaseEnvironment
-from fal.dbt.packages.environments.conda import CondaEnvironment
-from fal.dbt.packages.environments.virtual_env import VirtualPythonEnvironment
-
-REGISTERED_ENVIRONMENTS: Dict[str, Type[BaseEnvironment]] = {
-    "conda": CondaEnvironment,
-    "venv": VirtualPythonEnvironment,
-}
-
-
-def create_environment(name: str, kind: str, config: Dict[str, Any]) -> BaseEnvironment:
-    env_type = REGISTERED_ENVIRONMENTS.get(kind)
-    if env_type is None:
-        raise ValueError(
-            f"Invalid environment type (of {kind}) for {name}. Please choose from: "
-            + ", ".join(REGISTERED_ENVIRONMENTS.keys())
-        )
-
-    return env_type.from_config(config)
diff --git a/projects/adapter/src/fal/dbt/packages/environments/base.py b/projects/adapter/src/fal/dbt/packages/environments/base.py
deleted file mode 100644
index e33c680c..00000000
--- a/projects/adapter/src/fal/dbt/packages/environments/base.py
+++ /dev/null
@@ -1,254 +0,0 @@
-from __future__ import annotations
-
-import os
-import shutil
-import subprocess
-import sysconfig
-import threading
-from collections import defaultdict
-from contextlib import ExitStack, contextmanager, nullcontext
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Any, Callable, ContextManager, Generic, Iterator, TypeVar, Dict
-
-from platformdirs import user_cache_dir
-
-from fal.dbt.integration.logger import LOGGER
-from fal.dbt.packages import bridge, isolated_runner
-
-BASE_CACHE_DIR = Path(user_cache_dir("fal", "fal"))
-BASE_CACHE_DIR.mkdir(exist_ok=True)
-
-T = TypeVar("T")
-K = TypeVar("K", bound="BaseEnvironment")
-BasicCallable = Callable[[], int]
-
-
-@contextmanager
-def rmdir_on_fail(path: Path) -> Iterator[None]:
-    try:
-        yield
-    except Exception:
-        if path.exists():
-            shutil.rmtree(path)
-        raise
-
-
-def log_env(env: BaseEnvironment, message: str, *args, kind: str = "trace", **kwargs):
-    message = f"[{env.key}] {message}"
-    log_method = getattr(LOGGER, kind)
-    log_method(message, *args, **kwargs)
-
-
-class BaseEnvironment(Generic[T]):
-    def __init_subclass__(cls, make_thread_safe: bool = False) -> None:
-        if make_thread_safe:
-            lock_cls = threading.Lock
-        else:
-            lock_cls = nullcontext
-
-        cls.lock_manager = defaultdict(lock_cls)
-        return cls
-
-    @classmethod
-    def from_config(cls, config: Dict[str, Any]) -> BaseEnvironment:
-        """Create a new environment from the given configuration."""
-        raise NotImplementedError
-
-    @property
-    def key(self) -> str:
-        """A unique identifier for this environment (combination of requirements,
-        python version and other relevant information) that can be used for caching
-        and identification purposes."""
-        raise NotImplementedError
-
-    def _get_or_create(self) -> T:
-        """Implementation of the environment creation and retrieval behavior. Not
-        thread safe."""
-        raise NotImplementedError
-
-    def get_or_create(self) -> T:
-        """If the environment exists, return the connection info for it. If not,
-        set up the environment first and then return the newly constructed
-        connection info. Thread safe."""
-        with self.lock_manager[self.key]:
-            return self._get_or_create()
-
-    def open_connection(self, conn_info: T) -> EnvironmentConnection:
-        """Return a new connection to the environment residing inside
-        the given path."""
-        raise NotImplementedError
-
-    @contextmanager
-    def connect(self) -> Iterator[EnvironmentConnection]:
-        env_info = self.get_or_create()
-        with self.open_connection(env_info) as connection:
-            yield connection
-
-
-@dataclass
-class EnvironmentConnection(Generic[K]):
-    env: K
-
-    def __enter__(self) -> EnvironmentConnection:
-        return self
-
-    def __exit__(self, *exc_info):
-        return None
-
-    def run(self, executable: BasicCallable, *args, **kwargs) -> Any:
-        raise NotImplementedError
-
-
-@dataclass
-class IsolatedProcessConnection(EnvironmentConnection[K]):
-    def run(self, executable: BasicCallable, *args, **kwargs) -> int:
-        with ExitStack() as stack:
-            # IPC flow is the following:
-            #  1. [controller]: Create the socket server
-            #  2. [controller]: Spawn the isolated process with the socket address
-            #  3. 
[isolated]: Connect to the socket server - # 4. [controller]: Accept the incoming connection request - # 5. [controller]: Send the executable over the established bridge - # 6. [isolated]: Receive the executable from the bridge - # 7. [isolated]: Execute the executable and once done send the result back - # 8. [controller]: Loop until either the isolated process exits or sends any - # data (will be interpreted as a tuple of two mutually exclusive - # objects, either a result object or an exception to be raised). - # - - log_env(self.env, "Starting the controller bridge.") - controller_service = stack.enter_context(bridge.controller_connection()) - - log_env( - self.env, - "Controller server is listening at {}.", - controller_service.address, - ) - isolated_process = stack.enter_context( - self.start_process(controller_service, *args, **kwargs) - ) - - log_env( - self.env, - "Awaiting child process of {} to establish a connection.", - isolated_process.pid, - ) - established_connection = stack.enter_context(controller_service.accept()) - - log_env( - self.env, - "Bridge between controller and the child has been established at {}.", - controller_service.address, - ) - established_connection.send(executable) - - log_env( - self.env, - "Executable has been sent, awaiting execution result and logs.", - ) - return self.poll_until_result(isolated_process, established_connection) - - def start_process( - self, connection: bridge.ConnectionWrapper, *args, **kwargs - ) -> ContextManager[subprocess.Popen]: - raise NotImplementedError - - def poll_until_result( - self, process: subprocess.Popen, connection: bridge.ConnectionWrapper - ) -> Any: - while process.poll() is None: - # Normally, if we do connection.read() without having this loop - # it is going to block us indefinitely (even if the underlying - # process has crashed). We can use a combination of process.poll - # and connection.poll to check if the process is alive and has data - # to move forward. - if not connection.poll(): - continue - - try: - result, exception = connection.recv() - except EOFError: - log_env(self.env, "The isolated process has unexpectedly exited.", kind="error") - raise RuntimeError("The isolated process has unexpectedly exited.") - - if exception is None: - log_env( - self.env, "Isolated process has returned the result: {}", result - ) - return result - else: - log_env( - self.env, - "The isolated process has exited with an exception.", - kind="error", - ) - raise exception - - -@dataclass -class DualPythonIPC(IsolatedProcessConnection[BaseEnvironment]): - # We manage user-defined dual-Python environments in two steps. - # 1. Create a primary environment which contains the default dependencies - # to run a fal script (like fal, dbt-core, and required dbt adapters). - # 2. Create a secondary environment which contains the user-defined dependencies. - # - # This is an optimization we apply to reduce the cost of user-defined environments - # where we can actually share the primary environment and save a lot of time from not - # installing heavy dependencies like dbt adapters again and again. This also heavily - # reduces the disk usage. 
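A small sketch of the search-order trick described above; the directory names are hypothetical, and the point is only that the secondary (user) environment comes first on `PYTHONPATH` and therefore shadows the primary one:

```python
import os
import sysconfig

# Hypothetical environment roots; earlier entries win on PYTHONPATH.
secondary = "/home/user/.cache/fal/venvs/user-env"
primary = "/home/user/.cache/fal/venvs/primary-env"

python_path = os.pathsep.join(
    # sysconfig resolves each environment root to its site-packages dir.
    sysconfig.get_path("purelib", vars={"base": base})
    for base in (secondary, primary)
)
print(python_path)
```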
- - primary_path: Path - secondary_path: Path - - def start_process( - self, - service: bridge.Listener, - *args, - **kwargs, - ) -> ContextManager[subprocess.Popen]: - # We are going to use the primary environment to run the Python - # interpreter, but at the same time we are going to inherit all - # the packages from the secondary environment (user's environment) - # so that they can technically override anything. - - # The search order is important, we want the secondary path to - # take precedence. - python_path = python_path_for(self.secondary_path, self.primary_path) - - # The environment which the python executable is going to be used - # shouldn't matter much for the virtual-env based installations but - # in conda, the Python executable between the primary and the secondary - # environment might differ and we'll give precedence to the user's - # choice (the secondary environment). - python_executable = get_executable_path(self.secondary_path, "python") - return subprocess.Popen( - [ - python_executable, - isolated_runner.__file__, - bridge.encode_service_address(service.address), - ], - env={"PYTHONPATH": python_path, **os.environ}, - ) - - -def python_path_for(*search_paths) -> str: - assert len(search_paths) >= 1 - return os.pathsep.join( - # sysconfig takes the virtual environment path and returns - # the directory where all the site packages are located. - sysconfig.get_path("purelib", vars={"base": search_path}) - for search_path in search_paths - ) - - -def get_executable_path(search_path: Path, executable_name: str) -> Path: - bin_dir = (search_path / "bin").as_posix() - executable_path = shutil.which(executable_name, path=bin_dir) - if executable_path is None: - raise RuntimeError( - f"Could not find {executable_name} in {search_path}. " - f"Is the virtual environment corrupted?" - ) - - return Path(executable_path) diff --git a/projects/adapter/src/fal/dbt/packages/environments/conda.py b/projects/adapter/src/fal/dbt/packages/environments/conda.py deleted file mode 100644 index d581f9b1..00000000 --- a/projects/adapter/src/fal/dbt/packages/environments/conda.py +++ /dev/null @@ -1,121 +0,0 @@ -from __future__ import annotations - -import sys -import hashlib -import os -import shutil -import subprocess -import sysconfig -from dataclasses import dataclass -from pathlib import Path -from typing import List, Dict, Any - -from fal.dbt.packages.environments.base import ( - BASE_CACHE_DIR, - BaseEnvironment, - DualPythonIPC, - log_env, - rmdir_on_fail, -) -from fal.dbt.packages.environments.virtual_env import get_primary_virtual_env -from fal.dbt.utils import cache_static - -_BASE_CONDA_DIR = BASE_CACHE_DIR / "conda" -_BASE_CONDA_DIR.mkdir(exist_ok=True) - -# Specify the path where the conda binary might -# reside in (or mamba, if it is the preferred one). -_CONDA_COMMAND = os.environ.get("CONDA_EXE", "conda") -_FAL_CONDA_HOME = os.getenv("FAL_CONDA_HOME") - - -@dataclass -class CondaEnvironment(BaseEnvironment[Path], make_thread_safe=True): - packages: List[str] - inherit_from_local: bool = False - - @classmethod - def from_config(cls, config: Dict[str, Any]) -> CondaEnvironment: - user_provided_packages = config.get("packages", []) - # TODO: remove this once cross-Python-version serialization is - # working. 
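For illustration, here is roughly what this `from_config` ends up handing to conda once the validation loop below has run, assuming the controller itself runs on Python 3.8 (the pin simply mirrors the running interpreter):

```python
import sysconfig

user_provided_packages = ["pandas", "scikit-learn"]
# The interpreter version of the controller process, e.g. "3.8".
python_version = sysconfig.get_python_version()
final_packages = user_provided_packages + [f"python={python_version}"]
print(final_packages)  # e.g. ['pandas', 'scikit-learn', 'python=3.8']
```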
-        for raw_requirement in user_provided_packages:
-            raw_requirement = raw_requirement.replace(" ", "")
-            if not raw_requirement.startswith("python"):
-                continue
-
-            # Ensure that the package is not python-something but rather
-            # python followed by any of the version constraints.
-            version_identifier = raw_requirement[len("python") :]
-            if version_identifier and version_identifier[0] in (
-                "=",
-                "<",
-                ">",
-                "!",
-            ):
-                raise RuntimeError(
-                    "Conda environments cannot customize their Python version."
-                )
-
-        # We currently don't support sending/receiving data between
-        # different Python versions so we need to make sure that the
-        # conda environment that is created also uses the same Python version.
-        python_version = sysconfig.get_python_version()
-        final_packages = user_provided_packages + [f"python={python_version}"]
-
-        inherit_from_local = config.get("_inherit_from_local", False)
-        return cls(final_packages, inherit_from_local=inherit_from_local)
-
-    @property
-    def key(self) -> str:
-        return hashlib.sha256(" ".join(self.packages).encode()).hexdigest()
-
-    def _get_or_create(self) -> Path:
-        env_path = _BASE_CONDA_DIR / self.key
-        if env_path.exists():
-            return env_path
-
-        with rmdir_on_fail(env_path):
-            self._run_conda(
-                "create",
-                "--yes",
-                # The environment will be created under $BASE_CACHE_DIR/conda
-                # so that in the future we can reuse it.
-                "--prefix",
-                env_path,
-                *self.packages,
-            )
-            return env_path
-
-    def _run_conda(self, *args, **kwargs) -> None:
-        log_env(self, "Installing conda environment")
-        conda_executable = get_conda_executable()
-        subprocess.check_call([conda_executable, *args], **kwargs, text=True)
-
-    def open_connection(self, conn_info: Path) -> DualPythonIPC:
-        if self.inherit_from_local:
-            # Instead of creating a separate environment that only has
-            # the same versions of fal/dbt-core etc. you have locally,
-            # we can also use your environment as the primary. This is
-            # mainly for the development time where the fal or dbt-core
-            # you are using is not available on PyPI yet.
-            primary_env_path = Path(sys.exec_prefix)
-        else:
-            primary_env = get_primary_virtual_env()
-            primary_env_path = primary_env.get_or_create()
-        secondary_env_path = conn_info
-        return DualPythonIPC(self, primary_env_path, secondary_env_path)
-
-
-@cache_static
-def get_conda_executable() -> Path:
-    for path in [_FAL_CONDA_HOME, None]:
-        conda_path = shutil.which(_CONDA_COMMAND, path=path)
-        if conda_path is not None:
-            return Path(conda_path)
-    else:
-        # TODO: we should probably point to the instructions on how you
-        # can install conda here.
-        raise RuntimeError(
-            "Could not find conda executable. Please install conda or set FAL_CONDA_HOME."
- ) diff --git a/projects/adapter/src/fal/dbt/packages/environments/virtual_env.py b/projects/adapter/src/fal/dbt/packages/environments/virtual_env.py deleted file mode 100644 index 30898bae..00000000 --- a/projects/adapter/src/fal/dbt/packages/environments/virtual_env.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import annotations - -import sys -import hashlib -import subprocess -from dataclasses import dataclass, field -from pathlib import Path -from typing import List, Dict, Any - -from fal.dbt.packages.dependency_analysis import get_default_pip_dependencies -from fal.dbt.packages.environments.base import ( - BASE_CACHE_DIR, - BaseEnvironment, - DualPythonIPC, - get_executable_path, - log_env, - python_path_for, - rmdir_on_fail, -) -from fal.dbt.utils import cache_static - -_BASE_VENV_DIR = BASE_CACHE_DIR / "venvs" -_BASE_VENV_DIR.mkdir(exist_ok=True) - - -@dataclass -class VirtualPythonEnvironment(BaseEnvironment[Path], make_thread_safe=True): - requirements: List[str] - inherit_from_local: bool = False - - @classmethod - def from_config(cls, config: Dict[str, Any]) -> VirtualPythonEnvironment: - requirements = config.get("requirements", []) - inherit_from_local = config.get("_inherit_from_local", False) - return cls(requirements, inherit_from_local=inherit_from_local) - - @property - def key(self) -> str: - return hashlib.sha256(" ".join(self.requirements).encode()).hexdigest() - - def _verify_dependencies(self, primary_path: Path, secondary_path: Path) -> None: - # Ensure that there are no dependency mismatches between the - # primary environment and the secondary environment. - python_path = python_path_for(secondary_path, primary_path) - original_pip = get_executable_path(primary_path, "pip") - subprocess.check_call([original_pip, "check"], env={"PYTHONPATH": python_path}) - - def _get_or_create(self) -> Path: - from virtualenv import cli_run - - path = _BASE_VENV_DIR / self.key - if path.exists(): - return path - - with rmdir_on_fail(path): - log_env(self, "Creating virtual environment at {}", path, kind="info") - cli_run([str(path)]) - log_env( - self, - "Installing requirements: {}", - ", ".join(self.requirements), - kind="info", - ) - if self.requirements: - pip_path = get_executable_path(path, "pip") - subprocess.check_call([pip_path, "install"] + self.requirements) - - if not self.inherit_from_local: - primary_env = get_primary_virtual_env() - if self is not primary_env: - self._verify_dependencies(primary_env._get_or_create(), path) - - return path - - def open_connection(self, conn_info: Path) -> DualPythonIPC: - if self.inherit_from_local: - # Instead of creating a separate environment that only has - # the same versions of fal/dbt-core etc. you have locally, - # we can also use your environment as the primary. This is - # mainly for the development time where the fal or dbt-core - # you are using is not available on PyPI yet. 
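Stepping back to the caching scheme above: the `key` property hashes the requirement list, so two hooks with identical requirements share one cached environment. A rough sketch of the layout (the cache root shown is illustrative; the real one comes from `user_cache_dir("fal", "fal")`):

```python
import hashlib

requirements = ["pyjokes==0.6.0", "dbt-core==1.5.9"]
# Identical requirement lists hash to the same directory name.
key = hashlib.sha256(" ".join(requirements).encode()).hexdigest()
print(f"~/.cache/fal/venvs/{key}")  # illustrative path only
```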
-            primary_venv_path = Path(sys.exec_prefix)
-        else:
-            primary_venv = get_primary_virtual_env()
-            primary_venv_path = primary_venv.get_or_create()
-        secondary_venv_path = conn_info
-        return DualPythonIPC(self, primary_venv_path, secondary_venv_path)
-
-
-@cache_static
-def get_primary_virtual_env() -> VirtualPythonEnvironment:
-    return VirtualPythonEnvironment(get_default_pip_dependencies())
diff --git a/projects/adapter/src/fal/dbt/packages/isolated_runner.py b/projects/adapter/src/fal/dbt/packages/isolated_runner.py
deleted file mode 100644
index 730aced4..00000000
--- a/projects/adapter/src/fal/dbt/packages/isolated_runner.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import os
-import sys
-import time
-import site
-from argparse import ArgumentParser
-
-# Isolated processes are really tricky to debug properly
-# so we want a smooth way to enter the process and see
-# what is really going on in the case of errors.
-#
-# To use the debug mode, you first need to set the FAL_DEBUG_ISOLATED_RUNNER
-# environment variable to "1" on your `fal flow run` command. This will
-# make the isolated process hang at initialization, and make it print
-# the instructions to connect to the controller process.
-#
-# On a separate shell (while letting the `fal flow run` hang), you can
-# execute that command to drop into PDB (Python Debugger). With that
-# you can observe each step of the connection and run process.
-#
-# Starting the process...
-# ============================================================
-#
-#
-# Debug mode successfully activated. You can start your debugging session with the following command:
-#     $ /[...]/fal/venvs/[...]bin/python /[...]/isolated_runner.py --with-pdb A[...]A=
-#
-#
-# ============================================================
-# Child connection has been established at the bridge b'\x00listener-17368-0'
-# Awaiting the child process to exit at b'\x00listener-17368-0'
-# Isolated process has exited with status: 0
-#
-
-IS_DEBUG_MODE = os.getenv("FAL_DEBUG_ISOLATED_RUNNER") == "1"
-DEBUG_TIMEOUT = 60 * 15
-
-
-def run_client(address: str, *, with_pdb: bool = False) -> int:
-    from fal.dbt.integration.logger import LOGGER
-    from fal.dbt.packages import bridge
-
-    if with_pdb:
-        # This condition will only be activated if we want to
-        # debug the isolated process by passing the --with-pdb
-        # flag when executing the binary.
-        breakpoint()
-
-    LOGGER.debug("Trying to create a connection to {}", address)
-    with bridge.child_connection(address) as connection:
-        LOGGER.debug("Created child connection to {}", address)
-        callable = connection.recv()
-        LOGGER.debug("Received the callable at {}", address)
-        try:
-            result = callable()
-            exception = None
-        except BaseException as exc:
-            result = None
-            exception = exc
-        finally:
-            try:
-                connection.send((result, exception))
-            except BaseException:
-                if exception:
-                    # If we can't even send it through the connection,
-                    # still try to dump it to stdout as the last resort.
-                    import traceback
-                    traceback.print_exception(
-                        type(exception), exception, exception.__traceback__
-                    )
-                raise
-        return result
-
-
-def _get_shell_bootstrap() -> str:
-    # Return a string that contains environment variables that
-    # might be used during isolated hook's execution.
-    return " ".join(
-        f"{session_variable}={os.getenv(session_variable)}"
-        for session_variable in [
-            # PYTHONPATH is customized by the Dual Environment IPC
-            # system to make sure that the isolated process can
-            # import stuff from the primary environment. 
Without this - # the isolated process will not be able to run properly - # on the newly created debug session. - "PYTHONPATH", - ] - if session_variable in os.environ - ) - - -def _fal_main() -> None: - from fal.dbt.integration.logger import LOGGER - from fal.dbt.packages import bridge - - LOGGER.debug("Starting the isolated process at PID {}", os.getpid()) - - parser = ArgumentParser() - parser.add_argument("listen_at") - parser.add_argument("--with-pdb", action="store_true", default=False) - - options = parser.parse_args() - if IS_DEBUG_MODE: - assert not options.with_pdb, "--with-pdb can't be used in the debug mode" - message = "=" * 60 - message += "\n" * 3 - message += "Debug mode successfully activated. You can start your debugging session with the following command:\n" - message += f" $ {_get_shell_bootstrap()}\\\n {sys.executable} {os.path.abspath(__file__)} --with-pdb {options.listen_at}" - message += "\n" * 3 - message += "=" * 60 - LOGGER.info(message) - time.sleep(DEBUG_TIMEOUT) - - address = bridge.decode_service_address(options.listen_at) - return run_client(address, with_pdb=options.with_pdb) - - -def _process_primary_env_packages() -> None: - python_path = os.getenv("PYTHONPATH") - if python_path is None: - return None - - for site_dir in python_path.split(os.pathsep): - site.addsitedir(site_dir) - - -def main(): - _process_primary_env_packages() - - from fal.dbt.integration.logger import log_manager - - # TODO: do we still need this? - with log_manager.applicationbound(): - _fal_main() - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/projects/adapter/src/fal/dbt/planner/__init__.py b/projects/adapter/src/fal/dbt/planner/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/src/fal/dbt/planner/executor.py b/projects/adapter/src/fal/dbt/planner/executor.py deleted file mode 100644 index 2fb5f917..00000000 --- a/projects/adapter/src/fal/dbt/planner/executor.py +++ /dev/null @@ -1,178 +0,0 @@ -import argparse -from enum import Enum, auto -from concurrent.futures import ( - FIRST_COMPLETED, - Executor, - Future, - ThreadPoolExecutor, - wait, -) -from dataclasses import dataclass, field -from typing import Iterator, List, Optional - -from fal.dbt.planner.schedule import SUCCESS, Scheduler -from fal.dbt.planner.tasks import ( - TaskGroup, - Task, - Status, - DBTTask, - FalLocalHookTask, - HookType, -) -from fal.dbt.integration.project import FalDbt - -from fal.dbt.integration.logger import LOGGER - - -class State(Enum): - PRE_HOOKS = auto() - MAIN_TASK = auto() - POST_HOOKS = auto() - - -def _collect_nodes(groups: List[TaskGroup], fal_dbt: FalDbt) -> Iterator[str]: - for group in groups: - if isinstance(group.task, DBTTask): - yield from group.task.model_ids - if ( - isinstance(group.task, FalLocalHookTask) - and group.task.hook_type is HookType.SCRIPT - ): - # Is a before/after script - yield group.task.build_fal_script(fal_dbt).id - - -def _show_failed_groups(scheduler: Scheduler, fal_dbt: FalDbt) -> None: - failed_nodes = list( - _collect_nodes(scheduler.filter_groups(Status.FAILURE), fal_dbt) - ) - if failed_nodes: - message = ", ".join(failed_nodes) - LOGGER.info("Failed calculating the following nodes: {}", message) - - skipped_nodes = list( - _collect_nodes(scheduler.filter_groups(Status.SKIPPED), fal_dbt) - ) - if skipped_nodes: - message = ", ".join(skipped_nodes) - LOGGER.info("Skipped calculating the following nodes: {}", message) - - -@dataclass -class FutureGroup: - args: argparse.Namespace - fal_dbt: FalDbt - 
task_group: TaskGroup - executor: Executor - futures: List[Future] = field(default_factory=list) - status: int = SUCCESS - state: Optional[State] = None - - def __post_init__(self) -> None: - if self.task_group.pre_hooks: - self.switch_to(State.PRE_HOOKS) - else: - self.switch_to(State.MAIN_TASK) - - def switch_to(self, group_status: State) -> None: - self.state = group_status - if group_status is State.PRE_HOOKS: - self._add_tasks(*self.task_group.pre_hooks) - elif group_status is State.MAIN_TASK: - self._add_tasks(self.task_group.task) - else: - assert group_status is State.POST_HOOKS - self._add_tasks(*self.task_group.post_hooks) - - def process(self, future: Future) -> None: - assert future in self.futures - self.futures.remove(future) - - # If non-zero, it will remain non-zero - self.status |= future.result() - - if self.futures: - return None - - if self.state is State.PRE_HOOKS: - # If there are no tasks left and the previous group was pre-hooks, - # we'll decide based on the exit status (success => move to the main task, - # failure => run the post-hooks and then exit). - if self.status == SUCCESS: - self.switch_to(State.MAIN_TASK) - else: - self.switch_to(State.POST_HOOKS) - elif self.state is State.MAIN_TASK: - # If we just executed the main task, then we'll proceed to the post-hooks - self.switch_to(State.POST_HOOKS) - else: - # If we just executed post-hooks and there are no more tasks left, - # we'll just exit ¯\_(ツ)_/¯ - assert self.state is State.POST_HOOKS - return None - - def _add_tasks(self, *tasks: Task) -> None: - for task in tasks: - future = self.executor.submit( - task.execute, - args=self.args, - fal_dbt=self.fal_dbt, - ) - future.task, future.group = task, self - self.futures.append(future) - - @property - def is_done(self) -> int: - return len(self.futures) == 0 - - -def parallel_executor( - args: argparse.Namespace, - fal_dbt: FalDbt, - scheduler: Scheduler, -) -> int: - def get_futures(future_groups): - return { - # Unpack all running futures into a single set - # to be consumed by wait(). - future - for future_group in future_groups - for future in future_group.futures - } - - def create_futures(executor: ThreadPoolExecutor) -> List[FutureGroup]: - return [ - # FutureGroup's are the secondary layer of the executor, - # managing the parallelization of tasks. - FutureGroup( - args, - fal_dbt, - task_group=task_group, - executor=executor, - ) - for task_group in scheduler.iter_available_groups() - ] - - with ThreadPoolExecutor(fal_dbt.threads) as executor: - future_groups = create_futures(executor) - futures = get_futures(future_groups) - while futures: - # Get the first completed futures, mark them done. - completed_futures, _ = wait(futures, return_when=FIRST_COMPLETED) - for future in completed_futures: - group: FutureGroup = future.group - group.process(future) - if group.is_done: - scheduler.finish(group.task_group, status=group.status) - - # And load all the tasks that were blocked by those futures. 
-            future_groups.extend(create_futures(executor))
-            futures = get_futures(future_groups)
-
-    _show_failed_groups(scheduler, fal_dbt)
-
-    return _exit_code(scheduler)
-
-
-def _exit_code(scheduler: Scheduler) -> int:
-    return int(any(scheduler.filter_groups(Status.FAILURE)))
diff --git a/projects/adapter/src/fal/dbt/planner/plan.py b/projects/adapter/src/fal/dbt/planner/plan.py
deleted file mode 100644
index 627aab43..00000000
--- a/projects/adapter/src/fal/dbt/planner/plan.py
+++ /dev/null
@@ -1,206 +0,0 @@
-from __future__ import annotations
-
-from typing import Callable, Iterator, List, Set
-
-import networkx as nx
-from fal.dbt.node_graph import NodeKind
-from fal.dbt.cli.selectors import ExecutionPlan, _is_before_script, _is_after_script
-from fal.dbt.integration.logger import LOGGER
-from dataclasses import dataclass
-
-
-@dataclass
-class OriginGraph:
-    graph: nx.DiGraph
-
-    def copy_graph(self) -> nx.DiGraph:
-        return self.graph.copy()  # type: ignore
-
-    def _plot(self, graph=None):
-        """
-        For development and debugging purposes
-        """
-        if not graph:
-            graph = self.graph
-
-        import matplotlib.pyplot as plt
-
-        import networkx.drawing.layout as layout
-
-        nx.draw_networkx(
-            graph,
-            arrows=True,
-            pos=layout.circular_layout(graph),
-            labels={
-                node: node.replace(".", "\n")
-                .replace("model\n", "")
-                .replace("script\n", "")
-                .replace("\npy", ".py")
-                for node in graph.nodes
-            },
-        )
-        plt.show()
-
-
-@dataclass
-class FilteredGraph(OriginGraph):
-    graph: nx.DiGraph
-
-    @classmethod
-    def from_execution_plan(
-        cls,
-        origin_graph: OriginGraph,
-        execution_plan: ExecutionPlan,
-    ) -> FilteredGraph:
-        graph = origin_graph.copy_graph()
-        for node in origin_graph.graph.nodes:
-            if node not in execution_plan.nodes:
-                graph.remove_node(node)
-
-        if execution_plan.after_scripts or execution_plan.before_scripts:
-            LOGGER.warn(
-                "Using before/after scripts is now deprecated. "
-                "Please consider migrating to pre-hooks/post-hooks or Python models."
- ) - - return cls(graph) - - -@dataclass -class ScriptConnectedGraph(OriginGraph): - graph: nx.DiGraph - - @classmethod - def from_filtered_graph( - cls, - filtered_graph: FilteredGraph, - ): - graph = filtered_graph.copy_graph() - shuffled_graph = cls(graph) - shuffled_graph._shuffle() - return shuffled_graph - - def _shuffle(self): - def _pattern_matching(node_check: Callable[[str], bool], nodes: Set[str]): - matched = { - maybe_script for maybe_script in nodes if node_check(maybe_script) - } - return matched, nodes - matched - - def get_before_scripts(graph: nx.DiGraph, node: str): - return _pattern_matching(_is_before_script, set(graph.predecessors(node))) - - def get_after_scripts(graph: nx.DiGraph, node: str): - return _pattern_matching(_is_after_script, set(graph.successors(node))) - - def _add_edges_from_to(from_nodes: Set[str], to_nodes: Set[str]): - self.graph.add_edges_from( - (from_n, to_n) for from_n in from_nodes for to_n in to_nodes - ) - - old_graph = self.copy_graph() - node: str - for node in old_graph.nodes: - - after_scripts, other_succs = get_after_scripts(old_graph, node) - # Keep the original node to succs edges and add a new one from the script to succs - _add_edges_from_to(after_scripts, other_succs) - - before_scripts, other_preds = get_before_scripts(old_graph, node) - # Keep the original preds to node edge and add a new one from preds to the scripts - _add_edges_from_to(other_preds, before_scripts) - - # Add edges between node's after and succ's before scripts - for succ in other_succs: - succ_before_scripts, _succ_other_preds = get_before_scripts( - old_graph, succ - ) - - # Add edge between all after scripts to the succ's before scripts - _add_edges_from_to(after_scripts, succ_before_scripts) - - -@dataclass -class PlannedGraph(OriginGraph): - graph: nx.DiGraph - - @classmethod - def from_script_connected_graph( - cls, - shuffled_graph: ScriptConnectedGraph, - enable_chunking: bool = True, - ): - graph = shuffled_graph.copy_graph() - planned_graph = cls(graph) - if enable_chunking: - planned_graph.plan() - return planned_graph - - def _find_subgraphs(self) -> Iterator[List[str]]: - # Initially topologically sort the graph and find nodes - # that can be grouped together to be run as a single node - # by using the critical node approach. All nodes within a - # group must have the same ancestors, to avoid removing - # existing branches. - - current_stack = [] - allowed_ancestors = set() - - def split() -> Iterator[List[str]]: - if len(current_stack) > 1: - yield current_stack.copy() - current_stack.clear() - allowed_ancestors.clear() - - for node in nx.topological_sort(self.graph): - properties = self.graph.nodes[node] - if properties["kind"] in ( - NodeKind.FAL_MODEL, - NodeKind.FAL_SCRIPT, - ) or properties.get("pre_hook"): - yield from split() - continue - - ancestors = nx.ancestors(self.graph, node) - if not current_stack: - allowed_ancestors = ancestors - - if not ancestors.issubset(allowed_ancestors): - yield from split() - - current_stack.append(node) - allowed_ancestors |= {node, *ancestors} - - if properties.get("post_hook"): - yield from split() - - yield from split() - - def _reduce_subgraph( - self, - nodes: List[str], - ) -> None: - subgraph = self.graph.subgraph(nodes).copy() - - # Use the same set of properties as the last - # node, since only it can have any post hooks. 
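A toy illustration of the ancestor bookkeeping that `_find_subgraphs` above relies on, assuming only that `networkx` is available: nodes are stacked into one group only while their ancestor sets stay compatible, which preserves the existing branches of the graph.

```python
import networkx as nx

g = nx.DiGraph([("a", "b"), ("b", "c"), ("a", "d")])
for node in nx.topological_sort(g):
    # Each candidate's ancestors decide whether it can join the
    # current stack or has to start a new group.
    print(node, sorted(nx.ancestors(g, node)))
```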
- self.graph.add_node( - subgraph, - **self.graph.nodes[nodes[-1]].copy(), - exit_node=nodes[-1], - ) - - for node in nodes: - for predecessor in self.graph.predecessors(node): - self.graph.add_edge(predecessor, subgraph) - - for successor in self.graph.successors(node): - self.graph.add_edge(subgraph, successor) - - self.graph.remove_node(node) - - def plan(self): - # Implementation of Gorkem's Critical Nodes Algorithm - # with a few modifications. - for nodes in self._find_subgraphs(): - self._reduce_subgraph(nodes) diff --git a/projects/adapter/src/fal/dbt/planner/schedule.py b/projects/adapter/src/fal/dbt/planner/schedule.py deleted file mode 100644 index b80424fd..00000000 --- a/projects/adapter/src/fal/dbt/planner/schedule.py +++ /dev/null @@ -1,200 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from pathlib import Path -from typing import Iterator, List, Optional - -import networkx as nx - -from fal.dbt.integration.project import DbtModel -from fal.dbt.node_graph import DbtModelNode, NodeGraph, NodeKind, ScriptNode -from fal.dbt.planner.tasks import ( - SUCCESS, - Task, - DBTTask, - FalIsolatedHookTask, - FalLocalHookTask, - FalModelTask, - TaskGroup, - Status, - HookType, -) -from fal.dbt.utils import DynamicIndexProvider -from fal.dbt.fal_script import Hook, IsolatedHook, create_hook, TimingType - - -def create_hook_task( - hook: Hook, - bound_model: DbtModel, - hook_type: HookType = HookType.HOOK, - timing_type: Optional[TimingType] = None, -) -> Task: - local_hook = FalLocalHookTask( - Path(hook.path), - bound_model=bound_model, - arguments=hook.arguments, - hook_type=hook_type, - _timing_type=timing_type, - ) - if isinstance(hook, IsolatedHook): - return FalIsolatedHookTask( - hook.environment_name, - local_hook, - ) - else: - return local_hook - - -def create_group( - node: str | nx.DiGraph, properties: dict, node_graph: NodeGraph -) -> TaskGroup: - kind = properties["kind"] - if isinstance(node, nx.DiGraph): - # When we are processing a subgraph, we need to know - # the exit point of that graph in order to properly - # bind the post-hooks. We'll do that by sorting each - # node and putting the `exit_node` at the end (boolean sorting). 
- model_ids = sorted( - list(node), - key=lambda node: node == properties["exit_node"], - ) - else: - model_ids = [node] - - bound_model_name = model_ids[-1] - flow_node = node_graph.get_node(bound_model_name) - - if kind is NodeKind.DBT_MODEL: - task = DBTTask(model_ids=model_ids) - elif kind is NodeKind.FAL_MODEL: - assert isinstance(flow_node, DbtModelNode) - model_script = create_hook( - { - "path": str(flow_node.model.python_model), - "environment": properties.get("environment"), - } - ) - model_script_task = create_hook_task( - model_script, - flow_node.model, - hook_type=HookType.MODEL_SCRIPT, - ) - task = FalModelTask(model_ids=model_ids, script=model_script_task) - else: - assert kind is NodeKind.FAL_SCRIPT - assert isinstance(flow_node, ScriptNode) - task = FalLocalHookTask.from_fal_script(flow_node.script) - - pre_hooks = properties.get("pre_hook", []) - post_hooks = properties.get("post_hook", []) - if pre_hooks or post_hooks: - assert flow_node, "hook nodes must be attached to a model node" - assert isinstance(flow_node, DbtModelNode) - - pre_hook_tasks = [ - create_hook_task(hook=pre_hook, bound_model=flow_node.model, timing_type=TimingType.PRE) - for pre_hook in pre_hooks - ] - post_hook_tasks = [ - create_hook_task(hook=post_hook, bound_model=flow_node.model, timing_type=TimingType.POST) - for post_hook in post_hooks - ] - - return TaskGroup(task, pre_hooks=pre_hook_tasks, post_hooks=post_hook_tasks) - - -@dataclass -class Scheduler: - groups: List[TaskGroup] - - def __post_init__(self) -> None: - index_provider = DynamicIndexProvider() - for group in self.groups: - for task in group.iter_tasks(): - task.set_run_index(index_provider) - - def filter_groups(self, status: Status) -> List[TaskGroup]: - return [group for group in self.groups if group.status is status] - - @property - def pending_groups(self) -> List[TaskGroup]: - return self.filter_groups(Status.PENDING) - - def __bool__(self) -> bool: - return bool(self.pending_groups) - - def _calculate_score(self, target_group: TaskGroup) -> tuple[int, int]: - # Determine the priority of the group by doing a bunch - # of calculations. This doesn't really need to be 100% precise, - # since if we don't have this we'll have to schedule randomly. - - # 1. Number of groups which are only waiting this group (direct dependants) - # 2. Number of groups which are waiting this group and other groups (indirect dependants) - # ... - - direct_dependants = 0 - indirect_dependants = 0 - - for group in self.pending_groups: - if group is target_group: - continue - - if any(dependency is target_group for dependency in group.dependencies): - indirect_dependants += 1 - if len(group.dependencies) == 1: - direct_dependants += 1 - - return (direct_dependants, indirect_dependants) - - def _stage_group(self, target_group: TaskGroup) -> None: - target_group.status = Status.RUNNING - - def finish(self, target_group: TaskGroup, status: int) -> None: - # When a staged group's execution is finished, we'll remove it - # altogether and unblock all of its dependencies. 
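The scoring described above boils down to sorting pending groups by a `(direct, indirect)` dependant tuple, descending, so the groups that unblock the most work run first. A hypothetical run (group names and counts invented for illustration):

```python
# (direct dependants, indirect dependants) per pending group.
scores = {"group_a": (2, 5), "group_b": (0, 1), "group_c": (2, 3)}
order = sorted(scores, key=scores.get, reverse=True)
print(order)  # ['group_a', 'group_c', 'group_b'] -- highest impact first
```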
- - if status == SUCCESS: - self._succeed(target_group) - else: - self._fail(target_group) - - def _fail(self, target_group: TaskGroup) -> None: - target_group.status = Status.FAILURE - for group in self.pending_groups: - if target_group in group.dependencies: - group.status = Status.SKIPPED - - def _succeed(self, target_group: TaskGroup) -> None: - target_group.status = Status.SUCCESS - for group in self.pending_groups.copy(): - if target_group in group.dependencies: - group.dependencies.remove(target_group) - - def iter_available_groups(self) -> Iterator[TaskGroup]: - # Whenever a group is finished we'll remove that from other - # groups' dependencies. So in here we'll find all unblocked - # groups (groups without any dependencies) and use the scoring - # algorithm to determine the priority of each groups (kind of like - # a dynamic topological sort). - unblocked_groups = [ - group for group in self.pending_groups if not group.dependencies - ] - unblocked_groups.sort(key=self._calculate_score, reverse=True) - - for group in unblocked_groups: - self._stage_group(group) - yield group - - -def schedule_graph(graph: nx.DiGraph, node_graph: NodeGraph) -> Scheduler: - task_groups = { - node: create_group(node, properties, node_graph) - for node, properties in graph.nodes(data=True) - } - - for name, task_group in task_groups.items(): - task_group.dependencies = [ - task_groups[ancestor] for ancestor in nx.ancestors(graph, name) - ] - - return Scheduler(list(task_groups.values())) diff --git a/projects/adapter/src/fal/dbt/planner/tasks.py b/projects/adapter/src/fal/dbt/planner/tasks.py deleted file mode 100644 index 3fab750b..00000000 --- a/projects/adapter/src/fal/dbt/planner/tasks.py +++ /dev/null @@ -1,297 +0,0 @@ -from __future__ import annotations - -import argparse -import threading -import json -from pathlib import Path -import sys -import traceback -import uuid -from functools import partial -from contextlib import contextmanager -from dataclasses import dataclass, field -from enum import Enum, auto -from typing import Iterator, List, Any, Optional, Dict, Tuple, Union - -from fal.dbt.integration.logger import LOGGER - -from fal.dbt.fal_script import FalScript, TimingType -from fal.dbt.utils import print_run_info, DynamicIndexProvider -from fal.dbt.integration.project import DbtModel, FalDbt, NodeStatus - -from datetime import datetime, timezone - -SUCCESS = 0 -FAILURE = 1 - - -class Task: - def set_run_index(self, index_provider: DynamicIndexProvider) -> None: - self._run_index = index_provider.next() - - @property - def run_index(self) -> int: - run_index = getattr(self, "_run_index", None) - assert run_index is not None - return run_index - - def execute(self, args: argparse.Namespace, fal_dbt: FalDbt) -> int: - raise NotImplementedError - - -class HookType(Enum): - HOOK = "HOOK" - SCRIPT = "SCRIPT" - MODEL_SCRIPT = "MODEL_SCRIPT" - - -class Status(Enum): - PENDING = auto() - RUNNING = auto() - SKIPPED = auto() - SUCCESS = auto() - FAILURE = auto() - - -def _unique_id_to_model_name(unique_id: str) -> str: - split_list = unique_id.split(".") - # if its a unique id 'model.fal_test.model_with_before_scripts' - return split_list[len(split_list) - 1] - - -def _unique_ids_to_model_names(id_list: List[str]) -> List[str]: - return list(map(_unique_id_to_model_name, id_list)) - - -def _mark_dbt_nodes_status_and_response( - fal_dbt: FalDbt, - status: NodeStatus, - dbt_node: Optional[str] = None, - adapter_response: Optional[dict] = None, -): - for model in fal_dbt.models: - if dbt_node is not 
None: - if model.unique_id == dbt_node: - model.status = status - - if adapter_response is not None: - model.adapter_response = adapter_response - else: - model.status = status - - -def _map_cli_output_model_results( - run_results: Dict[Any, Any] -) -> Iterator[Tuple[str, NodeStatus, Optional[dict]]]: - if not isinstance(run_results.get("results"), list): - raise Exception("Could not read dbt run results") - - for result in run_results["results"]: - if not result.get("unique_id") or not result.get("status"): - continue - - yield result["unique_id"], NodeStatus(result["status"]), result.get( - "adapter_response" - ) - - -def _run_script(script: FalScript) -> Dict[str, Any]: - print_run_info([script]) - - # DBT seems to be dealing with only UTC times - # so we'll follow that convention. - started_at = datetime.now(tz=timezone.utc) - try: - with _modify_path(script.faldbt): - script.exec() - except: - LOGGER.error("Error in script {}:\n{}", script.id, traceback.format_exc()) - # TODO: what else to do? - status = NodeStatus.Fail - else: - status = NodeStatus.Success - finally: - LOGGER.debug("Finished script {}", script.id) - finished_at = datetime.now(tz=timezone.utc) - - return { - "path": str(script.path), - "unique_id": str(script.relative_path), - "status": status, - "thread_id": threading.current_thread().name, - "is_hook": script.is_hook, - "execution_time": (finished_at - started_at).total_seconds(), - "timing": [ - { - "name": "execute", - # DBT's suffix for UTC is Z, but isoformat() uses +00:00. So - # we'll manually cast it to the proper format. - # https://stackoverflow.com/a/42777551 - "started_at": started_at.isoformat().replace("+00:00", "Z"), - "finished_at": finished_at.isoformat().replace("+00:00", "Z"), - } - ], - } - - -def run_script(script: FalScript, run_index: int) -> int: - results = _run_script(script) - run_results_file = Path(script.faldbt.target_path) / f"fal_results_{run_index}.json" - with open(run_results_file, "w") as stream: - json.dump( - { - "results": [results], - "elapsed_time": results["execution_time"], - }, - stream, - ) - return SUCCESS if results["status"] == NodeStatus.Success else FAILURE - - -@contextmanager -def _modify_path(fal_dbt: FalDbt): - sys.path.append(fal_dbt.scripts_dir) - try: - yield - finally: - sys.path.remove(fal_dbt.scripts_dir) - - -@dataclass -class DBTTask(Task): - model_ids: List[str] - - def execute(self, args: argparse.Namespace, fal_dbt: FalDbt) -> int: - from fal.dbt.cli.dbt_runner import dbt_run_through_python - - model_names = _unique_ids_to_model_names(self.model_ids) - output = dbt_run_through_python( - args, model_names, fal_dbt.target_path, self.run_index - ) - - for node, status, adapter_response in _map_cli_output_model_results( - output.run_results - ): - _mark_dbt_nodes_status_and_response(fal_dbt, status, node, adapter_response) - - return output.return_code - - -@dataclass -class FalModelTask(DBTTask): - script: Union[FalLocalHookTask, FalIsolatedHookTask] - - def set_run_index(self, index_provider: DynamicIndexProvider) -> None: - super().set_run_index(index_provider) - self.script.set_run_index(index_provider) - - def execute(self, args: argparse.Namespace, fal_dbt: FalDbt) -> int: - # Run the ephemeral model - dbt_result = super().execute(args, fal_dbt) - - # And then run the Python script if it didn't fail. 
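A small aside on the timestamp convention used by `_run_script` above: `datetime.isoformat()` emits a `+00:00` offset while dbt artifacts use the `Z` suffix, hence the manual replacement. A minimal check:

```python
from datetime import datetime, timezone

started_at = datetime.now(tz=timezone.utc)
# "...+00:00" becomes "...Z", matching dbt's run_results format.
print(started_at.isoformat().replace("+00:00", "Z"))
```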
- if dbt_result != SUCCESS: - return dbt_result - - script_result = self.script.execute(args, fal_dbt) - status = NodeStatus.Success if script_result == SUCCESS else NodeStatus.Error - _mark_dbt_nodes_status_and_response( - fal_dbt, status, self.script.bound_model.unique_id - ) - return script_result - - -@dataclass -class FalLocalHookTask(Task): - hook_path: Path - bound_model: Optional[DbtModel] = None - arguments: Optional[Dict[str, Any]] = None - hook_type: HookType = HookType.HOOK - _timing_type: Optional[TimingType] = None - - @classmethod - def from_fal_script(cls, script: FalScript): - if ( - script.model is not None - and script.model.python_model is not None - and script.path == script.model.python_model - ): - hook_type = HookType.MODEL_SCRIPT - else: - hook_type = HookType.HOOK if script.is_hook else HookType.SCRIPT - - return cls( - script.path, - script.model, - script.hook_arguments, - hook_type, - script.timing_type, - ) - - def execute(self, args: argparse.Namespace, fal_dbt: FalDbt) -> int: - script = self.build_fal_script(fal_dbt) - return run_script(script, self.run_index) - - def build_fal_script(self, fal_dbt: FalDbt): - if self.hook_type is HookType.MODEL_SCRIPT: - return FalScript.model_script(fal_dbt, model=self.bound_model) - else: - return FalScript( - fal_dbt, - model=self.bound_model, - path=str(self.hook_path), - hook_arguments=self.arguments, - is_hook=self.hook_type is HookType.HOOK, - timing_type=self._timing_type, - ) - - -@dataclass -class FalIsolatedHookTask(Task): - environment_name: str - local_hook: FalLocalHookTask - - def set_run_index(self, index_provider: DynamicIndexProvider) -> None: - super().set_run_index(index_provider) - self.local_hook.set_run_index(index_provider) - - def execute(self, args: argparse.Namespace, fal_dbt: FalDbt) -> int: - try: - environment = fal_dbt._load_environment(self.environment_name) - except: - import traceback - LOGGER.error("Could not find environment: {}\n{}", self.environment_name, traceback.format_exc()) - return FAILURE - - with environment.connect() as connection: - execute_local_task = partial( - self.local_hook.execute, args=args, fal_dbt=fal_dbt - ) - result = connection.run(execute_local_task) - assert isinstance(result, int), result - return result - - @property - def bound_model(self) -> DbtModel: - return self.local_hook.bound_model - - -@dataclass -class TaskGroup: - task: Task - pre_hooks: List[Task] = field(default_factory=list) - post_hooks: List[Task] = field(default_factory=list) - dependencies: List[TaskGroup] = field(default_factory=list) - status: Status = Status.PENDING - - def __post_init__(self): - self._id = str(uuid.uuid4()) - - @property - def id(self) -> str: - return self._id - - def iter_tasks(self) -> Iterator[Task]: - yield from self.pre_hooks - yield self.task - yield from self.post_hooks diff --git a/projects/adapter/src/fal/dbt/telemetry/__init__.py b/projects/adapter/src/fal/dbt/telemetry/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/src/fal/dbt/telemetry/telemetry.py b/projects/adapter/src/fal/dbt/telemetry/telemetry.py deleted file mode 100644 index 6a1632f1..00000000 --- a/projects/adapter/src/fal/dbt/telemetry/telemetry.py +++ /dev/null @@ -1,430 +0,0 @@ -""" -fal telemetry code uses source code from: - -https://github.com/ploomber/ploomber - -Modifications are made to ensure that the code works with fal. 
-""" - -from __future__ import annotations - -import datetime -import http.client as httplib -import warnings -import posthog -import pkg_resources -import yaml -import os -from pathlib import Path -import sys -import uuid -from functools import wraps -from typing import Any, List, Optional -import inspect -from contextlib import contextmanager -from dbt.config.runtime import RuntimeConfig - -from fal.dbt.utils import cache_static - -import platform - - -TELEMETRY_VERSION = "0.0.2" -DEFAULT_HOME_DIR = "~/.fal" -CONF_DIR = "stats" -FAL_HOME_DIR = os.getenv("FAL_HOME_DIR") - -posthog.project_api_key = "phc_Yf1tsGPPb4POvqVjelT3rPPv2c3FH91zYURyyL30Phy" - -invocation_id = uuid.uuid4() - - -def shutdown(): - posthog.shutdown() - # HACK: while https://github.com/PostHog/posthog-python/pull/52 happens - from posthog.request import _session as posthog_session - - posthog_session.close() - - -def str_param(item: Any) -> str: - if not isinstance(item, str): - raise TypeError(f"Variable not supported/wrong type: {item} should be a str") - return item - - -def opt_str_param(item: Any) -> Optional[str]: - if item is None: - return item - return str_param(item) - - -def python_version(): - py_version = sys.version_info - return f"{py_version.major}.{py_version.minor}.{py_version.micro}" - - -def is_online(): - """Check if host is online""" - conn = httplib.HTTPSConnection("www.google.com", timeout=1) - - try: - conn.request("HEAD", "/") - return True - except Exception: - return False - finally: - conn.close() - - -# Will output if the code is within a container -def is_docker(): - try: - cgroup = Path("/proc/self/cgroup") - docker_env = Path("/.dockerenv") - return ( - docker_env.exists() - or cgroup.exists() - and any("docker" in line for line in cgroup.read_text().splitlines()) - ) - except OSError: - return False - - -def is_github(): - """Return True if inside a GitHub Action""" - return os.getenv("GITHUB_ACTIONS") is not None - - -def is_gitlab(): - """Return True if inside a GitLab CI""" - return os.getenv("GITLAB_CI") is not None - - -def get_os(): - """ - The function will output the client platform - """ - return platform.system() - - -def dbt_installed_version(): - """Returns: dbt version""" - try: - return pkg_resources.get_distribution("dbt-core").version - except pkg_resources.DistributionNotFound: - return - - -def get_dbt_adapter_type(config: RuntimeConfig | None) -> str | None: - """Returns: the configured DBT adapter or None if it's not in a runner context""" - if config is not None: - target = config.to_target_dict() - return target["type"] - return None - - -def fal_installed_version(): - """Returns: fal version""" - try: - return pkg_resources.get_distribution("dbt-postgres-python").version - except pkg_resources.DistributionNotFound: - return - - -def is_airflow(): - """Returns: True for Airflow env""" - return "AIRFLOW_CONFIG" in os.environ or "AIRFLOW_HOME" in os.environ - - -def get_home_dir(): - """ - Checks if fal home was set through the env variable. - returns the actual home_dir path. - """ - return FAL_HOME_DIR if FAL_HOME_DIR else DEFAULT_HOME_DIR - - -def check_dir_exist(input_location=None): - """ - Checks if a specific directory exists, creates if not. 
- In case the user didn't set a custom dir, will turn to the default home - """ - home_dir = get_home_dir() - - if input_location: - p = Path(home_dir, input_location) - else: - p = Path(home_dir) - - p = p.expanduser() - - if not p.exists(): - p.mkdir(parents=True) - - return p - - -def check_uid(): - """ - Checks if local user id exists as a uid file, creates if not. - """ - uid_path = Path(check_dir_exist(CONF_DIR), "uid.yaml") - conf = read_conf_file(uid_path) # file already exist due to version check - if "uid" not in conf.keys(): - uid = str(uuid.uuid4()) - err = write_conf_file(uid_path, {"uid": uid}, error=True) - if err: - return f"NO_UID", err, True - else: - return uid, None, True - - return conf.get("uid") or "NO_UID", None, False - - -def check_stats_enabled() -> bool: - """ - Check if the user allows us to use telemetry. In order of precedence: - 1. If FAL_STATS_ENABLED is defined, check its value - 2. If DO_NOT_TRACK is defined, check its value - 3. Otherwise use the value in stats_enabled in the config.yaml file - """ - if "FAL_STATS_ENABLED" in os.environ: - val = os.environ["FAL_STATS_ENABLED"].lower().strip() - return val != "0" and val != "false" and val != "" - - if "DO_NOT_TRACK" in os.environ: - val = os.environ["DO_NOT_TRACK"].lower().strip() - return val != "1" and val != "true" - - # Check if local config exists - config_path = Path(check_dir_exist(CONF_DIR), "config.yaml") - if not config_path.exists(): - write_conf_file(config_path, {"stats_enabled": True}) - return True - else: # read and return config - conf = read_conf_file(config_path) - return conf.get("stats_enabled", True) - - -def check_first_time_usage(): - """ - The function checks for first time usage if the conf file exists and the - uid file doesn't exist. - """ - config_path = Path(check_dir_exist(CONF_DIR), "config.yaml") - uid_path = Path(check_dir_exist(CONF_DIR), "uid.yaml") - uid_conf = read_conf_file(uid_path) - return config_path.exists() and "uid" not in uid_conf.keys() - - -def read_conf_file(conf_path): - try: - with conf_path.open("r") as file: - conf = yaml.safe_load(file) - return conf - except Exception as e: - warnings.warn(f"Can't read config file {e}") - return {} - - -def write_conf_file(conf_path, to_write, error=None): - try: # Create for future runs - with conf_path.open("w") as file: - yaml.dump(to_write, file) - except Exception as e: - warnings.warn(f"Can't write to config file: {e}") - if error: - return e - - -@cache_static -def get_dbt_config(): - try: - from dbt.flags import PROFILES_DIR - from fal.dbt.cli.args import parse_args - from fal.dbt.integration.parse import get_dbt_config - - args = parse_args(sys.argv[1:]) - - profiles_dir: str = PROFILES_DIR # type: ignore - if args.profiles_dir is not None: - profiles_dir = args.profiles_dir - - project_dir = os.path.realpath(os.path.expanduser(args.project_dir)) - profiles_dir = os.path.realpath(os.path.expanduser(profiles_dir)) - return get_dbt_config( - project_dir=project_dir, - profiles_dir=profiles_dir, - ) - except BaseException: - # Hide the error to not break the app for telemetry - pass - - -def log_api( - action: str, - total_runtime=None, - additional_props: Optional[dict] = None, - *, - dbt_config=None, -): - """ - This function logs through an API call, assigns parameters if missing like - timestamp, event id and stats information. 
- """ - - if not check_stats_enabled(): - return - - if not is_online(): - return - - additional_props = additional_props or {} - - event_id = uuid.uuid4() - - client_time = datetime.datetime.now() - - uid, uid_error, is_install = check_uid() - - if "NO_UID" in uid: - additional_props["uid_issue"] = str(uid_error) if uid_error is not None else "" - - config_hash = "" - if dbt_config is None: - dbt_config = get_dbt_config() - if dbt_config is not None and hasattr(dbt_config, "hashed_name"): - config_hash = str(dbt_config.hashed_name()) - - opt_str_param(uid) - str_param(action) - - props = { - "tool": "fal-cli", - "config_hash": config_hash, - "event_id": str(event_id), - "invocation_id": str(invocation_id), - "user_id": uid, - "action": action, - "client_time": str(client_time), - "total_runtime": str(total_runtime), - "python_version": python_version(), - "fal_version": fal_installed_version(), - "dbt_version": dbt_installed_version(), - "dbt_adapter": get_dbt_adapter_type(dbt_config), - "docker_container": is_docker(), - "airflow": is_airflow(), - "github_action": is_github(), - "gitlab_ci": is_gitlab(), - "argv": sys.argv, - "os": get_os(), - "telemetry_version": TELEMETRY_VERSION, - "$geoip_disable": True, # This disables GeoIp despite the backend setting - "$ip": None, # This disables IP tracking - } - - all_props = {**props, **additional_props} - - if "argv" in all_props: - all_props["argv"] = _clean_args_list(all_props["argv"]) - - if is_install: - posthog.capture(distinct_id=uid, event="install_success", properties=all_props) - - posthog.capture(distinct_id=uid, event=action, properties=all_props) - - -@contextmanager -def log_time(action: str, additional_props: Optional[dict] = None, *, dbt_config=None): - log_api( - action=f"{action}_started", - additional_props=additional_props, - dbt_config=dbt_config, - ) - - start = datetime.datetime.now() - - try: - yield - except Exception as e: - log_api( - action=f"{action}_error", - total_runtime=str(datetime.datetime.now() - start), - additional_props={ - **(additional_props or {}), - "exception": str(type(e)), - }, - dbt_config=dbt_config, - ) - raise - else: - log_api( - action=f"{action}_success", - total_runtime=str(datetime.datetime.now() - start), - additional_props=additional_props, - dbt_config=dbt_config, - ) - - -# NOTE: should we log differently depending on the error type? -# NOTE: how should we handle chained exceptions? 
-def log_call(action, args: List[str] = [], *, dbt_config=None): - """Runs a function and logs it""" - - def _log_call(func): - @wraps(func) - def wrapper(*func_args, **func_kwargs): - sig = inspect.signature(func).bind(*func_args, **func_kwargs) - sig.apply_defaults() - log_args = dict(map(lambda arg: (arg, sig.arguments.get(arg)), args)) - with log_time( - action, additional_props={"args": log_args}, dbt_config=dbt_config - ): - return func(*func_args, **func_kwargs) - - return wrapper - - return _log_call - - -def _clean_args_list(args: List[str]) -> List[str]: - ALLOWLIST = [ - "--disable-logging", - "--project-dir", - "--profiles-dir", - "--defer", - "--threads", - "--thread", - "--state", - "--full-refresh", - "-s", - "--select", - "-m", - "--models", - "--model", - "--exclude", - "--selector", - "--all", - "--scripts", - "--script", - "--before", - "run", - "fal", - "-v", - "--version", - "--debug", - "flow", - "--vars", - "--var", - "--target", - "--globals", - ] - REDACTED = "[REDACTED]" - output = [] - for item in args: - if item in ALLOWLIST: - output.append(item) - else: - output.append(REDACTED) - return output diff --git a/projects/adapter/src/fal/dbt/typing.py b/projects/adapter/src/fal/dbt/typing.py deleted file mode 100644 index a1903acb..00000000 --- a/projects/adapter/src/fal/dbt/typing.py +++ /dev/null @@ -1,118 +0,0 @@ -# This file is auto-generated by tools/generate_typing_context.py, please -# don't manually alter the contents. - -from __future__ import annotations -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - import pandas as pd - from typing import Optional, Dict, List, Protocol, Any - from fal.dbt.integration.project import DbtModel, DbtTest, DbtSource, Feature - from fal.dbt.fal_script import Context, CurrentModel - - class _List_Sources(Protocol): - - def __call__(self) -> List[DbtSource]: - """ - List tables available for `source` usage - """ - ... - - class _List_Models_Ids(Protocol): - - def __call__(self) -> Dict[str, str]: - """ - List model ids available for `ref` usage, formatting like `[ref_name, ...]` - """ - ... - - class _List_Models(Protocol): - - def __call__(self) -> List[DbtModel]: - """ - List models - """ - ... - - class _List_Tests(Protocol): - - def __call__(self) -> List[DbtTest]: - """ - List tests - """ - ... - - class _List_Features(Protocol): - - def __call__(self) -> List[Feature]: ... - class _Ref(Protocol): - - def __call__( - self, target_1: str, target_2: Optional[str] = None - ) -> pd.DataFrame: - """ - Download a dbt model as a pandas.DataFrame automagically. - """ - ... - - class _Source(Protocol): - - def __call__( - self, target_source_name: str, target_table_name: str - ) -> pd.DataFrame: - """ - Download a dbt source as a pandas.DataFrame automagically. - """ - ... - - class _Write_To_Source(Protocol): - - def __call__( - self, - data: pd.DataFrame, - target_source_name: str, - target_table_name: str, - *, - dtype: Any = None, - mode: str = "append", - ): - """ - Write a pandas.DataFrame to a dbt source automagically. - """ - ... - - class _Execute_Sql(Protocol): - - def __call__(self, sql: str) -> pd.DataFrame: - """Execute a sql query.""" - ... - - # Manually introduced annotations, update manually in tools/generate_typing_context.py template. - class _Write_To_Model(Protocol): - def __call__( - self, - data: pd.DataFrame, - *, - dtype: Any = None, - mode: str = "overwrite", - target_1: str = ..., - target_2: Optional[str] = ..., - ): - """ - Write a pandas.DataFrame to a dbt model automagically. - """ - ... 
- - -context: Context -write_to_model: _Write_To_Model - -list_sources: _List_Sources -list_models_ids: _List_Models_Ids -list_models: _List_Models -list_tests: _List_Tests -list_features: _List_Features -ref: _Ref -source: _Source -write_to_source: _Write_To_Source -execute_sql: _Execute_Sql diff --git a/projects/adapter/src/fal/dbt/utils.py b/projects/adapter/src/fal/dbt/utils.py deleted file mode 100644 index 820e3700..00000000 --- a/projects/adapter/src/fal/dbt/utils.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Fal utilities.""" -import copy -from fal.dbt.integration.logger import LOGGER -from typing import List, TYPE_CHECKING, TypeVar - -try: - from functools import lru_cache -except ImportError: - from backports.functools_lru_cache import lru_cache - -if TYPE_CHECKING: - from fal.dbt.fal_script import FalScript - - -def print_run_info(scripts: List["FalScript"]): - """Print information on the current fal run.""" - models_str = "\n".join(map(lambda script: script.id, scripts)) - LOGGER.info(f"Starting fal run for following models and scripts: \n{models_str}\n") - - -class DynamicIndexProvider: - def __init__(self) -> None: - self._index = 0 - - def next(self) -> int: - """Increment the counter and return the last value.""" - index = self._index - self._index += 1 - return index - - def __int__(self) -> int: - """Return the last value.""" - return self._index - - -class _ReInitialize: - def __init__(self, *args, **kwargs): - self._serialization_state = { - "args": copy.deepcopy(args), - "kwargs": copy.deepcopy(kwargs), - } - super().__init__(*args, **kwargs) - - def __getstate__(self): - return self._serialization_state - - def __setstate__(self, state): - super().__init__(*state["args"], **state["kwargs"]) - -Cls = TypeVar("Cls", bound=type) - -def has_side_effects(cls: "Cls") -> "Cls": - """The given class has possible side-effects that might - make the regular serialization problematic (e.g. 
registering - adapters to DBT's global factory).""" - return type(cls.__name__, (_ReInitialize, cls), {}) - - -def cache_static(func): - """Cache the result of a function.""" - return lru_cache(maxsize=None)(func) diff --git a/projects/adapter/tests/__init__.py b/projects/adapter/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/projects/adapter/tests/_fal_testing/__init__.py b/projects/adapter/tests/_fal_testing/__init__.py deleted file mode 100644 index 9ce7bee1..00000000 --- a/projects/adapter/tests/_fal_testing/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from _fal_testing.utils import * diff --git a/projects/adapter/tests/_fal_testing/utils.py b/projects/adapter/tests/_fal_testing/utils.py deleted file mode 100644 index c1bd3f33..00000000 --- a/projects/adapter/tests/_fal_testing/utils.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import sys -import inspect -from pathlib import Path - - -def create_artifact(context, suffix, additional_data=None): - model = context.current_model - model_name = model.name if model else "GLOBAL" - model_status = model.status if model else None - - output = f"Model name: {model_name}" - output += f"\nStatus: {model_status}" - output += f"\nModel dataframe name: {model_name}" - if additional_data: - output += f"\n{additional_data}" - - create_file(output, Path(model_name).with_suffix(suffix)) - - -def create_file(output, file_name): - temp_dir = Path(os.getenv("temp_dir", ".")) - temp_file = temp_dir / file_name - temp_file.write_text(output) - - -def create_model_artifact(context, additional_data=None): - create_artifact(context, ".txt", additional_data) - - -def create_script_artifact(context, prompt, additional_data=None): - create_artifact(context, f".{prompt}.txt", additional_data) - - -def create_dynamic_artifact(context, additional_data=None): - _, outer_frame, *_ = inspect.stack() - return create_script_artifact( - context, Path(outer_frame.filename).stem, additional_data - ) - - -def get_environment_type(): - # To determine whether this is a fal-created environment or not - # we'll check whether the executable that is running this script - # is located under any of the designated fal environment directories. - from fal.dbt.packages.environments.virtual_env import _BASE_VENV_DIR - from fal.dbt.packages.environments.conda import _BASE_CONDA_DIR - - executable_path = Path(sys.executable) - for environment_type, prefix in [ - ("venv", _BASE_VENV_DIR), - ("conda", _BASE_CONDA_DIR), - ]: - if prefix in executable_path.parents: - return environment_type - else: - return "local" diff --git a/projects/adapter/tests/conftest.py b/projects/adapter/tests/conftest.py deleted file mode 100644 index 1773a159..00000000 --- a/projects/adapter/tests/conftest.py +++ /dev/null @@ -1,49 +0,0 @@ -import stat -import shutil -import os -import pytest -import tempfile -from glob import iglob -from pathlib import Path - - -def pytest_configure(config): - """ - Allows plugins and conftest files to perform initial configuration. - This hook is called for every plugin and initial conftest - file after command line options have been parsed. 
- """ - os.environ['FAL_STATS_ENABLED'] = 'False' - - # tests_path = Path(__file__).parent - # target_path = tests_path / 'mock' / 'mockTarget' - # TODO (target-path): Wait for https://github.com/dbt-labs/dbt-core/issues/7465 to be fixed - # os.environ['DBT_TARGET_PATH'] = str(target_path) - -def _delete_dot_git_at(path): - for root, dirs, files in os.walk(path): - for dir_ in dirs: - os.chmod(Path(root, dir_), stat.S_IRWXU) - for file_ in files: - os.chmod(Path(root, file_), stat.S_IRWXU) - -def _delete_all_dot_git(): - if os.name == 'nt': - for path in iglob('**/.git', recursive=True): - _delete_dot_git_at(path) - -@pytest.fixture() -def tmp_directory(): - old = os.getcwd() - tmp = tempfile.mkdtemp() - os.chdir(str(tmp)) - - yield tmp - - # some tests create sample git repos, if we are on windows, we need to - # change permissions to be able to delete the files - _delete_all_dot_git() - - os.chdir(old) - - shutil.rmtree(str(tmp)) diff --git a/projects/adapter/tests/docker-compose.yml b/projects/adapter/tests/docker-compose.yml deleted file mode 100644 index 2ca82311..00000000 --- a/projects/adapter/tests/docker-compose.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: '3.9' - -services: - db: - image: postgres:12 - container_name: fal_db - environment: - POSTGRES_USER: pguser - POSTGRES_PASSWORD: pass - POSTGRES_DB: test - ports: - - 5432:5432 diff --git a/projects/adapter/tests/graph/test_node_graph.py b/projects/adapter/tests/graph/test_node_graph.py deleted file mode 100644 index be74fb1c..00000000 --- a/projects/adapter/tests/graph/test_node_graph.py +++ /dev/null @@ -1,126 +0,0 @@ -import pytest -import networkx as nx - -from unittest.mock import MagicMock -from unittest.mock import patch -from utils import assert_contains_only, create_mock_model - -from fal.dbt.node_graph import NodeGraph, _add_after_scripts, _add_before_scripts -import fal.dbt.integration.version as version - - - -# TODO: fix mocking for 1.5 and remove skips -@pytest.mark.skipif(version.is_version_plus("1.4.0"), reason="mocking <1.4 modules") -@patch("dbt.contracts.graph.parsed.ParsedModelNode") -@patch("fal.dbt.FalDbt") -def test_add_after_scripts(parsed_node, fal_dbt_class): - graph = nx.DiGraph() - node_lookup = {} - modelA = create_mock_model(parsed_node, "modelA", ["scriptA.py", "scriptB.py"], []) - - fal_dbt_instance = fal_dbt_class("/dir", "/profiles") - fal_dbt_instance.scripts_dir = "/dir" - graph, node_lookup = _add_after_scripts( - modelA, "model.modelA", fal_dbt_instance, graph, node_lookup - ) - - assert_contains_only( - list(node_lookup.keys()), - ["script.modelA.AFTER.scriptA.py", "script.modelA.AFTER.scriptB.py"], - ) - assert_contains_only( - list(graph.successors("model.modelA")), - ["script.modelA.AFTER.scriptA.py", "script.modelA.AFTER.scriptB.py"], - ) - - -@pytest.mark.skipif(version.is_version_plus("1.4.0"), reason="mocking <1.4 modules") -@patch("dbt.contracts.graph.parsed.ParsedModelNode") -@patch("fal.dbt.FalDbt") -def test_add_before_scripts(parsed_node, fal_dbt_class): - graph = nx.DiGraph() - node_lookup = {} - modelA = create_mock_model( - parsed_node, "modelA", [], [], before_script_paths=["scriptA.py", "scriptB.py"] - ) - - fal_dbt_instance = fal_dbt_class("/dir", "/profiles") - fal_dbt_instance.scripts_dir = "/dir" - graph, node_lookup = _add_before_scripts( - modelA, "model.modelA", fal_dbt_instance, graph, node_lookup - ) - - assert_contains_only( - list(node_lookup.keys()), - ["script.modelA.BEFORE.scriptA.py", "script.modelA.BEFORE.scriptB.py"], - ) - - assert_contains_only( - 
list(graph.predecessors("model.modelA")), - ["script.modelA.BEFORE.scriptA.py", "script.modelA.BEFORE.scriptB.py"], - ) - - -@patch("fal.dbt.FalDbt") -def test_empty_fal_dbt(fal_dbt_class): - fal_dbt_instance = fal_dbt_class("/dir", "/profiles") - fal_dbt_instance.scripts_dir = "/dir" - fal_dbt_instance.list_models = MagicMock(return_value=[]) - node_graph = NodeGraph.from_fal_dbt(fal_dbt_instance) - - assert list(node_graph.node_lookup.keys()) == [] - - -@pytest.mark.skipif(version.is_version_plus("1.4.0"), reason="mocking <1.4 modules") -@patch("dbt.contracts.graph.parsed.ParsedModelNode") -@patch("fal.dbt.FalDbt") -def test_create_with_fal_dbt(parsed_node, fal_dbt_class): - modelA = create_mock_model(parsed_node, "modelA", ["scriptA.py", "scriptB.py"], []) - modelB = create_mock_model(parsed_node, "modelB", ["scriptB.py"], ["model.modelA"]) - modelC = create_mock_model( - parsed_node, "modelC", ["scriptC.py"], ["model.modelA", "model.modelB"] - ) - fal_dbt_instance = fal_dbt_class("/dir", "/profiles") - fal_dbt_instance.scripts_dir = "/dir" - fal_dbt_instance.list_models = MagicMock(return_value=[modelA, modelB, modelC]) - - node_graph = NodeGraph.from_fal_dbt(fal_dbt_instance) - - assert_contains_only( - list(node_graph.node_lookup.keys()), - [ - "model.modelA", - "model.modelB", - "model.modelC", - "script.modelA.AFTER.scriptA.py", - "script.modelA.AFTER.scriptB.py", - "script.modelB.AFTER.scriptB.py", - "script.modelC.AFTER.scriptC.py", - ], - ) - - assert_contains_only( - node_graph.get_descendants("model.modelA"), - [ - "model.modelC", - "script.modelA.AFTER.scriptB.py", - "script.modelC.AFTER.scriptC.py", - "script.modelA.AFTER.scriptA.py", - "model.modelB", - "script.modelB.AFTER.scriptB.py", - ], - ) - - assert_contains_only( - node_graph.get_descendants("model.modelB"), - [ - "script.modelB.AFTER.scriptB.py", - "model.modelC", - "script.modelC.AFTER.scriptC.py", - ], - ) - - assert_contains_only( - node_graph.get_descendants("model.modelC"), ["script.modelC.AFTER.scriptC.py"] - ) diff --git a/projects/adapter/tests/graph/test_selectors.py b/projects/adapter/tests/graph/test_selectors.py deleted file mode 100644 index 444b85ab..00000000 --- a/projects/adapter/tests/graph/test_selectors.py +++ /dev/null @@ -1,251 +0,0 @@ -from typing import Any, Dict -from fal.dbt.cli.selectors import ExecutionPlan -from fal.dbt.node_graph import NodeGraph -import networkx as nx -from argparse import Namespace -from utils import assert_contains_only -from unittest.mock import MagicMock - -PROJECT_NAME = "test_project" - - -def test_execution_plan_only_dbt(): - ids_to_execute = ["modelA", "modelB"] - plan = ExecutionPlan(ids_to_execute, PROJECT_NAME) - assert plan.after_scripts == [] - assert plan.before_scripts == [] - assert plan.dbt_models == ["modelA", "modelB"] - - -def test_execution_plan_all_empty(): - ids_to_execute = [] - plan = ExecutionPlan(ids_to_execute, PROJECT_NAME) - assert plan.after_scripts == [] - assert plan.before_scripts == [] - assert plan.dbt_models == [] - - -def test_execution_plan_mixed(): - ids_to_execute = [ - "modelA", - "script.modelA.AFTER.scriptName.py", - "script.modelA.BEFORE.scriptName.py", - "script.modelB.BEFORE.scriptName.py", - "script.modelB.BEFORE.scriptNameA.py", - "script.modelB.AFTER.scriptNameB.py", - ] - plan = ExecutionPlan(ids_to_execute, PROJECT_NAME) - assert_contains_only( - plan.after_scripts, - [ - "script.modelA.AFTER.scriptName.py", - "script.modelB.AFTER.scriptNameB.py", - ], - ) - assert_contains_only( - plan.before_scripts, - [ - 
"script.modelA.BEFORE.scriptName.py", - "script.modelB.BEFORE.scriptName.py", - "script.modelB.BEFORE.scriptNameA.py", - ], - ) - assert plan.dbt_models == ["modelA"] - - -def test_create_plan_before_downstream(): - parsed = Namespace(select=["scriptC.py+"]) - graph = _create_test_graph() - - execution_plan = ExecutionPlan.create_plan_from_graph( - parsed, graph, MagicMock(project_name=PROJECT_NAME) - ) - - assert execution_plan.before_scripts == ["script.model.BEFORE.scriptC.py"] - assert execution_plan.dbt_models == ["model.test_project.modelA"] - assert_contains_only( - execution_plan.after_scripts, - [ - "script.model.AFTER.scriptA.py", - "script.model.AFTER.scriptB.py", - ], - ) - - -def test_create_plan_start_model_downstream(): - parsed = Namespace(select=["modelA+"]) - graph = _create_test_graph() - - execution_plan = ExecutionPlan.create_plan_from_graph( - parsed, graph, MagicMock(project_name=PROJECT_NAME) - ) - - assert execution_plan.before_scripts == [] - assert execution_plan.dbt_models == ["model.test_project.modelA"] - assert_contains_only( - execution_plan.after_scripts, - [ - "script.model.AFTER.scriptA.py", - "script.model.AFTER.scriptB.py", - ], - ) - - -def test_create_plan_start_model_upstream(): - parsed = Namespace(select=["+modelA"]) - graph = _create_test_graph() - - execution_plan = ExecutionPlan.create_plan_from_graph( - parsed, graph, MagicMock(project_name=PROJECT_NAME) - ) - - assert_contains_only( - execution_plan.before_scripts, - [ - "script.model.BEFORE.scriptC.py", - "script.model.BEFORE.scriptD.py", - ], - ) - assert execution_plan.dbt_models == ["model.test_project.modelA"] - assert execution_plan.after_scripts == [] - - -def test_create_plan_large_graph_model_levels(): - def _model(s: str) -> str: - return f"model.test_project.{s}" - - def _after_script_for_model(model: str) -> str: - return f"script.{model}.AFTER.script.py" - - digraph = nx.DiGraph() - - for n in range(100): - modeln_name = f"model{n}" - modeln = _model(modeln_name) - - digraph.add_edge(modeln, _after_script_for_model(modeln_name)) - - modelnext = f"model.test_project.model{n+1}" - digraph.add_edge(modeln, modelnext) - - for m in range(10): # Avoid cycles with these ranges - modelm = _model(f"model{n}_{m}") - digraph.add_edge(modeln, modelm) - - node_lookup: Dict[str, Any] = {node: None for node in digraph.nodes} - graph = NodeGraph(digraph, node_lookup) - - parsed = Namespace(select=["model0+70"]) - - execution_plan = ExecutionPlan.create_plan_from_graph( - parsed, graph, MagicMock(project_name=PROJECT_NAME) - ) - - assert execution_plan.before_scripts == [] - assert_contains_only( - execution_plan.dbt_models, - # model0, model1, ..., model70 - [_model(f"model{n}") for n in range(71)] - # model0_0, model0_1, ..., model0_9, model1_0, ..., model69_0, ..., model69_9 - # not the children of model70, it ends in model70 - + [_model(f"model{n}_{m}") for m in range(10) for n in range(70)], - ) - assert_contains_only( - execution_plan.after_scripts, - # after script for model0, model1, ..., model69 - # not model70 because that is one level too far - [_after_script_for_model(f"model{n}") for n in range(70)], - ) - - -def test_create_plan_start_model_upstream_and_downstream(): - parsed = Namespace(select=["+modelA+"]) - graph = _create_test_graph() - - execution_plan = ExecutionPlan.create_plan_from_graph( - parsed, graph, MagicMock(project_name=PROJECT_NAME) - ) - - assert_contains_only( - execution_plan.before_scripts, - [ - "script.model.BEFORE.scriptC.py", - 
"script.model.BEFORE.scriptD.py", - ], - ) - assert execution_plan.dbt_models == ["model.test_project.modelA"] - assert_contains_only( - execution_plan.after_scripts, - [ - "script.model.AFTER.scriptA.py", - "script.model.AFTER.scriptB.py", - ], - ) - - -def test_create_plan_start_after_downstream(): - parsed = Namespace(select=["scriptA.py+"]) - graph = _create_test_graph() - - execution_plan = ExecutionPlan.create_plan_from_graph( - parsed, graph, MagicMock(project_name=PROJECT_NAME) - ) - - assert execution_plan.before_scripts == [] - assert execution_plan.dbt_models == [] - assert execution_plan.after_scripts == [ - "script.model.AFTER.scriptA.py", - ] - - -def test_create_plan_no_graph_selectors(): - parsed = Namespace(select=["modelA", "modelB"]) - graph = _create_test_graph() - - execution_plan = ExecutionPlan.create_plan_from_graph( - parsed, graph, MagicMock(project_name=PROJECT_NAME) - ) - - assert execution_plan.before_scripts == [] - assert_contains_only( - execution_plan.dbt_models, - [ - "model.test_project.modelA", - "model.test_project.modelB", - ], - ) - assert execution_plan.after_scripts == [] - - -def _create_test_graph(): - scriptA = "script.model.AFTER.scriptA.py" - scriptB = "script.model.AFTER.scriptB.py" - scriptC = "script.model.BEFORE.scriptC.py" - scriptD = "script.model.BEFORE.scriptD.py" - - modelA = "model.test_project.modelA" - modelB = "model.test_project.modelB" - - sourceA = "source.test_project.results.some_source" - - graph = nx.DiGraph() - - graph.add_node(sourceA) - - graph.add_node(scriptA) - graph.add_node(scriptB) - graph.add_node(scriptC) - graph.add_node(scriptD) - graph.add_node(modelA) - graph.add_node(modelB) - - graph.add_edge(sourceA, modelA) - graph.add_edge(modelA, scriptA) - graph.add_edge(modelA, scriptB) - graph.add_edge(scriptC, modelA) - graph.add_edge(scriptD, modelA) - - node_lookup: Dict[str, Any] = { - node: None for node in graph.nodes if not node.startswith("source.") - } - return NodeGraph(graph, node_lookup) diff --git a/projects/adapter/tests/graph/utils.py b/projects/adapter/tests/graph/utils.py deleted file mode 100644 index 637b2040..00000000 --- a/projects/adapter/tests/graph/utils.py +++ /dev/null @@ -1,32 +0,0 @@ -from fal.dbt import DbtModel -from copy import deepcopy -from unittest.mock import MagicMock - - -def assert_contains_only(thisList, otherList): - assert len(thisList) == len(otherList) - for other in otherList: - assert other in thisList - - -def create_mock_model( - parsedNodeMockInstance, - name, - script_paths, - depends_on_models, - before_script_paths=[], -) -> DbtModel: - node = deepcopy(parsedNodeMockInstance) - node.unique_id = "model." 
+ name - node.name = name - model = DbtModel(node) - - def script_calculations(before: bool = False): - if before: - return before_script_paths - else: - return script_paths - - model.get_scripts = MagicMock(side_effect=script_calculations) - model.get_depends_on_nodes = MagicMock(return_value=depends_on_models) - return model diff --git a/projects/adapter/tests/mock/.gitignore b/projects/adapter/tests/mock/.gitignore deleted file mode 100644 index 6840ffd4..00000000 --- a/projects/adapter/tests/mock/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -logs/ -mockTarget/ diff --git a/projects/adapter/tests/mock/dbt_project.yml b/projects/adapter/tests/mock/dbt_project.yml deleted file mode 100644 index b4c611f6..00000000 --- a/projects/adapter/tests/mock/dbt_project.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: "fal_test" -version: "1.0.0" -config-version: 2 -profile: "fal_test" -model-paths: ["models"] -analysis-paths: ["analysis"] -test-paths: ["tests"] -seed-paths: ["data"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] -# TODO (target-path): Deprecated but keeping until https://github.com/dbt-labs/dbt-core/issues/7465 is resolved -target-path: "mockTarget" -clean-targets: - - "target" - - "dbt_modules" diff --git a/projects/adapter/tests/mock/fal_scripts/test.py b/projects/adapter/tests/mock/fal_scripts/test.py deleted file mode 100644 index 2ae28399..00000000 --- a/projects/adapter/tests/mock/fal_scripts/test.py +++ /dev/null @@ -1 +0,0 @@ -pass diff --git a/projects/adapter/tests/mock/mockProfile/.user.yml b/projects/adapter/tests/mock/mockProfile/.user.yml deleted file mode 100644 index 1fb27d1d..00000000 --- a/projects/adapter/tests/mock/mockProfile/.user.yml +++ /dev/null @@ -1 +0,0 @@ -id: 71be9722-4fbd-44fb-8cc8-5ac5a1b93b30 diff --git a/projects/adapter/tests/mock/mockProfile/profiles.yml b/projects/adapter/tests/mock/mockProfile/profiles.yml deleted file mode 100644 index 695c44c7..00000000 --- a/projects/adapter/tests/mock/mockProfile/profiles.yml +++ /dev/null @@ -1,15 +0,0 @@ -config: - send_anonymous_usage_stats: False - -fal_test: - target: dev - outputs: - dev: - type: postgres - host: localhost - user: pguser - password: pass - port: 5432 - dbname: test - schema: dbt_fal - threads: 4 diff --git a/projects/adapter/tests/mock/mockTarget/run_results.json b/projects/adapter/tests/mock/mockTarget/run_results.json deleted file mode 100644 index 11e1d2d6..00000000 --- a/projects/adapter/tests/mock/mockTarget/run_results.json +++ /dev/null @@ -1,202 +0,0 @@ -{ - "metadata": { - "dbt_schema_version": "https://schemas.getdbt.com/dbt/run-results/v4.json", - "dbt_version": "1.0.8", - "generated_at": "2022-07-13T00:03:00.614829Z", - "invocation_id": "3d3557d7-3906-4edc-917a-bc1826386c85", - "env": {} - }, - "results": [ - { - "status": "success", - "timing": [ - { - "name": "compile", - "started_at": "2022-07-13T00:03:00.018100Z", - "completed_at": "2022-07-13T00:03:00.036981Z" - }, - { - "name": "execute", - "started_at": "2022-07-13T00:03:00.070654Z", - "completed_at": "2022-07-13T00:03:00.310656Z" - } - ], - "thread_id": "Thread-4", - "execution_time": 0.30472898483276367, - "adapter_response": { - "_message": "SELECT 1", - "code": "SELECT", - "rows_affected": 1 - }, - "message": "SELECT 1", - "failures": null, - "unique_id": "model.fal_test.model_no_fal" - }, - { - "status": "success", - "timing": [ - { - "name": "compile", - "started_at": "2022-07-13T00:03:00.013752Z", - "completed_at": "2022-07-13T00:03:00.043621Z" - }, - { - "name": "execute", - "started_at": 
"2022-07-13T00:03:00.115693Z", - "completed_at": "2022-07-13T00:03:00.314892Z" - } - ], - "thread_id": "Thread-3", - "execution_time": 0.31365513801574707, - "adapter_response": { - "_message": "SELECT 1", - "code": "SELECT", - "rows_affected": 1 - }, - "message": "SELECT 1", - "failures": null, - "unique_id": "model.fal_test.model_feature_store" - }, - { - "status": "success", - "timing": [ - { - "name": "compile", - "started_at": "2022-07-13T00:03:00.013292Z", - "completed_at": "2022-07-13T00:03:00.043313Z" - }, - { - "name": "execute", - "started_at": "2022-07-13T00:03:00.109397Z", - "completed_at": "2022-07-13T00:03:00.321708Z" - } - ], - "thread_id": "Thread-2", - "execution_time": 0.32065606117248535, - "adapter_response": { - "_message": "SELECT 1", - "code": "SELECT", - "rows_affected": 1 - }, - "message": "SELECT 1", - "failures": null, - "unique_id": "model.fal_test.model_empty_scripts" - }, - { - "status": "success", - "timing": [ - { - "name": "compile", - "started_at": "2022-07-13T00:03:00.012701Z", - "completed_at": "2022-07-13T00:03:00.031027Z" - }, - { - "name": "execute", - "started_at": "2022-07-13T00:03:00.031896Z", - "completed_at": "2022-07-13T00:03:00.318803Z" - } - ], - "thread_id": "Thread-1", - "execution_time": 0.32396793365478516, - "adapter_response": { - "_message": "SELECT 1", - "code": "SELECT", - "rows_affected": 1 - }, - "message": "SELECT 1", - "failures": null, - "unique_id": "model.fal_test.model_empty_fal" - }, - { - "status": "success", - "timing": [ - { - "name": "compile", - "started_at": "2022-07-13T00:03:00.355943Z", - "completed_at": "2022-07-13T00:03:00.396563Z" - }, - { - "name": "execute", - "started_at": "2022-07-13T00:03:00.399972Z", - "completed_at": "2022-07-13T00:03:00.568783Z" - } - ], - "thread_id": "Thread-3", - "execution_time": 0.2295517921447754, - "adapter_response": { - "_message": "SELECT 1", - "code": "SELECT", - "rows_affected": 1 - }, - "message": "SELECT 1", - "failures": null, - "unique_id": "model.fal_test.model_with_scripts" - }, - { - "status": "success", - "timing": [ - { - "name": "compile", - "started_at": "2022-07-13T00:03:00.344215Z", - "completed_at": "2022-07-13T00:03:00.368902Z" - }, - { - "name": "execute", - "started_at": "2022-07-13T00:03:00.381350Z", - "completed_at": "2022-07-13T00:03:00.576331Z" - } - ], - "thread_id": "Thread-4", - "execution_time": 0.24345970153808594, - "adapter_response": { - "_message": "SELECT 1", - "code": "SELECT", - "rows_affected": 1 - }, - "message": "SELECT 1", - "failures": null, - "unique_id": "model.fal_test.model_with_before_scripts" - }, - { - "status": "success", - "timing": [ - { - "name": "compile", - "started_at": "2022-07-13T00:03:00.368392Z", - "completed_at": "2022-07-13T00:03:00.397277Z" - }, - { - "name": "execute", - "started_at": "2022-07-13T00:03:00.421443Z", - "completed_at": "2022-07-13T00:03:00.578696Z" - } - ], - "thread_id": "Thread-2", - "execution_time": 0.2357959747314453, - "adapter_response": { - "_message": "SELECT 1", - "code": "SELECT", - "rows_affected": 1 - }, - "message": "SELECT 1", - "failures": null, - "unique_id": "model.fal_test.other_with_scripts" - } - ], - "elapsed_time": 1.1397802829742432, - "args": { - "write_json": true, - "use_colors": true, - "printer_width": 80, - "version_check": true, - "partial_parse": true, - "static_parser": true, - "profiles_dir": "/Users/matteo/Projects/fal/fal/tests/mock/mockProfile", - "send_anonymous_usage_stats": false, - "event_buffer_size": 100000, - "project_dir": 
"/Users/matteo/Projects/fal/fal/tests/mock", - "which": "run", - "rpc_method": "run", - "indirect_selection": "eager" - } -} diff --git a/projects/adapter/tests/mock/models/model_empty_fal.sql b/projects/adapter/tests/mock/models/model_empty_fal.sql deleted file mode 100644 index efc77c79..00000000 --- a/projects/adapter/tests/mock/models/model_empty_fal.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -SELECT 7 AS a diff --git a/projects/adapter/tests/mock/models/model_empty_scripts.sql b/projects/adapter/tests/mock/models/model_empty_scripts.sql deleted file mode 100644 index 50e32f19..00000000 --- a/projects/adapter/tests/mock/models/model_empty_scripts.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -SELECT 3 AS a diff --git a/projects/adapter/tests/mock/models/model_feature_store.sql b/projects/adapter/tests/mock/models/model_feature_store.sql deleted file mode 100644 index 6b41c695..00000000 --- a/projects/adapter/tests/mock/models/model_feature_store.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -SELECT 2 AS a diff --git a/projects/adapter/tests/mock/models/model_no_fal.sql b/projects/adapter/tests/mock/models/model_no_fal.sql deleted file mode 100644 index 3d8b9595..00000000 --- a/projects/adapter/tests/mock/models/model_no_fal.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -SELECT 4 AS a diff --git a/projects/adapter/tests/mock/models/model_with_before_scripts.sql b/projects/adapter/tests/mock/models/model_with_before_scripts.sql deleted file mode 100644 index df433a96..00000000 --- a/projects/adapter/tests/mock/models/model_with_before_scripts.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -SELECT 5 AS a diff --git a/projects/adapter/tests/mock/models/model_with_scripts.sql b/projects/adapter/tests/mock/models/model_with_scripts.sql deleted file mode 100644 index 3efd0b34..00000000 --- a/projects/adapter/tests/mock/models/model_with_scripts.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -SELECT 1 AS a diff --git a/projects/adapter/tests/mock/models/other_with_scripts.sql b/projects/adapter/tests/mock/models/other_with_scripts.sql deleted file mode 100644 index 30f3753e..00000000 --- a/projects/adapter/tests/mock/models/other_with_scripts.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -SELECT 6 AS a diff --git a/projects/adapter/tests/mock/models/schema.yml b/projects/adapter/tests/mock/models/schema.yml deleted file mode 100644 index 1339743d..00000000 --- a/projects/adapter/tests/mock/models/schema.yml +++ /dev/null @@ -1,54 +0,0 @@ -version: 2 - -sources: - - name: test_sources - database: test - schema: public - tables: - - name: single_col - - name: sql_col - -models: - - name: model_with_scripts - meta: - fal: - scripts: - - fal_scripts/test.py - - - name: other_with_scripts - meta: - fal: - scripts: - - fal_scripts/test.py - - - name: model_with_before_scripts - meta: - fal: - scripts: - before: - - fal_scripts/test.py - - - name: model_feature_store - columns: - - name: a - - name: b - - name: c - meta: - fal: - feature_store: - entity_column: a - timestamp_column: b - - name: model_empty_scripts - meta: - fal: - scripts: - - name: model_no_fal - meta: - other: things - - name: model_empty_fal - meta: - fal: - -# global -fal: - scripts: diff --git a/projects/adapter/tests/planner/data.py b/projects/adapter/tests/planner/data.py deleted file mode 100644 index bb10b89f..00000000 --- 
a/projects/adapter/tests/planner/data.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from fal.dbt.node_graph import NodeKind
-from tests.planner.utils import to_graph
-
-GRAPH_1 = [
-    ("A", {"kind": NodeKind.DBT_MODEL, "post_hook": ["S1", "S2"], "to": ["B"]}),
-    ("B", {"kind": NodeKind.DBT_MODEL, "post_hook": ["S3", "S4"], "to": ["E"]}),
-    ("C", {"kind": NodeKind.DBT_MODEL, "to": ["D"]}),
-    ("D", {"kind": NodeKind.DBT_MODEL, "to": ["E"]}),
-    ("E", {"kind": NodeKind.FAL_MODEL, "post_hook": ["S5"], "to": ["F"]}),
-    ("F", {"kind": NodeKind.DBT_MODEL}),
-]
-
-GRAPH_2 = [
-    (
-        "A",
-        {
-            "kind": NodeKind.DBT_MODEL,
-            "post_hook": ["SA1", "SA2", "SA3"],
-            "to": ["B", "B1", "B2"],
-        },
-    ),
-    ("B", {"kind": NodeKind.DBT_MODEL, "post_hook": ["SB1"], "to": ["L"]}),
-    ("B1", {"kind": NodeKind.DBT_MODEL, "to": ["D"]}),
-    ("B2", {"kind": NodeKind.DBT_MODEL, "to": ["D"]}),
-    ("C", {"kind": NodeKind.DBT_MODEL, "to": ["D"]}),
-    ("D", {"kind": NodeKind.DBT_MODEL, "post_hook": ["SD1"], "to": ["E"]}),
-    ("E", {"kind": NodeKind.DBT_MODEL, "to": ["F"]}),
-    ("F", {"kind": NodeKind.DBT_MODEL, "to": ["G"]}),
-    ("G", {"kind": NodeKind.DBT_MODEL, "to": ["H", "J"]}),
-    ("H", {"kind": NodeKind.FAL_MODEL, "to": ["K"]}),
-    ("J", {"kind": NodeKind.DBT_MODEL, "to": ["K"]}),
-    ("K", {"kind": NodeKind.DBT_MODEL, "post_hook": ["SK1"], "to": ["L"]}),
-    ("L", {"kind": NodeKind.DBT_MODEL, "post_hook": ["SL1"]}),
-]
-
-
-GRAPHS = [
-    {"graph": to_graph(GRAPH_1), "subgraphs": set()},
-    {
-        "graph": to_graph(GRAPH_2),
-        "subgraphs": {frozenset(("B1", "B2")), frozenset(("E", "F", "G"))},
-    },
-]
diff --git a/projects/adapter/tests/planner/test_plan.py b/projects/adapter/tests/planner/test_plan.py
deleted file mode 100644
index f26bc9a8..00000000
--- a/projects/adapter/tests/planner/test_plan.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from cgitb import enable
-from platform import node
-
-import networkx as nx
-import pytest
-
-from fal.dbt.cli.selectors import ExecutionPlan
-from tests.planner.data import GRAPH_1, GRAPH_2, GRAPHS
-from tests.planner.utils import plan_graph, to_graph, to_plan
-
-
-@pytest.mark.parametrize("graph_info", GRAPHS)
-def test_planner_sub_graphs(graph_info):
-    graph = graph_info["graph"]
-    new_graph = plan_graph(graph, to_plan(graph))
-
-    subgraphs = [
-        maybe_subgraph
-        for maybe_subgraph in new_graph.nodes
-        if isinstance(maybe_subgraph, nx.DiGraph)
-    ]
-    assert {frozenset(subgraph.nodes) for subgraph in subgraphs} == graph_info[
-        "subgraphs"
-    ]
-
-
-@pytest.mark.parametrize("graph_info", GRAPHS)
-def test_planner_disabled_chunking(graph_info):
-    graph = graph_info["graph"]
-    new_graph = plan_graph(graph, to_plan(graph), enable_chunking=False)
-
-    subgraphs = [
-        maybe_subgraph
-        for maybe_subgraph in new_graph.nodes
-        if isinstance(maybe_subgraph, nx.DiGraph)
-    ]
-    assert len(subgraphs) == 0
-
-
-@pytest.mark.parametrize(
-    "selected_nodes", [[], ["A", "F"], ["A", "B", "C", "D", "E", "F"]]
-)
-def test_planner_execution_plan(selected_nodes):
-    graph = to_graph(GRAPH_1)
-    new_graph = plan_graph(graph, ExecutionPlan(selected_nodes, ""))
-    assert list(new_graph.nodes) == selected_nodes
-
-
-@pytest.mark.parametrize(
-    "excluded_nodes, expected_subgraphs",
-    [
-        ([], {frozenset(("B1", "B2")), frozenset(("E", "F", "G"))}),
-        # If we exclude A and B, that means B1 and B2 are now top-level. C is also top-level and
-        # all of them are direct dependencies of D (which has some post-hooks), so we'll group them into
-        # one.
- (["A", "B"], {frozenset(("B1", "B2", "C", "D")), frozenset(("E", "F", "G"))}), - ], -) -def test_chunking_with_execution_plan(excluded_nodes, expected_subgraphs): - graph = to_graph(GRAPH_2) - execution_plan = ExecutionPlan( - [node for node in graph.nodes if node not in excluded_nodes], "" - ) - new_graph = plan_graph(graph, execution_plan) - - subgraphs = [ - maybe_subgraph - for maybe_subgraph in new_graph.nodes - if isinstance(maybe_subgraph, nx.DiGraph) - ] - assert {frozenset(subgraph.nodes) for subgraph in subgraphs} == expected_subgraphs diff --git a/projects/adapter/tests/planner/test_schedule.py b/projects/adapter/tests/planner/test_schedule.py deleted file mode 100644 index c132f48f..00000000 --- a/projects/adapter/tests/planner/test_schedule.py +++ /dev/null @@ -1,155 +0,0 @@ -import networkx as nx -import pytest - -from pathlib import Path -from fal.dbt.node_graph import NodeKind -from fal.dbt.planner.tasks import FAILURE, SUCCESS, DBTTask, FalModelTask -from fal.dbt.planner.tasks import DBTTask, FalModelTask, Status -from tests.planner.data import GRAPH_1, GRAPHS -from tests.planner.utils import to_scheduler - - -def test_scheduler(): - scheduler = to_scheduler(GRAPH_1) - - # A -> B \ - # -> E -> F - # C -> D / - - group_A, group_C = scheduler.iter_available_groups() - assert group_A.task.model_ids == ["A"] - assert group_C.task.model_ids == ["C"] - - # When both A and C are still running, the scheduler shouldn't - # yield anything. - assert len(list(scheduler.iter_available_groups())) == 0 - assert_running(scheduler, "A", "C") - - # But when A is unblocked, it can successfully yield B - scheduler.finish(group_A, SUCCESS) - (group_B,) = scheduler.iter_available_groups() - assert group_B.task.model_ids == ["B"] - assert_running(scheduler, "B", "C") - - # The rest of the graph is still blocked - assert len(list(scheduler.iter_available_groups())) == 0 - - # When C is done, it should yield D - scheduler.finish(group_C, SUCCESS) - (group_D,) = scheduler.iter_available_groups() - assert group_D.task.model_ids == ["D"] - assert_running(scheduler, "B", "D") - - # And when both B and D are done, it will yield E - scheduler.finish(group_B, SUCCESS) - scheduler.finish(group_D, SUCCESS) - (group_E,) = scheduler.iter_available_groups() - assert group_E.task.model_ids == ["E"] - assert_running(scheduler, "E") - - # And finally when E is done, it will yield F - scheduler.finish(group_E, SUCCESS) - (group_F,) = scheduler.iter_available_groups() - assert group_F.task.model_ids == ["F"] - assert_running(scheduler, "F") - - -def assert_running(scheduler, *tasks): - assert { - skipped_model - for group in scheduler.filter_groups(Status.RUNNING) - for skipped_model in group.task.model_ids - } == set(tasks) - - -def assert_skipped(scheduler, *tasks): - assert { - skipped_model - for group in scheduler.filter_groups(Status.SKIPPED) - for skipped_model in group.task.model_ids - } == set(tasks) - - -def assert_failed(scheduler, *tasks): - assert { - failed_model - for group in scheduler.filter_groups(Status.FAILURE) - for failed_model in group.task.model_ids - } == set(tasks) - - -def test_scheduler_error_handling(): - scheduler = to_scheduler(GRAPH_1) - - # A -> B \ - # -> E -> F - # C -> D / - - # Run A and C as usual - group_A, group_C = scheduler.iter_available_groups() - assert group_A.task.model_ids == ["A"] - assert group_C.task.model_ids == ["C"] - scheduler.finish(group_A, SUCCESS) - - # But once A is completed, take B and make it fail. 
-    (group_B,) = scheduler.iter_available_groups()
-    assert group_B.task.model_ids == ["B"]
-    scheduler.finish(group_B, FAILURE)
-
-    # B's failure shouldn't affect C or D, since they are
-    # completely independent.
-    scheduler.finish(group_C, SUCCESS)
-    (group_D,) = scheduler.iter_available_groups()
-    assert group_D.task.model_ids == ["D"]
-
-    # When D is done, we won't have any more tasks to continue
-    # since E and F require B, which just failed.
-    scheduler.finish(group_D, SUCCESS)
-    assert len(list(scheduler.iter_available_groups())) == 0
-
-    # Ensure that only B has failed.
-    assert_failed(scheduler, "B")
-    assert_skipped(scheduler, "E", "F")
-
-
-@pytest.mark.parametrize("graph_info", GRAPHS)
-def test_scheduler_task_separation(graph_info):
-    graph = graph_info["graph"]
-    scheduler = to_scheduler(graph)
-
-    all_dbt_tasks, all_fal_tasks, all_post_hooks = set(), set(), set()
-    for group in scheduler.pending_groups:
-        if isinstance(group.task, FalModelTask):
-            all_fal_tasks.update(group.task.model_ids)
-        elif isinstance(group.task, DBTTask):
-            all_dbt_tasks.update(group.task.model_ids)
-
-        all_post_hooks.update(post_hook.hook_path for post_hook in group.post_hooks)
-
-    assert all_dbt_tasks == set(
-        nx.subgraph_view(
-            graph,
-            lambda node: graph.nodes[node]["kind"] is NodeKind.DBT_MODEL,
-        )
-    )
-    assert all_fal_tasks == set(
-        nx.subgraph_view(
-            graph,
-            lambda node: graph.nodes[node]["kind"] is NodeKind.FAL_MODEL,
-        )
-    )
-    assert all_post_hooks == {
-        Path(post_hook.path)
-        for properties in graph.nodes.values()
-        for post_hook in properties.get("post_hook", [])
-    }
-
-
-@pytest.mark.parametrize("graph_info", GRAPHS)
-def test_scheduler_dependency_management(graph_info):
-    scheduler = to_scheduler(graph_info["graph"])
-
-    while scheduler:
-        for group in scheduler.iter_available_groups():
-            assert not group.dependencies
-            scheduler.finish(group, SUCCESS)
diff --git a/projects/adapter/tests/planner/test_tasks.py b/projects/adapter/tests/planner/test_tasks.py
deleted file mode 100644
index 55d4567f..00000000
--- a/projects/adapter/tests/planner/test_tasks.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from dataclasses import dataclass, field
-
-import pytest
-
-from fal.dbt.planner.tasks import FAILURE, SUCCESS, DBTTask, FalLocalHookTask, FalModelTask
-from fal.dbt.utils import DynamicIndexProvider
-
-
-@dataclass
-class FakeCLIOutput:
-    return_code: int
-    run_results: dict
-
-
-@dataclass
-class FakeFalDbt:
-    target_path: str
-    models: list = field(default_factory=list)
-
-
-@dataclass
-class FakeModel:
-    unique_id: str
-
-
-class FakeScript:
-    pass
-
-
-def mock_dbt_run(mocker, return_code):
-    mocker.patch(
-        "fal.dbt.cli.dbt_runner.dbt_run_through_python",
-        return_value=FakeCLIOutput(return_code, {}),
-    )
-    mocker.patch(
-        "fal.dbt.planner.tasks._map_cli_output_model_results",
-        return_value=[],
-    )
-
-
-def mock_script_construction(mocker, return_code):
-    mocker.patch(
-        "fal.dbt.fal_script.FalScript.__new__",
-        return_value=FakeScript(),
-    )
-    mocker.patch(
-        "fal.dbt.fal_script.FalScript.model_script",
-        return_value=FakeScript(),
-    )
-    mocker.patch(
-        "fal.dbt.planner.tasks.run_script",
-        return_value=return_code,
-    )
-
-
-@pytest.mark.parametrize("return_code", [SUCCESS, FAILURE])
-def test_dbt_task(mocker, return_code):
-    task = DBTTask(["a", "b"])
-    task.set_run_index(DynamicIndexProvider())
-
-    fal_dbt = FakeFalDbt("/test")
-    mock_dbt_run(mocker, return_code)
-    assert task.execute(None, fal_dbt) == return_code
-
-
-def test_fal_model_task_when_dbt_fails(mocker):
-    task = FalModelTask(
- ["a", "b"], - script=FalLocalHookTask("something.py", bound_model=FakeModel("model")), - ) - task.set_run_index(DynamicIndexProvider()) - - fal_dbt = FakeFalDbt("/test") - mock_dbt_run(mocker, FAILURE) - assert task.execute(None, fal_dbt) == FAILURE - - -@pytest.mark.parametrize("return_code", [SUCCESS, FAILURE]) -def test_fal_model_task_when_dbt_succeeds(mocker, return_code): - task = FalModelTask( - ["a", "b"], - script=FalLocalHookTask("something.py", bound_model=FakeModel("model")), - ) - task.set_run_index(DynamicIndexProvider()) - - fal_dbt = FakeFalDbt("/test") - mock_dbt_run(mocker, SUCCESS) - mock_script_construction(mocker, return_code) - assert task.execute(None, fal_dbt) == return_code - - -@pytest.mark.parametrize("return_code", [SUCCESS, FAILURE]) -def test_fal_hook(mocker, return_code): - task = FalLocalHookTask("something.py", bound_model=FakeModel("model")) - task.set_run_index(DynamicIndexProvider()) - - fal_dbt = FakeFalDbt("/test") - mock_script_construction(mocker, return_code) - assert task.execute(None, fal_dbt) == return_code diff --git a/projects/adapter/tests/planner/utils.py b/projects/adapter/tests/planner/utils.py deleted file mode 100644 index 85d5f40a..00000000 --- a/projects/adapter/tests/planner/utils.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import annotations - -from collections import defaultdict -from typing import Any -from pathlib import Path - -import networkx as nx - -from fal.dbt.cli.selectors import ExecutionPlan -from fal.dbt.node_graph import DbtModelNode, NodeGraph -from fal.dbt.planner.plan import ( - FilteredGraph, - OriginGraph, - PlannedGraph, - ScriptConnectedGraph, -) -from fal.dbt.planner.schedule import schedule_graph -from fal.dbt.fal_script import LocalHook - - -class ModelDict(defaultdict): - def get(self, key): - return super().__getitem__(key) - - -class FakeDbtModel: - @property - def python_model(self): - return Path("...") - - -def to_graph(data: list[tuple[str, dict[str, Any]]]) -> nx.DiGraph: - graph = nx.DiGraph() - - nodes, edges = [], [] - for node, _properties in data: - properties = _properties.copy() - for hook_type in ["pre_hook", "post_hook"]: - if hook_type in properties: - properties[hook_type] = [ - LocalHook(path) for path in properties[hook_type] - ] - - edges.extend((node, edge) for edge in properties.pop("to", [])) - nodes.append((node, properties)) - - graph.add_nodes_from(nodes) - graph.add_edges_from(edges) - return graph - - -def to_plan(graph: nx.DiGraph) -> ExecutionPlan: - return ExecutionPlan(list(graph.nodes), "") - - -def plan_graph( - graph: nx.DiGraph, execution_plan: ExecutionPlan, enable_chunking: bool = True -) -> nx.DiGraph: - origin_graph = OriginGraph(graph) - filtered_graph = FilteredGraph.from_execution_plan( - origin_graph, execution_plan=execution_plan - ) - connected_graph = ScriptConnectedGraph.from_filtered_graph(filtered_graph) - planned_graph = PlannedGraph.from_script_connected_graph( - connected_graph, enable_chunking=enable_chunking - ) - return planned_graph.graph - - -def to_scheduler(graph): - if isinstance(graph, list): - graph = to_graph(graph) - new_graph = plan_graph(graph, to_plan(graph)) - node_graph = NodeGraph( - graph, ModelDict(lambda: DbtModelNode("...", FakeDbtModel())) - ) - return schedule_graph(new_graph, node_graph) diff --git a/projects/adapter/tests/runner/test_dbt_runner.py b/projects/adapter/tests/runner/test_dbt_runner.py deleted file mode 100644 index 19fa95b0..00000000 --- a/projects/adapter/tests/runner/test_dbt_runner.py +++ /dev/null @@ -1,39 
+0,0 @@ -from fal.dbt.cli.args import parse_args -from fal.dbt.cli.dbt_runner import get_dbt_command_list -from tests.graph.utils import assert_contains_only -import os - - -def test_get_dbt_command_list_with_select(): - parsed = parse_args(["flow", "run"]) - models = ["modelA", "modelB"] - command_list = get_dbt_command_list(parsed, models) - assert_contains_only( - command_list, - [ - "run", - "--threads", - "1", - "--select", - "modelA", - "modelB", - "--project-dir", - str(os.getcwd()), - ], - ) - - -def test_get_dbt_command_list_with_empty_models_list(): - parsed = parse_args(["flow", "run"]) - models = [] - command_list = get_dbt_command_list(parsed, models) - assert_contains_only( - command_list, - [ - "run", - "--threads", - "1", - "--project-dir", - str(os.getcwd()), - ], - ) diff --git a/projects/adapter/tests/test_cli.py b/projects/adapter/tests/test_cli.py deleted file mode 100644 index b840a55b..00000000 --- a/projects/adapter/tests/test_cli.py +++ /dev/null @@ -1,462 +0,0 @@ -import inspect -import os -import re -import shutil -import tempfile -import pytest -from pathlib import Path - -from dbt.exceptions import DbtProjectError -from fal.dbt.cli import cli - -profiles_dir = os.path.join(Path.cwd(), "tests/mock/mockProfile") -project_dir = os.path.join(Path.cwd(), "tests/mock") - -pytest.skip(allow_module_level=True) - -class ProjectTemporaryDirectory(tempfile.TemporaryDirectory): - def __init__(self, *args, **kargs): - super().__init__(*args, **kargs) - - # Copy project_dir to a clean temp directory - shutil.rmtree(self.name, ignore_errors=True) - shutil.copytree(project_dir, self.name) - - def __enter__(self): - os.environ["DBT_TARGET_PATH"] = os.path.join(self.name, "mockTarget") - return super().__enter__() - - def __exit__(self, exc, value, tb): - del os.environ["DBT_TARGET_PATH"] - return super().__exit__(exc, value, tb) - - -def test_run(): - try: - cli([ - # fmt: off - "fal", "run", - "--profiles-dir", profiles_dir, - # fmt: on - ]) - assert False, "Should not reach" - except DbtProjectError as e: - assert "no dbt_project.yml found at expected path" in str(e.msg).lower() - - -def test_flow_run(): - try: - cli([ - # fmt: off - "fal", "flow", "run", - "--profiles-dir", profiles_dir, - # fmt: on - ]) - assert False, "Should not reach" - except DbtProjectError as e: - assert "no dbt_project.yml found at expected path" in str(e).lower() - - -def test_no_arg(capfd): - captured = _run_fal([], capfd) - assert re.match("usage: dbt-fal (.|\n)* COMMAND", captured.err) - assert "the following arguments are required: COMMAND" in captured.err - - -def test_run_with_project_dir(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - # TODO: should it run without a run_results and no selection flags? 
- cli([ - # fmt: off - "fal", "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - # fmt: on - ]) - - -def test_version(capfd): - import pkg_resources - - version = pkg_resources.get_distribution("dbt-postgres-python").version - captured = _run_fal(["--version"], capfd) - assert f"fal {version}" in captured.out - - -def test_flow_run_with_project_dir(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - captured = _run_fal( - [ - # fmt: off - "flow", "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - # fmt: on - ], - capfd, - ) - - executing_re = re.compile( - r": dbt run --threads 1 --project-dir [\w\d\/\-\_]+ --profiles-dir [\w\d\/\-\_]+tests/mock/mockProfile" - ) - found = executing_re.findall(captured.out) - # We run each model separately - assert len(found) == 7 - - -def test_flow_run_with_project_dir_and_select(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - captured = _run_fal( - [ - # fmt: off - "flow", "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--select", "test.py+" - # fmt: on - ], - capfd, - ) - - executing_re = re.compile( - r": dbt run --threads 1 --project-dir [\w\/\-\_]+ --profiles-dir [\w\/\-\_]+tests/mock/mockProfile \--select|\--models model_with_before_scripts" - ) - found = executing_re.findall(captured.out) - assert len(found) == 1 - assert "test.py" in captured.out - assert ( - "--select model_with_before_scripts" - or "--models model_with_before_scripts" in captured.out - ) - - -def test_flow_run_with_defer(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - captured = _run_fal( - [ - # fmt: off - "flow", "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--defer", "--state", (tmp_dir + "/target") - # fmt: on - ], - capfd, - ) - - executing_re = re.compile( - r": dbt run --threads 1 --project-dir [\w\/\-\_]+ --profiles-dir [\w\/\-\_]+tests/mock/mockProfile --defer --state [\w\/\-\_]+/target" - ) - found = executing_re.findall(captured.out) - # We run each model separately - assert len(found) == 7 - - -def test_flow_run_with_full_refresh(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - captured = _run_fal( - [ - # fmt: off - "flow", "run", - "--full-refresh", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - # fmt: on - ], - capfd, - ) - - executing_re = re.compile( - r": dbt run --threads 1 --project-dir [\w\d\/\-\_]+ --profiles-dir [\w\d\/\-\_]+tests/mock/mockProfile --full-refresh" - ) - found = executing_re.findall(captured.out) - # We run each model separately - assert len(found) == 7 - - -def test_flow_run_with_vars(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - var_str = "some: value" - captured = _run_fal( - [ - # fmt: off - "flow", "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--vars", var_str, - # fmt: on - ], - capfd, - ) - - executing_re = re.compile( - r": dbt run --threads 1 --project-dir [\w\/\-\_]+ --profiles-dir [\w\/\-\_]+tests/mock/mockProfile --vars " + var_str - ) - found = executing_re.findall(captured.out) - # We run each model separately - assert len(found) == 7 - - -def test_selection(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - captured = _run_fal( - [ - # fmt: off - "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--select", "model_with_scripts", # not included (overwritten) - "--select", "other_with_scripts", - # fmt: on - ], - capfd, - ) - - assert "model_with_scripts" not in captured.out - assert "other_with_scripts" in captured.out - - 
captured = _run_fal( - [ - # fmt: off - "flow", "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--select", "model_with_scripts", # not included (overwritten) - "--select", "model_feature_store", "model_empty_scripts", - # fmt: on - ], - capfd, - ) - - assert "model_with_scripts" not in captured.out - assert "model_no_fal" not in captured.out - assert "model_feature_store" in captured.out - assert "model_empty_scripts" in captured.out - - captured = _run_fal( - [ - # fmt: off - "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--select", "model_with_scripts", - # fmt: on - ], - capfd, - ) - assert "model_with_scripts" in captured.out - assert "model_feature_store" not in captured.out - assert "model_empty_scripts" not in captured.out - # It has no keyword in meta - assert "model_no_fal" not in captured.out - - -def test_no_run_results(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - shutil.rmtree(os.path.join(tmp_dir, "mockTarget")) - - # Without selection flag - captured = _run_fal( - [ - # fmt: off - "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - # fmt: on - ], - capfd, - ) - assert ( - "Cannot define models to run without selection flags or dbt run_results artifact" - in str(captured.out) - ) - - # With selection flag - captured = _run_fal( - [ - # fmt: off - "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--all", - # fmt: on - ], - capfd, - ) - - # Just as warning - assert "Could not read dbt run_results artifact" in captured.out - - -def test_before(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - - captured = _run_fal( - [ - # fmt: off - "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--select", "model_with_scripts", - "--before", - # fmt: on - ], - capfd, - ) - assert "model_with_scripts" not in captured.out - assert "test.py" not in captured.out - assert "model_with_before_scripts" not in captured.out - assert "model_feature_store" not in captured.out - assert "model_empty_scripts" not in captured.out - assert "model_no_fal" not in captured.out - - captured = _run_fal( - [ - # fmt: off - "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--before", - # fmt: on - ], - capfd, - ) - assert "model_with_scripts" not in captured.out - assert "model_feature_store" not in captured.out - assert "model_empty_scripts" not in captured.out - assert "model_no_fal" not in captured.out - assert "model_with_before_scripts" in captured.out - - -def test_target(capfd): - with ProjectTemporaryDirectory() as tmp_dir: - - captured = _run_fal( - [ - # fmt: off - "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--target", "false_target", - # fmt: on - ], - capfd, - ) - assert ( - "The profile 'fal_test' does not have a target named 'false_target'" - in captured.out - ) - - -@pytest.mark.parametrize( - "broken_schema", - [ - """ - version: 2 - models: - - name: model_with_scripts - meta: - fal: - scripts: - - path: fal_scripts/test.py - """, - """ - version: 2 - models: - - name: model_with_scripts - meta: - fal: - scripts: - after: - - path: fal_scripts/test.py - """, - """ - version: 2 - models: - - name: model_with_scripts - meta: - fal: - pre-hook: - - xxx: fal_scripts/test.py - """, - ], -) -def test_broken_schemas(broken_schema, monkeypatch): - with ProjectTemporaryDirectory() as tmp_dir: - monkeypatch.chdir(tmp_dir) - path = Path(tmp_dir) - - for model in (path / "models").rglob("*.sql"): - if model.stem != 
"model_with_scripts": - model.unlink() - - with open(path / "models" / "schema.yml", "w") as f: - f.write(inspect.cleandoc(broken_schema)) - - with pytest.raises((ValueError, TypeError)): - cli( - [ - # fmt: off - "fal", "flow", "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--exclude=model_with_scripts", - # fmt: on - ], - ) - -@pytest.mark.parametrize( - "schema", - [ - """ - version: 2 - models: - - name: model_with_scripts - meta: - fal: - post-hook: - - fal_scripts/test.py - """, - """ - version: 2 - models: - - name: model_with_scripts - meta: - fal: - post-hook: - - path: fal_scripts/test.py - """, - ], -) -def test_schemas(schema, monkeypatch): - with ProjectTemporaryDirectory() as tmp_dir: - monkeypatch.chdir(tmp_dir) - path = Path(tmp_dir) - - for model in (path / "models").rglob("*.sql"): - if model.stem != "model_with_scripts": - model.unlink() - - with open(path / "models" / "schema.yml", "w") as f: - f.write(inspect.cleandoc(schema)) - - cli( - [ - # fmt: off - "fal", "flow", "run", - "--project-dir", tmp_dir, - "--profiles-dir", profiles_dir, - "--exclude=model_with_scripts", - # fmt: on - ], - ) - -def _run_fal(args, capfd): - # Given fal arguments, runs fal and returns capfd output - try: - cli(["fal"] + args) - except SystemExit: - pass - except Exception as e: - print(e) - return capfd.readouterr() diff --git a/projects/adapter/tests/test_import.py b/projects/adapter/tests/test_import.py deleted file mode 100644 index 1788011c..00000000 --- a/projects/adapter/tests/test_import.py +++ /dev/null @@ -1,33 +0,0 @@ -from fal.dbt import FalDbt -from pathlib import Path -import os - - -profiles_dir = os.path.join(Path.cwd(), "tests/mock/mockProfile") -project_dir = os.path.join(Path.cwd(), "tests/mock") - - -def test_initialize(): - faldbt = FalDbt( - profiles_dir=profiles_dir, - project_dir=project_dir, - ) - - # TODO: look at df data - df = faldbt.ref("model_with_scripts") - - faldbt.write_to_model(df, "model_with_scripts", mode="append") - - faldbt.write_to_source(df, "test_sources", "single_col") - - # TODO: look at df data - df = faldbt.source("test_sources", "single_col") - - # TODO: look at df data (should be double now) - df = faldbt.ref("model_with_scripts") - - sources = faldbt.list_sources() - assert "test_sources" in [source.name for source in sources] - - models = faldbt.list_models() - assert "model_with_scripts" in [model.name for model in models] diff --git a/projects/adapter/tests/test_module_check.py b/projects/adapter/tests/test_module_check.py deleted file mode 100644 index 1329d922..00000000 --- a/projects/adapter/tests/test_module_check.py +++ /dev/null @@ -1,165 +0,0 @@ -import ast -from fal.dbt.cli.model_generator.module_check import ( - generate_dbt_dependencies, - write_to_model_check, -) -from inspect import cleandoc - - -def test_finding_functions_with_literals(): - program = cleandoc( - """ - mdl = ref('model_a') - ref('model_b') - ref('package', 'model_c') - src = source('db', 'table_a') - source('db', 'table_b') - """ - ) - module = ast.parse(program) - results = generate_dbt_dependencies(module) - - assert "{{ ref('model_a') }}" in results - assert "{{ ref('model_b') }}" in results - assert "{{ ref('package', 'model_c') }}" in results - assert "{{ source('db', 'table_a') }}" in results - assert "{{ source('db', 'table_b') }}" in results - - -def test_ignoring_functions_with_vars_or_exprs(): - program = cleandoc( - """ - mdl = ref(some_var) - ref(other) - ref('a' + 'b') - src = source('db', table_var) - source(*['db', 
'table_b']) - """ - ) - module = ast.parse(program) - results = generate_dbt_dependencies(module) - - assert "{{ ref(some_var) }}" not in results - assert "{{ ref(other) }}" not in results - assert "{{ ref('a' + 'b') }}" not in results - assert "{{ source('db', table_var) }}" not in results - assert "{{ source(*['db', 'table_b']) }}" not in results - - -def test_finding_functions_non_top_level(): - program = cleandoc( - """ - if True: - mdl = ref('model_a') - else: - mdl = ref('model_b') - def my_funct(): - return ref('package', 'model_c') - - for x in []: - if y: - for z in [let for let in (lambda x: x + source('db', 'table_a'))(y)]: - source('db', 'table_b') - """ - ) - module = ast.parse(program) - results = generate_dbt_dependencies(module) - - assert "{{ ref('model_a') }}" in results - assert "{{ ref('model_b') }}" in results - assert "{{ ref('package', 'model_c') }}" in results - assert "{{ source('db', 'table_a') }}" in results - assert "{{ source('db', 'table_b') }}" in results - - -def test_finding_functions_in_docstring(): - program = cleandoc( - """ - ''' - deps: - ref('model_a') and source('db', 'table_a') - ''' - mdl = ref(context.current_model.name) - """ - ) - module = ast.parse(program) - results = generate_dbt_dependencies(module) - - assert "{{ ref('model_a') }}" in results - assert "{{ source('db', 'table_a') }}" in results - - -def test_write_to_model_once_top_level(): - program = cleandoc( - """ - df = ref('model') - write_to_model(df) - """ - ) - module = ast.parse(program) - try: - write_to_model_check(module) - except AssertionError: - assert False, "Should not have thrown" - - -def test_write_to_model_never(): - program = cleandoc( - """ - df = ref('model') - """ - ) - module = ast.parse(program) - try: - write_to_model_check(module) - raise # Should have thrown - except AssertionError: - pass - - -def test_write_to_model_inner_level(): - program = cleandoc( - """ - df = ref('model') - if True: - write_to_model(df) - """ - ) - module = ast.parse(program) - try: - write_to_model_check(module) - except AssertionError: - assert False, "Should not have thrown" - - -def test_write_to_model_once_top_level_once_inner_level(): - program = cleandoc( - """ - df = ref('model') - if True: - write_to_model(df) - write_to_model(df) - """ - ) - module = ast.parse(program) - try: - write_to_model_check(module) - except AssertionError: - assert False, "Should not have thrown" - - -def test_write_to_model_more_than_once_top_level(): - program = cleandoc( - """ - df = ref('model') - write_to_model(df) - - df = ref('model') - write_to_model(df, mode='append') - """ - ) - module = ast.parse(program) - try: - write_to_model_check(module) - except AssertionError: - assert False, "Should not have thrown" diff --git a/projects/adapter/tests/test_project.py b/projects/adapter/tests/test_project.py deleted file mode 100644 index a084243c..00000000 --- a/projects/adapter/tests/test_project.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -from pathlib import Path -from mock import Mock, patch, ANY -import requests - -from fal.dbt import FalDbt - -profiles_dir = os.path.join(Path.cwd(), "tests/mock/mockProfile") -project_dir = os.path.join(Path.cwd(), "tests/mock") - - -def test_scripts(): - faldbt = FalDbt( - profiles_dir=profiles_dir, - project_dir=project_dir, - ) - - assert isinstance(faldbt._global_script_paths, dict) - assert 0 == len(faldbt._global_script_paths["after"]) - - faldbt._global_script_paths - models = faldbt.list_models() - - # Find the correct one - for model in models: - if 
diff --git a/projects/adapter/tests/test_project.py b/projects/adapter/tests/test_project.py
deleted file mode 100644
index a084243c..00000000
--- a/projects/adapter/tests/test_project.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import os
-from pathlib import Path
-from mock import Mock, patch, ANY
-import requests
-
-from fal.dbt import FalDbt
-
-profiles_dir = os.path.join(Path.cwd(), "tests/mock/mockProfile")
-project_dir = os.path.join(Path.cwd(), "tests/mock")
-
-
-def test_scripts():
-    faldbt = FalDbt(
-        profiles_dir=profiles_dir,
-        project_dir=project_dir,
-    )
-
-    assert isinstance(faldbt._global_script_paths, dict)
-    assert 0 == len(faldbt._global_script_paths["after"])
-
-    models = faldbt.list_models()
-
-    # Check the script attachments of the relevant models
-    for model in models:
-        if model.name == "model_feature_store":
-            assert 0 == len(model.get_scripts(before=False))
-        if model.name == "model_with_scripts":
-            assert 1 == len(model.get_scripts(before=False))
-            assert 0 == len(model.get_scripts(before=True))
-        if model.name == "model_with_before_scripts":
-            assert 1 == len(model.get_scripts(before=True))
-            assert 0 == len(model.get_scripts(before=False))
-
-
-def test_features():
-    faldbt = FalDbt(
-        profiles_dir=profiles_dir,
-        project_dir=project_dir,
-    )
-
-    # Feature definitions
-    features = faldbt.list_features()
-
-    assert 1 == len(features)
-    fs_def = features[0]
-    assert "a" == fs_def.entity_column
-    assert "model_feature_store" == fs_def.model
diff --git a/projects/adapter/tests/test_regressions.py b/projects/adapter/tests/test_regressions.py
deleted file mode 100644
index d817902b..00000000
--- a/projects/adapter/tests/test_regressions.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from fal.dbt import FalDbt
-from pathlib import Path
-import os
-import pandas as pd
-
-profiles_dir = os.path.join(Path.cwd(), "tests/mock/mockProfile")
-project_dir = os.path.join(Path.cwd(), "tests/mock")
-
-
-# https://github.com/fal-ai/fal/issues/154
-def test_write_to_source_not_processing_jinja():
-    faldbt = FalDbt(
-        profiles_dir=profiles_dir,
-        project_dir=project_dir,
-    )
-
-    df = pd.DataFrame({"sql": [r"SELECT 1 FROM {{ wrong jinja }}"]})
-
-    faldbt.write_to_source(df, "test_sources", "sql_col")
-
-    # The SQL string must not be processed by Jinja; it should be written as-is
-    df = faldbt.source("test_sources", "sql_col")
-    assert df.sql.get(0) == r"SELECT 1 FROM {{ wrong jinja }}"
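
The regression test above (issue #154) exists because dbt routes most SQL strings through its Jinja renderer, so a stored value containing {{ }} has to bypass that path entirely. A quick illustration of the failure mode being guarded against, using jinja2 directly (a dbt dependency; this snippet is illustrative, not fal's actual code path):

    from jinja2 import Template, TemplateSyntaxError

    raw = r"SELECT 1 FROM {{ wrong jinja }}"

    # Treating user data as a template either raises or silently rewrites it,
    # which is exactly what write_to_source must never do with stored values.
    try:
        Template(raw).render()
    except TemplateSyntaxError as exc:
        print(f"rendering would fail: {exc}")
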
diff --git a/projects/adapter/tests/test_telemetry.py b/projects/adapter/tests/test_telemetry.py
deleted file mode 100644
index 4512152f..00000000
--- a/projects/adapter/tests/test_telemetry.py
+++ /dev/null
@@ -1,396 +0,0 @@
-import pathlib
-import sys
-from unittest.mock import Mock, call, patch, ANY
-from pathlib import Path
-
-import pytest
-import yaml
-import datetime
-from fal.dbt.telemetry import telemetry
-
-
-@pytest.fixture
-def ignore_fal_stats_enabled_env_var(monkeypatch):
-    """
-    GitHub Actions configuration scripts set the FAL_STATS_ENABLED
-    environment variable to prevent CI events from going to posthog; this
-    interferes with some tests. This fixture removes its value temporarily.
-    """
-    monkeypatch.delenv("FAL_STATS_ENABLED", raising=True)
-
-
-@pytest.fixture
-def ignore_env_var_and_set_tmp_default_home_dir(
-    tmp_directory, ignore_fal_stats_enabled_env_var, monkeypatch
-):
-    """
-    Extends ignore_fal_stats_enabled_env_var and also overrides
-    DEFAULT_HOME_DIR to keep local configuration from interfering with tests.
-    """
-    monkeypatch.setattr(telemetry, "DEFAULT_HOME_DIR", ".")
-
-
-# Validation tests
-def test_str_validation():
-    res = telemetry.str_param("Test")
-    assert isinstance(res, str)
-    res = telemetry.str_param("TEST")
-    assert "TEST" == res
-    with pytest.raises(TypeError) as exc_info:
-        telemetry.str_param(3)
-
-    exception_raised = exc_info.value
-    assert type(exception_raised) == TypeError
-
-
-def test_opt_str_validation():
-    res = telemetry.opt_str_param("")
-    assert isinstance(res, str)
-    res = telemetry.opt_str_param("TEST")
-    assert "TEST" == res
-    res = telemetry.opt_str_param(None)
-    assert not res
-
-    with pytest.raises(TypeError) as exc_info:
-        telemetry.opt_str_param(3)
-
-    exception_raised = exc_info.value
-    assert type(exception_raised) == TypeError
-
-
-def test_check_stats_enabled(ignore_env_var_and_set_tmp_default_home_dir):
-    stats_enabled = telemetry.check_stats_enabled()
-    assert stats_enabled is True
-
-
-@pytest.mark.parametrize(
-    "yaml_value, expected_first, env_value, expected_second",
-    [
-        ["true", True, "false", False],
-        ["TRUE", True, "FALSE", False],
-        ["false", False, "true", True],
-        ["FALSE", False, "TRUE", True],
-    ],
-)
-def test_env_var_takes_precedence(
-    monkeypatch,
-    ignore_env_var_and_set_tmp_default_home_dir,
-    yaml_value,
-    expected_first,
-    env_value,
-    expected_second,
-):
-    stats = Path("stats")
-    stats.mkdir()
-
-    (stats / "config.yaml").write_text(f"stats_enabled: {yaml_value}")
-
-    assert telemetry.check_stats_enabled() is expected_first
-
-    monkeypatch.setenv("FAL_STATS_ENABLED", env_value, prepend=False)
-
-    assert telemetry.check_stats_enabled() is expected_second
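
The parametrized test above encodes a simple precedence rule: the FAL_STATS_ENABLED environment variable always wins over the stats_enabled key in config.yaml, and the default is enabled. A sketch of that resolution order — the real check_stats_enabled lives in fal.dbt.telemetry and may differ in detail, e.g. which truthy spellings it accepts:

    import os
    from pathlib import Path
    import yaml

    def resolve_stats_enabled(config_path: Path) -> bool:
        """Environment variable first, then config.yaml, then the default (enabled)."""
        env_value = os.environ.get("FAL_STATS_ENABLED")
        if env_value is not None:
            # Accepted truthy spellings here are an assumption.
            return env_value.strip().lower() in ("1", "true")
        if config_path.exists():
            config = yaml.safe_load(config_path.read_text()) or {}
            return bool(config.get("stats_enabled", True))
        return True
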
-
-
-def test_first_usage(monkeypatch, tmp_directory):
-    monkeypatch.setattr(telemetry, "DEFAULT_HOME_DIR", ".")
-
-    stats = Path("stats")
-    stats.mkdir()
-
-    # This isn't first-time usage since the config file doesn't exist yet.
-    assert not telemetry.check_first_time_usage()
-    (stats / "config.yaml").write_text("stats_enabled: True")
-
-    assert telemetry.check_first_time_usage()
-
-
-# Ref: https://stackoverflow.com/questions/43878953/how-does-one-detect-if-
-# one-is-running-within-a-docker-container-within-python
-def test_docker_env(monkeypatch):
-    def mock(input_path):
-        return "dockerenv" in str(input_path)
-
-    monkeypatch.setattr(pathlib.Path, "exists", mock)
-    docker = telemetry.is_docker()
-    assert docker is True
-
-
-# Ref https://airflow.apache.org/docs/apache-airflow/stable/
-# cli-and-env-variables-ref.html?highlight=airflow_home#envvar-AIRFLOW_HOME
-@pytest.mark.parametrize("env_variable", ["AIRFLOW_CONFIG", "AIRFLOW_HOME"])
-def test_airflow_env(monkeypatch, env_variable):
-    monkeypatch.setenv(env_variable, "True")
-    airflow = telemetry.is_airflow()
-    assert airflow is True
-
-
-# Ref https://stackoverflow.com/questions/110362/how-can-i-find-
-# the-current-os-in-python
-@pytest.mark.parametrize("os_param", ["Windows", "Linux", "MacOS", "Ubuntu"])
-def test_os_type(monkeypatch, os_param):
-    mock = Mock()
-    mock.return_value = os_param
-    monkeypatch.setattr(telemetry.platform, "system", mock)
-    os_type = telemetry.get_os()
-    assert os_type == os_param
-
-
-def test_uid_file():
-    uid, uid_error, is_install = telemetry.check_uid()
-    assert isinstance(uid, str)
-
-
-def test_basedir_creation():
-    base_dir = telemetry.check_dir_exist()
-    assert base_dir.exists()
-
-
-def test_python_version():
-    version = telemetry.python_version()
-    assert isinstance(version, str)
-
-
-def test_stats_off(monkeypatch):
-    # Stats are reported as disabled and any posthog.capture call is
-    # recorded, so log_api must never reach posthog here.
-    posthog_mock = Mock()
-    monkeypatch.setattr(telemetry, "check_stats_enabled", lambda: False)
-    monkeypatch.setattr("posthog.capture", posthog_mock)
-    telemetry.log_api("test_action")
-
-    assert posthog_mock.call_count == 0
-
-
-def test_offline_stats(monkeypatch):
-    # When offline, log_api should not attempt to send anything to posthog.
-    posthog_mock = Mock()
-    monkeypatch.setattr(telemetry, "is_online", lambda: False)
-    monkeypatch.setattr("posthog.capture", posthog_mock)
-    telemetry.log_api("test_action")
-
-    assert posthog_mock.call_count == 0
-
-
-def test_is_online():
-    assert telemetry.is_online()
-
-
-def test_is_online_timeout():
-    # Check that the connectivity probe finishes in under 1.5 secs
-    start_time = datetime.datetime.now()
-    telemetry.is_online()
-    end_time = datetime.datetime.now()
-    total_runtime = end_time - start_time
-    assert total_runtime < datetime.timedelta(milliseconds=1500)
-
-
-def test_is_not_online(monkeypatch):
-    mock_httplib = Mock()
-    mock_httplib.HTTPSConnection().request.side_effect = Exception
-    monkeypatch.setattr(telemetry, "httplib", mock_httplib)
-    assert not telemetry.is_online()
-
-
-def write_to_conf_file(tmp_directory, monkeypatch, last_check):
-    stats = Path("stats")
-    stats.mkdir()
-    conf_path = stats / "config.yaml"
-    version_path = stats / "uid.yaml"
-    monkeypatch.setattr(telemetry, "DEFAULT_HOME_DIR", ".")
-    conf_path.write_text("version_check_enabled: True\n")
-    version_path.write_text(f"last_version_check: {last_check}\n")
-
-
-def test_python_major_version():
-    version = telemetry.python_version()
-    major = version.split(".")[0]
-    assert int(major) == 3
-
-
-def test_creates_config_directory(
-    monkeypatch, tmp_directory, ignore_fal_stats_enabled_env_var
-):
-    monkeypatch.setattr(telemetry, "DEFAULT_HOME_DIR", ".")
-
-    # TODO: enable passing `config` object for FalDbt() object creation cases
-    @telemetry.log_call("some_action")
-    def my_function():
-        pass
-
-    my_function()
-
-    assert Path("stats").is_dir()
-    assert Path("stats", "uid.yaml").is_file()
-    assert Path("stats", "config.yaml").is_file()
-
-
-@pytest.fixture
-def 
mock_telemetry(monkeypatch): - mock = Mock() - mock_dt = Mock() - mock_dt.now.side_effect = [1, 2] - monkeypatch.setattr(telemetry, "log_api", mock) - monkeypatch.setattr(telemetry.datetime, "datetime", mock_dt) - yield mock - - -def test_log_call_success(mock_telemetry): - @telemetry.log_call("some_action", ["a", "c", "d", "e"]) - def my_function(a, b, c=0, *, d=1, e): - pass - - my_function(1, 2, e=3) - - mock_telemetry.assert_has_calls( - [ - call( - action="some_action_started", - additional_props={ - "args": {"a": 1, "c": 0, "d": 1, "e": 3}, - }, - dbt_config=None, - ), - call( - action="some_action_success", - total_runtime="1", - additional_props={ - "args": {"a": 1, "c": 0, "d": 1, "e": 3}, - }, - dbt_config=None, - ), - ] - ) - - -def test_log_call_exception(mock_telemetry): - @telemetry.log_call("some_action", ["a", "c", "d", "e"]) - def my_function(a, b, c=0, *, d=1, e): - raise ValueError("some error") - - with pytest.raises(ValueError): - my_function(1, 2, e=3) - - mock_telemetry.assert_has_calls( - [ - call( - action="some_action_started", - additional_props={ - "args": {"a": 1, "c": 0, "d": 1, "e": 3}, - }, - dbt_config=None, - ), - call( - action="some_action_error", - total_runtime="1", - additional_props={ - "exception": str(type(ValueError())), - "args": {"a": 1, "c": 0, "d": 1, "e": 3}, - }, - dbt_config=None, - ), - ] - ) - - -def test_redaction(): - with patch("posthog.capture") as mock_capture: - - # TODO: seems wrong to replace the function in the module _for real_. should be mocked - telemetry.check_uid = lambda: ("test_uid", None, True) - telemetry.check_stats_enabled = lambda: True - telemetry.is_online = lambda: True - telemetry.log_api( - "some_action", - additional_props={ - "argv": [ - "/Users/user/.pyenv/shims/fal", - "flow", - "run", - "--disable-logging", - "--project-dir", - "some_dir", - "--profiles-dir", - "some_other_dir", - "--defer", - "--threads", - "--state", - "some_state", - "-s", - "--select", - "some_selector", - "-m", - "--models", - "some_model", - "--exclude", - "one_more_model", - "--selector", - "some_other_selector", - "--all", - "--scripts", - "some_script", - "--before", - "--debug", - "--vars", - "{env: 'some'}", - "--globals", - ] - }, - ) - - mock_capture.assert_called_with( - distinct_id="test_uid", - event="some_action", - properties={ - "tool": "fal-cli", - "config_hash": "", - "event_id": ANY, - "invocation_id": ANY, - "user_id": "test_uid", - "action": "some_action", - "client_time": ANY, - "total_runtime": ANY, - "python_version": ANY, - "fal_version": ANY, - "dbt_version": ANY, - "dbt_adapter": ANY, - "docker_container": ANY, - "airflow": ANY, - "github_action": ANY, - "gitlab_ci": ANY, - "os": ANY, - "telemetry_version": ANY, - "$geoip_disable": True, - "$ip": None, - "argv": [ - "[REDACTED]", - "flow", - "run", - "--disable-logging", - "--project-dir", - "[REDACTED]", - "--profiles-dir", - "[REDACTED]", - "--defer", - "--threads", - "--state", - "[REDACTED]", - "-s", - "--select", - "[REDACTED]", - "-m", - "--models", - "[REDACTED]", - "--exclude", - "[REDACTED]", - "--selector", - "[REDACTED]", - "--all", - "--scripts", - "[REDACTED]", - "--before", - "--debug", - "--vars", - "[REDACTED]", - "--globals", - ], - }, - ) diff --git a/tools/generate_typing_context.py b/tools/generate_typing_context.py deleted file mode 100644 index 7c80589f..00000000 --- a/tools/generate_typing_context.py +++ /dev/null @@ -1,129 +0,0 @@ -# Automatically generate the typing declaration of exposed faldbt functions. 
-# Usage: -# $ python tools/generate_typing_context.py - - -import ast -import copy -import textwrap -import subprocess - -TEMPLATE = """ -# This file is auto-generated by tools/generate_typing_context.py, please -# don't manually alter the contents. - -from __future__ import annotations -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - import pandas as pd - from typing import Optional, Dict, List, Protocol, Any - from fal.dbt.integration.project import DbtModel, DbtTest, DbtSource, Feature - from fal.dbt.fal_script import Context, CurrentModel - -{protocols} - - # Manually introduced annotations, update manually in tools/generate_typing_context.py template. - class _Write_To_Model(Protocol): - def __call__( - self, - data: pd.DataFrame, - *, - dtype: Any = None, - mode: str = "overwrite", - target_1: str = ..., - target_2: Optional[str] = ..., - ): - ''' - Write a pandas.DataFrame to a dbt model automagically. - ''' - ... - - -context: Context -write_to_model: _Write_To_Model - -{annotations} -""" - -TYPING_CONTEXT_FILE = "projects/adapter/src/fal/dbt/typing.py" -FAL_DBT_FILE = "projects/adapter/src/fal/dbt/integration/project.py" -FAL_DBT_CLS = "FalDbt" -MANUAL_ANNOTATIONS = ["write_to_model"] - - -def collect_methods(file, class_name): - with open(file) as stream: - tree = ast.parse(stream.read()) - - for node in tree.body: - if isinstance(node, ast.ClassDef) and node.name == class_name: - yield from filter( - lambda node: ( - isinstance(node, ast.FunctionDef) - and not node.name.startswith("_") - and not any( - isinstance(decorator, ast.Name) and decorator.id == "property" - for decorator in node.decorator_list - ) - ), - node.body, - ) - - -def generate_protocols(file, class_name): - protocols, annotations = [], [] - for method in collect_methods(file, class_name): - call_function: ast.FunctionDef = copy.deepcopy(method) # type: ignore - - if call_function.name in MANUAL_ANNOTATIONS: - continue - - call_function.name = "__call__" - call_function.decorator_list.clear() - - if ast.get_docstring(call_function): - call_function.body = call_function.body[:1] - else: - call_function.body = [] - - call_function.body.append(ast.Expr(ast.Constant(...))) - - protocols.append( - ast.ClassDef( - name="_" + method.name.title(), - bases=[ast.Name(id="Protocol")], - body=[call_function], - decorator_list=[], - keywords=[], - ) - ) - annotations.append( - ast.AnnAssign( - target=ast.Name(id=method.name), - annotation=ast.Name(id=protocols[-1].name), - simple=1, - ) - ) - - return protocols, annotations - - -def main(): - protocols, annotations = generate_protocols(FAL_DBT_FILE, FAL_DBT_CLS) - - with open(TYPING_CONTEXT_FILE, "w") as stream: - stream.write( - TEMPLATE.format( - protocols=textwrap.indent( - "\n".join(map(ast.unparse, protocols)), " " - ), - annotations="\n".join(map(ast.unparse, annotations)), - ) - ) - - subprocess.run(["black", TYPING_CONTEXT_FILE]) - - -if __name__ == "__main__": - main()
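
For reference, the core transformation the deleted generator performed — turn each public FalDbt method into a typing Protocol whose __call__ carries the same signature and docstring — fits in a few lines. A self-contained sketch; the ref signature below is illustrative, not FalDbt's real one:

    import ast

    source = '''
    class FalDbt:
        def ref(self, target_model_name: str, target_package_name: str = None):
            """Reference a dbt model."""
            return None
    '''

    tree = ast.parse(source)
    method = tree.body[0].body[0]  # the `ref` FunctionDef

    method.name = "__call__"
    method.body = method.body[:1]  # keep only the docstring
    method.body.append(ast.Expr(ast.Constant(...)))

    # Reuse a parsed class shell instead of constructing ast.ClassDef by hand;
    # this keeps the sketch working across Python versions.
    protocol = ast.parse("class _Ref(Protocol): ...").body[0]
    protocol.body = [method]

    # ast.unparse (Python 3.9+) renders the synthesized tree back to source:
    # class _Ref(Protocol):
    #     def __call__(self, target_model_name: str, target_package_name: str=None):
    #         """Reference a dbt model."""
    #         ...
    print(ast.unparse(protocol))

Annotating a module-level name with such a Protocol (ref: _Ref) is what gave editors call signatures and docstrings for the dynamically injected fal context functions.
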