From 3762a9cae0d66a1250133d2abd4e0a80441d8913 Mon Sep 17 00:00:00 2001 From: Michael Struwig Date: Tue, 7 May 2024 14:59:46 +0200 Subject: [PATCH 1/6] WIP: Use openbb coverage to retrieve tools --- openbb_agents/tools.py | 287 ++----- poetry.lock | 1646 +++++++++++++++------------------------- pyproject.toml | 19 +- tests/conftest.py | 62 ++ tests/test_tools.py | 66 +- 5 files changed, 807 insertions(+), 1273 deletions(-) create mode 100644 tests/conftest.py diff --git a/openbb_agents/tools.py b/openbb_agents/tools.py index 2275fa2..391361d 100644 --- a/openbb_agents/tools.py +++ b/openbb_agents/tools.py @@ -1,19 +1,27 @@ """Load OpenBB functions at OpenAI tools for function calling in Langchain""" -import inspect -from functools import wraps -from types import ModuleType -from typing import Callable, List, Union +from typing import Any -import tiktoken from langchain.schema import Document from langchain.tools import StructuredTool -from langchain.tools.base import ToolException from langchain_community.vectorstores import FAISS, VectorStore from langchain_openai import OpenAIEmbeddings from openbb import obb -from pydantic.v1 import ValidationError, create_model -from pydantic.v1.fields import FieldInfo -from pydantic_core import PydanticUndefinedType +from pydantic import BaseModel + + +def enable_openbb_llm_mode(): + from openbb import obb + + obb.user.preferences.output_type = "llm" # type: ignore + obb.system.python_settings.docstring_sections = ["description", "examples"] # type: ignore + obb.system.python_settings.docstring_max_length = 1024 # type: ignore + + import openbb + + openbb.build() + + +enable_openbb_llm_mode() def create_tool_index(tools: list[StructuredTool]) -> VectorStore: @@ -27,212 +35,63 @@ def create_tool_index(tools: list[StructuredTool]) -> VectorStore: return vector_store -def _fetch_obb_module(openbb_command_root: str) -> ModuleType: - module_path_split = openbb_command_root.split("/")[1:] - module_path = ".".join(module_path_split) - - # Iteratively get module - module = obb - for attr in module_path.split("."): - module = getattr(module, attr) - - return module - - -def _fetch_schemas(openbb_command_root: str) -> dict: - # Ugly hack to make it compatiable with the look-up (even though we convert - # it back) so that we have a nicer API for the user. - module_root_path = openbb_command_root.replace("/", ".") - schemas = { - k.replace(".", "/"): v - for k, v in obb.coverage.command_model.items() - if module_root_path in k - } - return schemas - - -def _fetch_callables(openbb_command_root): - module = _fetch_obb_module(openbb_command_root) - - if inspect.ismethod( - module - ): # Handle case where a final command endpoint is passed. - members_dict = {module.__name__: module} - else: # If a command root is passed instead - members = inspect.getmembers(module) - members_dict = { - x[0]: x[1] for x in members if "__" not in x[0] and "_run" not in x[0] - } - - schemas = _fetch_schemas(openbb_command_root) - # Create callables dict, with the same key as used in the schemas - callables = {} - for k in schemas.keys(): - try: - callables[k] = members_dict[k.split("/")[-1]] - except ( - KeyError - ): # Sometimes we don't have a specific callable for an endpoint, so we skip. 
- pass - return callables - - -def _fetch_outputs(schema): - outputs = [] - output_fields = schema["openbb"]["Data"]["fields"] - for name, t in output_fields.items(): - if isinstance(t.annotation, type): - type_str = t.annotation.__name__ - else: - type_str = str(t.annotation).replace("typing.", "") - outputs.append((name, type_str)) - return outputs - - -def from_schema_to_pydantic_model(model_name, schema): - create_model_kwargs = {} - for field, field_info in schema.items(): - field_type = field_info.annotation - - # Handle default values - if not isinstance(field_info.default, PydanticUndefinedType): - field_default_value = field_info.default - new_field_info = ( - FieldInfo( # Weird hack, because of how the default field value works - description=field_info.description, - default=field_default_value, - ) - ) - else: - new_field_info = FieldInfo( - description=field_info.description, +def create_document(dict): + ... + + +class OpenBBFunctionDescription(BaseModel): + name: str + input: Any + output: Any + callable: Any + + +def get_openbb_coverage_providers() -> dict: + return obb.coverage.providers # type: ignore + + +def get_openbb_user_credentials() -> dict: + return obb.user.credentials.model_dump() # type: ignore + + +def get_openbb_coverage_command_schemas() -> dict: + return obb.coverage.command_schemas() # type: ignore + + +def get_valid_list_of_providers() -> list[str]: + credentials = get_openbb_user_credentials() + valid_providers = [] + for name, value in credentials.items(): + if value is not None: + valid_providers.append(name.split("_api_key")[0].split("_token")[0]) + return valid_providers + + +def get_valid_openbb_function_names() -> list[str]: + valid_providers = get_valid_list_of_providers() + valid_function_names = set() + for provider in valid_providers: + valid_function_names |= set(get_openbb_coverage_providers()[provider]) + return sorted(list(valid_function_names)) + + +def get_valid_openbb_function_descriptions() -> list[OpenBBFunctionDescription]: + command_schemas = get_openbb_coverage_command_schemas() + obb_function_descriptions = [] + for obb_function_name in get_valid_openbb_function_names(): + dict_ = command_schemas[obb_function_name] + obb_function_descriptions.append( + OpenBBFunctionDescription( + name=obb_function_name, + input=dict_["input"], + output=dict_["output"], + callable=dict_["callable"], ) - create_model_kwargs[field] = (field_type, new_field_info) - return create_model(model_name, **create_model_kwargs) - - -def return_results(func): - """Return the results rather than the OBBject.""" - - def wrapper_func(*args, **kwargs): - try: - result = func(*args, **kwargs).results - encoding = tiktoken.encoding_for_model("gpt-4-1106-preview") - num_tokens = len(encoding.encode(str(result))) - if num_tokens > 90000: - raise ToolException( - "The returned output is too large to fit into context. Consider using another tool, or trying again with different input arguments." # noqa: E501 - ) - return result - # Necessary to catch general exception in this case, since we want the - # LLM to be able to correct a bad call, if possible. - except Exception as err: - raise ToolException(err) from err - - return wrapper_func - - -def from_openbb_to_langchain_func( - openbb_command_root: str, openbb_callable: Callable, openbb_schema: dict -) -> StructuredTool: - func_schema = openbb_schema["openbb"]["QueryParams"]["fields"] - # Lookup the default provider's input arguments... 
- default_provider = obb.coverage.commands[openbb_command_root.replace("/", ".")][0] - # ... and add them to the func schema. - func_schema.update(openbb_schema[default_provider]["QueryParams"]["fields"]) - pydantic_model = from_schema_to_pydantic_model( - model_name=f"{openbb_command_root}InputModel", schema=func_schema - ) - - outputs = _fetch_outputs(openbb_schema) - description = openbb_callable.__doc__.split("\n")[0] - description += "\nThe following data is available in the output:\n\n" - description += ", ".join(e[0].replace("_", " ") for e in outputs) - - tool = StructuredTool( - name=openbb_command_root, # We use the command root for the name of the tool - func=return_results(openbb_callable), - description=description, - args_schema=pydantic_model, - handle_tool_error=True, - ) - - # We have to do some magic here to prevent a bad input argument from - # breaking the langchain flow - # https://github.com/langchain-ai/langchain/issues/13662#issuecomment-1831242057 - def handle_validation_error(func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except ValidationError as err: - return str(err) - - return wrapper - - # Monkey-patch the run method - object.__setattr__(tool, "run", handle_validation_error(tool.run)) - - return tool - - -def map_openbb_functions_to_langchain_tools( - openbb_command_root, schemas_dict, callables_dict -): - tools = [] - for route in callables_dict.keys(): - tool = from_openbb_to_langchain_func( - openbb_command_root=route, - openbb_callable=callables_dict[route], - openbb_schema=schemas_dict[route], ) - tools.append(tool) - return tools - - -def map_openbb_routes_to_langchain_tools( - openbb_commands_root: Union[str, List[str]], -) -> list[StructuredTool]: - """Map a collection of OpenBB callables from a command root to StructuredTools. - - Examples - -------- - >>> fundamental_tools = map_openbb_collection_to_langchain_tools( - ... "/equity/fundamental" - ... ) - >>> crypto_price_tools = map_openbb_collection_to_langchain_tools( - ... "/crypto/price" - ... ) - - - """ - openbb_commands_root_list = ( - [openbb_commands_root] - if isinstance(openbb_commands_root, str) - else openbb_commands_root - ) - - tools: List = [] - for obb_cmd_root in openbb_commands_root_list: - schemas = _fetch_schemas(obb_cmd_root) - callables = _fetch_callables(obb_cmd_root) - tools += map_openbb_functions_to_langchain_tools( - openbb_command_root=obb_cmd_root, - schemas_dict=schemas, - callables_dict=callables, - ) - return tools - + return obb_function_descriptions -def get_all_openbb_tools(): - tool_routes = list(obb.coverage.commands.keys()) - tool_routes = [ - route.replace(".", "/") for route in tool_routes if "metrics" not in route - ] - tools = [] - for route in tool_routes: - schema = _fetch_schemas(route) - callables = _fetch_callables(route) - tools += map_openbb_functions_to_langchain_tools(route, schema, callables) - return tools +def make_vector_index_description( + openbb_function_description: OpenBBFunctionDescription, +) -> str: + ... 
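
Note on the two WIP stubs above: `create_document` and `make_vector_index_description` are left as `...` in this patch. Below is a minimal sketch of how they might be filled in, assuming the `OpenBBFunctionDescription` model and the `langchain.schema.Document` import already present in this diff; the parameter type for `create_document` (the patch's stub takes `dict`), the composed text, and the metadata key are illustrative assumptions, not part of the patch.

from langchain.schema import Document


def make_vector_index_description(
    openbb_function_description: OpenBBFunctionDescription,
) -> str:
    # Sketch only (the patch leaves this as "..."): combine the route name
    # with the callable's docstring so the embedded text describes both what
    # the function is and what it returns.
    docstring = openbb_function_description.callable.__doc__ or ""
    return f"{openbb_function_description.name}\n{docstring}".strip()


def create_document(
    openbb_function_description: OpenBBFunctionDescription,
) -> Document:
    # Sketch only: wrap the description in a langchain Document, keeping the
    # route name in the metadata so the original function can be recovered
    # after retrieval from the vector index.
    return Document(
        page_content=make_vector_index_description(openbb_function_description),
        metadata={"name": openbb_function_description.name},
    )

Documents built this way could then be indexed with FAISS.from_documents(...) and OpenAIEmbeddings(), mirroring what create_tool_index in this diff already does for StructuredTool objects.
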
diff --git a/poetry.lock b/poetry.lock index 7c84aae..f7173f0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -140,6 +140,17 @@ doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd- test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (<0.22)"] +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +optional = false +python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + [[package]] name = "appnope" version = "0.1.3" @@ -445,17 +456,6 @@ files = [ {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, ] -[[package]] -name = "chardet" -version = "5.2.0" -description = "Universal encoding detector for Python 3" -optional = false -python-versions = ">=3.7" -files = [ - {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, - {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, -] - [[package]] name = "charset-normalizer" version = "3.3.2" @@ -597,112 +597,15 @@ traitlets = ">=4" [package.extras] test = ["pytest"] -[[package]] -name = "contourpy" -version = "1.2.0" -description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false -python-versions = ">=3.9" -files = [ - {file = "contourpy-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0274c1cb63625972c0c007ab14dd9ba9e199c36ae1a231ce45d725cbcbfd10a8"}, - {file = "contourpy-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ab459a1cbbf18e8698399c595a01f6dcc5c138220ca3ea9e7e6126232d102bb4"}, - {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fdd887f17c2f4572ce548461e4f96396681212d858cae7bd52ba3310bc6f00f"}, - {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d16edfc3fc09968e09ddffada434b3bf989bf4911535e04eada58469873e28e"}, - {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c203f617abc0dde5792beb586f827021069fb6d403d7f4d5c2b543d87edceb9"}, - {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b69303ceb2e4d4f146bf82fda78891ef7bcd80c41bf16bfca3d0d7eb545448aa"}, - {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:884c3f9d42d7218304bc74a8a7693d172685c84bd7ab2bab1ee567b769696df9"}, - {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4a1b1208102be6e851f20066bf0e7a96b7d48a07c9b0cfe6d0d4545c2f6cadab"}, - {file = "contourpy-1.2.0-cp310-cp310-win32.whl", hash = "sha256:34b9071c040d6fe45d9826cbbe3727d20d83f1b6110d219b83eb0e2a01d79488"}, - {file = "contourpy-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:bd2f1ae63998da104f16a8b788f685e55d65760cd1929518fd94cd682bf03e41"}, - {file = "contourpy-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd10c26b4eadae44783c45ad6655220426f971c61d9b239e6f7b16d5cdaaa727"}, - {file = 
"contourpy-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5c6b28956b7b232ae801406e529ad7b350d3f09a4fde958dfdf3c0520cdde0dd"}, - {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebeac59e9e1eb4b84940d076d9f9a6cec0064e241818bcb6e32124cc5c3e377a"}, - {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:139d8d2e1c1dd52d78682f505e980f592ba53c9f73bd6be102233e358b401063"}, - {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e9dc350fb4c58adc64df3e0703ab076f60aac06e67d48b3848c23647ae4310e"}, - {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18fc2b4ed8e4a8fe849d18dce4bd3c7ea637758c6343a1f2bae1e9bd4c9f4686"}, - {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:16a7380e943a6d52472096cb7ad5264ecee36ed60888e2a3d3814991a0107286"}, - {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8d8faf05be5ec8e02a4d86f616fc2a0322ff4a4ce26c0f09d9f7fb5330a35c95"}, - {file = "contourpy-1.2.0-cp311-cp311-win32.whl", hash = "sha256:67b7f17679fa62ec82b7e3e611c43a016b887bd64fb933b3ae8638583006c6d6"}, - {file = "contourpy-1.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:99ad97258985328b4f207a5e777c1b44a83bfe7cf1f87b99f9c11d4ee477c4de"}, - {file = "contourpy-1.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:575bcaf957a25d1194903a10bc9f316c136c19f24e0985a2b9b5608bdf5dbfe0"}, - {file = "contourpy-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9e6c93b5b2dbcedad20a2f18ec22cae47da0d705d454308063421a3b290d9ea4"}, - {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:464b423bc2a009088f19bdf1f232299e8b6917963e2b7e1d277da5041f33a779"}, - {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:68ce4788b7d93e47f84edd3f1f95acdcd142ae60bc0e5493bfd120683d2d4316"}, - {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7d1f8871998cdff5d2ff6a087e5e1780139abe2838e85b0b46b7ae6cc25399"}, - {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e739530c662a8d6d42c37c2ed52a6f0932c2d4a3e8c1f90692ad0ce1274abe0"}, - {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:247b9d16535acaa766d03037d8e8fb20866d054d3c7fbf6fd1f993f11fc60ca0"}, - {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:461e3ae84cd90b30f8d533f07d87c00379644205b1d33a5ea03381edc4b69431"}, - {file = "contourpy-1.2.0-cp312-cp312-win32.whl", hash = "sha256:1c2559d6cffc94890b0529ea7eeecc20d6fadc1539273aa27faf503eb4656d8f"}, - {file = "contourpy-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:491b1917afdd8638a05b611a56d46587d5a632cabead889a5440f7c638bc6ed9"}, - {file = "contourpy-1.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5fd1810973a375ca0e097dee059c407913ba35723b111df75671a1976efa04bc"}, - {file = "contourpy-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:999c71939aad2780f003979b25ac5b8f2df651dac7b38fb8ce6c46ba5abe6ae9"}, - {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7caf9b241464c404613512d5594a6e2ff0cc9cb5615c9475cc1d9b514218ae8"}, - {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:266270c6f6608340f6c9836a0fb9b367be61dde0c9a9a18d5ece97774105ff3e"}, - {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbd50d0a0539ae2e96e537553aff6d02c10ed165ef40c65b0e27e744a0f10af8"}, - {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11f8d2554e52f459918f7b8e6aa20ec2a3bce35ce95c1f0ef4ba36fbda306df5"}, - {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ce96dd400486e80ac7d195b2d800b03e3e6a787e2a522bfb83755938465a819e"}, - {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d3364b999c62f539cd403f8123ae426da946e142312a514162adb2addd8d808"}, - {file = "contourpy-1.2.0-cp39-cp39-win32.whl", hash = "sha256:1c88dfb9e0c77612febebb6ac69d44a8d81e3dc60f993215425b62c1161353f4"}, - {file = "contourpy-1.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:78e6ad33cf2e2e80c5dfaaa0beec3d61face0fb650557100ee36db808bfa6843"}, - {file = "contourpy-1.2.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:be16975d94c320432657ad2402f6760990cb640c161ae6da1363051805fa8108"}, - {file = "contourpy-1.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b95a225d4948b26a28c08307a60ac00fb8671b14f2047fc5476613252a129776"}, - {file = "contourpy-1.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d7e03c0f9a4f90dc18d4e77e9ef4ec7b7bbb437f7f675be8e530d65ae6ef956"}, - {file = "contourpy-1.2.0.tar.gz", hash = "sha256:171f311cb758de7da13fc53af221ae47a5877be5a0843a9fe150818c51ed276a"}, -] - -[package.dependencies] -numpy = ">=1.20,<2.0" - -[package.extras] -bokeh = ["bokeh", "selenium"] -docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] -mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.6.1)", "types-Pillow"] -test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] -test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"] - -[[package]] -name = "cssselect2" -version = "0.7.0" -description = "CSS selectors for Python ElementTree" -optional = false -python-versions = ">=3.7" -files = [ - {file = "cssselect2-0.7.0-py3-none-any.whl", hash = "sha256:fd23a65bfd444595913f02fc71f6b286c29261e354c41d722ca7a261a49b5969"}, - {file = "cssselect2-0.7.0.tar.gz", hash = "sha256:1ccd984dab89fc68955043aca4e1b03e0cf29cad9880f6e28e3ba7a74b14aa5a"}, -] - -[package.dependencies] -tinycss2 = "*" -webencodings = "*" - -[package.extras] -doc = ["sphinx", "sphinx_rtd_theme"] -test = ["flake8", "isort", "pytest"] - -[[package]] -name = "cycler" -version = "0.12.1" -description = "Composable style cycles" -optional = false -python-versions = ">=3.8" -files = [ - {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, - {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, -] - -[package.extras] -docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] -tests = ["pytest", "pytest-cov", "pytest-xdist"] - [[package]] name = "dataclasses-json" -version = "0.6.3" +version = "0.6.5" description = "Easily serialize dataclasses to and from JSON." 
optional = false -python-versions = ">=3.7,<4.0" +python-versions = "<4.0,>=3.7" files = [ - {file = "dataclasses_json-0.6.3-py3-none-any.whl", hash = "sha256:4aeb343357997396f6bca1acae64e486c3a723d8f5c76301888abeccf0c45176"}, - {file = "dataclasses_json-0.6.3.tar.gz", hash = "sha256:35cb40aae824736fdf959801356641836365219cfe14caeb115c39136f775d2a"}, + {file = "dataclasses_json-0.6.5-py3-none-any.whl", hash = "sha256:f49c77aa3a85cac5bf5b7f65f4790ca0d2be8ef4d92c75e91ba0103072788a39"}, + {file = "dataclasses_json-0.6.5.tar.gz", hash = "sha256:1c287594d9fcea72dc42d6d3836cf14848c2dc5ce88f65ed61b36b57f515fe26"}, ] [package.dependencies] @@ -749,13 +652,13 @@ files = [ [[package]] name = "defusedxml" -version = "0.7.1" +version = "0.8.0rc2" description = "XML bomb protection for Python stdlib modules" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" files = [ - {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, - {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, + {file = "defusedxml-0.8.0rc2-py2.py3-none-any.whl", hash = "sha256:1c812964311154c3bf4aaf3bc1443b31ee13530b7f255eaaa062c0553c76103d"}, + {file = "defusedxml-0.8.0rc2.tar.gz", hash = "sha256:138c7d540a78775182206c7c97fe65b246a2f40b29471e1a2f1b0da76e7a3942"}, ] [[package]] @@ -897,70 +800,16 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pyt typing = ["typing-extensions (>=4.8)"] [[package]] -name = "fonttools" -version = "4.47.2" -description = "Tools to manipulate font files" +name = "filetype" +version = "1.2.0" +description = "Infer file type and MIME type of any file/buffer. No external dependencies." 
optional = false -python-versions = ">=3.8" +python-versions = "*" files = [ - {file = "fonttools-4.47.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3b629108351d25512d4ea1a8393a2dba325b7b7d7308116b605ea3f8e1be88df"}, - {file = "fonttools-4.47.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c19044256c44fe299d9a73456aabee4b4d06c6b930287be93b533b4737d70aa1"}, - {file = "fonttools-4.47.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8be28c036b9f186e8c7eaf8a11b42373e7e4949f9e9f370202b9da4c4c3f56c"}, - {file = "fonttools-4.47.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f83a4daef6d2a202acb9bf572958f91cfde5b10c8ee7fb1d09a4c81e5d851fd8"}, - {file = "fonttools-4.47.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5a5318ba5365d992666ac4fe35365f93004109d18858a3e18ae46f67907670"}, - {file = "fonttools-4.47.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8f57ecd742545362a0f7186774b2d1c53423ed9ece67689c93a1055b236f638c"}, - {file = "fonttools-4.47.2-cp310-cp310-win32.whl", hash = "sha256:a1c154bb85dc9a4cf145250c88d112d88eb414bad81d4cb524d06258dea1bdc0"}, - {file = "fonttools-4.47.2-cp310-cp310-win_amd64.whl", hash = "sha256:3e2b95dce2ead58fb12524d0ca7d63a63459dd489e7e5838c3cd53557f8933e1"}, - {file = "fonttools-4.47.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:29495d6d109cdbabe73cfb6f419ce67080c3ef9ea1e08d5750240fd4b0c4763b"}, - {file = "fonttools-4.47.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0a1d313a415eaaba2b35d6cd33536560deeebd2ed758b9bfb89ab5d97dc5deac"}, - {file = "fonttools-4.47.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90f898cdd67f52f18049250a6474185ef6544c91f27a7bee70d87d77a8daf89c"}, - {file = "fonttools-4.47.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3480eeb52770ff75140fe7d9a2ec33fb67b07efea0ab5129c7e0c6a639c40c70"}, - {file = "fonttools-4.47.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0255dbc128fee75fb9be364806b940ed450dd6838672a150d501ee86523ac61e"}, - {file = "fonttools-4.47.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f791446ff297fd5f1e2247c188de53c1bfb9dd7f0549eba55b73a3c2087a2703"}, - {file = "fonttools-4.47.2-cp311-cp311-win32.whl", hash = "sha256:740947906590a878a4bde7dd748e85fefa4d470a268b964748403b3ab2aeed6c"}, - {file = "fonttools-4.47.2-cp311-cp311-win_amd64.whl", hash = "sha256:63fbed184979f09a65aa9c88b395ca539c94287ba3a364517698462e13e457c9"}, - {file = "fonttools-4.47.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4ec558c543609e71b2275c4894e93493f65d2f41c15fe1d089080c1d0bb4d635"}, - {file = "fonttools-4.47.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e040f905d542362e07e72e03612a6270c33d38281fd573160e1003e43718d68d"}, - {file = "fonttools-4.47.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6dd58cc03016b281bd2c74c84cdaa6bd3ce54c5a7f47478b7657b930ac3ed8eb"}, - {file = "fonttools-4.47.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32ab2e9702dff0dd4510c7bb958f265a8d3dd5c0e2547e7b5f7a3df4979abb07"}, - {file = "fonttools-4.47.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a808f3c1d1df1f5bf39be869b6e0c263570cdafb5bdb2df66087733f566ea71"}, - {file = "fonttools-4.47.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac71e2e201df041a2891067dc36256755b1229ae167edbdc419b16da78732c2f"}, - {file = 
"fonttools-4.47.2-cp312-cp312-win32.whl", hash = "sha256:69731e8bea0578b3c28fdb43dbf95b9386e2d49a399e9a4ad736b8e479b08085"}, - {file = "fonttools-4.47.2-cp312-cp312-win_amd64.whl", hash = "sha256:b3e1304e5f19ca861d86a72218ecce68f391646d85c851742d265787f55457a4"}, - {file = "fonttools-4.47.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:254d9a6f7be00212bf0c3159e0a420eb19c63793b2c05e049eb337f3023c5ecc"}, - {file = "fonttools-4.47.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eabae77a07c41ae0b35184894202305c3ad211a93b2eb53837c2a1143c8bc952"}, - {file = "fonttools-4.47.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86a5ab2873ed2575d0fcdf1828143cfc6b977ac448e3dc616bb1e3d20efbafa"}, - {file = "fonttools-4.47.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13819db8445a0cec8c3ff5f243af6418ab19175072a9a92f6cc8ca7d1452754b"}, - {file = "fonttools-4.47.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4e743935139aa485fe3253fc33fe467eab6ea42583fa681223ea3f1a93dd01e6"}, - {file = "fonttools-4.47.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d49ce3ea7b7173faebc5664872243b40cf88814ca3eb135c4a3cdff66af71946"}, - {file = "fonttools-4.47.2-cp38-cp38-win32.whl", hash = "sha256:94208ea750e3f96e267f394d5588579bb64cc628e321dbb1d4243ffbc291b18b"}, - {file = "fonttools-4.47.2-cp38-cp38-win_amd64.whl", hash = "sha256:0f750037e02beb8b3569fbff701a572e62a685d2a0e840d75816592280e5feae"}, - {file = "fonttools-4.47.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3d71606c9321f6701642bd4746f99b6089e53d7e9817fc6b964e90d9c5f0ecc6"}, - {file = "fonttools-4.47.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:86e0427864c6c91cf77f16d1fb9bf1bbf7453e824589e8fb8461b6ee1144f506"}, - {file = "fonttools-4.47.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a00bd0e68e88987dcc047ea31c26d40a3c61185153b03457956a87e39d43c37"}, - {file = "fonttools-4.47.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5d77479fb885ef38a16a253a2f4096bc3d14e63a56d6246bfdb56365a12b20c"}, - {file = "fonttools-4.47.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5465df494f20a7d01712b072ae3ee9ad2887004701b95cb2cc6dcb9c2c97a899"}, - {file = "fonttools-4.47.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4c811d3c73b6abac275babb8aa439206288f56fdb2c6f8835e3d7b70de8937a7"}, - {file = "fonttools-4.47.2-cp39-cp39-win32.whl", hash = "sha256:5b60e3afa9635e3dfd3ace2757039593e3bd3cf128be0ddb7a1ff4ac45fa5a50"}, - {file = "fonttools-4.47.2-cp39-cp39-win_amd64.whl", hash = "sha256:7ee48bd9d6b7e8f66866c9090807e3a4a56cf43ffad48962725a190e0dd774c8"}, - {file = "fonttools-4.47.2-py3-none-any.whl", hash = "sha256:7eb7ad665258fba68fd22228a09f347469d95a97fb88198e133595947a20a184"}, - {file = "fonttools-4.47.2.tar.gz", hash = "sha256:7df26dd3650e98ca45f1e29883c96a0b9f5bb6af8d632a6a108bc744fa0bd9b3"}, + {file = "filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25"}, + {file = "filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb"}, ] -[package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] -graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "pycairo", "scipy"] -lxml = 
["lxml (>=4.0,<5)"] -pathops = ["skia-pathops (>=0.5.0)"] -plot = ["matplotlib"] -repacker = ["uharfbuzz (>=0.23.0)"] -symfont = ["sympy"] -type1 = ["xattr"] -ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.1.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] - [[package]] name = "fqdn" version = "1.5.1" @@ -986,6 +835,51 @@ files = [ [package.dependencies] python-dateutil = ">=2.7" +[[package]] +name = "frozendict" +version = "2.4.2" +description = "A simple immutable dictionary" +optional = false +python-versions = ">=3.6" +files = [ + {file = "frozendict-2.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:19743495b1e92a7e4db56fcd6a5d36ea1d1b0f550822d6fd780e44d58f0b8c18"}, + {file = "frozendict-2.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81efb4ea854a1c93d954a67389eaf78c508acb2d4768321a835cda2754ec5c01"}, + {file = "frozendict-2.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5f1a4d9662b854dce52b560b60f51349905dc871826b8c6be20141a13067a53"}, + {file = "frozendict-2.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1412aeb325e4a28cfe32106c66c046372bb7fd5a9af1748193549c5d01a9e9c1"}, + {file = "frozendict-2.4.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f7ce0535f02eba9746e4e2cf0abef0f0f2051d20fdccf4af31bc3d1adecf5a71"}, + {file = "frozendict-2.4.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:07153e6d2720fa1131bb180ce388c7042affb29561d8bcd1c0d6e683a8beaea2"}, + {file = "frozendict-2.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f7a90ea6d5248617a1222daef07d22fb146ff07635a36db327e1ce114bf3e304"}, + {file = "frozendict-2.4.2-cp310-cp310-win_arm64.whl", hash = "sha256:20a6f741c92fdeb3766924cde42b8ee445cf568e3be8aa983cb83e9fe5b61e63"}, + {file = "frozendict-2.4.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:146129502cd9d96de64e0c8f7dc4c66422da3d4bfccf891dd80a3821b358a926"}, + {file = "frozendict-2.4.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ac1f74ccf818977abbc1868090c06436b8f06534d306f808f15cffc304ae046"}, + {file = "frozendict-2.4.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d2ea4f10505ad15f53ce3742420682d916d0c4d566edb8e1019756e7cea30"}, + {file = "frozendict-2.4.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4a5841681e70d2862ca153543f2912e0bab034bf29e2d3610e86ea42506121c2"}, + {file = "frozendict-2.4.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d4a10119f17552cbeab48d4ae830ba091c6d47616589618adc31f251184579a7"}, + {file = "frozendict-2.4.2-cp36-cp36m-win_amd64.whl", hash = "sha256:7d13ffe649e9db6f4bb5e107d9be7dfd23e13101bc69f97aa5fa6cbf6aecaadd"}, + {file = "frozendict-2.4.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19e64630e164a297f83e9a1c69f1cd36fa4b3d1196c1f9fc006a0385aa198ea4"}, + {file = "frozendict-2.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bedb0a6587bae53bd53727b92a87c4cf90ad7a7e0bd2db562d439beb6982712e"}, + {file = "frozendict-2.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83cc9d063131fd8adbeb18a473d222b5dc8301cac9505cfe578158f9a9bf55a9"}, + {file = "frozendict-2.4.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:92c46b155ea9eb9ecabc66ba2d9030f2634319f55c6448688965ece094f14b51"}, + {file = "frozendict-2.4.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:f958d40637e0440bce2453019821c94fe86cfc5f3847ae11cd4f02c3548b1d1b"}, + {file = 
"frozendict-2.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:ac954be447a907face9b652207fbd943b9b552212890db959ba653e8f1dc3f56"}, + {file = "frozendict-2.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f7e0ff5e84742604a1b42c2de4f1e67630c0868cf52a5c585b54a99e06f6b453"}, + {file = "frozendict-2.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:84c36bfa819cd8442f6e0bdb86413c7678b2822a46b1a22cfa0f0dd30d9e5c45"}, + {file = "frozendict-2.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cead3bfe70c90c634a9b76807c9d7e75e6c5666ec96fa2cea8e7412ccf22a1f8"}, + {file = "frozendict-2.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fc6e3158107b5431255978b954758b1041cc70a3b8e7657373110512eb528e3"}, + {file = "frozendict-2.4.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4db1d6cc412bd865cab36723995208b82166a97bc6c724753bcd2b90cf24f164"}, + {file = "frozendict-2.4.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ff6fb5831539fffb09d71cc0cc0462b1f27c0160cb6c6fa2d1f4c1bc7fffe52a"}, + {file = "frozendict-2.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:79e1c94ad2a925ad5723d82a4134c6d851d5a7bc72b7e9da8b2087c42758a512"}, + {file = "frozendict-2.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:34704f9ffb21448d4b5c0f9239f8f058c0efab4bfdbe2956c5be978fef0b929c"}, + {file = "frozendict-2.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5280d685cd1659883a3010dec843afe3065416ae92e453498997d4474a898a39"}, + {file = "frozendict-2.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ca09a376114172e4d9918e6d576f58244c45e21f5af1245085699fd3a171c47"}, + {file = "frozendict-2.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55953aa2acf5bf183c664f3d0f540f8c8ac8f5fa97170f2098d413414318eb2b"}, + {file = "frozendict-2.4.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:476e4857e1d87b05c9102dd5409216ce4716cb7df619e6657429bc99279303cc"}, + {file = "frozendict-2.4.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4a8b298f39242d25770d029588ce9d4f524e9f4edc60d2d34b6178fb07c8a93e"}, + {file = "frozendict-2.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:c157b8a92743a7905b341edb0663044fecdc7780f96c59a2843d3da68d694b90"}, + {file = "frozendict-2.4.2-cp39-cp39-win_arm64.whl", hash = "sha256:cbab325c0a98b2f3ee291b36710623781b4977a3057f9103a7b0f11bcc23b177"}, + {file = "frozendict-2.4.2.tar.gz", hash = "sha256:741779e1d1a2e6bb2c623f78423bd5d14aad35dc0c57e6ccc89e54eaab5f1b8a"}, +] + [[package]] name = "frozenlist" version = "1.4.1" @@ -1755,137 +1649,25 @@ docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pyd openapi = ["openapi-core (>=0.18.0,<0.19.0)", "ruamel-yaml"] test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-validator (>=0.6.0,<0.8.0)", "pytest (>=7.0)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "requests-mock", "ruamel-yaml", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"] -[[package]] -name = "kiwisolver" -version = "1.4.5" -description = "A fast implementation of the Cassowary constraint solver" -optional = false -python-versions = ">=3.7" -files = [ - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, - {file = 
"kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, - {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, - {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, - {file = 
"kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, - {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, - {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, - {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, - {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, - 
{file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, - {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, - {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, - {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, - {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = 
"sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, - {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, -] - [[package]] name = "langchain" -version = "0.1.0" +version = "0.1.17" description = "Building applications with LLMs through composability" optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain-0.1.0-py3-none-any.whl", hash = "sha256:8652e74b039333a55c79faff4400b077ba1bd0ddce5255574e42d301c05c1733"}, - {file = "langchain-0.1.0.tar.gz", hash = "sha256:d43119f8d3fda2c8ddf8c3a19bd5b94b347e27d1867ff14a921b90bdbed0668a"}, + {file = "langchain-0.1.17-py3-none-any.whl", hash = "sha256:f6c5b5fdb529545e6cafbb4ba099031508e621ba1ed7985cf078a597ade3458b"}, + {file = "langchain-0.1.17.tar.gz", hash = "sha256:709b80afa00ae634dfc7042f3e4c20309267b21ffeacc7d7494d58bcae1862f7"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" dataclasses-json = ">=0.5.7,<0.7" jsonpatch = ">=1.33,<2.0" -langchain-community = ">=0.0.9,<0.1" -langchain-core = ">=0.1.7,<0.2" -langsmith = ">=0.0.77,<0.1.0" +langchain-community = ">=0.0.36,<0.1" +langchain-core = ">=0.1.48,<0.2.0" +langchain-text-splitters = ">=0.0.1,<0.1" +langsmith = ">=0.1.17,<0.2.0" numpy = ">=1,<2" pydantic = ">=1,<3" PyYAML = ">=5.3" @@ -1894,35 +1676,35 @@ SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" [package.extras] -azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] +azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", 
"azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] clarifai = ["clarifai (>=9.1.0)"] cli = ["typer (>=0.9.0,<0.10.0)"] -cohere = ["cohere (>=4,<5)"] +cohere = ["cohere (>=4,<6)"] docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.0.2,<0.1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.0.2,<0.1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", 
"msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] javascript = ["esprima (>=4.0.1,<5.0.0)"] -llms = ["clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] +llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"] qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] text-helpers = ["chardet (>=5.1.0,<6.0.0)"] [[package]] name = "langchain-community" -version = "0.0.12" +version = "0.0.37" description = "Community contributed LangChain integrations." optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_community-0.0.12-py3-none-any.whl", hash = "sha256:13b988afaa24e570d2b9992aecccb2fe36d9c33feafd9804f3066dc2ff042d4d"}, - {file = "langchain_community-0.0.12.tar.gz", hash = "sha256:7cfe36c52b1fb86c1095d4dec0cf466a1c752a7446104e8b39cf0f70512a4851"}, + {file = "langchain_community-0.0.37-py3-none-any.whl", hash = "sha256:52e8557602dc099c2e236ec8a0599a956e2f08cfeb61e501815f5ec2d8545747"}, + {file = "langchain_community-0.0.37.tar.gz", hash = "sha256:db2b5829bb20bc5b04c126b69143dbc31a880e949e94110c236b2c176906889f"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" dataclasses-json = ">=0.5.7,<0.7" -langchain-core = ">=0.1.9,<0.2" -langsmith = ">=0.0.63,<0.1.0" +langchain-core = ">=0.1.51,<0.2.0" +langsmith = ">=0.1.0,<0.2.0" numpy = ">=1,<2" PyYAML = ">=5.3" requests = ">=2,<3" @@ -1931,27 +1713,25 @@ tenacity = ">=8.1.0,<9.0.0" [package.extras] cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", 
"gradientai (>=1.4.0,<2.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)", "zhipuai (>=1.0.7,<2.0.0)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "oracledb (>=2.2.0,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit 
(>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] [[package]] name = "langchain-core" -version = "0.1.10" +version = "0.1.52" description = "Building applications with LLMs through composability" optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_core-0.1.10-py3-none-any.whl", hash = "sha256:d89952f6d0766cfc88d9f1e25b84d56f8d7bd63a45ad8ec1a9a038c9b49df16d"}, - {file = "langchain_core-0.1.10.tar.gz", hash = "sha256:3c9e1383264c102fcc6f865700dbb9416c4931a25d0ac2195f6311c6b867aa17"}, + {file = "langchain_core-0.1.52-py3-none-any.whl", hash = "sha256:62566749c92e8a1181c255c788548dc16dbc319d896cd6b9c95dc17af9b2a6db"}, + {file = "langchain_core-0.1.52.tar.gz", hash = "sha256:084c3fc452f5a6966c28ab3ec5dbc8b8d26fc3f63378073928f4e29d90b6393f"}, ] [package.dependencies] -anyio = ">=3,<5" jsonpatch = ">=1.33,<2.0" -langsmith = ">=0.0.63,<0.1.0" +langsmith = ">=0.1.0,<0.2.0" packaging = ">=23.2,<24.0" pydantic = ">=1,<3" PyYAML = ">=5.3" -requests = ">=2,<3" tenacity = ">=8.1.0,<9.0.0" [package.extras] @@ -1959,51 +1739,79 @@ extended-testing = ["jinja2 (>=3,<4)"] [[package]] name = "langchain-openai" -version = "0.0.2.post1" +version = "0.1.6" description = "An integration package connecting OpenAI and LangChain" optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_openai-0.0.2.post1-py3-none-any.whl", hash = "sha256:ba468b94c23da9d8ccefe5d5a3c1c65b4b9702292523e53acc689a9110022e26"}, - {file = "langchain_openai-0.0.2.post1.tar.gz", hash = "sha256:f8e78db4a663feeac71d9f036b9422406c199ea3ef4c97d99ff392c93530e073"}, + {file = "langchain_openai-0.1.6-py3-none-any.whl", hash = "sha256:7f62ecb12d3cdd0d96679abea00e4e3ceb1f829f6d1f127a5f7b97c1315d157f"}, + {file = "langchain_openai-0.1.6.tar.gz", hash = "sha256:7d2e838e57ef231cb7689fd58ac5fa8a6e9e504174f8c5698c837739786e2030"}, ] [package.dependencies] -langchain-core = ">=0.1.7,<0.2" -numpy = ">=1,<2" -openai = ">=1.6.1,<2.0.0" -tiktoken = ">=0.5.2,<0.6.0" +langchain-core = ">=0.1.46,<0.2.0" +openai = ">=1.24.0,<2.0.0" +tiktoken = ">=0.5.2,<1" [[package]] -name = "langchainhub" -version = "0.1.14" -description = "" +name = "langchain-text-splitters" +version = "0.0.1" +description = "LangChain text splitting utilities" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchainhub-0.1.14-py3-none-any.whl", hash = "sha256:3d58a050a3a70684bca2e049a2425a2418d199d0b14e3c8aa318123b7f18b21a"}, - {file = "langchainhub-0.1.14.tar.gz", hash = "sha256:c1aeda38d66df1146f9e60e47bde7fb12bad902eb19dba78ac02f89e0f1f1867"}, + {file = "langchain_text_splitters-0.0.1-py3-none-any.whl", hash = "sha256:f5b802f873f5ff6a8b9259ff34d53ed989666ef4e1582e6d1adb3b5520e3839a"}, + {file = "langchain_text_splitters-0.0.1.tar.gz", hash = "sha256:ac459fa98799f5117ad5425a9330b21961321e30bc19a2a2f9f761ddadd62aa1"}, ] [package.dependencies] -requests = ">=2,<3" -types-requests = ">=2.31.0.2,<3.0.0.0" +langchain-core = ">=0.1.28,<0.2.0" + +[package.extras] +extended-testing = ["lxml (>=5.1.0,<6.0.0)"] [[package]] name = "langsmith" -version = "0.0.80" +version = "0.1.54" description = "Client library to connect to the LangSmith LLM Tracing and 
Evaluation Platform." optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.0.80-py3-none-any.whl", hash = "sha256:dee1c6ef9e8241b82a8851926624269954d0ff8e22d82e32e73455f387f4e245"}, - {file = "langsmith-0.0.80.tar.gz", hash = "sha256:6d22ee07eb41c65b3f5166b20041a026714952497d9e80d5be6879d3a5c14d84"}, + {file = "langsmith-0.1.54-py3-none-any.whl", hash = "sha256:e8ba2758dbdff0fccb35337c28a5ab641dd980b22e178d390b72a15c9ae9caff"}, + {file = "langsmith-0.1.54.tar.gz", hash = "sha256:86f5a90e48303de897f37a893f8bb635eabdaf23e674099e8bc0f2e9ca2f8faf"}, ] [package.dependencies] +orjson = ">=3.9.14,<4.0.0" pydantic = ">=1,<3" requests = ">=2,<3" +[[package]] +name = "litellm" +version = "1.36.1" +description = "Library to easily interface with LLM API providers" +optional = false +python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" +files = [ + {file = "litellm-1.36.1-py3-none-any.whl", hash = "sha256:3a41db4c139989ef24171a69eba72b814b1ac60591af2da7ed689e57bd62b015"}, + {file = "litellm-1.36.1.tar.gz", hash = "sha256:709cb3a9f31186a093c191c01e0df162e5b08c8cc257a2b746668f29fa99a628"}, +] + +[package.dependencies] +aiohttp = "*" +click = "*" +importlib-metadata = ">=6.8.0" +jinja2 = ">=3.1.2,<4.0.0" +openai = ">=1.0.0" +python-dotenv = ">=0.2.0" +requests = ">=2.31.0,<3.0.0" +tiktoken = ">=0.4.0" +tokenizers = "*" + +[package.extras] +extra-proxy = ["azure-identity (>=1.15.0,<2.0.0)", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "resend (>=0.8.0,<0.9.0)"] +proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "cryptography (>=42.0.5,<43.0.0)", "fastapi (>=0.109.1,<0.110.0)", "fastapi-sso (>=0.10.0,<0.11.0)", "gunicorn (>=22.0.0,<23.0.0)", "orjson (>=3.9.7,<4.0.0)", "python-multipart (>=0.0.9,<0.0.10)", "pyyaml (>=6.0.1,<7.0.0)", "rq", "uvicorn (>=0.22.0,<0.23.0)"] + [[package]] name = "lxml" version = "5.1.0" @@ -2097,6 +1905,28 @@ html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] source = ["Cython (>=3.0.7)"] +[[package]] +name = "magentic" +version = "0.23.0" +description = "Seamlessly integrate LLMs as Python functions" +optional = false +python-versions = "<4.0,>=3.10" +files = [ + {file = "magentic-0.23.0-py3-none-any.whl", hash = "sha256:aa678546f8dee7794efdca99e7a509b942e20e7d8a100f57e7e1ec25f135737e"}, + {file = "magentic-0.23.0.tar.gz", hash = "sha256:8a4e7fcb4c3bddfe1f58da8c0dbdc2bcc9e3575ab88938dbdeed38f06ac7bedd"}, +] + +[package.dependencies] +filetype = "*" +litellm = {version = ">=1.36.0", optional = true, markers = "extra == \"litellm\""} +openai = ">=1.24.0" +pydantic = ">=2.0.0" +pydantic-settings = ">=2.0.0" + +[package.extras] +anthropic = ["anthropic (>=0.23.0)"] +litellm = ["litellm (>=1.36.0)"] + [[package]] name = "markupsafe" version = "2.1.3" @@ -2168,72 +1998,23 @@ files = [ [[package]] name = "marshmallow" -version = "3.20.2" +version = "3.21.2" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.20.2-py3-none-any.whl", hash = "sha256:c21d4b98fee747c130e6bc8f45c4b3199ea66bc00c12ee1f639f0aeca034d5e9"}, - {file = "marshmallow-3.20.2.tar.gz", hash = "sha256:4c1daff273513dc5eb24b219a8035559dc573c8f322558ef85f5438ddd1236dd"}, + {file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"}, + {file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"}, ] [package.dependencies] packaging = ">=17.0" [package.extras] -dev = ["pre-commit (>=2.4,<4.0)", "pytest", "pytz", "simplejson", "tox"] -docs = ["alabaster (==0.7.15)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"] -lint = ["pre-commit (>=2.4,<4.0)"] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] tests = ["pytest", "pytz", "simplejson"] -[[package]] -name = "matplotlib" -version = "3.8.2" -description = "Python plotting package" -optional = false -python-versions = ">=3.9" -files = [ - {file = "matplotlib-3.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:09796f89fb71a0c0e1e2f4bdaf63fb2cefc84446bb963ecdeb40dfee7dfa98c7"}, - {file = "matplotlib-3.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f9c6976748a25e8b9be51ea028df49b8e561eed7809146da7a47dbecebab367"}, - {file = "matplotlib-3.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78e4f2cedf303869b782071b55fdde5987fda3038e9d09e58c91cc261b5ad18"}, - {file = "matplotlib-3.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e208f46cf6576a7624195aa047cb344a7f802e113bb1a06cfd4bee431de5e31"}, - {file = "matplotlib-3.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:46a569130ff53798ea5f50afce7406e91fdc471ca1e0e26ba976a8c734c9427a"}, - {file = "matplotlib-3.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:830f00640c965c5b7f6bc32f0d4ce0c36dfe0379f7dd65b07a00c801713ec40a"}, - {file = "matplotlib-3.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d86593ccf546223eb75a39b44c32788e6f6440d13cfc4750c1c15d0fcb850b63"}, - {file = "matplotlib-3.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9a5430836811b7652991939012f43d2808a2db9b64ee240387e8c43e2e5578c8"}, - {file = "matplotlib-3.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9576723858a78751d5aacd2497b8aef29ffea6d1c95981505877f7ac28215c6"}, - {file = "matplotlib-3.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ba9cbd8ac6cf422f3102622b20f8552d601bf8837e49a3afed188d560152788"}, - {file = "matplotlib-3.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:03f9d160a29e0b65c0790bb07f4f45d6a181b1ac33eb1bb0dd225986450148f0"}, - {file = "matplotlib-3.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:3773002da767f0a9323ba1a9b9b5d00d6257dbd2a93107233167cfb581f64717"}, - {file = "matplotlib-3.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:4c318c1e95e2f5926fba326f68177dee364aa791d6df022ceb91b8221bd0a627"}, - {file = "matplotlib-3.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:091275d18d942cf1ee9609c830a1bc36610607d8223b1b981c37d5c9fc3e46a4"}, - {file = "matplotlib-3.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1b0f3b8ea0e99e233a4bcc44590f01604840d833c280ebb8fe5554fd3e6cfe8d"}, - {file = "matplotlib-3.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7b1704a530395aaf73912be741c04d181f82ca78084fbd80bc737be04848331"}, - {file = "matplotlib-3.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533b0e3b0c6768eef8cbe4b583731ce25a91ab54a22f830db2b031e83cca9213"}, - {file = "matplotlib-3.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:0f4fc5d72b75e2c18e55eb32292659cf731d9d5b312a6eb036506304f4675630"}, - {file = "matplotlib-3.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:deaed9ad4da0b1aea77fe0aa0cebb9ef611c70b3177be936a95e5d01fa05094f"}, - {file = "matplotlib-3.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:172f4d0fbac3383d39164c6caafd3255ce6fa58f08fc392513a0b1d3b89c4f89"}, - {file = "matplotlib-3.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7d36c2209d9136cd8e02fab1c0ddc185ce79bc914c45054a9f514e44c787917"}, - {file = "matplotlib-3.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5864bdd7da445e4e5e011b199bb67168cdad10b501750367c496420f2ad00843"}, - {file = "matplotlib-3.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ef8345b48e95cee45ff25192ed1f4857273117917a4dcd48e3905619bcd9c9b8"}, - {file = "matplotlib-3.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:7c48d9e221b637c017232e3760ed30b4e8d5dfd081daf327e829bf2a72c731b4"}, - {file = "matplotlib-3.8.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:aa11b3c6928a1e496c1a79917d51d4cd5d04f8a2e75f21df4949eeefdf697f4b"}, - {file = "matplotlib-3.8.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1095fecf99eeb7384dabad4bf44b965f929a5f6079654b681193edf7169ec20"}, - {file = "matplotlib-3.8.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:bddfb1db89bfaa855912261c805bd0e10218923cc262b9159a49c29a7a1c1afa"}, - {file = "matplotlib-3.8.2.tar.gz", hash = "sha256:01a978b871b881ee76017152f1f1a0cbf6bd5f7b8ff8c96df0df1bd57d8755a1"}, -] - -[package.dependencies] -contourpy = ">=1.0.1" -cycler = ">=0.10" -fonttools = ">=4.22.0" -kiwisolver = ">=1.3.1" -numpy = ">=1.21,<2" -packaging = ">=20.0" -pillow = ">=8" -pyparsing = ">=2.3.1" -python-dateutil = ">=2.7" - [[package]] name = "matplotlib-inline" version = "0.1.6" @@ -2370,6 +2151,17 @@ files = [ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, ] +[[package]] +name = "multitasking" +version = "0.0.11" +description = "Non-blocking Python methods using decorators" +optional = false +python-versions = "*" +files = [ + {file = "multitasking-0.0.11-py3-none-any.whl", hash = "sha256:1e5b37a5f8fc1e6cfaafd1a82b6b1cc6d2ed20037d3b89c25a84f499bd7b3dd4"}, + {file = "multitasking-0.0.11.tar.gz", hash = "sha256:4d6bc3cc65f9b2dca72fb5a787850a88dae8f620c2b36ae9b55248e51bcd6026"}, +] + [[package]] name = "mypy-extensions" version = "1.0.0" @@ -2734,13 +2526,13 @@ files = [ [[package]] name = "openai" -version = "1.7.2" +version = "1.26.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.7.2-py3-none-any.whl", hash = "sha256:8f41b90a762f5fd9d182b45851041386fed94c8ad240a70abefee61a68e0ef53"}, - {file = "openai-1.7.2.tar.gz", hash = "sha256:c73c78878258b07f1b468b0602c6591f25a1478f49ecb90b9bd44b7cc80bce73"}, + {file = "openai-1.26.0-py3-none-any.whl", hash = 
"sha256:884ced523fb0225780f8b0e0ed6f7e014049c32d049a41ad0ac962869f1055d1"}, + {file = "openai-1.26.0.tar.gz", hash = "sha256:642e857b60855702ee6ff665e8fa80946164f77b92e58fd24e01b545685b8405"}, ] [package.dependencies] @@ -2757,101 +2549,97 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "openbb" -version = "4.1.0" +version = "4.1.7" description = "OpenBB" optional = false -python-versions = ">=3.8,<3.12" -files = [ - {file = "openbb-4.1.0-py3-none-any.whl", hash = "sha256:e4e7d7f64d1d3e2a1aa9b7ac9b457fe142a9ce7f858d044c2c32dc3ccaa9ee87"}, - {file = "openbb-4.1.0.tar.gz", hash = "sha256:3f385708a34f536abe619148fe9be0e49fe3cbbc7885bb71fcd9f82938cb0892"}, -] - -[package.dependencies] -openbb-benzinga = ">=1.1.0,<2.0.0" -openbb-core = ">=1.1.0,<2.0.0" -openbb-crypto = ">=1.1.0,<2.0.0" -openbb-currency = ">=1.1.0,<2.0.0" -openbb-derivatives = ">=1.1.0,<2.0.0" -openbb-economy = ">=1.1.0,<2.0.0" -openbb-equity = ">=1.1.0,<2.0.0" -openbb-etf = ">=1.1.0,<2.0.0" -openbb-federal-reserve = ">=1.1.0,<2.0.0" -openbb-fixedincome = ">=1.1.0,<2.0.0" -openbb-fmp = ">=1.1.0,<2.0.0" -openbb-fred = ">=1.1.0,<2.0.0" -openbb-index = ">=1.1.0,<2.0.0" -openbb-intrinio = ">=1.1.0,<2.0.0" -openbb-news = ">=1.1.0,<2.0.0" -openbb-oecd = ">=1.1.0,<2.0.0" -openbb-polygon = ">=1.1.0,<2.0.0" -openbb-regulators = ">=1.1.0,<2.0.0" -openbb-sec = ">=1.1.0,<2.0.0" -openbb-tiingo = ">=1.1.0,<2.0.0" -openbb-tradingeconomics = ">=1.1.0,<2.0.0" +python-versions = "<3.12,>=3.8" +files = [ + {file = "openbb-4.1.7-py3-none-any.whl", hash = "sha256:f1711b44587e9f04a875e4205f018e568256933a7e69ecb4a0cd39aac18a4408"}, + {file = "openbb-4.1.7.tar.gz", hash = "sha256:d878e589475057d6c8128b3570a768e58dd4c7c53ed0dbf6af9bca24d9696b98"}, +] + +[package.dependencies] +openbb-benzinga = ">=1.1.5,<2.0.0" +openbb-commodity = ">=1.0.4,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" +openbb-crypto = ">=1.1.5,<2.0.0" +openbb-currency = ">=1.1.5,<2.0.0" +openbb-derivatives = ">=1.1.5,<2.0.0" +openbb-economy = ">=1.1.5,<2.0.0" +openbb-equity = ">=1.1.5,<2.0.0" +openbb-etf = ">=1.1.5,<2.0.0" +openbb-federal-reserve = ">=1.1.5,<2.0.0" +openbb-fixedincome = ">=1.1.5,<2.0.0" +openbb-fmp = ">=1.1.5,<2.0.0" +openbb-fred = ">=1.1.5,<2.0.0" +openbb-index = ">=1.1.5,<2.0.0" +openbb-intrinio = ">=1.1.5,<2.0.0" +openbb-news = ">=1.1.5,<2.0.0" +openbb-oecd = ">=1.1.5,<2.0.0" +openbb-polygon = ">=1.1.5,<2.0.0" +openbb-regulators = ">=1.1.5,<2.0.0" +openbb-sec = ">=1.1.5,<2.0.0" +openbb-tiingo = ">=1.1.5,<2.0.0" +openbb-tradingeconomics = ">=1.1.5,<2.0.0" +openbb-yfinance = ">=1.1.5,<2.0.0" [package.extras] -all = ["openbb-alpha-vantage (>=1.1.0,<2.0.0)", "openbb-biztoc (>=1.1.0,<2.0.0)", "openbb-cboe (>=1.1.0,<2.0.0)", "openbb-charting (>=1.1.0,<2.0.0)", "openbb-ecb (>=1.1.0,<2.0.0)", "openbb-econometrics (>=1.1.0,<2.0.0)", "openbb-government-us (>=1.1.0,<2.0.0)", "openbb-nasdaq (>=1.1.1,<2.0.0)", "openbb-quantitative (>=1.1.0,<2.0.0)", "openbb-seeking-alpha (>=1.1.0,<2.0.0)", "openbb-stockgrid (>=1.1.0,<2.0.0)", "openbb-technical (>=1.1.0,<2.0.0)", "openbb-yfinance (>=1.1.0,<2.0.0)"] -alpha-vantage = ["openbb-alpha-vantage (>=1.1.0,<2.0.0)"] -biztoc = ["openbb-biztoc (>=1.1.0,<2.0.0)"] -cboe = ["openbb-cboe (>=1.1.0,<2.0.0)"] -charting = ["openbb-charting (>=1.1.0,<2.0.0)"] -ecb = ["openbb-ecb (>=1.1.0,<2.0.0)"] -econometrics = ["openbb-econometrics (>=1.1.0,<2.0.0)"] -finra = ["openbb-finra (>=1.1.0,<2.0.0)"] -government = ["openbb-government-us (>=1.1.0,<2.0.0)"] -nasdaq = ["openbb-nasdaq (>=1.1.1,<2.0.0)"] -quantitative 
= ["openbb-quantitative (>=1.1.0,<2.0.0)"] -seeking-alpha = ["openbb-seeking-alpha (>=1.1.0,<2.0.0)"] -stockgrid = ["openbb-stockgrid (>=1.1.0,<2.0.0)"] -technical = ["openbb-technical (>=1.1.0,<2.0.0)"] -wsj = ["openbb-wsj (>=1.1.0,<2.0.0)"] -yfinance = ["openbb-yfinance (>=1.1.0,<2.0.0)"] +all = ["openbb-alpha-vantage (>=1.1.5,<2.0.0)", "openbb-biztoc (>=1.1.5,<2.0.0)", "openbb-cboe (>=1.1.5,<2.0.0)", "openbb-charting (>=2.0.3,<3.0.0)", "openbb-ecb (>=1.1.5,<2.0.0)", "openbb-econometrics (>=1.1.5,<2.0.0)", "openbb-finra (>=1.1.5,<2.0.0)", "openbb-finviz (>=1.0.4,<2.0.0)", "openbb-government-us (>=1.1.5,<2.0.0)", "openbb-nasdaq (>=1.1.5,<2.0.0)", "openbb-quantitative (>=1.1.5,<2.0.0)", "openbb-seeking-alpha (>=1.1.5,<2.0.0)", "openbb-stockgrid (>=1.1.5,<2.0.0)", "openbb-technical (>=1.1.6,<2.0.0)", "openbb-tmx (>=1.0.2,<2.0.0)", "openbb-tradier (>=1.0.2,<2.0.0)", "openbb-wsj (>=1.1.5,<2.0.0)"] +alpha-vantage = ["openbb-alpha-vantage (>=1.1.5,<2.0.0)"] +biztoc = ["openbb-biztoc (>=1.1.5,<2.0.0)"] +cboe = ["openbb-cboe (>=1.1.5,<2.0.0)"] +charting = ["openbb-charting (>=2.0.3,<3.0.0)"] +ecb = ["openbb-ecb (>=1.1.5,<2.0.0)"] +econometrics = ["openbb-econometrics (>=1.1.5,<2.0.0)"] +finra = ["openbb-finra (>=1.1.5,<2.0.0)"] +finviz = ["openbb-finviz (>=1.0.4,<2.0.0)"] +government-us = ["openbb-government-us (>=1.1.5,<2.0.0)"] +nasdaq = ["openbb-nasdaq (>=1.1.5,<2.0.0)"] +quantitative = ["openbb-quantitative (>=1.1.5,<2.0.0)"] +seeking-alpha = ["openbb-seeking-alpha (>=1.1.5,<2.0.0)"] +stockgrid = ["openbb-stockgrid (>=1.1.5,<2.0.0)"] +technical = ["openbb-technical (>=1.1.6,<2.0.0)"] +tmx = ["openbb-tmx (>=1.0.2,<2.0.0)"] +tradier = ["openbb-tradier (>=1.0.2,<2.0.0)"] +wsj = ["openbb-wsj (>=1.1.5,<2.0.0)"] [[package]] name = "openbb-benzinga" -version = "1.1.0" +version = "1.1.5" description = "Benzinga extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_benzinga-1.1.0-py3-none-any.whl", hash = "sha256:2749cd82a2c00530789842bb86a3b2d5ff70925cac0fb33a935219fc9b05846c"}, - {file = "openbb_benzinga-1.1.0.tar.gz", hash = "sha256:385436dc533f9630ac481be65e98db8eb7285f8efa017d3f69183f02b033eae0"}, + {file = "openbb_benzinga-1.1.5-py3-none-any.whl", hash = "sha256:a0d2082d45fdb834c4cfe0c55b62d7462babed999bd0deb1c419da690aaa8787"}, + {file = "openbb_benzinga-1.1.5.tar.gz", hash = "sha256:b2ce16c51aeffa27142fc8b7b41e4a6ebe92029dc8d9d2cbe885bbf2b3bfcd9b"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] -name = "openbb-charting" -version = "1.1.0" -description = "Charting extension for OpenBB" +name = "openbb-commodity" +version = "1.0.4" +description = "Commodity extension for OpenBB" optional = false -python-versions = ">=3.8,<3.12" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_charting-1.1.0-py3-none-any.whl", hash = "sha256:52890f2f5b88a24eb7b249215deb6426af8ed81d2256bd95f4529483d1e72f73"}, - {file = "openbb_charting-1.1.0.tar.gz", hash = "sha256:84208e84f3b73cea8124dafdc172b4663ce6bbae60f9370b6a279b9651fe19cd"}, + {file = "openbb_commodity-1.0.4-py3-none-any.whl", hash = "sha256:9a238e4d1d85bf39838fb6e21b278ab4f7cef70629abd5d9a99511e794451c10"}, + {file = "openbb_commodity-1.0.4.tar.gz", hash = "sha256:aece2e34eaa66c337166b7bd4e7ce3aacf058a61e0c1b71a60cdf5650d24b4d4"}, ] [package.dependencies] -nbformat = ">=5.9.2,<6.0.0" -openbb-core = ">=1.1.0,<2.0.0" -pandas-ta = ">=0.3.14b,<0.4.0" -plotly = ">=5.17.0,<6.0.0" -pywry = ">=0.6.1,<0.7.0" -reportlab = 
">=4.0.4,<5.0.0" -scipy = ">=1.10.0,<2.0.0" -statsmodels = ">=0.14.0,<0.15.0" -svglib = ">=1.5.1,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-core" -version = "1.1.0" +version = "1.1.6" description = "OpenBB package with core functionality" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_core-1.1.0-py3-none-any.whl", hash = "sha256:5060848428eae444fbb5b7c59fbbe523ce288627406f5979f6b654e106ccf892"}, - {file = "openbb_core-1.1.0.tar.gz", hash = "sha256:6555435d757cc97165425f7d33f72776a3fb56382716aa162e5ca473479f4182"}, + {file = "openbb_core-1.1.6-py3-none-any.whl", hash = "sha256:4161d6e7654cab2185fcc39e5172385c8226e3cf6bfd5d4b1ff1076f70a6374c"}, + {file = "openbb_core-1.1.6.tar.gz", hash = "sha256:bf56a8a0dc26d72e679a2c68b1f57bf6703ed130b52348a77a2f6cfa32c9fcae"}, ] [package.dependencies] @@ -2860,7 +2648,7 @@ fastapi = ">=0.104.1,<0.105.0" html5lib = ">=1.1,<2.0" importlib-metadata = ">=6.8.0,<7.0.0" pandas = ">=1.5.3" -posthog = ">=3.0.1,<4.0.0" +posthog = ">=3.3.1,<4.0.0" pydantic = ">=2.5.1,<3.0.0" python-dotenv = ">=1.0.0,<2.0.0" python-jose = ">=3.3.0,<4.0.0" @@ -2873,274 +2661,345 @@ websockets = ">=12.0,<13.0" [[package]] name = "openbb-crypto" -version = "1.1.0" +version = "1.1.5" description = "Crypto extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_crypto-1.1.0-py3-none-any.whl", hash = "sha256:dbc90fe06000057e1690121fd248a00055dc867950c8f1b5a29da0f5655a0377"}, - {file = "openbb_crypto-1.1.0.tar.gz", hash = "sha256:bafe3a1a895bc9ead4426bd6e3a9c0d2358f5d5af671f085d67d38a408445c9f"}, + {file = "openbb_crypto-1.1.5-py3-none-any.whl", hash = "sha256:61ce16b69ebbe8d3a92b8a52ff3194567f3c4294e4613c118fa55c5b99f033cf"}, + {file = "openbb_crypto-1.1.5.tar.gz", hash = "sha256:a92dc2064eefb96d9ade3d2c9bad6db64d8eecf1ce3e68aba8e1fd5c589ddeee"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-currency" -version = "1.1.0" +version = "1.1.5" description = "Currency extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_currency-1.1.0-py3-none-any.whl", hash = "sha256:a93c28b76fa85ee65170fd4a85da53d860fe81b0405dd1468d45f2176309ad28"}, - {file = "openbb_currency-1.1.0.tar.gz", hash = "sha256:1e82646c3fe0951732933b673ae63b582f765f916031020461a9428c3524bcec"}, + {file = "openbb_currency-1.1.5-py3-none-any.whl", hash = "sha256:a481428ac6a056c0c63f31d01ff6a615b3514a8867af9c0ba8bfcf864075e76d"}, + {file = "openbb_currency-1.1.5.tar.gz", hash = "sha256:d0754dbf9a636050d4b69d1fc7c370e9eec6db9165c0baf22ad546e45f59b47f"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-derivatives" -version = "1.1.0" +version = "1.1.5" description = "Derivatives extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_derivatives-1.1.0-py3-none-any.whl", hash = "sha256:1e41375a852b240f37477ddf53e4b5c4b586cfccc4894ea3b996bb170ad2c36e"}, - {file = "openbb_derivatives-1.1.0.tar.gz", hash = "sha256:e233c06b503d5d55178ddd11cb4652c7f987b29790963a0a7a858f138dfafa40"}, + {file = "openbb_derivatives-1.1.5-py3-none-any.whl", hash = "sha256:4d85985a43967d466572f53620dd83032e391f6d93f0c0433884cf801bd23bf3"}, + {file = "openbb_derivatives-1.1.5.tar.gz", hash = 
"sha256:db1ff010aa48aedcde7f6e781aa1dcc665c8cd9a239f70eb4f01c922a2bf721b"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-economy" -version = "1.1.0" +version = "1.1.5" description = "Economy extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_economy-1.1.0-py3-none-any.whl", hash = "sha256:98bb154d0cfbf8c41e88cacfb13ecaaecdaef05bc7ed117fec51c30b26e117d5"}, - {file = "openbb_economy-1.1.0.tar.gz", hash = "sha256:b4f4fd67b15791bc992dc481294679a0c6d437cc82e349445dba6beb6239637d"}, + {file = "openbb_economy-1.1.5-py3-none-any.whl", hash = "sha256:ee54d3c30418c3b3118c85a757ea927bba582095ba6a91751ee0e77f442a40de"}, + {file = "openbb_economy-1.1.5.tar.gz", hash = "sha256:a336a419d45353e33ec34df4f9952583a3a80daeadd17ce8a070342839e9cec4"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-equity" -version = "1.1.0" +version = "1.1.5" description = "Equity extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_equity-1.1.0-py3-none-any.whl", hash = "sha256:69f530ca555c028514c264dc68d6971105c9d00e62396c421698b4183e702a2c"}, - {file = "openbb_equity-1.1.0.tar.gz", hash = "sha256:20219e30ea6c46c75b46a19dc526438923ea60b9b622db9dbfe5be1366c5e13d"}, + {file = "openbb_equity-1.1.5-py3-none-any.whl", hash = "sha256:d58109ff7860739ca20889953eca0256a7b2aa54cde3c4a81facb10d7c7d9605"}, + {file = "openbb_equity-1.1.5.tar.gz", hash = "sha256:e8e9ebf0cf660b39620b94baae43ce07d9239fb785269289fff287d4631d58a1"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-etf" -version = "1.1.0" +version = "1.1.5" description = "ETF extension for OpenBB" optional = false -python-versions = ">=3.8,<3.12" +python-versions = "<3.12,>=3.8" files = [ - {file = "openbb_etf-1.1.0-py3-none-any.whl", hash = "sha256:e40ad4d303e494e32d686878d95706d9906b5465ec7c53fde347be30e706a25b"}, - {file = "openbb_etf-1.1.0.tar.gz", hash = "sha256:6b5980dcdca1495fbd691adfb6bb88c5ea5d321f78aa7d25d8cbfa86b683dd26"}, + {file = "openbb_etf-1.1.5-py3-none-any.whl", hash = "sha256:f11eb17f5a0a4af3b72e0d8a5b42cb6c1a2594d35841d288d0c1b75c87ea99fc"}, + {file = "openbb_etf-1.1.5.tar.gz", hash = "sha256:8195a912827d9390181b97d960d20ad6da3e245b31af881ee01c1bcbc07cd661"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-federal-reserve" -version = "1.1.0" +version = "1.1.5" description = "US Federal Reserve Data Extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_federal_reserve-1.1.0-py3-none-any.whl", hash = "sha256:629d51819f7be1f52cbff026155002000946e57c5b410e1c3d94e257576a883a"}, - {file = "openbb_federal_reserve-1.1.0.tar.gz", hash = "sha256:aeac532e6e44eaa8efc2ae01608e08129d5fad348d5a8e0db4acd64a57999ce7"}, + {file = "openbb_federal_reserve-1.1.5-py3-none-any.whl", hash = "sha256:f55342063c7cdab5df0c96af23e8471b339cfd2cf14c92bd1a11a1fba2dd7254"}, + {file = "openbb_federal_reserve-1.1.5.tar.gz", hash = "sha256:79e3a5ceb38302b552be24108d35d14b0c8b7b529eedd206d4fb4b20d66ec809"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-fixedincome" -version = "1.1.0" +version = "1.1.5" description = "Fixed 
income extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_fixedincome-1.1.0-py3-none-any.whl", hash = "sha256:73a9e2cfc1f93b5da9b1c3cafab7731357e30e0901d24f8e32cc9b1308506297"}, - {file = "openbb_fixedincome-1.1.0.tar.gz", hash = "sha256:cd1d6b43ffd4f5ce88f98006194ddf7a70880a78cbe86379c0dec8bbb4a75f4f"}, + {file = "openbb_fixedincome-1.1.5-py3-none-any.whl", hash = "sha256:84f1bc861891a171c17c861e54698f2d4743eb699220f7a1eaab9882849b0096"}, + {file = "openbb_fixedincome-1.1.5.tar.gz", hash = "sha256:dcb003eb928e0a8a90b5504b4f800e586684c8dfcbf4de17bc5dd82a9291f12b"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-fmp" -version = "1.1.0" +version = "1.1.5" description = "FMP extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_fmp-1.1.0-py3-none-any.whl", hash = "sha256:89da6a44c0e89dfb40093f703dd708820f166156c311230b70a14bedf4a521aa"}, - {file = "openbb_fmp-1.1.0.tar.gz", hash = "sha256:577b93714d49c6a891126805359f1befdf0c21a539e927136b4df9347209f126"}, + {file = "openbb_fmp-1.1.5-py3-none-any.whl", hash = "sha256:a41ed60b4f5944810d4d9d44f6ea270a68e94051752a1fe9ebf13c4a694bed77"}, + {file = "openbb_fmp-1.1.5.tar.gz", hash = "sha256:f340d60b0d2709d9371d9765d8c3dfbbce731d6f24ea3f8039e72443c6802b6e"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-fred" -version = "1.1.0" +version = "1.1.5" description = "FRED extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_fred-1.1.0-py3-none-any.whl", hash = "sha256:cd806ef08395bf2123c68151bffcb54e956a2ffa50cec2efc11911be9f409b7a"}, - {file = "openbb_fred-1.1.0.tar.gz", hash = "sha256:daf1f879109e52a516147cc831a3c9dd7f8ee12c5f19883e736b114dede88b16"}, + {file = "openbb_fred-1.1.5-py3-none-any.whl", hash = "sha256:fa0bb9533719c5d2ed9f6c6d8eb4ce887fd601bb9ca1667c59c0cb78307777e8"}, + {file = "openbb_fred-1.1.5.tar.gz", hash = "sha256:541e913e0e1835beddfad88fffd3afb1a3067fe570dbdeb5f9f1fb731f4e7f95"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-index" -version = "1.1.0" +version = "1.1.5" description = "Index extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_index-1.1.0-py3-none-any.whl", hash = "sha256:91cdd5b3dea064c0db515495e71c6837e7e1838fb5c738724bc08baaf9336c77"}, - {file = "openbb_index-1.1.0.tar.gz", hash = "sha256:f05a933af3f91b3fc4d3675fa9fc930ff6752aeccd6d971505c5cc2f41962cc3"}, + {file = "openbb_index-1.1.5-py3-none-any.whl", hash = "sha256:d92baa9dd50b87d86797c455db6a1fc6ec335bb44d43f69a08d83535288d1657"}, + {file = "openbb_index-1.1.5.tar.gz", hash = "sha256:022bf419993bb9aa94ac7e7e59088ad947c983222a177132e8e6dd2673a88f8e"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-intrinio" -version = "1.1.0" +version = "1.1.5" description = "Intrinio extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_intrinio-1.1.0-py3-none-any.whl", hash = "sha256:e5dac85c6aed64a5b83087566f9b473431ab0fdb083340d3e4811eefe432ce99"}, - {file = "openbb_intrinio-1.1.0.tar.gz", hash = 
"sha256:217104d6f0afc33898c3650311d64d9f1527e43d53ad0c39ba1826436d9d74a6"}, + {file = "openbb_intrinio-1.1.5-py3-none-any.whl", hash = "sha256:536cdc3da6bf7380bd9a14c374aa675a9abdcdf2ee5801245de7cf353de94987"}, + {file = "openbb_intrinio-1.1.5.tar.gz", hash = "sha256:a8722773248179337d60555c2ac6e7d63a0e59cadf1f7bdc6f53420bef6b864a"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" requests-cache = ">=1.1.0,<2.0.0" [[package]] name = "openbb-news" -version = "1.1.0" +version = "1.1.5" description = "News extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_news-1.1.0-py3-none-any.whl", hash = "sha256:336bb25c240d13caf6b02cd72e22c322426aa9b01cf51600dbe91e5dd6097410"}, - {file = "openbb_news-1.1.0.tar.gz", hash = "sha256:a19ef4fd5eb1e42c53052dfa163767d3c8f246c76afeef71de2d29ccddd842e6"}, + {file = "openbb_news-1.1.5-py3-none-any.whl", hash = "sha256:b5fcc5003d4259af5e0c6e74b9f222b43d9ce4dd6d0274f8d0a89711de772090"}, + {file = "openbb_news-1.1.5.tar.gz", hash = "sha256:baee0789a1b338a86fa4050c4b1b5e8be58d40c9c51d8c268b57e0d46e78c844"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-oecd" -version = "1.1.0" +version = "1.1.5" description = "OECD extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_oecd-1.1.0-py3-none-any.whl", hash = "sha256:951145ad1558ae77e29844367cf1fbed7b3baefbdcc4b4e9721e18938565f93a"}, - {file = "openbb_oecd-1.1.0.tar.gz", hash = "sha256:6b0e2ee929b92398a93f88b9f4d682331522e94be364cf691cf538327fc42072"}, + {file = "openbb_oecd-1.1.5-py3-none-any.whl", hash = "sha256:47e19ed2591c15b96b501330be3f06c7a82a38160e8befeeb688f64cfd9c5e38"}, + {file = "openbb_oecd-1.1.5.tar.gz", hash = "sha256:4ccffeef6927401bb82f533390a6d9edb39eb18692969c271bae9fe15724ac11"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +defusedxml = ">=0.8.0rc2,<0.9.0" +openbb-core = ">=1.1.6,<2.0.0" urllib3 = ">1.26.16" [[package]] name = "openbb-polygon" -version = "1.1.0" +version = "1.1.5" description = "Polygon extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_polygon-1.1.0-py3-none-any.whl", hash = "sha256:4cd9fa1fa14c5f04cbf8a587d13ee791fb8c3a2765ddcb27d4d2780a0411cbb1"}, - {file = "openbb_polygon-1.1.0.tar.gz", hash = "sha256:b41b63d449f45c65a847f789ccb12ab4ae5c08914cb41488a0b1aeaa6a469382"}, + {file = "openbb_polygon-1.1.5-py3-none-any.whl", hash = "sha256:ef9f8c719d33eb1bf5285b9ca762648bca215adf86483b55db1133602dc85186"}, + {file = "openbb_polygon-1.1.5.tar.gz", hash = "sha256:7382ca4487bfb7556d3c4ea865139b14c882339c2de723423894acf9b82547f1"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-regulators" -version = "1.1.0" +version = "1.1.5" description = "Markets and Agency Regulators extension for OpenBB" optional = false -python-versions = ">=3.8,<3.12" +python-versions = "<3.12,>=3.8" files = [ - {file = "openbb_regulators-1.1.0-py3-none-any.whl", hash = "sha256:67a97ded8cc4677f87cd13b0b8d4b0da135e52e165ffd9f7b9753cbb4109691a"}, - {file = "openbb_regulators-1.1.0.tar.gz", hash = "sha256:b7546dbcfc05042a6f2da2c038baae10384eb489845a08f593c29400403f58c7"}, + {file = "openbb_regulators-1.1.5-py3-none-any.whl", hash = 
"sha256:d1defbd26143a1826182c3bd342f94141a08e2fb8bb8fa43771bacc15ab41355"}, + {file = "openbb_regulators-1.1.5.tar.gz", hash = "sha256:7ba633162feb4c1ad1a3b9f7e6ca4e734aa1ae79cda4b7ab8c75478e7e48d022"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-sec" -version = "1.1.0" +version = "1.1.5" description = "SEC extension for OpenBB" optional = false -python-versions = ">=3.8,<3.12" +python-versions = "<3.12,>=3.8" files = [ - {file = "openbb_sec-1.1.0-py3-none-any.whl", hash = "sha256:8e4cee59dc52eb398ad046eaaf2233a3bb6a0deeb2c5d399102731b2ae2695cf"}, - {file = "openbb_sec-1.1.0.tar.gz", hash = "sha256:91ccc73cca53e04a9e1925d1fe787060be71c21b868abaf46bb351f59ada2c4b"}, + {file = "openbb_sec-1.1.5-py3-none-any.whl", hash = "sha256:3318e92ad17ad7f661cf5e59ee527064c893f8100658a266ef4a64c48b613c96"}, + {file = "openbb_sec-1.1.5.tar.gz", hash = "sha256:e09eb1945a0ba968baf4b6014efe144d6df80cd11e545133ea32a054ac5699b2"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" pytest-freezegun = ">=0.4.2,<0.5.0" requests-cache = ">=1.1.0,<2.0.0" xmltodict = ">=0.13.0,<0.14.0" [[package]] name = "openbb-tiingo" -version = "1.1.0" +version = "1.1.5" description = "Tiingo extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_tiingo-1.1.0-py3-none-any.whl", hash = "sha256:95c2d8fca52a77285cf65b3ce41316329ffbc089f6e033f08d0ab4066f62adba"}, - {file = "openbb_tiingo-1.1.0.tar.gz", hash = "sha256:9561ee27ad228f25149ab6144da7772d4bd04d30076dc4ed479741655e6e60e8"}, + {file = "openbb_tiingo-1.1.5-py3-none-any.whl", hash = "sha256:79bbf35aec3022c2c98c1ec49320dfddfec99dbbde6083d40382e969e96c643e"}, + {file = "openbb_tiingo-1.1.5.tar.gz", hash = "sha256:ff7ef99f80adcb7a9afa0f3368e854e0fe32780088f71d2d067f40efba87e0a7"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" [[package]] name = "openbb-tradingeconomics" -version = "1.1.0" +version = "1.1.5" description = "Trading Economics extension for OpenBB" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" +files = [ + {file = "openbb_tradingeconomics-1.1.5-py3-none-any.whl", hash = "sha256:020eed3eb75f8aecb68180a8a53420aeaa435317912bc47ff5aa3e553b00c632"}, + {file = "openbb_tradingeconomics-1.1.5.tar.gz", hash = "sha256:2c3a14cff1f69aa602a625cab18321a5db667c75cc96af0d57f81fc0ba4d5adc"}, +] + +[package.dependencies] +openbb-core = ">=1.1.6,<2.0.0" + +[[package]] +name = "openbb-yfinance" +version = "1.1.5" +description = "yfinance extension for OpenBB" +optional = false +python-versions = "<4.0,>=3.8" files = [ - {file = "openbb_tradingeconomics-1.1.0-py3-none-any.whl", hash = "sha256:763ecc77087b8e39ddf2ae29bcfa1b8ac665a77667cf6496fd74a2dcf6af04af"}, - {file = "openbb_tradingeconomics-1.1.0.tar.gz", hash = "sha256:bb6e40fb8f7be453ad155347961bf537c733ed3fba96db9c12563edc1676165a"}, + {file = "openbb_yfinance-1.1.5-py3-none-any.whl", hash = "sha256:75faf9734adaa66f761e9830837ee848af92a63f5fd8a891f7620566193f8f4f"}, + {file = "openbb_yfinance-1.1.5.tar.gz", hash = "sha256:430ac4a2ef1e704450b6682da6586d6b370f1aa469a696df314ef911887c78b2"}, ] [package.dependencies] -openbb-core = ">=1.1.0,<2.0.0" +openbb-core = ">=1.1.6,<2.0.0" +yfinance = ">=0.2.27,<0.3.0" + +[[package]] +name = "orjson" +version = "3.10.3" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false 
+python-versions = ">=3.8" +files = [ + {file = "orjson-3.10.3-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9fb6c3f9f5490a3eb4ddd46fc1b6eadb0d6fc16fb3f07320149c3286a1409dd8"}, + {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:252124b198662eee80428f1af8c63f7ff077c88723fe206a25df8dc57a57b1fa"}, + {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f3e87733823089a338ef9bbf363ef4de45e5c599a9bf50a7a9b82e86d0228da"}, + {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8334c0d87103bb9fbbe59b78129f1f40d1d1e8355bbed2ca71853af15fa4ed3"}, + {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1952c03439e4dce23482ac846e7961f9d4ec62086eb98ae76d97bd41d72644d7"}, + {file = "orjson-3.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c0403ed9c706dcd2809f1600ed18f4aae50be263bd7112e54b50e2c2bc3ebd6d"}, + {file = "orjson-3.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:382e52aa4270a037d41f325e7d1dfa395b7de0c367800b6f337d8157367bf3a7"}, + {file = "orjson-3.10.3-cp310-none-win32.whl", hash = "sha256:be2aab54313752c04f2cbaab4515291ef5af8c2256ce22abc007f89f42f49109"}, + {file = "orjson-3.10.3-cp310-none-win_amd64.whl", hash = "sha256:416b195f78ae461601893f482287cee1e3059ec49b4f99479aedf22a20b1098b"}, + {file = "orjson-3.10.3-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:73100d9abbbe730331f2242c1fc0bcb46a3ea3b4ae3348847e5a141265479700"}, + {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544a12eee96e3ab828dbfcb4d5a0023aa971b27143a1d35dc214c176fdfb29b3"}, + {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520de5e2ef0b4ae546bea25129d6c7c74edb43fc6cf5213f511a927f2b28148b"}, + {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccaa0a401fc02e8828a5bedfd80f8cd389d24f65e5ca3954d72c6582495b4bcf"}, + {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7bc9e8bc11bac40f905640acd41cbeaa87209e7e1f57ade386da658092dc16"}, + {file = "orjson-3.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3582b34b70543a1ed6944aca75e219e1192661a63da4d039d088a09c67543b08"}, + {file = "orjson-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c23dfa91481de880890d17aa7b91d586a4746a4c2aa9a145bebdbaf233768d5"}, + {file = "orjson-3.10.3-cp311-none-win32.whl", hash = "sha256:1770e2a0eae728b050705206d84eda8b074b65ee835e7f85c919f5705b006c9b"}, + {file = "orjson-3.10.3-cp311-none-win_amd64.whl", hash = "sha256:93433b3c1f852660eb5abdc1f4dd0ced2be031ba30900433223b28ee0140cde5"}, + {file = "orjson-3.10.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a39aa73e53bec8d410875683bfa3a8edf61e5a1c7bb4014f65f81d36467ea098"}, + {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0943a96b3fa09bee1afdfccc2cb236c9c64715afa375b2af296c73d91c23eab2"}, + {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e852baafceff8da3c9defae29414cc8513a1586ad93e45f27b89a639c68e8176"}, + {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:18566beb5acd76f3769c1d1a7ec06cdb81edc4d55d2765fb677e3eaa10fa99e0"}, + {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd2218d5a3aa43060efe649ec564ebedec8ce6ae0a43654b81376216d5ebd42"}, + {file = "orjson-3.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cf20465e74c6e17a104ecf01bf8cd3b7b252565b4ccee4548f18b012ff2f8069"}, + {file = "orjson-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ba7f67aa7f983c4345eeda16054a4677289011a478ca947cd69c0a86ea45e534"}, + {file = "orjson-3.10.3-cp312-none-win32.whl", hash = "sha256:17e0713fc159abc261eea0f4feda611d32eabc35708b74bef6ad44f6c78d5ea0"}, + {file = "orjson-3.10.3-cp312-none-win_amd64.whl", hash = "sha256:4c895383b1ec42b017dd2c75ae8a5b862fc489006afde06f14afbdd0309b2af0"}, + {file = "orjson-3.10.3-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:be2719e5041e9fb76c8c2c06b9600fe8e8584e6980061ff88dcbc2691a16d20d"}, + {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0175a5798bdc878956099f5c54b9837cb62cfbf5d0b86ba6d77e43861bcec2"}, + {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:978be58a68ade24f1af7758626806e13cff7748a677faf95fbb298359aa1e20d"}, + {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16bda83b5c61586f6f788333d3cf3ed19015e3b9019188c56983b5a299210eb5"}, + {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ad1f26bea425041e0a1adad34630c4825a9e3adec49079b1fb6ac8d36f8b754"}, + {file = "orjson-3.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9e253498bee561fe85d6325ba55ff2ff08fb5e7184cd6a4d7754133bd19c9195"}, + {file = "orjson-3.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0a62f9968bab8a676a164263e485f30a0b748255ee2f4ae49a0224be95f4532b"}, + {file = "orjson-3.10.3-cp38-none-win32.whl", hash = "sha256:8d0b84403d287d4bfa9bf7d1dc298d5c1c5d9f444f3737929a66f2fe4fb8f134"}, + {file = "orjson-3.10.3-cp38-none-win_amd64.whl", hash = "sha256:8bc7a4df90da5d535e18157220d7915780d07198b54f4de0110eca6b6c11e290"}, + {file = "orjson-3.10.3-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9059d15c30e675a58fdcd6f95465c1522b8426e092de9fff20edebfdc15e1cb0"}, + {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d40c7f7938c9c2b934b297412c067936d0b54e4b8ab916fd1a9eb8f54c02294"}, + {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4a654ec1de8fdaae1d80d55cee65893cb06494e124681ab335218be6a0691e7"}, + {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:831c6ef73f9aa53c5f40ae8f949ff7681b38eaddb6904aab89dca4d85099cb78"}, + {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99b880d7e34542db89f48d14ddecbd26f06838b12427d5a25d71baceb5ba119d"}, + {file = "orjson-3.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e5e176c994ce4bd434d7aafb9ecc893c15f347d3d2bbd8e7ce0b63071c52e25"}, + {file = "orjson-3.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b69a58a37dab856491bf2d3bbf259775fdce262b727f96aafbda359cb1d114d8"}, + {file = "orjson-3.10.3-cp39-none-win32.whl", hash = "sha256:b8d4d1a6868cde356f1402c8faeb50d62cee765a1f7ffcfd6de732ab0581e063"}, + {file = "orjson-3.10.3-cp39-none-win_amd64.whl", hash = 
"sha256:5102f50c5fc46d94f2033fe00d392588564378260d64377aec702f21a7a22912"}, + {file = "orjson-3.10.3.tar.gz", hash = "sha256:2b166507acae7ba2f7c315dcf185a9111ad5e992ac81f2d507aac39193c2c818"}, +] [[package]] name = "overrides" @@ -3228,23 +3087,6 @@ sql-other = ["SQLAlchemy (>=1.4.36)"] test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.8.0)"] -[[package]] -name = "pandas-ta" -version = "0.3.14b" -description = "An easy to use Python 3 Pandas Extension with 130+ Technical Analysis Indicators. Can be called from a Pandas DataFrame or standalone like TA-Lib. Correlation tested with TA-Lib." -optional = false -python-versions = "*" -files = [ - {file = "pandas_ta-0.3.14b.tar.gz", hash = "sha256:0fa35aec831d2815ea30b871688a8d20a76b288a7be2d26cc00c35cd8c09a993"}, -] - -[package.dependencies] -pandas = "*" - -[package.extras] -dev = ["alphaVantage-api", "matplotlib", "mplfinance", "scipy", "sklearn", "statsmodels", "stochastic", "talib", "tqdm", "vectorbt", "yfinance"] -test = ["ta-lib"] - [[package]] name = "pandocfilters" version = "1.5.0" @@ -3272,23 +3114,15 @@ qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] testing = ["docopt", "pytest (<6.0.0)"] [[package]] -name = "patsy" -version = "0.5.6" -description = "A Python package for describing statistical models and for building design matrices." +name = "peewee" +version = "3.17.3" +description = "a little orm" optional = false python-versions = "*" files = [ - {file = "patsy-0.5.6-py2.py3-none-any.whl", hash = "sha256:19056886fd8fa71863fa32f0eb090267f21fb74be00f19f5c70b2e9d76c883c6"}, - {file = "patsy-0.5.6.tar.gz", hash = "sha256:95c6d47a7222535f84bff7f63d7303f2e297747a598db89cf5c67f0c0c7d2cdb"}, + {file = "peewee-3.17.3.tar.gz", hash = "sha256:ef15f90b628e41a584be8306cdc3243c51f73ce88b06154d9572f6d0284a0169"}, ] -[package.dependencies] -numpy = ">=1.4" -six = "*" - -[package.extras] -test = ["pytest", "pytest-cov", "scipy"] - [[package]] name = "pexpect" version = "4.9.0" @@ -3403,21 +3237,6 @@ files = [ docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] -[[package]] -name = "plotly" -version = "5.18.0" -description = "An open-source, interactive data visualization library for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "plotly-5.18.0-py3-none-any.whl", hash = "sha256:23aa8ea2f4fb364a20d34ad38235524bd9d691bf5299e800bca608c31e8db8de"}, - {file = "plotly-5.18.0.tar.gz", hash = "sha256:360a31e6fbb49d12b007036eb6929521343d6bee2236f8459915821baefa2cbb"}, -] - -[package.dependencies] -packaging = "*" -tenacity = ">=6.2.0" - [[package]] name = "pluggy" version = "1.3.0" @@ -3579,18 +3398,18 @@ files = [ [[package]] name = "pydantic" -version = "2.5.3" +version = "2.7.1" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-2.5.3-py3-none-any.whl", hash = "sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4"}, - {file = "pydantic-2.5.3.tar.gz", hash = "sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a"}, + {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, + {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, ] 
[package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.14.6" +pydantic-core = "2.18.2" typing-extensions = ">=4.6.1" [package.extras] @@ -3598,121 +3417,114 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.14.6" -description = "" +version = "2.18.2" +description = "Core functionality for Pydantic validation and serialization" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.14.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9"}, - {file = "pydantic_core-2.14.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c"}, - {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66"}, - {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590"}, - {file = "pydantic_core-2.14.6-cp310-none-win32.whl", hash = "sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7"}, - {file = "pydantic_core-2.14.6-cp310-none-win_amd64.whl", hash = "sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87"}, - {file = "pydantic_core-2.14.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4"}, - {file = "pydantic_core-2.14.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937"}, - {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622"}, - {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2"}, - {file = "pydantic_core-2.14.6-cp311-none-win32.whl", hash = "sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2"}, - {file = "pydantic_core-2.14.6-cp311-none-win_amd64.whl", hash = "sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23"}, - {file = "pydantic_core-2.14.6-cp311-none-win_arm64.whl", hash = "sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6"}, - {file = "pydantic_core-2.14.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec"}, - {file = "pydantic_core-2.14.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd"}, - {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91"}, - {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c"}, - {file = "pydantic_core-2.14.6-cp312-none-win32.whl", hash = "sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786"}, - {file = "pydantic_core-2.14.6-cp312-none-win_amd64.whl", hash = "sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40"}, - {file = "pydantic_core-2.14.6-cp312-none-win_arm64.whl", hash = "sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed"}, - {file = 
"pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e"}, - {file = "pydantic_core-2.14.6-cp37-none-win32.whl", hash = "sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6"}, - {file = "pydantic_core-2.14.6-cp37-none-win_amd64.whl", hash = "sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391"}, - {file = "pydantic_core-2.14.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149"}, - {file = "pydantic_core-2.14.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d"}, - {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1"}, - {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60"}, - {file = "pydantic_core-2.14.6-cp38-none-win32.whl", hash = "sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe"}, - {file = "pydantic_core-2.14.6-cp38-none-win_amd64.whl", hash = "sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8"}, - {file = "pydantic_core-2.14.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab"}, - {file = 
"pydantic_core-2.14.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0"}, - {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9"}, - {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411"}, - {file = "pydantic_core-2.14.6-cp39-none-win32.whl", hash = "sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975"}, - {file = "pydantic_core-2.14.6-cp39-none-win_amd64.whl", hash = "sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e"}, - {file = "pydantic_core-2.14.6.tar.gz", hash = "sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, + {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, + {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, + {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, + {file = 
"pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, + {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, + {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, + {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, + {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, + {file = 
"pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, + {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, + {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, + {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, + {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, + {file = 
"pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, + {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, ] [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pydantic-settings" +version = "2.2.1" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_settings-2.2.1-py3-none-any.whl", hash = "sha256:0235391d26db4d2190cb9b31051c4b46882d28a51533f97440867f012d4da091"}, + {file = "pydantic_settings-2.2.1.tar.gz", hash = "sha256:00b9f6a5e95553590434c0fa01ead0b216c3e10bc54ae02e37f359948643c5ed"}, +] + +[package.dependencies] +pydantic = ">=2.3.0" +python-dotenv = ">=0.21.0" + +[package.extras] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + [[package]] name = "pygments" version = "2.17.2" @@ -3728,20 +3540,6 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] -[[package]] -name = "pyparsing" -version = "3.1.1" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -optional = false -python-versions = ">=3.6.8" -files = [ - {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, - {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, -] - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - [[package]] name = "pytest" version = "7.4.4" @@ -3900,25 +3698,6 @@ files = [ {file = "pywinpty-2.0.12.tar.gz", hash = "sha256:8197de460ae8ebb7f5d1701dfa1b5df45b157bb832e92acba316305e18ca00dd"}, ] -[[package]] -name = "pywry" -version = "0.6.2" -description = "" -optional = false -python-versions = ">=3.8" 
-files = [ - {file = "pywry-0.6.2-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:45d6bb827bf76b2532a9d70b539209d70f37dfb13e9862549b7bff8500ad2495"}, - {file = "pywry-0.6.2-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:1d9ffd826a3a08c132843340e6d896efb7b972b301d045e3239a7dc08d9cac2f"}, - {file = "pywry-0.6.2-py3-none-win_amd64.whl", hash = "sha256:4f0e5b502555ee8b8e799baeaebe63243a84b7ce51df01a1c439dbc4e8227b9e"}, - {file = "pywry-0.6.2.tar.gz", hash = "sha256:9bd88c36ab0860728d9e64360010f8abcede43645656030e4a63e69e81a98c95"}, -] - -[package.dependencies] -setproctitle = "*" - -[package.extras] -dev = ["auditwheel", "wheel"] - [[package]] name = "pyyaml" version = "6.0.1" @@ -4200,26 +3979,6 @@ files = [ {file = "regex-2023.12.25.tar.gz", hash = "sha256:29171aa128da69afdf4bde412d5bedc335f2ca8fcfe4489038577d05f16181e5"}, ] -[[package]] -name = "reportlab" -version = "4.0.9" -description = "The Reportlab Toolkit" -optional = false -python-versions = ">=3.7,<4" -files = [ - {file = "reportlab-4.0.9-py3-none-any.whl", hash = "sha256:c9656216321897486e323be138f7aea67851cedc116b8cc35f8ec7f8cc763538"}, - {file = "reportlab-4.0.9.tar.gz", hash = "sha256:f32bff66a0fda234202e1e33eaf77f25008871a61cb01cd91584a521a04c0047"}, -] - -[package.dependencies] -chardet = "*" -pillow = ">=9.0.0" - -[package.extras] -accel = ["rl-accel (>=0.9.0,<1.1)"] -pycairo = ["freetype-py (>=2.3.0,<2.4)", "rlPyCairo (>=0.2.0,<1)"] -renderpm = ["rl-renderPM (>=4.0.3,<4.1)"] - [[package]] name = "requests" version = "2.31.0" @@ -4420,28 +4179,28 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.1.13" +version = "0.1.15" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.1.13-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e3fd36e0d48aeac672aa850045e784673449ce619afc12823ea7868fcc41d8ba"}, - {file = "ruff-0.1.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9fb6b3b86450d4ec6a6732f9f60c4406061b6851c4b29f944f8c9d91c3611c7a"}, - {file = "ruff-0.1.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b13ba5d7156daaf3fd08b6b993360a96060500aca7e307d95ecbc5bb47a69296"}, - {file = "ruff-0.1.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9ebb40442f7b531e136d334ef0851412410061e65d61ca8ce90d894a094feb22"}, - {file = "ruff-0.1.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226b517f42d59a543d6383cfe03cccf0091e3e0ed1b856c6824be03d2a75d3b6"}, - {file = "ruff-0.1.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5f0312ba1061e9b8c724e9a702d3c8621e3c6e6c2c9bd862550ab2951ac75c16"}, - {file = "ruff-0.1.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2f59bcf5217c661254bd6bc42d65a6fd1a8b80c48763cb5c2293295babd945dd"}, - {file = "ruff-0.1.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6894b00495e00c27b6ba61af1fc666f17de6140345e5ef27dd6e08fb987259d"}, - {file = "ruff-0.1.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a1600942485c6e66119da294c6294856b5c86fd6df591ce293e4a4cc8e72989"}, - {file = "ruff-0.1.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ee3febce7863e231a467f90e681d3d89210b900d49ce88723ce052c8761be8c7"}, - {file = "ruff-0.1.13-py3-none-musllinux_1_2_armv7l.whl", hash = 
"sha256:dcaab50e278ff497ee4d1fe69b29ca0a9a47cd954bb17963628fa417933c6eb1"}, - {file = "ruff-0.1.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f57de973de4edef3ad3044d6a50c02ad9fc2dff0d88587f25f1a48e3f72edf5e"}, - {file = "ruff-0.1.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7a36fa90eb12208272a858475ec43ac811ac37e91ef868759770b71bdabe27b6"}, - {file = "ruff-0.1.13-py3-none-win32.whl", hash = "sha256:a623349a505ff768dad6bd57087e2461be8db58305ebd5577bd0e98631f9ae69"}, - {file = "ruff-0.1.13-py3-none-win_amd64.whl", hash = "sha256:f988746e3c3982bea7f824c8fa318ce7f538c4dfefec99cd09c8770bd33e6539"}, - {file = "ruff-0.1.13-py3-none-win_arm64.whl", hash = "sha256:6bbbc3042075871ec17f28864808540a26f0f79a4478c357d3e3d2284e832998"}, - {file = "ruff-0.1.13.tar.gz", hash = "sha256:e261f1baed6291f434ffb1d5c6bd8051d1c2a26958072d38dfbec39b3dda7352"}, + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, + {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, + {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, + {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, + {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, ] [[package]] @@ -4744,106 +4503,6 @@ files = [ {file = "sentencepiece-0.1.99.tar.gz", hash = 
"sha256:189c48f5cb2949288f97ccdb97f0473098d9c3dcf5a3d99d4eabe719ec27297f"}, ] -[[package]] -name = "setproctitle" -version = "1.3.3" -description = "A Python module to customize the process title" -optional = false -python-versions = ">=3.7" -files = [ - {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:897a73208da48db41e687225f355ce993167079eda1260ba5e13c4e53be7f754"}, - {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c331e91a14ba4076f88c29c777ad6b58639530ed5b24b5564b5ed2fd7a95452"}, - {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbbd6c7de0771c84b4aa30e70b409565eb1fc13627a723ca6be774ed6b9d9fa3"}, - {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c05ac48ef16ee013b8a326c63e4610e2430dbec037ec5c5b58fcced550382b74"}, - {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1342f4fdb37f89d3e3c1c0a59d6ddbedbde838fff5c51178a7982993d238fe4f"}, - {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc74e84fdfa96821580fb5e9c0b0777c1c4779434ce16d3d62a9c4d8c710df39"}, - {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9617b676b95adb412bb69645d5b077d664b6882bb0d37bfdafbbb1b999568d85"}, - {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6a249415f5bb88b5e9e8c4db47f609e0bf0e20a75e8d744ea787f3092ba1f2d0"}, - {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:38da436a0aaace9add67b999eb6abe4b84397edf4a78ec28f264e5b4c9d53cd5"}, - {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:da0d57edd4c95bf221b2ebbaa061e65b1788f1544977288bdf95831b6e44e44d"}, - {file = "setproctitle-1.3.3-cp310-cp310-win32.whl", hash = "sha256:a1fcac43918b836ace25f69b1dca8c9395253ad8152b625064415b1d2f9be4fb"}, - {file = "setproctitle-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:200620c3b15388d7f3f97e0ae26599c0c378fdf07ae9ac5a13616e933cbd2086"}, - {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:334f7ed39895d692f753a443102dd5fed180c571eb6a48b2a5b7f5b3564908c8"}, - {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:950f6476d56ff7817a8fed4ab207727fc5260af83481b2a4b125f32844df513a"}, - {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:195c961f54a09eb2acabbfc90c413955cf16c6e2f8caa2adbf2237d1019c7dd8"}, - {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f05e66746bf9fe6a3397ec246fe481096664a9c97eb3fea6004735a4daf867fd"}, - {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b5901a31012a40ec913265b64e48c2a4059278d9f4e6be628441482dd13fb8b5"}, - {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64286f8a995f2cd934082b398fc63fca7d5ffe31f0e27e75b3ca6b4efda4e353"}, - {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:184239903bbc6b813b1a8fc86394dc6ca7d20e2ebe6f69f716bec301e4b0199d"}, - {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:664698ae0013f986118064b6676d7dcd28fefd0d7d5a5ae9497cbc10cba48fa5"}, - {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e5119a211c2e98ff18b9908ba62a3bd0e3fabb02a29277a7232a6fb4b2560aa0"}, - {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:417de6b2e214e837827067048f61841f5d7fc27926f2e43954567094051aff18"}, - {file = "setproctitle-1.3.3-cp311-cp311-win32.whl", hash = "sha256:6a143b31d758296dc2f440175f6c8e0b5301ced3b0f477b84ca43cdcf7f2f476"}, - {file = "setproctitle-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a680d62c399fa4b44899094027ec9a1bdaf6f31c650e44183b50d4c4d0ccc085"}, - {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d4460795a8a7a391e3567b902ec5bdf6c60a47d791c3b1d27080fc203d11c9dc"}, - {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bdfd7254745bb737ca1384dee57e6523651892f0ea2a7344490e9caefcc35e64"}, - {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:477d3da48e216d7fc04bddab67b0dcde633e19f484a146fd2a34bb0e9dbb4a1e"}, - {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ab2900d111e93aff5df9fddc64cf51ca4ef2c9f98702ce26524f1acc5a786ae7"}, - {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:088b9efc62d5aa5d6edf6cba1cf0c81f4488b5ce1c0342a8b67ae39d64001120"}, - {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6d50252377db62d6a0bb82cc898089916457f2db2041e1d03ce7fadd4a07381"}, - {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:87e668f9561fd3a457ba189edfc9e37709261287b52293c115ae3487a24b92f6"}, - {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:287490eb90e7a0ddd22e74c89a92cc922389daa95babc833c08cf80c84c4df0a"}, - {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:4fe1c49486109f72d502f8be569972e27f385fe632bd8895f4730df3c87d5ac8"}, - {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4a6ba2494a6449b1f477bd3e67935c2b7b0274f2f6dcd0f7c6aceae10c6c6ba3"}, - {file = "setproctitle-1.3.3-cp312-cp312-win32.whl", hash = "sha256:2df2b67e4b1d7498632e18c56722851ba4db5d6a0c91aaf0fd395111e51cdcf4"}, - {file = "setproctitle-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:f38d48abc121263f3b62943f84cbaede05749047e428409c2c199664feb6abc7"}, - {file = "setproctitle-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:816330675e3504ae4d9a2185c46b573105d2310c20b19ea2b4596a9460a4f674"}, - {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f960bc22d8d8e4ac886d1e2e21ccbd283adcf3c43136161c1ba0fa509088e0"}, - {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e6e7adff74796ef12753ff399491b8827f84f6c77659d71bd0b35870a17d8f"}, - {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53bc0d2358507596c22b02db079618451f3bd720755d88e3cccd840bafb4c41c"}, - {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6d20f9541f5f6ac63df553b6d7a04f313947f550eab6a61aa758b45f0d5657"}, - {file = 
"setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c1c84beab776b0becaa368254801e57692ed749d935469ac10e2b9b825dbdd8e"}, - {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:507e8dc2891021350eaea40a44ddd887c9f006e6b599af8d64a505c0f718f170"}, - {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b1067647ac7aba0b44b591936118a22847bda3c507b0a42d74272256a7a798e9"}, - {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2e71f6365744bf53714e8bd2522b3c9c1d83f52ffa6324bd7cbb4da707312cd8"}, - {file = "setproctitle-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:7f1d36a1e15a46e8ede4e953abb104fdbc0845a266ec0e99cc0492a4364f8c44"}, - {file = "setproctitle-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9a402881ec269d0cc9c354b149fc29f9ec1a1939a777f1c858cdb09c7a261df"}, - {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ff814dea1e5c492a4980e3e7d094286077054e7ea116cbeda138819db194b2cd"}, - {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:accb66d7b3ccb00d5cd11d8c6e07055a4568a24c95cf86109894dcc0c134cc89"}, - {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554eae5a5b28f02705b83a230e9d163d645c9a08914c0ad921df363a07cf39b1"}, - {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a911b26264dbe9e8066c7531c0591cfab27b464459c74385b276fe487ca91c12"}, - {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2982efe7640c4835f7355fdb4da313ad37fb3b40f5c69069912f8048f77b28c8"}, - {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df3f4274b80709d8bcab2f9a862973d453b308b97a0b423a501bcd93582852e3"}, - {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:af2c67ae4c795d1674a8d3ac1988676fa306bcfa1e23fddb5e0bd5f5635309ca"}, - {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:af4061f67fd7ec01624c5e3c21f6b7af2ef0e6bab7fbb43f209e6506c9ce0092"}, - {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:37a62cbe16d4c6294e84670b59cf7adcc73faafe6af07f8cb9adaf1f0e775b19"}, - {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a83ca086fbb017f0d87f240a8f9bbcf0809f3b754ee01cec928fff926542c450"}, - {file = "setproctitle-1.3.3-cp38-cp38-win32.whl", hash = "sha256:059f4ce86f8cc92e5860abfc43a1dceb21137b26a02373618d88f6b4b86ba9b2"}, - {file = "setproctitle-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:ab92e51cd4a218208efee4c6d37db7368fdf182f6e7ff148fb295ecddf264287"}, - {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c7951820b77abe03d88b114b998867c0f99da03859e5ab2623d94690848d3e45"}, - {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5bc94cf128676e8fac6503b37763adb378e2b6be1249d207630f83fc325d9b11"}, - {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f5d9027eeda64d353cf21a3ceb74bb1760bd534526c9214e19f052424b37e42"}, - {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e4a8104db15d3462e29d9946f26bed817a5b1d7a47eabca2d9dc2b995991503"}, - {file = 
"setproctitle-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c32c41ace41f344d317399efff4cffb133e709cec2ef09c99e7a13e9f3b9483c"}, - {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbf16381c7bf7f963b58fb4daaa65684e10966ee14d26f5cc90f07049bfd8c1e"}, - {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e18b7bd0898398cc97ce2dfc83bb192a13a087ef6b2d5a8a36460311cb09e775"}, - {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69d565d20efe527bd8a9b92e7f299ae5e73b6c0470f3719bd66f3cd821e0d5bd"}, - {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ddedd300cd690a3b06e7eac90ed4452348b1348635777ce23d460d913b5b63c3"}, - {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:415bfcfd01d1fbf5cbd75004599ef167a533395955305f42220a585f64036081"}, - {file = "setproctitle-1.3.3-cp39-cp39-win32.whl", hash = "sha256:21112fcd2195d48f25760f0eafa7a76510871bbb3b750219310cf88b04456ae3"}, - {file = "setproctitle-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:5a740f05d0968a5a17da3d676ce6afefebeeeb5ce137510901bf6306ba8ee002"}, - {file = "setproctitle-1.3.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6b9e62ddb3db4b5205c0321dd69a406d8af9ee1693529d144e86bd43bcb4b6c0"}, - {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e3b99b338598de0bd6b2643bf8c343cf5ff70db3627af3ca427a5e1a1a90dd9"}, - {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ae9a02766dad331deb06855fb7a6ca15daea333b3967e214de12cfae8f0ef5"}, - {file = "setproctitle-1.3.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:200ede6fd11233085ba9b764eb055a2a191fb4ffb950c68675ac53c874c22e20"}, - {file = "setproctitle-1.3.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0d3a953c50776751e80fe755a380a64cb14d61e8762bd43041ab3f8cc436092f"}, - {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5e08e232b78ba3ac6bc0d23ce9e2bee8fad2be391b7e2da834fc9a45129eb87"}, - {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1da82c3e11284da4fcbf54957dafbf0655d2389cd3d54e4eaba636faf6d117a"}, - {file = "setproctitle-1.3.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:aeaa71fb9568ebe9b911ddb490c644fbd2006e8c940f21cb9a1e9425bd709574"}, - {file = "setproctitle-1.3.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:59335d000c6250c35989394661eb6287187854e94ac79ea22315469ee4f4c244"}, - {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3ba57029c9c50ecaf0c92bb127224cc2ea9fda057b5d99d3f348c9ec2855ad3"}, - {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d876d355c53d975c2ef9c4f2487c8f83dad6aeaaee1b6571453cb0ee992f55f6"}, - {file = "setproctitle-1.3.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:224602f0939e6fb9d5dd881be1229d485f3257b540f8a900d4271a2c2aa4e5f4"}, - {file = "setproctitle-1.3.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:d7f27e0268af2d7503386e0e6be87fb9b6657afd96f5726b733837121146750d"}, - {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5e7266498cd31a4572378c61920af9f6b4676a73c299fce8ba93afd694f8ae7"}, - {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33c5609ad51cd99d388e55651b19148ea99727516132fb44680e1f28dd0d1de9"}, - {file = "setproctitle-1.3.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:eae8988e78192fd1a3245a6f4f382390b61bce6cfcc93f3809726e4c885fa68d"}, - {file = "setproctitle-1.3.3.tar.gz", hash = "sha256:c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae"}, -] - -[package.extras] -test = ["pytest"] - [[package]] name = "setuptools" version = "69.0.3" @@ -4895,60 +4554,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.25" +version = "2.0.30" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4344d059265cc8b1b1be351bfb88749294b87a8b2bbe21dfbe066c4199541ebd"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f9e2e59cbcc6ba1488404aad43de005d05ca56e069477b33ff74e91b6319735"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84daa0a2055df9ca0f148a64fdde12ac635e30edbca80e87df9b3aaf419e144a"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc8b7dabe8e67c4832891a5d322cec6d44ef02f432b4588390017f5cec186a84"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f5693145220517b5f42393e07a6898acdfe820e136c98663b971906120549da5"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db854730a25db7c956423bb9fb4bdd1216c839a689bf9cc15fada0a7fb2f4570"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-win32.whl", hash = "sha256:14a6f68e8fc96e5e8f5647ef6cda6250c780612a573d99e4d881581432ef1669"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-win_amd64.whl", hash = "sha256:87f6e732bccd7dcf1741c00f1ecf33797383128bd1c90144ac8adc02cbb98643"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:342d365988ba88ada8af320d43df4e0b13a694dbd75951f537b2d5e4cb5cd002"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f37c0caf14b9e9b9e8f6dbc81bc56db06acb4363eba5a633167781a48ef036ed"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa9373708763ef46782d10e950b49d0235bfe58facebd76917d3f5cbf5971aed"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d24f571990c05f6b36a396218f251f3e0dda916e0c687ef6fdca5072743208f5"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75432b5b14dc2fff43c50435e248b45c7cdadef73388e5610852b95280ffd0e9"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:884272dcd3ad97f47702965a0e902b540541890f468d24bd1d98bcfe41c3f018"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-win32.whl", hash = "sha256:e607cdd99cbf9bb80391f54446b86e16eea6ad309361942bf88318bcd452363c"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-win_amd64.whl", hash = "sha256:7d505815ac340568fd03f719446a589162d55c52f08abd77ba8964fbb7eb5b5f"}, - {file = 
"SQLAlchemy-2.0.25-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0dacf67aee53b16f365c589ce72e766efaabd2b145f9de7c917777b575e3659d"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b801154027107461ee992ff4b5c09aa7cc6ec91ddfe50d02bca344918c3265c6"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59a21853f5daeb50412d459cfb13cb82c089ad4c04ec208cd14dddd99fc23b39"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29049e2c299b5ace92cbed0c1610a7a236f3baf4c6b66eb9547c01179f638ec5"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b64b183d610b424a160b0d4d880995e935208fc043d0302dd29fee32d1ee3f95"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4f7a7d7fcc675d3d85fbf3b3828ecd5990b8d61bd6de3f1b260080b3beccf215"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-win32.whl", hash = "sha256:cf18ff7fc9941b8fc23437cc3e68ed4ebeff3599eec6ef5eebf305f3d2e9a7c2"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-win_amd64.whl", hash = "sha256:91f7d9d1c4dd1f4f6e092874c128c11165eafcf7c963128f79e28f8445de82d5"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bb209a73b8307f8fe4fe46f6ad5979649be01607f11af1eb94aa9e8a3aaf77f0"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:798f717ae7c806d67145f6ae94dc7c342d3222d3b9a311a784f371a4333212c7"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd402169aa00df3142149940b3bf9ce7dde075928c1886d9a1df63d4b8de62"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0d3cab3076af2e4aa5693f89622bef7fa770c6fec967143e4da7508b3dceb9b9"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:74b080c897563f81062b74e44f5a72fa44c2b373741a9ade701d5f789a10ba23"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-win32.whl", hash = "sha256:87d91043ea0dc65ee583026cb18e1b458d8ec5fc0a93637126b5fc0bc3ea68c4"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-win_amd64.whl", hash = "sha256:75f99202324383d613ddd1f7455ac908dca9c2dd729ec8584c9541dd41822a2c"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:420362338681eec03f53467804541a854617faed7272fe71a1bfdb07336a381e"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c88f0c7dcc5f99bdb34b4fd9b69b93c89f893f454f40219fe923a3a2fd11625"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3be4987e3ee9d9a380b66393b77a4cd6d742480c951a1c56a23c335caca4ce3"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a159111a0f58fb034c93eeba211b4141137ec4b0a6e75789ab7a3ef3c7e7e3"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8b8cb63d3ea63b29074dcd29da4dc6a97ad1349151f2d2949495418fd6e48db9"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:736ea78cd06de6c21ecba7416499e7236a22374561493b456a1f7ffbe3f6cdb4"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-win32.whl", hash = "sha256:10331f129982a19df4284ceac6fe87353ca3ca6b4ca77ff7d697209ae0a5915e"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-win_amd64.whl", hash = "sha256:c55731c116806836a5d678a70c84cb13f2cedba920212ba7dcad53260997666d"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-macosx_10_9_x86_64.whl", 
hash = "sha256:605b6b059f4b57b277f75ace81cc5bc6335efcbcc4ccb9066695e515dbdb3900"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:665f0a3954635b5b777a55111ababf44b4fc12b1f3ba0a435b602b6387ffd7cf"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecf6d4cda1f9f6cb0b45803a01ea7f034e2f1aed9475e883410812d9f9e3cfcf"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c51db269513917394faec5e5c00d6f83829742ba62e2ac4fa5c98d58be91662f"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:790f533fa5c8901a62b6fef5811d48980adeb2f51f1290ade8b5e7ba990ba3de"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1b1180cda6df7af84fe72e4530f192231b1f29a7496951db4ff38dac1687202d"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-win32.whl", hash = "sha256:555651adbb503ac7f4cb35834c5e4ae0819aab2cd24857a123370764dc7d7e24"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-win_amd64.whl", hash = "sha256:dc55990143cbd853a5d038c05e79284baedf3e299661389654551bd02a6a68d7"}, - {file = "SQLAlchemy-2.0.25-py3-none-any.whl", hash = "sha256:a86b4240e67d4753dc3092d9511886795b3c2852abe599cffe108952f7af7ac3"}, - {file = "SQLAlchemy-2.0.25.tar.gz", hash = "sha256:a2c69a7664fb2d54b8682dd774c3b54f67f84fa123cf84dda2a5f40dcaa04e08"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3b48154678e76445c7ded1896715ce05319f74b1e73cf82d4f8b59b46e9c0ddc"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2753743c2afd061bb95a61a51bbb6a1a11ac1c44292fad898f10c9839a7f75b2"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7bfc726d167f425d4c16269a9a10fe8630ff6d14b683d588044dcef2d0f6be7"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f61ada6979223013d9ab83a3ed003ded6959eae37d0d685db2c147e9143797"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a365eda439b7a00732638f11072907c1bc8e351c7665e7e5da91b169af794af"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bba002a9447b291548e8d66fd8c96a6a7ed4f2def0bb155f4f0a1309fd2735d5"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-win32.whl", hash = "sha256:0138c5c16be3600923fa2169532205d18891b28afa817cb49b50e08f62198bb8"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-win_amd64.whl", hash = "sha256:99650e9f4cf3ad0d409fed3eec4f071fadd032e9a5edc7270cd646a26446feeb"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:955991a09f0992c68a499791a753523f50f71a6885531568404fa0f231832aa0"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f69e4c756ee2686767eb80f94c0125c8b0a0b87ede03eacc5c8ae3b54b99dc46"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69c9db1ce00e59e8dd09d7bae852a9add716efdc070a3e2068377e6ff0d6fdaa"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1429a4b0f709f19ff3b0cf13675b2b9bfa8a7e79990003207a011c0db880a13"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:efedba7e13aa9a6c8407c48facfdfa108a5a4128e35f4c68f20c3407e4376aa9"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:16863e2b132b761891d6c49f0a0f70030e0bcac4fd208117f6b7e053e68668d0"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-win32.whl", hash = "sha256:2ecabd9ccaa6e914e3dbb2aa46b76dede7eadc8cbf1b8083c94d936bcd5ffb49"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-win_amd64.whl", hash = "sha256:0b3f4c438e37d22b83e640f825ef0f37b95db9aa2d68203f2c9549375d0b2260"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5a79d65395ac5e6b0c2890935bad892eabb911c4aa8e8015067ddb37eea3d56c"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9a5baf9267b752390252889f0c802ea13b52dfee5e369527da229189b8bd592e"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cb5a646930c5123f8461f6468901573f334c2c63c795b9af350063a736d0134"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:296230899df0b77dec4eb799bcea6fbe39a43707ce7bb166519c97b583cfcab3"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c62d401223f468eb4da32627bffc0c78ed516b03bb8a34a58be54d618b74d472"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3b69e934f0f2b677ec111b4d83f92dc1a3210a779f69bf905273192cf4ed433e"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-win32.whl", hash = "sha256:77d2edb1f54aff37e3318f611637171e8ec71472f1fdc7348b41dcb226f93d90"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-win_amd64.whl", hash = "sha256:b6c7ec2b1f4969fc19b65b7059ed00497e25f54069407a8701091beb69e591a5"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5a8e3b0a7e09e94be7510d1661339d6b52daf202ed2f5b1f9f48ea34ee6f2d57"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b60203c63e8f984df92035610c5fb76d941254cf5d19751faab7d33b21e5ddc0"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1dc3eabd8c0232ee8387fbe03e0a62220a6f089e278b1f0aaf5e2d6210741ad"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:40ad017c672c00b9b663fcfcd5f0864a0a97828e2ee7ab0c140dc84058d194cf"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e42203d8d20dc704604862977b1470a122e4892791fe3ed165f041e4bf447a1b"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-win32.whl", hash = "sha256:2a4f4da89c74435f2bc61878cd08f3646b699e7d2eba97144030d1be44e27584"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-win_amd64.whl", hash = "sha256:b6bf767d14b77f6a18b6982cbbf29d71bede087edae495d11ab358280f304d8e"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc0c53579650a891f9b83fa3cecd4e00218e071d0ba00c4890f5be0c34887ed3"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:311710f9a2ee235f1403537b10c7687214bb1f2b9ebb52702c5aa4a77f0b3af7"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:408f8b0e2c04677e9c93f40eef3ab22f550fecb3011b187f66a096395ff3d9fd"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37a4b4fb0dd4d2669070fb05b8b8824afd0af57587393015baee1cf9890242d9"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a943d297126c9230719c27fcbbeab57ecd5d15b0bd6bfd26e91bfcfe64220621"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:0a089e218654e740a41388893e090d2e2c22c29028c9d1353feb38638820bbeb"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-win32.whl", hash = "sha256:fa561138a64f949f3e889eb9ab8c58e1504ab351d6cf55259dc4c248eaa19da6"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-win_amd64.whl", hash = "sha256:7d74336c65705b986d12a7e337ba27ab2b9d819993851b140efdf029248e818e"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8c62fe2480dd61c532ccafdbce9b29dacc126fe8be0d9a927ca3e699b9491a"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2383146973a15435e4717f94c7509982770e3e54974c71f76500a0136f22810b"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8409de825f2c3b62ab15788635ccaec0c881c3f12a8af2b12ae4910a0a9aeef6"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0094c5dc698a5f78d3d1539853e8ecec02516b62b8223c970c86d44e7a80f6c7"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:edc16a50f5e1b7a06a2dcc1f2205b0b961074c123ed17ebda726f376a5ab0953"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f7703c2010355dd28f53deb644a05fc30f796bd8598b43f0ba678878780b6e4c"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-win32.whl", hash = "sha256:1f9a727312ff6ad5248a4367358e2cf7e625e98b1028b1d7ab7b806b7d757513"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-win_amd64.whl", hash = "sha256:a0ef36b28534f2a5771191be6edb44cc2673c7b2edf6deac6562400288664221"}, + {file = "SQLAlchemy-2.0.30-py3-none-any.whl", hash = "sha256:7108d569d3990c71e26a42f60474b4c02c8586c4681af5fd67e51a044fdea86a"}, + {file = "SQLAlchemy-2.0.30.tar.gz", hash = "sha256:2b1708916730f4830bc69d6f49d37f7698b5bd7530aca7f04f785f8849e95255"}, ] [package.dependencies] @@ -5016,72 +4675,6 @@ anyio = ">=3.4.0,<5" [package.extras] full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] -[[package]] -name = "statsmodels" -version = "0.14.1" -description = "Statistical computations and models for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "statsmodels-0.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43af9c0b07c9d72f275cf14ea54a481a3f20911f0b443181be4769def258fdeb"}, - {file = "statsmodels-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16975ab6ad505d837ba9aee11f92a8c5b49c4fa1ff45b60fe23780b19e5705e"}, - {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e278fe74da5ed5e06c11a30851eda1af08ef5af6be8507c2c45d2e08f7550dde"}, - {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0564d92cb05b219b4538ed09e77d96658a924a691255e1f7dd23ee338df441b"}, - {file = "statsmodels-0.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5385e22e72159a09c099c4fb975f350a9f3afeb57c1efce273b89dcf1fe44c0f"}, - {file = "statsmodels-0.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:0a8aae75a2e08ebd990e5fa394f8e32738b55785cb70798449a3f4207085e667"}, - {file = "statsmodels-0.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b69a63ad6c979a6e4cde11870ffa727c76a318c225a7e509f031fbbdfb4e416a"}, - {file = "statsmodels-0.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7562cb18a90a114f39fab6f1c25b9c7b39d9cd5f433d0044b430ca9d44a8b52c"}, - {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:b3abaca4b963259a2bf349c7609cfbb0ce64ad5fb3d92d6f08e21453e4890248"}, - {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f727fe697f6406d5f677b67211abe5a55101896abdfacdb3f38410405f6ad8"}, - {file = "statsmodels-0.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6838ac6bdb286daabb5e91af90fd4258f09d0cec9aace78cc441cb2b17df428"}, - {file = "statsmodels-0.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:709bfcef2dbe66f705b17e56d1021abad02243ee1a5d1efdb90f9bad8b06a329"}, - {file = "statsmodels-0.14.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f32a7cd424cf33304a54daee39d32cccf1d0265e652c920adeaeedff6d576457"}, - {file = "statsmodels-0.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f8c30181c084173d662aaf0531867667be2ff1bee103b84feb64f149f792dbd2"}, - {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de2b97413913d52ad6342dece2d653e77f78620013b7705fad291d4e4266ccb"}, - {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3420f88289c593ba2bca33619023059c476674c160733bd7d858564787c83d3"}, - {file = "statsmodels-0.14.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c008e16096f24f0514e53907890ccac6589a16ad6c81c218f2ee6752fdada555"}, - {file = "statsmodels-0.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:bc0351d279c4e080f0ce638a3d886d312aa29eade96042e3ba0a73771b1abdfb"}, - {file = "statsmodels-0.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf293ada63b2859d95210165ad1dfcd97bd7b994a5266d6fbeb23659d8f0bf68"}, - {file = "statsmodels-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44ca8cb88fa3d3a4ffaff1fb8eb0e98bbf83fc936fcd9b9eedee258ecc76696a"}, - {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d5373d176239993c095b00d06036690a50309a4e00c2da553b65b840f956ae6"}, - {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532dfe899f8b6632cd8caa0b089b403415618f51e840d1817a1e4b97e200c73"}, - {file = "statsmodels-0.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:4fe0a60695952b82139ae8750952786a700292f9e0551d572d7685070944487b"}, - {file = "statsmodels-0.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04293890f153ffe577e60a227bd43babd5f6c1fc50ea56a3ab1862ae85247a95"}, - {file = "statsmodels-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e70a2e93d54d40b2cb6426072acbc04f35501b1ea2569f6786964adde6ca572"}, - {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab3a73d16c0569adbba181ebb967e5baaa74935f6d2efe86ac6fc5857449b07d"}, - {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eefa5bcff335440ee93e28745eab63559a20cd34eea0375c66d96b016de909b3"}, - {file = "statsmodels-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:bc43765710099ca6a942b5ffa1bac7668965052542ba793dd072d26c83453572"}, - {file = "statsmodels-0.14.1.tar.gz", hash = "sha256:2260efdc1ef89f39c670a0bd8151b1d0843567781bcafec6cda0534eb47a94f6"}, -] - -[package.dependencies] -numpy = ">=1.18,<2" -packaging = ">=21.3" -pandas = ">=1.0,<2.1.0 || >2.1.0" -patsy = ">=0.5.4" -scipy = ">=1.4,<1.9.2 || >1.9.2" - -[package.extras] -build = ["cython (>=0.29.33)"] -develop = ["colorama", "cython (>=0.29.33)", "cython (>=0.29.33,<4.0.0)", "flake8", "isort", "joblib", "matplotlib (>=3)", "oldest-supported-numpy (>=2022.4.18)", "pytest 
(>=7.3.0)", "pytest-cov", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=8.0,<9.0)"] -docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"] - -[[package]] -name = "svglib" -version = "1.5.1" -description = "A pure-Python library for reading and converting SVG" -optional = false -python-versions = ">=3.7" -files = [ - {file = "svglib-1.5.1.tar.gz", hash = "sha256:3ae765d3a9409ee60c0fb4d24c2deb6a80617aa927054f5bcd7fc98f0695e587"}, -] - -[package.dependencies] -cssselect2 = ">=0.2.0" -lxml = "*" -reportlab = "*" -tinycss2 = ">=0.6.0" - [[package]] name = "sympy" version = "1.12" @@ -5098,17 +4691,18 @@ mpmath = ">=0.19" [[package]] name = "tenacity" -version = "8.2.3" +version = "8.3.0" description = "Retry code until it succeeds" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, - {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, + {file = "tenacity-8.3.0-py3-none-any.whl", hash = "sha256:3649f6443dbc0d9b01b9d8020a9c4ec7a1ff5f6f3c6c8a036ef371f573fe9185"}, + {file = "tenacity-8.3.0.tar.gz", hash = "sha256:953d4e6ad24357bceffbc9707bc74349aca9d245f68eb65419cf0c249a1949a2"}, ] [package.extras] -doc = ["reno", "sphinx", "tornado (>=4.5)"] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "terminado" @@ -5577,20 +5171,6 @@ files = [ {file = "types_python_dateutil-2.8.19.20240106-py3-none-any.whl", hash = "sha256:efbbdc54590d0f16152fa103c9879c7d4a00e82078f6e2cf01769042165acaa2"}, ] -[[package]] -name = "types-requests" -version = "2.31.0.20240106" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-requests-2.31.0.20240106.tar.gz", hash = "sha256:0e1c731c17f33618ec58e022b614a1a2ecc25f7dc86800b36ef341380402c612"}, - {file = "types_requests-2.31.0.20240106-py3-none-any.whl", hash = "sha256:da997b3b6a72cc08d09f4dba9802fdbabc89104b35fe24ee588e674037689354"}, -] - -[package.dependencies] -urllib3 = ">=2" - [[package]] name = "typing-extensions" version = "4.9.0" @@ -5969,6 +5549,34 @@ files = [ idna = ">=2.0" multidict = ">=4.0" +[[package]] +name = "yfinance" +version = "0.2.38" +description = "Download market data from Yahoo! 
Finance API" +optional = false +python-versions = "*" +files = [ + {file = "yfinance-0.2.38-py2.py3-none-any.whl", hash = "sha256:07525cf84414272723a3e2b9d4c0a2898ddb60cc0828aa190de26664fac6f676"}, + {file = "yfinance-0.2.38.tar.gz", hash = "sha256:483eecae0743d829fc337f21d80da4612f5257d5c1f35570efc4a5e98e4401a7"}, +] + +[package.dependencies] +appdirs = ">=1.4.4" +beautifulsoup4 = ">=4.11.1" +frozendict = ">=2.3.4" +html5lib = ">=1.1" +lxml = ">=4.9.1" +multitasking = ">=0.0.7" +numpy = ">=1.16.5" +pandas = ">=1.3.0" +peewee = ">=3.16.2" +pytz = ">=2022.5" +requests = ">=2.31" + +[package.extras] +nospam = ["requests-cache (>=1.0)", "requests-ratelimiter (>=0.3.1)"] +repair = ["scipy (>=1.6.3)"] + [[package]] name = "zipp" version = "3.17.0" @@ -5987,4 +5595,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.11,<3.12" -content-hash = "2698844cfb41be3b6fa9cc6ccd58a0f3903459defc86c45b23d43bee368f9348" +content-hash = "8128cbe25967bcd6fafb8f10d8425a6a59c13d13559e9031991cb9dd6afc97bf" diff --git a/pyproject.toml b/pyproject.toml index ba81a84..47fd6ca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,24 +8,20 @@ readme = "README.md" [tool.poetry.dependencies] python = "^3.11,<3.12" jupyterlab = "^4.0.9" -langchain = "^0.1" -openbb = "4.1.0" -openai = "^1.3.5" sentence-transformers = "^2.2.2" tiktoken = "^0.5.1" faiss-cpu = "^1.7.4" -langchainhub = "^0.1.14" -python-json-logger = "^2.0.7" -fastapi = "^0.104.1" -uvicorn = "^0.24.0" -matplotlib = "^3.8.2" -openbb-charting = "^1.0.0" -langchain-openai = "^0.0.2.post1" +magentic = {extras = ["litellm"], version = "^0.23.0"} +pydantic = "2.7.1" +openbb = "4.1.7" +langchain = "^0.1.17" +langchain-community = "^0.0.37" +langchain-openai = "^0.1.6" [tool.poetry.group.dev.dependencies] pre-commit = "^3.5.0" -ruff = "^0.1.7" pytest = "^7.4.3" +ruff = ">=0.1.6" [tool.ruff.lint] select = [ @@ -38,4 +34,3 @@ select = [ [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" - diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..c5b8445 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,62 @@ +from unittest.mock import patch + +import pytest +from openbb import obb +from pydantic import BaseModel + + +@pytest.fixture +def mock_obb_user_credentials(monkeypatch): + class TestCredentials(BaseModel): + fmp_api_key: str | None + intrinio_token: str | None + benzinga_api_key: str | None + + mock_credentials = TestCredentials( + # NB: We explicitly set the benzinga key to None! 
+ fmp_api_key="some-value", + intrinio_token="another-value", + benzinga_api_key=None, + ) + monkeypatch.setattr(obb.user, "credentials", mock_credentials) + + +@pytest.fixture +def mock_obb_coverage_providers(mock_obb_user_credentials): + mock_provider_coverage_dict = { + "fmp": ["function_a", "function_b"], + "intrinio": ["function_a", "function_c"], + "benzinga": ["function_d"], + } + with patch("openbb_agents.tools.get_openbb_coverage_providers") as mock: + mock.return_value = mock_provider_coverage_dict + yield mock + + +@pytest.fixture +def mock_obb_coverage_command_schema(mock_obb_coverage_providers): + mock_coverage_command_schema_dict = { + "function_a": { + "input": "mock input model for a", + "output": "mock output model for a", + "callable": "mock callable for a", + }, + "function_b": { + "input": "mock input model for b", + "output": "mock output model for b", + "callable": "mock callable for b", + }, + "function_c": { + "input": "mock input model for c", + "output": "mock output model for c", + "callable": "mock callable for c", + }, + "function_d": { + "input": "mock input model for d", + "output": "mock output model for d", + "callable": "mock callable for d", + }, + } + with patch("openbb_agents.tools.get_openbb_coverage_command_schemas") as mock: + mock.return_value = mock_coverage_command_schema_dict + yield mock diff --git a/tests/test_tools.py b/tests/test_tools.py index 153f94b..ac0a55e 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -1,34 +1,44 @@ -from openbb import obb +from openbb_agents.tools import ( + OpenBBFunctionDescription, + get_valid_list_of_providers, + get_valid_openbb_function_descriptions, + get_valid_openbb_function_names, +) -from openbb_agents.tools import from_openbb_to_langchain_func +def test_get_valid_list_of_providers(mock_obb_user_credentials): + actual_result = get_valid_list_of_providers() + expected_result = ["fmp", "intrinio"] -def test_from_openbb_to_langchain_func(): - """Test that we can create a StructuredTool from an OpenBB function.""" - test_openbb_command_root = "/equity/profile" + assert actual_result == expected_result - test_openbb_callable = obb.equity.profile - test_openbb_schema = obb.coverage.command_model[".equity.profile"] - actual_result = from_openbb_to_langchain_func( - openbb_command_root=test_openbb_command_root, - openbb_callable=test_openbb_callable, - openbb_schema=test_openbb_schema, - ) +def test_get_valid_openbb_function_names(mock_obb_coverage_providers): + actual_result = get_valid_openbb_function_names() + expected_result = ["function_a", "function_b", "function_c"] + assert actual_result == expected_result - assert actual_result.name == "/equity/profile" - assert "Equity Info" in actual_result.description # Check for docstring - assert "name" in actual_result.description # Check for output field - assert actual_result.args_schema.__name__ == "/equity/profileInputModel" - assert actual_result.args_schema.schema() == { - "title": "/equity/profileInputModel", - "type": "object", - "properties": { - "symbol": { - "title": "Symbol", - "description": "Symbol to get data for.", - "type": "string", - } - }, - "required": ["symbol"], - } + +def test_get_valid_openbb_function_descriptions(mock_obb_coverage_command_schema): + actual_result = get_valid_openbb_function_descriptions() + expected_result = [ + OpenBBFunctionDescription( + name="function_a", + input="mock input model for a", + output="mock output model for a", + callable="mock callable for a", + ), + OpenBBFunctionDescription( + 
name="function_b", + input="mock input model for b", + output="mock output model for b", + callable="mock callable for b", + ), + OpenBBFunctionDescription( + name="function_c", + input="mock input model for c", + output="mock output model for c", + callable="mock callable for c", + ), + ] + assert actual_result == expected_result From 5010835f5048f814f91985439c3b9a3f194944a0 Mon Sep 17 00:00:00 2001 From: Michael Struwig Date: Tue, 7 May 2024 17:18:06 +0200 Subject: [PATCH 2/6] Port to magentic + use async WIP: Create vector database of tools + some tests. WIP: Remove unused code. WIP: use magentic for generating subquestions WIP: add magentic tool search chain WIP: Remove unused functions Port subquestion answer, + add testing module WIP: Port final answer generation to magentic + tests. WIP: Add error handling, final answer agent. WIP: Improve error handling with dummy function. Improve prompts and add logging to tool search. WIP: Handle case where provider has no functions. WIP: Tweak prompts, tweak tests. WIP: Refactor + add tests for error handling. WIP: Check for answerable questions. WIP: Add async openbb agent. WIP: Add async variants + tests. WIP: Update llm assertion message. WIP: Improve llm assert format. WIP: Build OpenBB before running tests. WIP: Tweak test.yml. WIP: Tweak asserts. WIP: Bump version + fix assert. WIP: Remove unused notebook. WIP: Remove unused notebook. WIP: Update README. --- .github/workflows/test.yml | 6 +- README.md | 8 +- langchain-tool-retrieval.ipynb | 905 --------------------------------- openbb-agent.ipynb | 406 --------------- openbb_agents/agent.py | 307 ++++++++--- openbb_agents/chains.py | 512 +++++++++---------- openbb_agents/models.py | 42 +- openbb_agents/prompts.py | 83 ++- openbb_agents/testing.py | 41 ++ openbb_agents/tools.py | 113 ++-- openbb_agents/utils.py | 2 +- poetry.lock | 91 +++- pyproject.toml | 12 +- tests/conftest.py | 46 +- tests/test_agent.py | 29 ++ tests/test_chains.py | 285 +++++++++++ tests/test_tools.py | 89 +++- 17 files changed, 1196 insertions(+), 1781 deletions(-) delete mode 100644 langchain-tool-retrieval.ipynb delete mode 100644 openbb-agent.ipynb create mode 100644 openbb_agents/testing.py create mode 100644 tests/test_agent.py create mode 100644 tests/test_chains.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 70e28da..d52c134 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,5 +26,9 @@ jobs: run: | python -m pip install --upgrade pip poetry install + - name: Build OpenBB + run: poetry run python -c "import openbb; openbb.build(); print('Done')" - name: Run Pytest - run : poetry run pytest tests/ + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run : poetry run pytest -n 8 tests/ diff --git a/README.md b/README.md index 7433c97..5eb566b 100644 --- a/README.md +++ b/README.md @@ -80,12 +80,8 @@ pre-commit install ``` ### Testing - -We are in the process of adding tests. 
- -We use `pytest` as our test-runner: - +We use `pytest` as our test runner: ``` sh -pytest tests/ +pytest -n 8 tests/ ``` diff --git a/langchain-tool-retrieval.ipynb b/langchain-tool-retrieval.ipynb deleted file mode 100644 index 1770b6c..0000000 --- a/langchain-tool-retrieval.ipynb +++ /dev/null @@ -1,905 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# OpenBB Agents" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "# import dependencies, in specific langchain\n", - "import os\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.agents.output_parsers import JSONAgentOutputParser\n", - "from langchain.agents.format_scratchpad import format_log_to_str\n", - "from langchain.agents import AgentExecutor\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS\n", - "from langchain import hub\n", - "from langchain.tools.render import render_text_description_and_args\n", - "from langchain.output_parsers import PydanticOutputParser\n", - "from langchain.pydantic_v1 import BaseModel, Field\n", - "from langchain.output_parsers import RetryWithErrorOutputParser\n", - "from langchain.llms import OpenAI\n", - "\n", - "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"False\" # Avoid some warnings from HuggingFace\n", - "\n", - "# Set up OpenAI API key\n", - "import openai\n", - "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", - "openai.api_key = \"\"\n", - "\n", - "# Set up OpenBB Personal Access Token from https://my.openbb.co/app/platform/pat\n", - "from openbb import obb\n", - "from openbb_agents.utils import map_openbb_collection_to_langchain_tools # provides access to OpenBB Tools\n", - "obb.account.login(pat=\"\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Set up OpenBB tools for retrieval\n", - "\n", - "The following will return all OpenBB tools that we want our agent to have access. This matches the layout architecture defined by OpenBB." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "A total of 27 OpenBB tools has been prepared for function calling\n", - "\n", - "Processing each OpenBB tool description into a list of docs...\n", - "\n", - "Create embeddings for each of these OpenBB tool descriptions...\n" - ] - } - ], - "source": [ - "# can also give a single string, but since we want our agent to have more context we provide access to more functions\n", - "# TODO: In future, we might implement a `universal_openbb_tools` collection that allows quick LLM access to all OpenBB functions. \n", - "# We don't run into context size issues doing this, since we rely on embeddings for our tools., the context size is not an issue. \n", - "# However, with many tools in a vector store, retrieval may become a challenge.\n", - "openbb_tools = map_openbb_collection_to_langchain_tools(\n", - " openbb_commands_root = [\n", - " \"/equity/fundamental\",\n", - " \"/equity/compare\",\n", - " \"/equity/estimates\"\n", - " ]\n", - ")\n", - "print(f\"A total of {len(openbb_tools)} OpenBB tools has been prepared for function calling\\n\")\n", - "\n", - "\n", - "print(\"Processing each OpenBB tool description into a list of docs...\\n\")\n", - "# Parse the description (i.e. 
docstring + output fields) for each of these tools\n", - "docs = [\n", - " Document(page_content=t.description, metadata={\"index\": i})\n", - " for i, t in enumerate(openbb_tools)\n", - "]\n", - "\n", - "print(\"Create embeddings for each of these OpenBB tool descriptions...\")\n", - "# Create embeddings from each of these function descriptions\n", - "# this will be important for when we want the agent to know what\n", - "# function to use for a particular query\n", - "vector_store = FAISS.from_documents(docs, OpenAIEmbeddings())" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "def get_tools(query):\n", - " \"Retrieve the most relevant documents to a query.\"\n", - " # Set a docs retriever that looks for a score_threshold of 0.65, this means that if the search retrieval\n", - " # is confident in a few endpoints we don't rule out any due to a hardcoded number of docs to be retrieved\n", - " # Empirically this is what we found that worked best for our particular application. This may vary depending on the use case.\n", - " retriever = vector_store.as_retriever(\n", - " search_type=\"similarity_score_threshold\",\n", - " search_kwargs={'score_threshold': 0.65}\n", - " )\n", - " docs = retriever.get_relevant_documents(query)\n", - " \n", - " # This is a fallback mechanism in case the threshold is too high, causing too few tools to be returned.\n", - " # In this case, we fall back to getting the top k=2 results with higher similarity scores.\n", - " if len(docs) < 2:\n", - " retriever = vector_store.as_retriever(\n", - " search_kwargs={\"k\": 2}\n", - " )\n", - " \n", - " docs = retriever.get_relevant_documents(query)\n", - " \n", - " return [openbb_tools[d.metadata[\"index\"]] for d in docs]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's try out our new vector store that contains our tool embeddings to build some intuition around how they work." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tools retrieved for 'market cap':\n", - " /equity/fundamental/metrics - Key Metrics. Key metrics for a given company.\n", - " /equity/fundamental/multiples - Equity Valuation Multiples. Valuation multiples for a stock ticker.\n", - " /equity/fundamental/overview - Company Overview. General information about a company.\n", - " /equity/fundamental/income - Income Statement. Report on a company's financial performance.\n", - "\n", - "Tools retrieved for 'peers':\n", - " /equity/compare/peers - Equity Peers. Company peers.\n", - " /equity/fundamental/metrics - Key Metrics. 
Key metrics for a given company.\n" - ] - } - ], - "source": [ - "print(\"Tools retrieved for 'market cap':\")\n", - "fetched_tools = get_tools(\"market cap\")\n", - "for tool in fetched_tools:\n", - " print(\" \" + tool.name + \" - \" + tool.description.split('\\n')[0])\n", - "\n", - "print(\"\\nTools retrieved for 'peers':\")\n", - "fetched_tools = get_tools(\"peers\")\n", - "for tool in fetched_tools:\n", - " print(\" \" + tool.name + \" - \" + tool.description.split('\\n')[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## AI-powered Financial Analyst\n", - "\n", - "There are two example prompts to choose from below (but feel free to try your own), one requires linear reasoning (where future answers may depend on previous answers), while the other requires independent reasoning (fetching and combining different pieces of independent information).\n", - "\n", - "These prompts answer 2 very different scenarios and in this notebook, we are going to demonstrate that our OpenBB agent is capable of handling both efficiently, utilizing the same architecture\n", - "\n", - "- **Prompt 1** - This prompt is very deterministic which allows us to access right or wrong immediately because we can check the facts.\n", - "It also involves a few complex operations such as extracting a list of tickers from an endpoint and iterating through that list using a different endpoint. Then based on those outputs, a reasoning is made.\n", - "\n", - "- **Prompt 2** - This prompt is not deterministic and allows us to leverage LLMs to provide alpha by uncovering insights that would be hard for a human to discover. Instead of telling the agent what to do, we expect the agent to provide a reasoning of what it would do to perform a typical analyst task, without guardrails.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# Prompt 1\n", - "PROMPT = \"\"\"\\\n", - "Check what are TSLA peers. From those, check which one has the highest market cap.\n", - "Then, on the ticker that has the highest market cap get the most recent price target estimate from an analyst,\n", - "and tell me who it was and on what date the estimate was made.\n", - "\"\"\"\n", - "\n", - "# Prompt 2\n", - "# PROMPT = \"Perform a fundamentals financial analysis of AMZN using the most recently available data. What do you find that's interesting?\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Task decomposition\n", - "\n", - "The goal is to simplify the user prompt into simpler tasks and execute those for more accuracy.\n", - "\n", - "1. Break a larger query down into subquery.\n", - "2. Then for each subquery create a set of keywords that allow you to fetch the right tool to execute that same subquery." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "# We experiment with Pydantic here to enforce structured output\n", - "# \"Pydantic is all you need\" - https://www.youtube.com/watch?v=yj-wSRJwrrc\n", - "\n", - "class SubQuestion(BaseModel):\n", - " \"Pydantic data model we want each subquestion to have, including each field and what they represent\"\n", - " id: int = Field(\n", - " description=\"The unique ID of the subquestion.\"\n", - " )\n", - " question: str = Field(\n", - " description=\"The subquestion itself.\"\n", - " )\n", - " query: str = Field(\n", - " description=\"The query to pass to the `fetch_tools` function to retrieve the appropriate tool to answer the question.\"\n", - " )\n", - " depends_on: list[int] = Field(\n", - " description=\"The list of subquestion ids whose answer is required to answer this subquestion.\",\n", - " default=[]\n", - " )\n", - "\n", - "class SubQuestionList(BaseModel):\n", - " \"Pydantic data model output we want to enforce, which is a list of the previous SubQuestion Pydantic model\"\n", - " subquestions: list[SubQuestion] = Field(\n", - " description=\"The list of SubQuestion objects.\"\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "def task_decomposition(task: str):\n", - " \"Break a larger query down into subquery. Then for each subquery create a set of keywords that allow you to fetch the right tool to execute that same subquery.\"\n", - " subquestion_parser = PydanticOutputParser(pydantic_object=SubQuestionList)\n", - " \n", - " system_message = \"\"\"\\\n", - " You are a world-class state-of-the-art agent.\n", - " \n", - " You can access multiple tools, via a \"fetch_tools\" function that will retrieve the necessary tools.\n", - " The `fetch_tools` function accepts a string of keywords as input specifying the type of tool to retrieve.\n", - " Each retrieved tool represents a different data source or API that can retrieve the required data.\n", - " \n", - " Your purpose is to help answer a complex user question by generating a list of subquestions,\n", - " as well as the corresponding keyword query to the \"fetch_tools\" function\n", - " to retrieve the relevant tools to answer each corresponding subquestion.\n", - " You must also specify the dependencies between subquestions, since sometimes one\n", - " subquestion will require the outcome of another in order to fully answer.\n", - " \n", - " These are the guidelines you consider when completing your task:\n", - " * Be as specific as possible\n", - " * Avoid using acronyms\n", - " * The subquestions should be relevant to the user's question\n", - " * The subquestions should be answerable by the tools retrieved by the query to `fetch_tools`\n", - " * You can generate multiple subquestions\n", - " * You don't need to query for a tool if you don't think it's relevant\n", - " * A subquestion may not depend on a subquestion that proceeds it (i.e. 
comes after it.)\n", - " \n", - " ## Output format\n", - " {format_instructions}\n", - " \n", - " ### Example responses\n", - " ```json\n", - " {{\"subquestions\": [\n", - " {{\n", - " \"id\": 1,\n", - " \"question\": \"What are the latest financial statements of AMZN?\", \n", - " \"query\": \"financial statements\",\n", - " \"depends_on\": []\n", - " }}, \n", - " {{\n", - " \"id\": 2,\n", - " \"question\": \"What is the most recent revenue and profit margin of AMZN?\", \n", - " \"query\": \"revenue profit margin ratios\",\n", - " \"depends_on\": []\n", - " }}, \n", - " {{\n", - " \"id\": 3,\n", - " \"question\": \"What is the current price to earnings (P/E) ratio of AMZN?\", \n", - " \"query\": \"ratio price to earnings\",\n", - " \"depends_on\": []\n", - " }}, \n", - " {{\n", - " \"id\": 4,\n", - " \"question\": \"Who are the peers of AMZN?\", \n", - " \"query\": \"peers\",\n", - " \"depends_on\": []\n", - " }},\n", - " {{\n", - " \"id\": 5,\n", - " \"question\": \"Which of AMZN's peers have the largest market cap?\", \n", - " \"query\": \"market cap\",\n", - " \"depends_on\": [4]\n", - " }}\n", - " ]}}\n", - " ```\n", - " \"\"\"\n", - " \n", - " human_message = \"\"\"\\\n", - " ## User Question\n", - " {input}\n", - " \"\"\"\n", - " \n", - " prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\"system\", system_message),\n", - " (\"human\", human_message),\n", - " ]\n", - " )\n", - " prompt = prompt.partial(\n", - " format_instructions=subquestion_parser.get_format_instructions()\n", - " )\n", - " \n", - " llm = ChatOpenAI(\n", - " model=\"gpt-4\"\n", - " ) # gpt-3.5-turbo works well, but gpt-4-1106-preview isn't good at returning JSON.\n", - " \n", - " subquestion_chain = {\"input\": lambda x: x[\"input\"]} | prompt | llm | subquestion_parser\n", - "\n", - " subquestion_list = subquestion_chain.invoke({\"input\": task})\n", - "\n", - " return subquestion_list" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1 - Who are the peers of Tesla (TSLA)?\n", - " Query: Tesla peers\n", - "2 - Which of TSLA's peers has the largest market cap?\n", - " Query: market cap\n", - " Depends on: [1]\n", - "3 - What is the most recent price target estimate for the company with the largest market cap?\n", - " Query: price target estimate\n", - " Depends on: [2]\n", - "4 - Who made the most recent price target estimate for the company with the largest market cap?\n", - " Query: analyst name price target estimate\n", - " Depends on: [2]\n", - "5 - On what date was the most recent price target estimate for the company with the largest market cap made?\n", - " Query: date price target estimate\n", - " Depends on: [2]\n" - ] - } - ], - "source": [ - "subquestion_list = task_decomposition(PROMPT)\n", - "\n", - "# Shows the result from task decomposition\n", - "for subquestion in subquestion_list.subquestions:\n", - " print(f\"{subquestion.id} - {subquestion.question}\")\n", - " print(f\" Query: {subquestion.query}\")\n", - " if subquestion.depends_on:\n", - " print(f\" Depends on: {subquestion.depends_on}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Tool retrieval\n", - "\n", - "Use the previously generated queries in order to fetch the tools necessary to answer the subquestion" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1 - Who are the peers 
of Tesla (TSLA)?\n", - " Query: Tesla peers\n", - " Depends on: []\n", - " Fetched tools:\n", - " /equity/compare/peers: Equity Peers. Company peers.\n", - " /equity/fundamental/metrics: Key Metrics. Key metrics for a given company.\n", - "\n", - "2 - Which of TSLA's peers has the largest market cap?\n", - " Query: market cap\n", - " Depends on: [1]\n", - " Fetched tools:\n", - " /equity/fundamental/metrics: Key Metrics. Key metrics for a given company.\n", - " /equity/fundamental/multiples: Equity Valuation Multiples. Valuation multiples for a stock ticker.\n", - " /equity/fundamental/overview: Company Overview. General information about a company.\n", - " /equity/fundamental/income: Income Statement. Report on a company's financial performance.\n", - "\n", - "3 - What is the most recent price target estimate for the company with the largest market cap?\n", - " Query: price target estimate\n", - " Depends on: [2]\n", - " Fetched tools:\n", - " /equity/estimates/consensus: Price Target Consensus. Price target consensus data.\n", - " /equity/estimates/price_target: Price Target. Price target data.\n", - " /equity/estimates/historical: Historical Analyst Estimates. Analyst stock recommendations.\n", - " /equity/fundamental/multiples: Equity Valuation Multiples. Valuation multiples for a stock ticker.\n", - "\n", - "4 - Who made the most recent price target estimate for the company with the largest market cap?\n", - " Query: analyst name price target estimate\n", - " Depends on: [2]\n", - " Fetched tools:\n", - " /equity/estimates/historical: Historical Analyst Estimates. Analyst stock recommendations.\n", - " /equity/estimates/consensus: Price Target Consensus. Price target consensus data.\n", - " /equity/estimates/price_target: Price Target. Price target data.\n", - " /equity/fundamental/historical_eps: Historical earnings-per-share for a given company.\n", - "\n", - "5 - On what date was the most recent price target estimate for the company with the largest market cap made?\n", - " Query: date price target estimate\n", - " Depends on: [2]\n", - " Fetched tools:\n", - " /equity/estimates/consensus: Price Target Consensus. Price target consensus data.\n", - " /equity/estimates/price_target: Price Target. Price target data.\n", - " /equity/estimates/historical: Historical Analyst Estimates. Analyst stock recommendations.\n", - " /equity/fundamental/historical_eps: Historical earnings-per-share for a given company.\n", - "\n" - ] - } - ], - "source": [ - "subquestions_and_tools = []\n", - "for subquestion in subquestion_list.subquestions:\n", - " tools = get_tools(subquestion.query)\n", - " subquestions_and_tools.append(\n", - " { \"id\": subquestion.id,\n", - " \"subquestion\": subquestion.question,\n", - " \"query\": subquestion.query,\n", - " \"tools\": tools,\n", - " \"depends_on\": subquestion.depends_on,\n", - " }\n", - " )\n", - "\n", - "# Shows the result from the fetched tools for each subquestion's query \n", - "for subq in subquestions_and_tools:\n", - " print(f\"{subq['id']} - {subq['subquestion']}\")\n", - " print(f\" Query: {subq['query']}\")\n", - " if subquestion.depends_on:\n", - " print(f\" Depends on: {subq['depends_on']}\")\n", - " print(\" Fetched tools:\")\n", - " for tool in subq[\"tools\"]:\n", - " print(\" \" + tool.name + \": \" + tool.description.split('\\n')[0])\n", - " print(\"\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Agent to execute on each subquestion\n", - "\n", - "The ReAct agent answers each of the subquestions. 
This is done by providing it with the subquestion and its corresponding fetched tools.\n", - "\n", - "ReAct paper: https://react-lm.github.io/" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "def langchain_react_agent(tools):\n", - " \"Define a ReAct agent bound with specific tools.\"\n", - " # This retrieves the ReAct agent chat prompt template available in Langchain Hub\n", - " # https://smith.langchain.com/hub/hwchase17/react-json?organizationId=10beea65-e722-5aa1-9f93-034c22e3cd6e\n", - " prompt = hub.pull(\"hwchase17/react-multi-input-json\")\n", - " # Replace the 'tools' and 'tool_names' content of the prompt with information given to the agent\n", - " # Note that tool_names is a field available in each tool, so it can be inferred from same argument\n", - " prompt = prompt.partial(\n", - " tools=render_text_description_and_args(tools),\n", - " tool_names=\", \".join([t.name for t in tools]),\n", - " )\n", - "\n", - " llm = ChatOpenAI(model=\"gpt-4-1106-preview\").bind(stop=[\"\\nObservation\"])\n", - "\n", - " chain = (\n", - " {\n", - " \"input\": lambda x: x[\"input\"],\n", - " \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\n", - " }\n", - " | prompt\n", - " | llm\n", - " | JSONAgentOutputParser()\n", - " )\n", - "\n", - " # Agent executor with access to the chain and tools at its disposal\n", - " agent_executor = AgentExecutor(\n", - " agent=chain,\n", - " tools=tools,\n", - " verbose=False, # <-- set this to False to cut down on output spam. But it's useful for debugging!\n", - " return_intermediate_steps=False,\n", - " handle_parsing_errors=True,\n", - " )\n", - " return agent_executor" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Who are the peers of Tesla (TSLA)?\n", - "- The peers of Tesla (TSLA) are XPeng Inc. (XPEV), Li Auto Inc. (LI), Rivian Automotive, Inc. (RIVN), Lucid Group, Inc. (LCID), General Motors Company (GM), NIO Inc. (NIO), Ford Motor Company (F), Fisker Inc. (FSR), and Mullen Automotive, Inc. (MULN).\n", - "- These companies are considered peers because they operate in the same sector and industry, focusing on electric vehicles and automotive manufacturing, which is the primary business of Tesla.\n", - "\n", - "\n", - "Which of TSLA's peers has the largest market cap?\n", - "- General Motors Company (GM) has the largest market cap among Tesla's peers, with a market cap of $43,275,565,978.\n", - "- The market caps of other peers are as follows: Li Auto Inc. (LI) at $37,031,658,909, Rivian Automotive, Inc. (RIVN) at $15,920,206,546, NIO Inc. (NIO) at $12,930,495,049, Ford Motor Company (F) at $40,343,346,985, XPeng Inc. (XPEV) at $11,916,511,602, Lucid Group, Inc. (LCID) at $9,661,394,668, Fisker Inc. (FSR) at $553,878,493, and Mullen Automotive, Inc. 
(MULN) at $62,790,894.\n", - "- The data was retrieved from the company overview tool, which provides the latest available market capitalization figures for each company.\n", - "\n", - "\n", - "What is the most recent price target estimate for the company with the largest market cap?\n", - "- The most recent price target estimate for General Motors Company (GM) is $37.0.\n", - "- This estimate was provided by analyst Dan Levy from Barclays.\n", - "- The estimate was made on November 1, 2023.\n", - "\n", - "\n", - "Who made the most recent price target estimate for the company with the largest market cap?\n", - "- The most recent price target estimate for General Motors Company (GM) was made by Dan Levy from Barclays on November 1, 2023.\n", - "- This observation is based on the most recent data retrieved from the price target tool.\n", - "\n", - "\n", - "On what date was the most recent price target estimate for the company with the largest market cap made?\n", - "- The most recent price target estimate for General Motors Company (GM) was made on November 1, 2023.\n", - "- This estimate was provided by analyst Dan Levy from Barclays with a price target of $37.0.\n", - "\n", - "\n" - ] - } - ], - "source": [ - "# Go through each subquestion and create an agent with the necessary tools and context to execute on it\\n\n", - "for i, subquestion in enumerate(subquestions_and_tools):\n", - "\n", - " # We handle each dependency manually since we don't want agents to share memory as this can go over context length\n", - " deps = [dep for dep in subquestions_and_tools if dep[\"id\"] in subquestion[\"depends_on\"]]\n", - "\n", - " dependencies = \"\"\n", - " for dep in deps:\n", - " dependencies += \"subquestion: \" + dep[\"subquestion\"] + \"\\n\"\n", - " # if for some reason there's no temporal dependency between the agents being run\n", - " # this ensures the code doesn't break here\n", - " if \"observation\" in dep:\n", - " dependencies += \"observations:\\n\" + str(dep[\"observation\"]) + \"\\n\\n\"\n", - "\n", - " input = f\"\"\"\\\n", - "Given the following high-level question: {PROMPT}\n", - "Answer the following subquestion: {subquestion['subquestion']}\n", - "\n", - "Give your answer in a bullet-point list.\n", - "Explain your reasoning, and make reference to and provide the relevant retrieved data as part of your answer.\n", - "\n", - "Remember to use the tools provided to you to answer the question, and STICK TO THE INPUT SCHEMA.\n", - "\n", - "Example output format:\n", - "```\n", - "- \n", - "- \n", - "- \n", - "... REPEAT AS MANY TIMES AS NECESSARY TO ANSWER THE SUBQUESTION.\n", - "```\n", - "\n", - "If necessary, make use of the following subquestions and their answers to answer your subquestion:\n", - "{dependencies}\n", - "\n", - "Return only your answer as a bulleted list as a single string. Don't respond with JSON or any other kind of data structure.\n", - "\"\"\"\n", - "\n", - " try:\n", - " result = langchain_react_agent(tools=subquestion[\"tools\"]).invoke({\"input\": input})\n", - " output = result[\"output\"]\n", - " except Exception as err: # Terrible practice, but it'll do for now.\n", - " print(err)\n", - " # We'll include the error message in the future\n", - " output = \"I was unable to answer the subquestion using the available tool.\" \n", - "\n", - "\n", - " # This is very cheeky but we are basically going into the subquestions_and_tools and for this current subquestion\n", - " # we are adding the output as an observation. 
This is important because then above we do the dependencies check-up\n", - " # which allows us to retrieve the correct output to be used in another subquestion.\n", - " # Note: this works because subquestions are done in order to execute prompt. Otherwise it wouldn't since we would\n", - " # be looking for an \"observation\" that doesn't exist yet.\n", - " subquestion[\"observation\"] = output\n", - "\n", - " print(subquestion['subquestion'])\n", - " if isinstance(output, dict):\n", - " for val in output.values():\n", - " print(val)\n", - " else:\n", - " print(output)\n", - " print(\"\\n\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Verdict\n", - "\n", - "To combine all of the subquestion answers to generate a final answer." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "def render_subquestions_and_answers(subquestions):\n", - " \"Combines all subquestions and their answers\"\n", - " output = \"\"\n", - " for subquestion in subquestions:\n", - " output += \"Subquestion: \" + subquestion[\"subquestion\"] + \"\\n\"\n", - " output += \"Observations: \\n\" + str(subquestion[\"observation\"]) + \"\\n\\n\"\n", - "\n", - " return output" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "def verdict(question: str, subquestions: dict):\n", - " \"Based on the high-level question, it combines the subquestions and their answers to give one final concise answer\"\n", - " system_message = \"\"\"\\\n", - " Given the following high-level question: \n", - " \n", - " {input}\n", - " \n", - " And the following subquestions and subsequent observations:\n", - " \n", - " {subquestions}\n", - " \n", - " Answer the high-level question. Give your answer in a bulleted list.\n", - " \"\"\"\n", - " \n", - " \n", - " prompt = ChatPromptTemplate.from_messages([(\"system\", system_message)])\n", - " \n", - " llm = ChatOpenAI(model=\"gpt-4\") # Let's use the big model for the final answer.\n", - " \n", - " final_chain = (\n", - " {\n", - " \"input\": lambda x: x[\"input\"],\n", - " \"subquestions\": lambda x: render_subquestions_and_answers(x[\"subquestions\"]),\n", - " }\n", - " | prompt\n", - " | llm\n", - " )\n", - " \n", - " result = final_chain.invoke({\"input\": question, \"subquestions\": subquestions})\n", - "\n", - " return result" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "- The peers of Tesla (TSLA) are XPeng Inc. (XPEV), Li Auto Inc. (LI), Rivian Automotive, Inc. (RIVN), Lucid Group, Inc. (LCID), General Motors Company (GM), NIO Inc. (NIO), Ford Motor Company (F), Fisker Inc. (FSR), and Mullen Automotive, Inc. (MULN).\n", - "- Among these peers, General Motors Company (GM) has the largest market capitalization at $43,275,565,978.\n", - "- The most recent price target estimate for General Motors Company (GM) is $37.0.\n", - "- This estimate was provided by analyst Dan Levy from Barclays.\n", - "- The price target estimate was made on November 1, 2023.\n" - ] - } - ], - "source": [ - "result = verdict(\n", - " question=PROMPT,\n", - " subquestions=subquestions_and_tools\n", - ")\n", - "print(result.content) # Et voila" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
01
0symbolGM
1price31.6
2beta1.49
3vol_avg16439042
4mkt_cap43275565978
\n", - "
" - ], - "text/plain": [ - " 0 1\n", - "0 symbol GM\n", - "1 price 31.6\n", - "2 beta 1.49\n", - "3 vol_avg 16439042\n", - "4 mkt_cap 43275565978" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "obb.equity.fundamental.overview(\"GM\").to_df().head()" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
symbolpublished_datenews_urlnews_titleanalyst_nameanalyst_companyprice_targetadj_price_targetprice_when_postednews_publishernews_base_url
0GM2023-11-01 02:32:00https://www.streetinsider.com/Upgrades/Barclay...Barclays Upgrades General Motors (GM) to Overw...Dan LevyBarclays37.037.028.2StreetInsiderstreetinsider.com
\n", - "
" - ], - "text/plain": [ - " symbol published_date \\\n", - "0 GM 2023-11-01 02:32:00 \n", - "\n", - " news_url \\\n", - "0 https://www.streetinsider.com/Upgrades/Barclay... \n", - "\n", - " news_title analyst_name \\\n", - "0 Barclays Upgrades General Motors (GM) to Overw... Dan Levy \n", - "\n", - " analyst_company price_target adj_price_target price_when_posted \\\n", - "0 Barclays 37.0 37.0 28.2 \n", - "\n", - " news_publisher news_base_url \n", - "0 StreetInsider streetinsider.com " - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "obb.equity.estimates.price_target(\"GM\").to_df().head(1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/openbb-agent.ipynb b/openbb-agent.ipynb deleted file mode 100644 index 662a8dd..0000000 --- a/openbb-agent.ipynb +++ /dev/null @@ -1,406 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# import dependencies, in specific langchain\n", - "import os\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.agents.output_parsers import JSONAgentOutputParser\n", - "from langchain.agents.format_scratchpad import format_log_to_str\n", - "from langchain.agents import AgentExecutor\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS\n", - "from langchain import hub\n", - "from langchain.tools.render import render_text_description_and_args\n", - "from langchain.output_parsers import PydanticOutputParser\n", - "from langchain.pydantic_v1 import BaseModel, Field\n", - "from langchain.output_parsers import RetryWithErrorOutputParser\n", - "from langchain.llms import OpenAI\n", - "\n", - "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"False\" # Avoid some warnings from HuggingFace\n", - "\n", - "# Set up OpenAI API key\n", - "import openai\n", - "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", - "openai.api_key = \"\"\n", - "\n", - "# Set up OpenBB Personal Access Token from https://my.openbb.co/app/platform/pat\n", - "from openbb import obb\n", - "from openbb_agents.utils import map_openbb_collection_to_langchain_tools # provides access to OpenBB Tools\n", - "obb.account.login(pat=\"\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "openbb_tools = map_openbb_collection_to_langchain_tools(\n", - " openbb_commands_root = [\n", - " \"/equity/fundamental\",\n", - " \"/equity/compare\",\n", - " \"/equity/estimates\"\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "user_query = \"\"\"\\\n", - "Check what are TSLA peers. 
From those, check which one has the highest market cap.\n", - "Then, on the ticker that has the highest market cap get the most recent price target estimate from an analyst,\n", - "and tell me who it was and on what date the estimate was made.\n", - "\"\"\"\n", - "\n", - "# user_query = \"Perform a fundamentals financial analysis of AMZN using the most recently available data. What do you find that's interesting?\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import langchain\n", - "\n", - "class SubQuestion(BaseModel):\n", - " \"Pydantic data model we want each subquestion to have, including each field and what they represent\"\n", - " id: int = Field(\n", - " description=\"The unique ID of the subquestion.\"\n", - " )\n", - " question: str = Field(\n", - " description=\"The subquestion itself.\"\n", - " )\n", - " query: str = Field(\n", - " description=\"The query to pass to the `fetch_tools` function to retrieve the appropriate tool to answer the question.\"\n", - " )\n", - " depends_on: list[int] = Field(\n", - " description=\"The list of subquestion ids whose answer is required to answer this subquestion.\",\n", - " default=[]\n", - " )\n", - "\n", - "class SubQuestionList(BaseModel):\n", - " \"Pydantic data model output we want to enforce, which is a list of the previous SubQuestion Pydantic model\"\n", - " subquestions: list[SubQuestion] = Field(\n", - " description=\"The list of SubQuestion objects.\"\n", - " )\n", - "\n", - "def task_decomposition(task: str):\n", - " \"Break a larger query down into subquery. Then for each subquery create a set of keywords that allow you to fetch the right tool to execute that same subquery.\"\n", - " subquestion_parser = PydanticOutputParser(pydantic_object=SubQuestionList)\n", - " \n", - " system_message = \"\"\"\\\n", - " You are a world-class state-of-the-art agent.\n", - " \n", - " You can access multiple tools, via a \"fetch_tools\" function that will retrieve the necessary tools.\n", - " The `fetch_tools` function accepts a string of keywords as input specifying the type of tool to retrieve.\n", - " Each retrieved tool represents a different data source or API that can retrieve the required data.\n", - " \n", - " Your purpose is to help answer a complex user question by generating a list of subquestions,\n", - " as well as the corresponding keyword query to the \"fetch_tools\" function\n", - " to retrieve the relevant tools to answer each corresponding subquestion.\n", - " You must also specify the dependencies between subquestions, since sometimes one\n", - " subquestion will require the outcome of another in order to fully answer.\n", - " \n", - " These are the guidelines you consider when completing your task:\n", - " * Be as specific as possible\n", - " * Avoid using acronyms\n", - " * The subquestions should be relevant to the user's question\n", - " * The subquestions should be answerable by the tools retrieved by the query to `fetch_tools`\n", - " * You can generate multiple subquestions\n", - " * You don't need to query for a tool if you don't think it's relevant\n", - " * A subquestion may not depend on a subquestion that proceeds it (i.e. 
comes after it.)\n", - " \n", - " ## Output format\n", - " {format_instructions}\n", - " \n", - " ### Example responses\n", - " ```json\n", - " {{\"subquestions\": [\n", - " {{\n", - " \"id\": 1,\n", - " \"question\": \"What are the latest financial statements of AMZN?\", \n", - " \"query\": \"financial statements\",\n", - " \"depends_on\": []\n", - " }}, \n", - " {{\n", - " \"id\": 2,\n", - " \"question\": \"What is the most recent revenue and profit margin of AMZN?\", \n", - " \"query\": \"revenue profit margin ratios\",\n", - " \"depends_on\": []\n", - " }}, \n", - " {{\n", - " \"id\": 3,\n", - " \"question\": \"What is the current price to earnings (P/E) ratio of AMZN?\", \n", - " \"query\": \"ratio price to earnings\",\n", - " \"depends_on\": []\n", - " }}, \n", - " {{\n", - " \"id\": 4,\n", - " \"question\": \"Who are the peers of AMZN?\", \n", - " \"query\": \"peers\",\n", - " \"depends_on\": []\n", - " }},\n", - " {{\n", - " \"id\": 5,\n", - " \"question\": \"Which of AMZN's peers have the largest market cap?\", \n", - " \"query\": \"market cap\",\n", - " \"depends_on\": [4]\n", - " }}\n", - " ]}}\n", - " ```\n", - " \"\"\"\n", - " \n", - " human_message = \"\"\"\\\n", - " ## User Question\n", - " {input}\n", - " \"\"\"\n", - " \n", - " prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\"system\", system_message),\n", - " (\"human\", human_message),\n", - " ]\n", - " )\n", - " prompt = prompt.partial(\n", - " format_instructions=subquestion_parser.get_format_instructions()\n", - " )\n", - " \n", - " llm = ChatOpenAI(\n", - " model=\"gpt-4\"\n", - " ) # gpt-3.5-turbo works well, but gpt-4-1106-preview isn't good at returning JSON.\n", - " \n", - " subquestion_chain = {\"input\": lambda x: x[\"input\"]} | prompt | llm | subquestion_parser\n", - "\n", - " subquestion_list = subquestion_chain.invoke({\"input\": task})\n", - "\n", - " return subquestion_list\n", - "\n", - "def langchain_react_agent(tools):\n", - " \"Define a ReAct agent bound with specific tools.\"\n", - " # This retrieves the ReAct agent chat prompt template available in Langchain Hub\n", - " # https://smith.langchain.com/hub/hwchase17/react-json?organizationId=10beea65-e722-5aa1-9f93-034c22e3cd6e\n", - " prompt = hub.pull(\"hwchase17/react-multi-input-json\")\n", - " # Replace the 'tools' and 'tool_names' content of the prompt with information given to the agent\n", - " # Note that tool_names is a field available in each tool, so it can be inferred from same argument\n", - " prompt = prompt.partial(\n", - " tools=render_text_description_and_args(tools),\n", - " tool_names=\", \".join([t.name for t in tools]),\n", - " )\n", - "\n", - " llm = ChatOpenAI(model=\"gpt-4-1106-preview\").bind(stop=[\"\\nObservation\"])\n", - "\n", - " chain = (\n", - " {\n", - " \"input\": lambda x: x[\"input\"],\n", - " \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\n", - " }\n", - " | prompt\n", - " | llm\n", - " | JSONAgentOutputParser()\n", - " )\n", - "\n", - " # Agent executor with access to the chain and tools at its disposal\n", - " agent_executor = AgentExecutor(\n", - " agent=chain,\n", - " tools=tools,\n", - " verbose=False, # <-- set this to False to cut down on output spam. 
But it's useful for debugging!\n", - " return_intermediate_steps=False,\n", - " handle_parsing_errors=True,\n", - " )\n", - " return agent_executor\n", - "\n", - "def render_subquestions_and_answers(subquestions):\n", - " \"Combines all subquestions and their answers\"\n", - " output = \"\"\n", - " for subquestion in subquestions:\n", - " output += \"Subquestion: \" + subquestion[\"subquestion\"] + \"\\n\"\n", - " output += \"Observations: \\n\" + str(subquestion[\"observation\"]) + \"\\n\\n\"\n", - "\n", - " return output\n", - "\n", - "def verdict(question: str, subquestions: dict):\n", - " \"Based on the high-level question, it combines the subquestions and their answers to give one final concise answer\"\n", - " system_message = \"\"\"\\\n", - " Given the following high-level question: \n", - " \n", - " {input}\n", - " \n", - " And the following subquestions and subsequent observations:\n", - " \n", - " {subquestions}\n", - " \n", - " Answer the high-level question. Give your answer in a bulleted list.\n", - " \"\"\"\n", - " \n", - " \n", - " prompt = ChatPromptTemplate.from_messages([(\"system\", system_message)])\n", - " \n", - " llm = ChatOpenAI(model=\"gpt-4\") # Let's use the big model for the final answer.\n", - " \n", - " final_chain = (\n", - " {\n", - " \"input\": lambda x: x[\"input\"],\n", - " \"subquestions\": lambda x: render_subquestions_and_answers(x[\"subquestions\"]),\n", - " }\n", - " | prompt\n", - " | llm\n", - " )\n", - " \n", - " result = final_chain.invoke({\"input\": question, \"subquestions\": subquestions})\n", - "\n", - " return result\n", - "\n", - "def openbb_agent(\n", - " openbb_tools: langchain.tools.base.StructuredTool,\n", - " user_query: str,\n", - " ):\n", - " # Parse the description (i.e. docstring + output fields) for each of these tools\n", - " docs = [\n", - " Document(page_content=t.description, metadata={\"index\": i})\n", - " for i, t in enumerate(openbb_tools)\n", - " ]\n", - "\n", - " # Create embeddings from each of these function descriptions\n", - " # this will be important for when we want the agent to know what\n", - " # function to use for a particular query\n", - " vector_store = FAISS.from_documents(docs, OpenAIEmbeddings())\n", - "\n", - " subquestion_list = task_decomposition(user_query)\n", - "\n", - " subquestions_and_tools = []\n", - " for subquestion in subquestion_list.subquestions:\n", - "\n", - " # Tool retrieval\n", - " retriever = vector_store.as_retriever(\n", - " search_type=\"similarity_score_threshold\",\n", - " search_kwargs={'score_threshold': 0.65}\n", - " )\n", - " docs = retriever.get_relevant_documents(subquestion.query)\n", - " \n", - " # This is a fallback mechanism in case the threshold is too high, causing too few tools to be returned.\n", - " # In this case, we fall back to getting the top k=2 results with higher similarity scores.\n", - " if len(docs) < 2:\n", - " retriever = vector_store.as_retriever(\n", - " search_kwargs={\"k\": 2}\n", - " )\n", - " \n", - " docs = retriever.get_relevant_documents(subquestion.query)\n", - " \n", - " tools = [openbb_tools[d.metadata[\"index\"]] for d in docs]\n", - "\n", - " subquestions_and_tools.append(\n", - " { \"id\": subquestion.id,\n", - " \"subquestion\": subquestion.question,\n", - " \"query\": subquestion.query,\n", - " \"tools\": tools,\n", - " \"depends_on\": subquestion.depends_on,\n", - " }\n", - " )\n", - "\n", - " # Go through each subquestion and create an agent with the necessary tools and context to execute on it\\n\n", - " for i, subquestion in 
enumerate(subquestions_and_tools):\n", - "\n", - " # We handle each dependency manually since we don't want agents to share memory as this can go over context length\n", - " deps = [dep for dep in subquestions_and_tools if dep[\"id\"] in subquestion[\"depends_on\"]]\n", - "\n", - " dependencies = \"\"\n", - " for dep in deps:\n", - " dependencies += \"subquestion: \" + dep[\"subquestion\"] + \"\\n\"\n", - " # if for some reason there's no temporal dependency between the agents being run\n", - " # this ensures the code doesn't break here\n", - " if \"observation\" in dep:\n", - " dependencies += \"observations:\\n\" + str(dep[\"observation\"]) + \"\\n\\n\"\n", - "\n", - " input = f\"\"\"\\\n", - "Given the following high-level question: {user_query}\n", - "Answer the following subquestion: {subquestion['subquestion']}\n", - "\n", - "Give your answer in a bullet-point list.\n", - "Explain your reasoning, and make reference to and provide the relevant retrieved data as part of your answer.\n", - "\n", - "Remember to use the tools provided to you to answer the question, and STICK TO THE INPUT SCHEMA.\n", - "\n", - "Example output format:\n", - "```\n", - "- \n", - "- \n", - "- \n", - "... REPEAT AS MANY TIMES AS NECESSARY TO ANSWER THE SUBQUESTION.\n", - "```\n", - "\n", - "If necessary, make use of the following subquestions and their answers to answer your subquestion:\n", - "{dependencies}\n", - "\n", - "Return only your answer as a bulleted list as a single string. Don't respond with JSON or any other kind of data structure.\n", - "\"\"\"\n", - "\n", - " try:\n", - " result = langchain_react_agent(tools=subquestion[\"tools\"]).invoke({\"input\": input})\n", - " output = result[\"output\"]\n", - " except Exception as err: # Terrible practice, but it'll do for now.\n", - " print(err)\n", - " # We'll include the error message in the future\n", - " output = \"I was unable to answer the subquestion using the available tool.\" \n", - "\n", - "\n", - " # This is very cheeky but we are basically going into the subquestions_and_tools and for this current subquestion\n", - " # we are adding the output as an observation. This is important because then above we do the dependencies check-up\n", - " # which allows us to retrieve the correct output to be used in another subquestion.\n", - " # Note: this works because subquestions are done in order to execute prompt. 
Otherwise it wouldn't since we would\n", - " # be looking for an \"observation\" that doesn't exist yet.\n", - " subquestion[\"observation\"] = output\n", - "\n", - " \n", - " result = verdict(\n", - " question=user_query,\n", - " subquestions=subquestions_and_tools\n", - " )\n", - "\n", - " return result\n", - "\n", - "\n", - "openbb_agent(openbb_tools, user_query)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "michael", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/openbb_agents/agent.py b/openbb_agents/agent.py index 4d67dbc..df7976e 100644 --- a/openbb_agents/agent.py +++ b/openbb_agents/agent.py @@ -1,28 +1,29 @@ +import asyncio import logging -from typing import Optional -from openbb_agents.chains import ( - generate_final_response, +from langchain.vectorstores import VectorStore + +from .chains import ( + agenerate_subquestion_answer, + agenerate_subquestions_from_query, + asearch_tools, + generate_final_answer, generate_subquestion_answer, - generate_subquestions, - select_tools, + generate_subquestions_from_query, + search_tools, ) -from openbb_agents.models import SubQuestionAgentConfig -from openbb_agents.tools import ( - create_tool_index, - get_all_openbb_tools, - map_openbb_routes_to_langchain_tools, +from .models import AnsweredSubQuestion, SubQuestion +from .tools import ( + build_openbb_tool_vector_index, + build_vector_index_from_openbb_function_descriptions, + map_name_to_openbb_function_description, ) -from openbb_agents.utils import get_dependencies - -from . import VERBOSE +from .utils import get_dependencies logger = logging.getLogger(__name__) -def openbb_agent( - query: str, openbb_tools: Optional[list[str]] = None, verbose=VERBOSE -) -> str: +def openbb_agent(query: str, openbb_tools: list[str] | None = None) -> str: """Answer a query using the OpenBB Agent equipped with tools. By default all available openbb tools are used. You can have a query @@ -33,60 +34,252 @@ def openbb_agent( ---------- query : str The query you want to have answered. - openbb_tools : optional[list[str]] - Optional. Specify the OpenBB collections or commands that you use to use. If not + openbb_tools : list[Callable] + Optional. Specify the OpenBB functions you want to use. If not specified, every available OpenBB tool will be used. Examples -------- >>> # Use all OpenBB tools to answer the query - >>> openbb_agent("What is the market cap of TSLA?") + >>> openbb_agent("What is the stock price of TSLA?") >>> # Use only the specified tools to answer the query - >>> openbb_agent("What is the market cap of TSLA?", - ... openbb_tools=["/equity/fundamental", "/equity/price/historical"]) + >>> openbb_agent("What is the stock price of TSLA?", + ... 
openbb_tools=['.equity.price.quote']) """ + tool_vector_index = _handle_tool_vector_index(openbb_tools) + subquestions = generate_subquestions_from_query(user_query=query) - subquestion_list = generate_subquestions(query, verbose=verbose) - logger.info("Generated subquestions: %s", subquestion_list) + logger.info("Generated subquestions: %s", subquestions) - if openbb_tools: - tools = map_openbb_routes_to_langchain_tools(openbb_tools) - else: - tools = get_all_openbb_tools() - vector_index = create_tool_index(tools=tools) + answered_subquestions = [] + for subquestion in subquestions: + if _is_subquestion_answerable( + subquestion=subquestion, answered_subquestions=answered_subquestions + ): + logger.info("Answering subquestion: %s", subquestion) + answered_subquestion = _fetch_tools_and_answer_subquestion( + user_query=query, + subquestion=subquestion, + tool_vector_index=tool_vector_index, + answered_subquestions=answered_subquestions, + ) + answered_subquestions.append(answered_subquestion) + else: + logger.info("Skipping unanswerable subquestion: %s", subquestion) + return generate_final_answer( + user_query=query, + answered_subquestions=answered_subquestions, + ) + + +async def aopenbb_agent(query: str, openbb_tools: list[str] | None = None) -> str: + """Answer a query using the OpenBB Agent equipped with tools. + Async variant of `openbb_agent`. + + By default all available openbb tools are used. You can have a query + answered using a smaller subset of OpenBB tools by using the `openbb_tools` + argument. + + Parameters + ---------- + query : str + The query you want to have answered. + openbb_tools : list[Callable] + Optional. Specify the OpenBB functions you want to use. If not + specified, every available OpenBB tool will be used. + + Examples + -------- + >>> # Use all OpenBB tools to answer the query + >>> openbb_agent("What is the stock price of TSLA?") + >>> # Use only the specified tools to answer the query + >>> openbb_agent("What is the stock price of TSLA?", + ... 
openbb_tools=['.equity.price.quote']) + + """ + tool_vector_index = _handle_tool_vector_index(openbb_tools) + + subquestions = await agenerate_subquestions_from_query(user_query=query) + answered_subquestions = await _aprocess_subquestions( + user_query=query, + subquestions=subquestions, + tool_vector_index=tool_vector_index, + ) + + return generate_final_answer( + user_query=query, + answered_subquestions=answered_subquestions, + ) + + +async def _aprocess_subquestions( + user_query: str, subquestions: list[SubQuestion], tool_vector_index: VectorStore +) -> list[AnsweredSubQuestion]: answered_subquestions = [] - for subquestion in subquestion_list.subquestions: # TODO: Do in parallel - # Fetch tool for subquestion - logger.info("Attempting to select tools for: %s", {subquestion.question}) - selected_tools = select_tools( - vector_index=vector_index, - tools=tools, - subquestion=subquestion, - answered_subquestions=answered_subquestions, - verbose=verbose, + queued_subquestions = [] + + tasks = [] + while True: + unanswered_subquestions = _get_unanswered_subquestions( + answered_subquestions=answered_subquestions, subquestions=subquestions ) - # TODO: Improve filtering of tools (probably by storing them in a dict) - tool_names = [tool.name for tool in selected_tools.tools] - subquestion_tools = [tool for tool in tools if tool.name in tool_names] - logger.info("Retrieved tool(s): %s", tool_names) - - # Then attempt to answer subquestion - answered_subquestion = generate_subquestion_answer( - SubQuestionAgentConfig( - query=query, - subquestion=subquestion, - tools=subquestion_tools, - dependencies=get_dependencies( - answered_subquestions, subquestion - ), # TODO: Just do this in gneerate_subquestion_answer - ), - verbose=verbose, + logger.info("Pending subquestions: %s", unanswered_subquestions) + + new_answerable_subquestions = _get_answerable_subquestions( + subquestions=unanswered_subquestions, + answered_subquestions=answered_subquestions, ) - answered_subquestions.append(answered_subquestion) + logger.info("Answerable subquestions: %s", new_answerable_subquestions) + + for subquestion in new_answerable_subquestions: + logger.info("Scheduling subquestion for answer: %s", subquestion) + # Make sure we only submit newly answerable questions (since the + # other ones have been submitted already) + if subquestion not in queued_subquestions: + task = asyncio.create_task( + _afetch_tools_and_answer_subquestion( + user_query=user_query, + subquestion=subquestion, + tool_vector_index=tool_vector_index, + answered_subquestions=answered_subquestions, + ) + ) + tasks.append(task) + queued_subquestions.append(subquestion) + + if not tasks: + break + + done, _ = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) + tasks = [task for task in tasks if not task.done()] + + for task in done: + if task.exception(): + logger.error("Unexpected error in task: %s", task.exception()) + else: + answered_subquestion = task.result() + logger.info("Finished task for subquestion: %s", answered_subquestion) + answered_subquestions.append(answered_subquestion) + + return answered_subquestions + + +def _fetch_tools_and_answer_subquestion( + user_query: str, + subquestion: SubQuestion, + tool_vector_index: VectorStore, + answered_subquestions: list[AnsweredSubQuestion], +) -> AnsweredSubQuestion: + logger.info("Attempting to select tools for: %s", {subquestion.question}) + dependencies = get_dependencies( + answered_subquestions=answered_subquestions, subquestion=subquestion + ) + tools = search_tools( + 
subquestion=subquestion, + tool_vector_index=tool_vector_index, + answered_subquestions=dependencies, + ) + tool_names = [tool.__name__ for tool in tools] + logger.info("Retrieved tool(s): %s", tool_names) + + # Then attempt to answer subquestion + logger.info("Answering subquestion: %s", subquestion.question) + answered_subquestion = generate_subquestion_answer( + user_query=user_query, + subquestion=subquestion, + tools=tools, + dependencies=dependencies, + ) + + logger.info("Answered subquestion: %s", answered_subquestion.answer) + return answered_subquestion + - # Answer final question - return generate_final_response( - query=query, answered_subquestions=answered_subquestions, verbose=verbose +async def _afetch_tools_and_answer_subquestion( + user_query: str, + subquestion: SubQuestion, + tool_vector_index: VectorStore, + answered_subquestions: list[AnsweredSubQuestion], +) -> AnsweredSubQuestion: + logger.info("Attempting to select tools for: %s", {subquestion.question}) + dependencies = get_dependencies( + answered_subquestions=answered_subquestions, subquestion=subquestion ) + tools = await asearch_tools( + subquestion=subquestion, + tool_vector_index=tool_vector_index, + answered_subquestions=dependencies, + ) + tool_names = [tool.__name__ for tool in tools] + logger.info("Retrieved tool(s): %s", tool_names) + + # Then attempt to answer subquestion + logger.info("Answering subquestion: %s", subquestion.question) + answered_subquestion = await agenerate_subquestion_answer( + user_query=user_query, + subquestion=subquestion, + tools=tools, + dependencies=dependencies, + ) + + logger.info("Answered subquestion: %s", answered_subquestion.answer) + return answered_subquestion + + +def _get_unanswered_subquestions( + answered_subquestions: list[AnsweredSubQuestion], subquestions: list[SubQuestion] +) -> list[SubQuestion]: + answered_subquestion_ids = [ + answered_subquestion.subquestion.id + for answered_subquestion in answered_subquestions + ] + return [ + subquestion + for subquestion in subquestions + if subquestion.id not in answered_subquestion_ids + ] + + +def _is_subquestion_answerable( + subquestion: SubQuestion, answered_subquestions: list[AnsweredSubQuestion] +) -> bool: + if not subquestion.depends_on: + return True + + for id_ in subquestion.depends_on: + if id_ not in [ + answered_subquestion.subquestion.id + for answered_subquestion in answered_subquestions + ]: + return False + return True + + +def _get_answerable_subquestions( + subquestions: list[SubQuestion], answered_subquestions: list[AnsweredSubQuestion] +) -> list[SubQuestion]: + return [ + subquestion + for subquestion in subquestions + if _is_subquestion_answerable( + subquestion=subquestion, answered_subquestions=answered_subquestions + ) + ] + + +def _handle_tool_vector_index(openbb_tools: list[str] | None) -> VectorStore: + if not openbb_tools: + logger.info("Using all available OpenBB tools.") + tool_vector_index = build_openbb_tool_vector_index() + else: + logger.info("Using specified OpenBB tools: %s", openbb_tools) + openbb_function_descriptions = [ + map_name_to_openbb_function_description(obb_function_name) + for obb_function_name in openbb_tools + ] + tool_vector_index = build_vector_index_from_openbb_function_descriptions( + openbb_function_descriptions + ) + return tool_vector_index diff --git a/openbb_agents/chains.py b/openbb_agents/chains.py index ce493fd..d417195 100644 --- a/openbb_agents/chains.py +++ b/openbb_agents/chains.py @@ -1,311 +1,303 @@ import logging +from datetime import datetime 
+from typing import Any, Callable -from langchain import hub -from langchain.agents import AgentExecutor -from langchain.agents.format_scratchpad import ( - format_log_to_str, - format_to_openai_function_messages, -) -from langchain.agents.output_parsers import ( - JSONAgentOutputParser, - OpenAIFunctionsAgentOutputParser, -) -from langchain.output_parsers import PydanticOutputParser -from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain.tools import StructuredTool -from langchain.tools.render import ( - format_tool_to_openai_function, - render_text_description_and_args, -) from langchain.vectorstores import VectorStore -from langchain_openai import ChatOpenAI +from magentic import ( + AssistantMessage, + AsyncParallelFunctionCall, + FunctionCall, + FunctionResultMessage, + OpenaiChatModel, + ParallelFunctionCall, + SystemMessage, + UserMessage, + chatprompt, + prompt, + prompt_chain, +) +from pydantic import ValidationError from openbb_agents.models import ( AnsweredSubQuestion, - SelectedToolsList, SubQuestion, - SubQuestionAgentConfig, - SubQuestionList, ) from openbb_agents.prompts import ( FINAL_RESPONSE_PROMPT_TEMPLATE, + GENERATE_SUBQUESTION_SYSTEM_PROMPT_TEMPLATE, SUBQUESTION_ANSWER_PROMPT, - SUBQUESTION_GENERATOR_PROMPT, - TOOL_SEARCH_PROMPT, + TOOL_SEARCH_PROMPT_TEMPLATE, ) -from openbb_agents.utils import get_dependencies - -from . import VERBOSE logger = logging.getLogger(__name__) -def generate_final_response( - query: str, +def generate_final_answer( + user_query: str, answered_subquestions: list[AnsweredSubQuestion], - verbose=VERBOSE, ) -> str: - """Generate the final response to a query given answer to a list of subquestions.""" - - logger.info( - "Request to generate final response.", - extra={ - "query": query, - "answered_subquestions": [ - { - "subquestion": subq_and_a.subquestion.question, - "answer": subq_and_a.answer, - } - for subq_and_a in answered_subquestions - ], - }, + @prompt( + FINAL_RESPONSE_PROMPT_TEMPLATE, + model=OpenaiChatModel(model="gpt-4o", temperature=0.0), ) + def _final_answer( + user_query: str, answered_subquestions: list[AnsweredSubQuestion] + ) -> str: + ... - system_message = FINAL_RESPONSE_PROMPT_TEMPLATE - prompt = ChatPromptTemplate.from_messages([("system", system_message)]) + return _final_answer( + user_query=user_query, answered_subquestions=answered_subquestions + ) - llm = ChatOpenAI(model="gpt-4", temperature=0.1, verbose=verbose) - chain = ( - { - "input": lambda x: x["input"], - "subquestions": lambda x: _render_subquestions_and_answers( - x["answered_subquestions"] - ), - } - | prompt - | llm +async def agenerate_final_answer( + user_query: str, + answered_subquestions: list[AnsweredSubQuestion], +) -> str: + @prompt( + FINAL_RESPONSE_PROMPT_TEMPLATE, + model=OpenaiChatModel(model="gpt-4o", temperature=0.0), ) + async def _final_answer( + user_query: str, answered_subquestions: list[AnsweredSubQuestion] + ) -> str: + ... 
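+    # The @prompt decorator supplies the implementation: awaiting the stub renders
+    # FINAL_RESPONSE_PROMPT_TEMPLATE with these arguments and returns the model's
+    # reply as a string.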
- result = chain.invoke( - {"input": query, "answered_subquestions": answered_subquestions}, + return await _final_answer( + user_query=user_query, answered_subquestions=answered_subquestions ) - return str(result.content) def generate_subquestion_answer( - subquestion_agent_config: SubQuestionAgentConfig, verbose=VERBOSE + user_query: str, + subquestion: SubQuestion, + dependencies: list[AnsweredSubQuestion], + tools: list[Callable], ) -> AnsweredSubQuestion: - """Generate an answer to a subquestion using tools and dependencies.""" - - logger.info( - "Request to generate answer for subquestion.", - extra={ - "subquestion": subquestion_agent_config.subquestion.question, - "dependencies": [ - { - "subquestion": subq_and_a.subquestion.question, - "answer": subq_and_a.answer, - } - for subq_and_a in subquestion_agent_config.dependencies - ], - "tools": [tool.name for tool in subquestion_agent_config.tools], - }, - ) + current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + messages: list[Any] = [SystemMessage(SUBQUESTION_ANSWER_PROMPT)] - # Format the dependency strings - dependencies_str = "" - for answered_subquestion in subquestion_agent_config.dependencies: - dependencies_str += ( - "subquestion: " + answered_subquestion.subquestion.question + "\n" - ) - dependencies_str += "observations:\n" + answered_subquestion.answer + "\n\n" + answer = None + while not answer: - prompt = SUBQUESTION_ANSWER_PROMPT.format( - query=subquestion_agent_config.query, - subquestion_query=subquestion_agent_config.subquestion.question, - dependencies=dependencies_str, - ) - - try: - result = make_react_agent( - tools=subquestion_agent_config.tools, verbose=verbose - ).invoke({"input": prompt}) - output = str(result["output"]) - except Exception as err: # Terrible practice, but it'll do for now. - print(err) - # We'll include the error message in the future - output = "I was unable to answer the subquestion using the available tools." - - answered_subquestion = AnsweredSubQuestion( - subquestion=subquestion_agent_config.subquestion, answer=output - ) - - logger.info( - "Answered subquestion.", - extra={ - "subquestion": answered_subquestion.subquestion.question, - "answer": answered_subquestion.answer, - }, - ) - - return answered_subquestion - - -def select_tools( - vector_index: VectorStore, - tools: list[StructuredTool], - subquestion: SubQuestion, - answered_subquestions: list[AnsweredSubQuestion], - verbose: bool = VERBOSE, -) -> SelectedToolsList: - """Use an agent to select tools given a subquestion and its dependencies.""" - - # Here we define the tool the agent will use to search the tool index. - def search_tools(query: str) -> list[tuple[str, str]]: - """Search a vector index for useful funancial tools.""" - returned_tools = _get_tools( - vector_index=vector_index, - tools=tools, - query=query, + @chatprompt( + *messages, + model=OpenaiChatModel(model="gpt-4o", temperature=0.0), + functions=tools, + ) + def _answer_subquestion( + user_query: str, + subquestion: str, + dependencies: list[AnsweredSubQuestion], + current_datetime: str, + ) -> str | ParallelFunctionCall: + ... 
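+        # The @chatprompt stub is rebuilt on every iteration so that it sees the
+        # accumulated messages; the model replies either with a final string answer
+        # or with a batch of parallel tool calls.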
+ + response = _answer_subquestion( # type: ignore + user_query=user_query, + subquestion=subquestion.question, + dependencies=dependencies, + current_datetime=current_datetime, ) - return [(tool.name, tool.description) for tool in returned_tools] - - dependencies = get_dependencies( - answered_subquestions=answered_subquestions, subquestion=subquestion - ) - dependencies_str = _render_subquestions_and_answers(dependencies) - - selected_tools_list_parser = PydanticOutputParser(pydantic_object=SelectedToolsList) - - prompt = ChatPromptTemplate.from_messages( - [ - ("system", TOOL_SEARCH_PROMPT), - ("human", "## User Question:\n{input}"), - MessagesPlaceholder(variable_name="agent_scratchpad"), - ] - ) - prompt = prompt.partial( - format_instructions=selected_tools_list_parser.get_format_instructions(), - subquestions=dependencies_str, - ) + if isinstance(response, ParallelFunctionCall): + for function_call in response._function_calls: + logger.info( + "Function call: %s(%s)", + function_call.function.__name__, + function_call.arguments, + ) + messages += _handle_function_call(function_call=function_call) + elif isinstance(response, str): + answer = response + return AnsweredSubQuestion(subquestion=subquestion, answer=answer) + + +async def agenerate_subquestion_answer( + user_query: str, + subquestion: SubQuestion, + dependencies: list[AnsweredSubQuestion], + tools: list[Callable], +) -> AnsweredSubQuestion: + current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + messages: list[Any] = [SystemMessage(SUBQUESTION_ANSWER_PROMPT)] - search_tool = StructuredTool.from_function(search_tools) - agent = make_openai_agent(prompt=prompt, tools=[search_tool], verbose=verbose) - result = agent.invoke({"input": subquestion.question}) + answer = None + while not answer: - # Parse the output into a pydantic model and return - selected_tools = selected_tools_list_parser.parse(result["output"]) - return selected_tools + @chatprompt( + *messages, + model=OpenaiChatModel(model="gpt-4o", temperature=0.0), + functions=tools, + ) + async def _answer_subquestion( + user_query: str, + subquestion: str, + dependencies: list[AnsweredSubQuestion], + current_datetime: str, + ) -> str | AsyncParallelFunctionCall: + ... + + response = await _answer_subquestion( # type: ignore + user_query=user_query, + subquestion=subquestion.question, + dependencies=dependencies, + current_datetime=current_datetime, + ) + if isinstance(response, AsyncParallelFunctionCall): + async for function_call in response._function_calls: + logger.info( + "Function call: %s(%s)", + function_call.function.__name__, + function_call.arguments, + ) + messages += _handle_function_call(function_call=function_call) + elif isinstance(response, str): + answer = response + return AnsweredSubQuestion(subquestion=subquestion, answer=answer) + + +@chatprompt( + SystemMessage(GENERATE_SUBQUESTION_SYSTEM_PROMPT_TEMPLATE), + UserMessage("# User query\n{user_query}"), + model=OpenaiChatModel(model="gpt-4o", temperature=0.0), +) +def generate_subquestions_from_query(user_query: str) -> list[SubQuestion]: + ... 
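As a minimal usage sketch (assuming the package is installed and an OpenAI API key is configured; the query string is only illustrative), the decorated stub above can be called like an ordinary function and returns parsed SubQuestion objects:

```python
from openbb_agents.chains import generate_subquestions_from_query

# The @chatprompt decorator supplies the LLM call; the result is a list of
# validated SubQuestion models.
subquestions = generate_subquestions_from_query(
    user_query="What is the market cap of TSLA's largest peer?"
)
for subquestion in subquestions:
    print(subquestion.id, subquestion.question, subquestion.depends_on)
```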
-def generate_subquestions(query: str, verbose=VERBOSE) -> SubQuestionList: - logger.info("Request to generate subquestions for query: %s", query) - subquestion_parser = PydanticOutputParser(pydantic_object=SubQuestionList) - system_message = SUBQUESTION_GENERATOR_PROMPT - human_message = """\ - ## User Question - {input} - """ +@chatprompt( + SystemMessage(GENERATE_SUBQUESTION_SYSTEM_PROMPT_TEMPLATE), + UserMessage("# User query\n{user_query}"), + model=OpenaiChatModel(model="gpt-4o", temperature=0.0), +) +async def agenerate_subquestions_from_query(user_query: str) -> list[SubQuestion]: + ... - prompt = ChatPromptTemplate.from_messages( - [ - ("system", system_message), - ("human", human_message), - ] - ) - prompt = prompt.partial( - format_instructions=subquestion_parser.get_format_instructions() - ) - llm = ChatOpenAI(model="gpt-4", temperature=0.0, verbose=verbose) - subquestion_chain = ( - {"input": lambda x: x["input"]} | prompt | llm | subquestion_parser +def search_tools( + subquestion: SubQuestion, + tool_vector_index: VectorStore, + answered_subquestions: list[AnsweredSubQuestion] | None = None, +) -> list[Callable]: + def llm_query_tool_index(query: str) -> str: + """Use natural language to search the tool index for tools.""" + logger.info("Searching tool index for: %s", query) + results = tool_vector_index.similarity_search(query=query, k=4) + return "\n".join([r.page_content for r in results]) + + @prompt_chain( + TOOL_SEARCH_PROMPT_TEMPLATE, + model=OpenaiChatModel(model="gpt-3.5-turbo", temperature=0.2), + functions=[llm_query_tool_index], ) - subquestion_list = subquestion_chain.invoke({"input": query}) - - return subquestion_list - - -def _get_tools( - vector_index: VectorStore, tools: list[StructuredTool], query: str -) -> list[StructuredTool]: - """Retrieve tools from a vector index given a query.""" - retriever = vector_index.as_retriever( - search_type="similarity_score_threshold", - search_kwargs={"score_threshold": 0.65}, + def _search_tools( + subquestion: str, answered_subquestions: list[AnsweredSubQuestion] | None + ) -> list[str]: + ... + + tool_names = _search_tools(subquestion.question, answered_subquestions) + callables = _get_callables_from_tool_search_results( + tool_vector_index=tool_vector_index, tool_names=tool_names ) - docs = retriever.get_relevant_documents(query) - - # This is a fallback mechanism in case the threshold is too high, - # causing too few tools to be returned. In this case, we fall back to - # getting the top k=2 results with higher similarity scores. 
- if len(docs) < 4: - retriever = vector_index.as_retriever(search_kwargs={"k": 2}) - docs = retriever.get_relevant_documents(query) - - tools = [tools[d.metadata["index"]] for d in docs] - return tools + return callables -def _render_subquestions_and_answers( - answered_subquestions: list[AnsweredSubQuestion], -) -> str: - "Combines all subquestions and their answers" - output = "" - for answered_subq in answered_subquestions: - output += "Subquestion: " + answered_subq.subquestion.question + "\n" - output += "Observations: \n" + answered_subq.answer + "\n\n" - - return output - - -def make_openai_agent(prompt, tools, model="gpt-4-1106-preview", verbose=VERBOSE): - """Create a new OpenAI agent from a list of tools.""" - llm = ChatOpenAI(model=model) - llm_with_tools = llm.bind( - functions=[format_tool_to_openai_function(t) for t in tools] +async def asearch_tools( + subquestion: SubQuestion, + tool_vector_index: VectorStore, + answered_subquestions: list[AnsweredSubQuestion] | None = None, +) -> list[Callable]: + def llm_query_tool_index(query: str) -> str: + """Use natural language to search the tool index for tools.""" + logger.info("Searching tool index for: %s", query) + results = tool_vector_index.similarity_search(query=query, k=4) + return "\n".join([r.page_content for r in results]) + + @prompt_chain( + TOOL_SEARCH_PROMPT_TEMPLATE, + model=OpenaiChatModel(model="gpt-3.5-turbo", temperature=0.2), + functions=[llm_query_tool_index], ) - chain = ( - { - "input": lambda x: x["input"], - "agent_scratchpad": lambda x: format_to_openai_function_messages( - x["intermediate_steps"] - ), - } - | prompt - | llm_with_tools - | OpenAIFunctionsAgentOutputParser() + async def _search_tools( + subquestion: str, answered_subquestions: list[AnsweredSubQuestion] | None + ) -> list[str]: + ... 
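+    # prompt_chain resolves function calls automatically: the model may invoke
+    # llm_query_tool_index several times before returning the final list of
+    # tool names.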
+ + tool_names = await _search_tools(subquestion.question, answered_subquestions) + callables = _get_callables_from_tool_search_results( + tool_vector_index=tool_vector_index, tool_names=tool_names ) - - return AgentExecutor(agent=chain, tools=tools, verbose=verbose) + return callables -def make_react_agent( - tools, model="gpt-4-1106-preview", temperature=0.2, verbose=VERBOSE -): - """Create a new ReAct agent from a list of tools.""" +def _get_callables_from_tool_search_results( + tool_vector_index: VectorStore, + tool_names: list[str], +) -> list[Callable]: + callables = [] + for tool_name in tool_names: + for doc in tool_vector_index.docstore._dict.values(): # type: ignore + if doc.metadata["tool_name"] == tool_name: + callables.append(doc.metadata["callable"]) + break + return callables - # This retrieves the ReAct agent chat prompt template available in Langchain Hub - # https://smith.langchain.com/hub/hwchase17/react-json?organizationId=10beea65-e722-5aa1-9f93-034c22e3cd6e - prompt = hub.pull("hwchase17/react-multi-input-json") - # Replace the 'tools' and 'tool_names' content of the prompt with - # information given to the agent Note that tool_names is a field available - # in each tool, so it can be inferred from same argument - prompt = prompt.partial( - tools=render_text_description_and_args(tools), - tool_names=", ".join([t.name for t in tools]), - ) - - llm = ChatOpenAI(model=model, temperature=temperature).bind(stop=["\nObservation"]) - - chain = ( - { - "input": lambda x: x["input"], - "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]), - } - | prompt - | llm - | JSONAgentOutputParser() - ) - agent_executor = AgentExecutor( - agent=chain, - tools=tools, - verbose=verbose, - return_intermediate_steps=False, - handle_parsing_errors=True, +def _handle_function_call(function_call: FunctionCall) -> list[Any]: + try: + result = function_call() + return _build_messages_for_function_call( + function_call=function_call, result=result + ) + except ValidationError as val_err: + return _build_messages_for_validation_error( + function_call=function_call, val_err=val_err + ) + except Exception as err: + return _build_messages_for_generic_error(function_call=function_call, err=err) + + +def _build_messages_for_function_call( + function_call: FunctionCall, + result: Any, +) -> list[Any]: + return [ + AssistantMessage(function_call), + FunctionResultMessage(content=str(result), function_call=function_call), + ] + + +def _build_messages_for_validation_error( + function_call: FunctionCall, + val_err: ValidationError, +) -> list[Any]: + logger.error(f"Input schema validation error: {val_err}") + # Sidestep `magentic`'s input validation, which will still + # occur when we pass `function_call` to `AssistantMessage` + # https://github.com/jackmpcollins/magentic/issues/211 + dummy = lambda *args, **kwargs: ... 
# noqa: E731 + dummy.__name__ = function_call.function.__name__ + new_function_call = FunctionCall( + function=dummy, + **function_call.arguments, ) - return agent_executor + return [ + AssistantMessage(new_function_call), + FunctionResultMessage( + content=str(val_err), + function_call=new_function_call, + ), + ] + + +def _build_messages_for_generic_error( + function_call: FunctionCall, + err: Exception, +) -> list[Any]: + logger.error(f"Error calling function: {err}") + return [ + AssistantMessage(function_call), + FunctionResultMessage(content=str(err), function_call=function_call), + ] diff --git a/openbb_agents/models.py b/openbb_agents/models.py index 447a399..5dd1638 100644 --- a/openbb_agents/models.py +++ b/openbb_agents/models.py @@ -1,29 +1,14 @@ -from langchain.pydantic_v1 import BaseModel, Field -from langchain.tools import StructuredTool +from typing import Any + +from pydantic import BaseModel, Field class SubQuestion(BaseModel): id: int = Field(description="The unique ID of the subquestion.") question: str = Field(description="The subquestion itself.") - depends_on: list[int] = Field( + depends_on: list[int] | None = Field( description="The list of subquestion ids whose answer is required to answer this subquestion.", # noqa: E501 - default=[], - ) - - -class SelectedTool(BaseModel): - name: str = Field(description="The name of the tool.") - - -class SelectedToolsList(BaseModel): - tools: list[SelectedTool] = Field( - description="A list of SelectedTool objects chosen by an agent." - ) - - -class SubQuestionList(BaseModel): - subquestions: list[SubQuestion] = Field( - description="The list of SubQuestion objects." + default=None, ) @@ -34,15 +19,8 @@ class AnsweredSubQuestion(BaseModel): answer: str = Field(description="The answer to the subquestion.") -class SubQuestionAgentConfig(BaseModel): - query: str = Field(description="The top-level query to be answered.") - subquestion: SubQuestion = Field( - description="The specific subquestion to be answered by the agent." - ) - tools: list[StructuredTool] = Field( - description="A list of langchain StructuredTools for the agent to use." - ) - dependencies: list[AnsweredSubQuestion] = Field( - description="A list of previously-answered subquestions required by the agent to answer the question.", # noqa: E501 - default=[], - ) +class OpenBBFunctionDescription(BaseModel): + name: str + input_model: Any + output_model: Any + callable: Any diff --git a/openbb_agents/prompts.py b/openbb_agents/prompts.py index ece2b22..74872da 100644 --- a/openbb_agents/prompts.py +++ b/openbb_agents/prompts.py @@ -1,20 +1,18 @@ FINAL_RESPONSE_PROMPT_TEMPLATE = """\ Given the following high-level question: -{input} +{user_query} And the following subquestions and subsequent observations: -{subquestions} +{answered_subquestions} Answer the high-level question. Give your answer in a bulleted list. """ -TOOL_SEARCH_PROMPT = """\ +TOOL_SEARCH_PROMPT_TEMPLATE = """\ You are a world-class state-of-the-art search agent. -You are excellent at your job. - Your purpose is to search for tools that allow you to answer a user's subquestion. The subquestion could be a part of a chain of other subquestions. @@ -26,20 +24,21 @@ ... repeat as many times as necessary until you reach a maximum of 4 tools 4. Return the list of tools using the output schema. -YOU ARE ALLOWED TO DO MULTIPLE FUNCTION CALLS! DO NOT RELY ON A SINGLE CALL ONLY. 
- You can search for tools using the available tool, which uses your inputs to search a vector databse that relies on similarity search. These are the guidelines to consider when completing your task: * Immediately return no tools if you do not require any to answer the query. -* Don't use the stock ticker or symbol in the query -* Use keyword searches +* Never use the stock ticker or symbol or quantity in the query +* Always try use the category in the query (eg. crypto, stock, market, etc.) +* Only use keyword searches * Make multiple searches with different terms * You can return up to a maximum of 4 tools * Pay close attention to the data that available for each tool, and if it can answer the user's question * Return 0 tools if tools are NOT required to answer the user's question given the information contained in the context. +YOU ARE ALLOWED TO MAKE MULTIPLE QUERIES IF YOUR FIRST RESULT DOES NOT YIELD THE APPROPRIATE TOOL. + ## Example queries Below are some bad examples (to avoid) and good examples (to follow): @@ -56,55 +55,46 @@ Good: "market capitilization" Bad: "technology company peer lookup" Good: "market peers" - -## Output format -{format_instructions} +Bad: "net profit TSLA" +Good: "net profit" +Bad: "current price BTC" +Good: "price crypto" ## Example response ```json -{{"selected_tools": [ - {{ - "name": "/equity/price/historical", - }}, - {{ - "name": "/equity/fundamentals/overview", - }}, - {{ - "name": "/equity/fundamentals/ratios", - }}, -] -}} +[".equity.price.historical", ".equity.fundamentals.overview", ".equity.fundamentals.ratios"] ``` ## Previously-answered subquestions -{subquestions} +{answered_subquestions} REMEMBER YOU ARE ONLY TRYING TO FIND TOOLS THAT ANSWER THE USER'S SPECIFIC SUBQUESTION. THE PREVIOUS SUBQUESTIONS AND ANSWERS ARE PROVIDED ONLY FOR CONTEXT. YOU MAY ONLY RESPOND USING THE OUTPUT SCHEMA. + +## Subquestion +{subquestion} """ # noqa: E501 -SUBQUESTION_GENERATOR_PROMPT = """\ +GENERATE_SUBQUESTION_SYSTEM_PROMPT_TEMPLATE = """\ You are a world-class state-of-the-art agent called OpenBB Agent. Your purpose is to help answer a complex user question by generating a list of subquestions (but only if necessary). You must also specify the dependencies between subquestions, since sometimes one subquestion will require the outcome of another in order to fully answer. -These are the guidelines you consider when completing your task: +## Guidelines * Don't try to be too clever * Assume Subquestions are answerable by a downstream agent using tools to lookup the information. -* You can generate a minimum of 1 subquestion. +* You must generate at least 1 subquestion. * Generate only the subquestions required to answer the user's question * Generate as few subquestions as possible required to answer the user's question * A subquestion may not depend on a subquestion that proceeds it (i.e. comes after it.) +* Assume tools can be used to look-up the answer to the subquestions (eg. for marketcap, just create a subquestion asking for the marketcap rather than for the components to calculate it.) -## Output format -{format_instructions} - -### Example responses +### Example output ```json {{"subquestions": [ {{ @@ -137,20 +127,18 @@ """ # noqa: E501 SUBQUESTION_ANSWER_PROMPT = """\ -Given the following high-level question: {query} -Answer the following subquestion: {subquestion_query} - -IMPORTANT: Pretend you do not know up to which date your training data goes. 
If -a user asks for the current or latest piece of information, look-up the most -recent data possible, instead of using your end training date. +The current datetime is: {current_datetime} +IMPORTANT: If a user asks for the current or latest piece of information, +look-up the most recent data possible, instead of using your internal knowledge. Give your answer in a bullet-point list. Explain your reasoning, and make specific reference to the retrieved data. Provide the relevant retrieved data as part of your answer. Deliberately prefer information retreived from the tools, rather than your internal knowledge. Retrieve *only the data necessary* using tools to answer the question. +Remember to mention any related datetime-related information in your answer (eg. if there is a date assosciated with the retreived data) -Remember to use the tools provided to you to answer the question, and STICK TO THE INPUT SCHEMA. +Remember to use the tools provided to you to answer the question. Example output format: ``` @@ -164,16 +152,25 @@ data) if your initial attempt at calling the tool doesn't return the information you require. +If you receive the data you need, NEVER call other tools unnecessarily. + Important: when calling the function again, it is important to use different input arguments. -If the tools responds with an error or empty response, attempt calling the tool again using -different inputs. Don't give up after the first error. +If the tools responds with an error or empty response, pay attention to the error message +and attempt to call the tool again with corrections. If necessary, make use of the following subquestions and their answers to answer your subquestion: {dependencies} -Return only your answer as a bulleted list as a single string. Don't respond with JSON or any other kind of data structure. +# Tool Instructions +- Always specify the required symbol(s) +- Always specify all the necessary kwargs. +- Pay attention to default values and literal values. +- Always specify arguments in the correct order. +- Never exclude required arguments. + +Considering this high-level question purely as context: {user_query} -TRY DIFFERENT INPUTS IF THE TOOL RETURNS AN EMPTY RESPONSE. +Answer ONLY the following subquestion: {subquestion} """ # noqa: E501 diff --git a/openbb_agents/testing.py b/openbb_agents/testing.py new file mode 100644 index 0000000..40df872 --- /dev/null +++ b/openbb_agents/testing.py @@ -0,0 +1,41 @@ +from magentic import OpenaiChatModel, prompt +from pydantic import BaseModel, Field + + +class AssertResult(BaseModel): + assessment: str = Field( + description="Your assessment of whether the assertion is true or false." + ) + result: bool = Field(description="The final assertion result.") + + +def with_llm(model_output, assertion) -> bool: + """Use an LLM to assert a result. + + Works best for short, simple inputs and asserts. + + This is useful for unstructured outputs that cannot be easily or + deterministically parsed. Just keep in mind, it remains an LLM evaluator + under-the-hood, so it's not as fast as a direct assertion, costs money to + use, and may be less accurate and reliable (especially for longer inputs or + complicated asserts). + + Examples + -------- + >>> assert with_llm(model_output="I could not retrieve the stock price for apple", assertion="the stock price for apple was retrieved successfully") + AssertionError: The stock price for Apple was not retrieved successfully. 
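+
+    The raised assertion message includes the model's assessment, which makes
+    failures easier to debug.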
+ """ # noqa: E501 + + @prompt( + "Given the following model output: {model_output}, determine if the following assertion is true: {assertion}", # noqa: E501 + model=OpenaiChatModel( + model="gpt-3.5-turbo", + temperature=0.0, + ), + ) + def _llm_assert(model_output: str, assertion: str) -> AssertResult: + ... + + result = _llm_assert(model_output, assertion) + assert result.result, f"Assertion '{assertion}' for output '{model_output}' failed: {result.assessment}" # noqa: E501 + return result.result diff --git a/openbb_agents/tools.py b/openbb_agents/tools.py index 391361d..832b549 100644 --- a/openbb_agents/tools.py +++ b/openbb_agents/tools.py @@ -1,12 +1,16 @@ """Load OpenBB functions at OpenAI tools for function calling in Langchain""" +import logging from typing import Any from langchain.schema import Document -from langchain.tools import StructuredTool -from langchain_community.vectorstores import FAISS, VectorStore +from langchain_community.vectorstores.faiss import FAISS +from langchain_core.vectorstores import VectorStore from langchain_openai import OpenAIEmbeddings from openbb import obb -from pydantic import BaseModel + +from .models import OpenBBFunctionDescription + +logger = logging.getLogger(__name__) def enable_openbb_llm_mode(): @@ -24,43 +28,23 @@ def enable_openbb_llm_mode(): enable_openbb_llm_mode() -def create_tool_index(tools: list[StructuredTool]) -> VectorStore: - """Create a tool index of LangChain StructuredTools.""" - docs = [ - Document(page_content=t.description, metadata={"index": i}) - for i, t in enumerate(tools) - ] - - vector_store = FAISS.from_documents(docs, OpenAIEmbeddings()) - return vector_store - - -def create_document(dict): - ... - - -class OpenBBFunctionDescription(BaseModel): - name: str - input: Any - output: Any - callable: Any - - -def get_openbb_coverage_providers() -> dict: +def _get_openbb_coverage_providers() -> dict: return obb.coverage.providers # type: ignore -def get_openbb_user_credentials() -> dict: +def _get_openbb_user_credentials() -> dict: return obb.user.credentials.model_dump() # type: ignore -def get_openbb_coverage_command_schemas() -> dict: +def _get_openbb_coverage_command_schemas() -> dict: return obb.coverage.command_schemas() # type: ignore def get_valid_list_of_providers() -> list[str]: - credentials = get_openbb_user_credentials() - valid_providers = [] + credentials = _get_openbb_user_credentials() + + # By default we include yfinance, since it doesn't need a key + valid_providers = ["yfinance"] for name, value in credentials.items(): if value is not None: valid_providers.append(name.split("_api_key")[0].split("_token")[0]) @@ -71,27 +55,76 @@ def get_valid_openbb_function_names() -> list[str]: valid_providers = get_valid_list_of_providers() valid_function_names = set() for provider in valid_providers: - valid_function_names |= set(get_openbb_coverage_providers()[provider]) + try: + valid_function_names |= set(_get_openbb_coverage_providers()[provider]) + except KeyError: + pass return sorted(list(valid_function_names)) def get_valid_openbb_function_descriptions() -> list[OpenBBFunctionDescription]: - command_schemas = get_openbb_coverage_command_schemas() obb_function_descriptions = [] for obb_function_name in get_valid_openbb_function_names(): - dict_ = command_schemas[obb_function_name] obb_function_descriptions.append( - OpenBBFunctionDescription( - name=obb_function_name, - input=dict_["input"], - output=dict_["output"], - callable=dict_["callable"], - ) + 
map_name_to_openbb_function_description(obb_function_name) ) return obb_function_descriptions +def map_name_to_openbb_function_description( + obb_function_name: str, +) -> OpenBBFunctionDescription: + command_schemas = _get_openbb_coverage_command_schemas() + dict_ = command_schemas[obb_function_name] + return OpenBBFunctionDescription( + name=obb_function_name, + input_model=dict_["input"], + output_model=dict_["output"], + callable=dict_["callable"], + ) + + +def _get_flat_properties_from_pydantic_model_as_str(model: Any) -> str: + output_str = "" + schema_properties = model.schema()["properties"] + for name, props in schema_properties.items(): + output_str += f"{name}: {props['description']}\n" + return output_str + + def make_vector_index_description( openbb_function_description: OpenBBFunctionDescription, ) -> str: - ... + output_str = "" + output_str += openbb_function_description.name + output_str += openbb_function_description.callable.__doc__ + output_str += "\nOutputs:\n" + output_str += _get_flat_properties_from_pydantic_model_as_str( + openbb_function_description.output_model + ) + return output_str + + +def build_vector_index_from_openbb_function_descriptions( + openbb_function_descriptions: list[OpenBBFunctionDescription], +) -> VectorStore: + documents = [] + for function_description in openbb_function_descriptions: + documents.append( + Document( + page_content=make_vector_index_description(function_description), + metadata={ + "callable": function_description.callable, + "tool_name": function_description.name, + }, + ) + ) + vector_store = FAISS.from_documents(documents, embedding=OpenAIEmbeddings()) + return vector_store + + +def build_openbb_tool_vector_index() -> VectorStore: + logger.info("Building OpenBB tool vector index...") + return build_vector_index_from_openbb_function_descriptions( + get_valid_openbb_function_descriptions() + ) diff --git a/openbb_agents/utils.py b/openbb_agents/utils.py index b3bc2a9..3781475 100644 --- a/openbb_agents/utils.py +++ b/openbb_agents/utils.py @@ -13,6 +13,6 @@ def get_dependencies( dependency_subquestions = [ answered_subq for answered_subq in answered_subquestions - if answered_subq.subquestion.id in subquestion.depends_on + if answered_subq.subquestion.id in (subquestion.depends_on or []) ] return dependency_subquestions diff --git a/poetry.lock b/poetry.lock index f7173f0..65d93f2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -120,6 +120,30 @@ files = [ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, ] +[[package]] +name = "anthropic" +version = "0.25.8" +description = "The official Python library for the anthropic API" +optional = false +python-versions = ">=3.7" +files = [ + {file = "anthropic-0.25.8-py3-none-any.whl", hash = "sha256:c7a0091916eb22a5e0012b725f5492779eedfcad2da8dc906082e1db7596a65c"}, + {file = "anthropic-0.25.8.tar.gz", hash = "sha256:93f6063e96d5dbeaa172edc177762f630e55b2f81595cedb760278b95a2dd03e"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tokenizers = ">=0.13.0" +typing-extensions = ">=4.7,<5" + +[package.extras] +bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] +vertex = ["google-auth (>=2,<3)"] + [[package]] name = "anyio" version = "3.7.1" @@ -701,6 +725,20 @@ six = ">=1.9.0" gmpy = ["gmpy"] gmpy2 = ["gmpy2"] +[[package]] +name = "execnet" +version = "2.1.1" +description = "execnet: rapid multi-Python deployment" +optional = 
false +python-versions = ">=3.8" +files = [ + {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, + {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, +] + +[package.extras] +testing = ["hatch", "pre-commit", "pytest", "tox"] + [[package]] name = "executing" version = "2.0.1" @@ -1788,13 +1826,13 @@ requests = ">=2,<3" [[package]] name = "litellm" -version = "1.36.1" +version = "1.37.0" description = "Library to easily interface with LLM API providers" optional = false python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" files = [ - {file = "litellm-1.36.1-py3-none-any.whl", hash = "sha256:3a41db4c139989ef24171a69eba72b814b1ac60591af2da7ed689e57bd62b015"}, - {file = "litellm-1.36.1.tar.gz", hash = "sha256:709cb3a9f31186a093c191c01e0df162e5b08c8cc257a2b746668f29fa99a628"}, + {file = "litellm-1.37.0-py3-none-any.whl", hash = "sha256:74f7ccf8c10491aa393cd1a77aff4db9fd61bbffe52a7326c96811db823e1005"}, + {file = "litellm-1.37.0.tar.gz", hash = "sha256:441fbdc119f3336f1693d2022c1887d6d98453dbab32920318c823ad8ddf0d66"}, ] [package.dependencies] @@ -1917,6 +1955,7 @@ files = [ ] [package.dependencies] +anthropic = {version = ">=0.23.0", optional = true, markers = "extra == \"anthropic\""} filetype = "*" litellm = {version = ">=1.36.0", optional = true, markers = "extra == \"litellm\""} openai = ">=1.24.0" @@ -2255,13 +2294,13 @@ test = ["pep440", "pre-commit", "pytest", "testpath"] [[package]] name = "nest-asyncio" -version = "1.5.9" +version = "1.6.0" description = "Patch asyncio to allow nested event loops" optional = false python-versions = ">=3.5" files = [ - {file = "nest_asyncio-1.5.9-py3-none-any.whl", hash = "sha256:61ec07ef052e72e3de22045b81b2cc7d71fceb04c568ba0b2e4b2f9f5231bec2"}, - {file = "nest_asyncio-1.5.9.tar.gz", hash = "sha256:d1e1144e9c6e3e6392e0fcf5211cb1c8374b5648a98f1ebe48e5336006b41907"}, + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, ] [[package]] @@ -3560,6 +3599,24 @@ pluggy = ">=0.12,<2.0" [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest-asyncio" +version = "0.23.6" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-asyncio-0.23.6.tar.gz", hash = "sha256:ffe523a89c1c222598c76856e76852b787504ddb72dd5d9b6617ffa8aa2cde5f"}, + {file = "pytest_asyncio-0.23.6-py3-none-any.whl", hash = "sha256:68516fdd1018ac57b846c9846b954f0393b26f094764a28c955eabb0536a4e8a"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + [[package]] name = "pytest-freezegun" version = "0.4.2" @@ -3575,6 +3632,26 @@ files = [ freezegun = ">0.3" pytest = ">=3.0.0" +[[package]] +name = "pytest-xdist" +version = "3.6.1" +description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = 
"sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, + {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, +] + +[package.dependencies] +execnet = ">=2.1" +pytest = ">=7.0.0" + +[package.extras] +psutil = ["psutil (>=3.0)"] +setproctitle = ["setproctitle"] +testing = ["filelock"] + [[package]] name = "python-dateutil" version = "2.8.2" @@ -5595,4 +5672,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.11,<3.12" -content-hash = "8128cbe25967bcd6fafb8f10d8425a6a59c13d13559e9031991cb9dd6afc97bf" +content-hash = "6219a59550206675edb997d68463df8b765f9b18720f68559277a6c1eae6feeb" diff --git a/pyproject.toml b/pyproject.toml index 47fd6ca..2efb559 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] name = "openbb-agents" -version = "0.0.1" -description = "LLMs X OpenBB" +version = "0.0.2" +description = "LLMs X OpenBB Platform" authors = ["Michael Struwig "] readme = "README.md" @@ -11,12 +11,15 @@ jupyterlab = "^4.0.9" sentence-transformers = "^2.2.2" tiktoken = "^0.5.1" faiss-cpu = "^1.7.4" -magentic = {extras = ["litellm"], version = "^0.23.0"} +magentic = {extras = ["anthropic", "litellm"], version = "^0.23.0"} pydantic = "2.7.1" openbb = "4.1.7" langchain = "^0.1.17" langchain-community = "^0.0.37" langchain-openai = "^0.1.6" +openbb-yfinance = "^1.1.5" +pytest-asyncio = "^0.23.6" +pytest-xdist = "^3.6.1" [tool.poetry.group.dev.dependencies] pre-commit = "^3.5.0" @@ -31,6 +34,9 @@ select = [ "I", # isort ] +[tool.pytest.ini_options] +addopts = "--pdbcls=IPython.terminal.debugger:TerminalPdb" # use ipdb instead of pdb + [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" diff --git a/tests/conftest.py b/tests/conftest.py index c5b8445..24772a8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,12 @@ +from typing import Any, Callable from unittest.mock import patch import pytest +from langchain_core.vectorstores import VectorStore from openbb import obb -from pydantic import BaseModel +from pydantic import BaseModel, Field + +from openbb_agents.tools import build_openbb_tool_vector_index @pytest.fixture @@ -28,35 +32,61 @@ def mock_obb_coverage_providers(mock_obb_user_credentials): "intrinio": ["function_a", "function_c"], "benzinga": ["function_d"], } - with patch("openbb_agents.tools.get_openbb_coverage_providers") as mock: + with patch("openbb_agents.tools._get_openbb_coverage_providers") as mock: mock.return_value = mock_provider_coverage_dict yield mock @pytest.fixture -def mock_obb_coverage_command_schema(mock_obb_coverage_providers): +def mock_openbb_function_output_model() -> Any: + class TestOutputModel(BaseModel): + first_property: str = Field(description="The first property") + second_property: int = Field(description="The second property") + third_property: list[float] = Field(description="The third property") + + return TestOutputModel + + +@pytest.fixture +def mock_openbb_function_callable() -> Callable: + def test_callable(): + """A callable test function that does nothing, but has a docstring.""" + ... 
+ + return test_callable + + +@pytest.fixture +def mock_obb_coverage_command_schema( + mock_obb_coverage_providers, +): mock_coverage_command_schema_dict = { "function_a": { "input": "mock input model for a", "output": "mock output model for a", - "callable": "mock callable for a", + "callable": "", }, "function_b": { "input": "mock input model for b", "output": "mock output model for b", - "callable": "mock callable for b", + "callable": "", }, "function_c": { "input": "mock input model for c", "output": "mock output model for c", - "callable": "mock callable for c", + "callable": "", }, "function_d": { "input": "mock input model for d", "output": "mock output model for d", - "callable": "mock callable for d", + "callable": "", }, } - with patch("openbb_agents.tools.get_openbb_coverage_command_schemas") as mock: + with patch("openbb_agents.tools._get_openbb_coverage_command_schemas") as mock: mock.return_value = mock_coverage_command_schema_dict yield mock + + +@pytest.fixture +def openbb_tool_vector_index() -> VectorStore: + return build_openbb_tool_vector_index() diff --git a/tests/test_agent.py b/tests/test_agent.py new file mode 100644 index 0000000..33a063b --- /dev/null +++ b/tests/test_agent.py @@ -0,0 +1,29 @@ +import pytest + +from openbb_agents.agent import aopenbb_agent, openbb_agent +from openbb_agents.testing import with_llm + + +def test_openbb_agent(openbb_tool_vector_index): + test_query = "What is the stock price of AAPL and MSFT?" + actual_result = openbb_agent( + query=test_query, + openbb_tools=[".equity.price.quote", ".equity.fundamental.metrics"], + ) + assert isinstance(actual_result, str) + assert with_llm(actual_result, "MSFT's stock price is in the model output.") + assert with_llm(actual_result, "AAPL's stock price is in the model output.") + assert with_llm(actual_result, "One of the stock prices is higher than the other.") + + +@pytest.mark.asyncio +async def test_aopenbb_agent(openbb_tool_vector_index): + test_query = "What is the stock price of AAPL and MSFT? Which is higher?" + actual_result = await aopenbb_agent( + query=test_query, + openbb_tools=[".equity.price.quote", ".equity.fundamental.metrics"], + ) + assert isinstance(actual_result, str) + assert with_llm(actual_result, "MSFT's stock price is in the model output.") + assert with_llm(actual_result, "AAPL's stock price is in the model output.") + assert with_llm(actual_result, "One of the stock prices is higher than the other.") diff --git a/tests/test_chains.py b/tests/test_chains.py new file mode 100644 index 0000000..d75d867 --- /dev/null +++ b/tests/test_chains.py @@ -0,0 +1,285 @@ +from typing import Literal + +import pytest +from openbb import obb +from pydantic import BaseModel + +from openbb_agents.chains import ( + agenerate_final_answer, + agenerate_subquestion_answer, + agenerate_subquestions_from_query, + asearch_tools, + generate_final_answer, + generate_subquestion_answer, + generate_subquestions_from_query, + search_tools, +) +from openbb_agents.models import AnsweredSubQuestion, SubQuestion +from openbb_agents.testing import with_llm + + +def test_generate_subquestions_from_query(): + test_query = "Calculate the P/E ratio of AAPL." + actual_result = generate_subquestions_from_query(user_query=test_query) + assert isinstance(actual_result, list) + assert len(actual_result) > 0 + assert isinstance(actual_result[0], SubQuestion) + + +@pytest.mark.asyncio +async def test_agenerate_subquestions_from_query(): + test_query = "Calculate the P/E ratio of AAPL." 
+ actual_result = await agenerate_subquestions_from_query(user_query=test_query) + assert isinstance(actual_result, list) + assert len(actual_result) > 0 + assert isinstance(actual_result[0], SubQuestion) + + +def test_search_tools_no_dependencies(openbb_tool_vector_index): + test_subquestion = SubQuestion(id=1, question="What is the stock price of AAPL?") + actual_result = search_tools( + subquestion=test_subquestion, + answered_subquestions=None, + tool_vector_index=openbb_tool_vector_index, + ) + + assert len(actual_result) > 0 + assert actual_result[0].__name__ == "quote" + assert callable(actual_result[0]) + + +@pytest.mark.asyncio +async def test_asearch_tools_no_dependencies(openbb_tool_vector_index): + test_subquestion = SubQuestion(id=1, question="What is the stock price of AAPL?") + actual_result = await asearch_tools( + subquestion=test_subquestion, + answered_subquestions=None, + tool_vector_index=openbb_tool_vector_index, + ) + + assert len(actual_result) > 0 + assert actual_result[0].__name__ == "quote" + assert callable(actual_result[0]) + + +def test_generate_subquestion_answer_no_dependencies(): + test_user_query = "What is the current stock price of AAPL?" + test_subquestion = SubQuestion( + id=1, question="What is the stock price of AAPL? Use yfinance as the provider." + ) + test_tool = obb.equity.price.quote # type: ignore + actual_result: AnsweredSubQuestion = generate_subquestion_answer( + user_query=test_user_query, + subquestion=test_subquestion, + dependencies=[], + tools=[test_tool], + ) + assert with_llm( + actual_result.answer, "the stock price for apple was retrieved successfully" + ) + + +@pytest.mark.asyncio +async def test_agenerate_subquestion_answer_no_dependencies(): + test_user_query = "What is the current stock price of AAPL?" + test_subquestion = SubQuestion( + id=1, question="What is the stock price of AAPL? Use yfinance as the provider." + ) + test_tool = obb.equity.price.quote # type: ignore + actual_result: AnsweredSubQuestion = await agenerate_subquestion_answer( + user_query=test_user_query, + subquestion=test_subquestion, + dependencies=[], + tools=[test_tool], + ) + assert with_llm( + actual_result.answer, "the stock price for apple was retrieved successfully" + ) + + +def test_generate_subquestion_answer_with_dependencies(): + test_user_query = "What is the current stock price of MSFT's biggest competitor?" + test_subquestion = SubQuestion( + id=1, + question="What is the stock price of MSFT's biggest competitor? Use yfinance as the provider.", # noqa: E501 + depends_on=[2], + ) + test_dependencies = [ + AnsweredSubQuestion( + subquestion=SubQuestion( + id=2, question="What is the current biggest competitor to MSFT?" + ), + answer="The current biggest competitor to MSFT is AAPL.", + ) + ] + test_tool = obb.equity.price.quote # type: ignore + actual_result: AnsweredSubQuestion = generate_subquestion_answer( + user_query=test_user_query, + subquestion=test_subquestion, + dependencies=test_dependencies, + tools=[test_tool], + ) + assert with_llm( + actual_result.answer, "the stock price for apple was retrieved successfully" + ) + + +@pytest.mark.asyncio +async def test_agenerate_subquestion_answer_with_dependencies(): + test_user_query = "What is the current stock price of MSFT's biggest competitor?" + test_subquestion = SubQuestion( + id=1, + question="What is the stock price of MSFT's biggest competitor? 
Use yfinance as the provider.", # noqa: E501 + depends_on=[2], + ) + test_dependencies = [ + AnsweredSubQuestion( + subquestion=SubQuestion( + id=2, question="What is the current biggest competitor to MSFT?" + ), + answer="The current biggest competitor to MSFT is AAPL.", + ) + ] + test_tool = obb.equity.price.quote # type: ignore + actual_result: AnsweredSubQuestion = await agenerate_subquestion_answer( + user_query=test_user_query, + subquestion=test_subquestion, + dependencies=test_dependencies, + tools=[test_tool], + ) + assert with_llm( + actual_result.answer, "the stock price for apple was retrieved successfully" + ) + + +def test_generate_subquestion_answer_with_generic_error_in_function_call(): + test_user_query = "What is the current stock price of AAPL?" + test_subquestion = SubQuestion(id=1, question="What is the stock price of AAPL?") + + def _get_stock_price(symbol: str) -> str: + raise ValueError("The backend is offline.") + + actual_result: AnsweredSubQuestion = generate_subquestion_answer( + user_query=test_user_query, + subquestion=test_subquestion, + dependencies=[], + tools=[_get_stock_price], + ) + assert isinstance(actual_result, AnsweredSubQuestion) + assert with_llm( + actual_result.answer, + "The backend is offline, and the answer could not be retrieved.", + ) + + +@pytest.mark.asyncio +async def test_agenerate_subquestion_answer_with_generic_error_in_function_call(): + test_user_query = "What is the current stock price of AAPL?" + test_subquestion = SubQuestion(id=1, question="What is the stock price of AAPL?") + + def _get_stock_price(symbol: str) -> str: + raise ValueError("The backend is currently offline.") + + actual_result: AnsweredSubQuestion = await agenerate_subquestion_answer( + user_query=test_user_query, + subquestion=test_subquestion, + dependencies=[], + tools=[_get_stock_price], + ) + assert isinstance(actual_result, AnsweredSubQuestion) + assert with_llm( + actual_result.answer, + "The backend is offline, and the answer could not be retrieved.", + ) + + +def test_generate_subquestion_answer_self_heals_with_input_validation_error_in_function_call(): # noqa: E501 + test_user_query = "What is the current stock price of AAPL? Preferably in EUR." + test_subquestion = SubQuestion(id=1, question="What is the stock price of AAPL?") + + def _get_stock_price(symbol: str, currency: Literal["USD", "EUR"]) -> str: + class StockPricePayload(BaseModel): + symbol: str + currency: Literal["USD"] # Only USD is allowed, but we ask for EUR. + + _ = StockPricePayload(symbol=symbol, currency=currency) # type: ignore + return "The stock price is USD 95." + + actual_result: AnsweredSubQuestion = generate_subquestion_answer( + user_query=test_user_query, + subquestion=test_subquestion, + dependencies=[], + tools=[_get_stock_price], + ) + assert isinstance(actual_result, AnsweredSubQuestion) + assert with_llm( + actual_result.answer, "The stock price could not be retrieved in EUR." + ) + assert with_llm(actual_result.answer, "The stock price is 95 USD.") + + +@pytest.mark.asyncio +async def test_agenerate_subquestion_answer_self_heals_with_input_validation_error_in_function_call(): # noqa: E501 + test_user_query = "What is the current stock price of AAPL? Preferably in EUR." + test_subquestion = SubQuestion(id=1, question="What is the stock price of AAPL?") + + def _get_stock_price(symbol: str, currency: Literal["USD", "EUR"]) -> str: + class StockPricePayload(BaseModel): + symbol: str + currency: Literal["USD"] # Only USD is allowed, but we ask for EUR. 
+ + _ = StockPricePayload(symbol=symbol, currency=currency) # type: ignore + return "The stock price is USD 95." + + actual_result: AnsweredSubQuestion = generate_subquestion_answer( + user_query=test_user_query, + subquestion=test_subquestion, + dependencies=[], + tools=[_get_stock_price], + ) + assert isinstance(actual_result, AnsweredSubQuestion) + assert with_llm( + actual_result.answer, "The stock price could not be retrieved in EUR." + ) + assert with_llm(actual_result.answer, "The stock price is 95 USD.") + + +def test_generate_final_answer(): + test_user_query = "Who has the highest stock price? AMZN or TSLA?" + test_answered_subquestions = [ + AnsweredSubQuestion( + subquestion=SubQuestion(id=1, question="What is the stock price of AMZN?"), + answer="The stock price of AMZN is $100.", + ), + AnsweredSubQuestion( + subquestion=SubQuestion(id=2, question="What is the stock price of TSLA?"), + answer="The stock price of TSLA is $200.", + ), + ] + + actual_result = generate_final_answer( + user_query=test_user_query, + answered_subquestions=test_answered_subquestions, + ) + assert with_llm(actual_result, "The answer says TSLA has the highest stock price.") + + +@pytest.mark.asyncio +async def test_agenerate_final_answer(): + test_user_query = "Who has the highest stock price? AMZN or TSLA?" + test_answered_subquestions = [ + AnsweredSubQuestion( + subquestion=SubQuestion(id=1, question="What is the stock price of AMZN?"), + answer="The stock price of AMZN is $100.", + ), + AnsweredSubQuestion( + subquestion=SubQuestion(id=2, question="What is the stock price of TSLA?"), + answer="The stock price of TSLA is $200.", + ), + ] + + actual_result = await agenerate_final_answer( + user_query=test_user_query, + answered_subquestions=test_answered_subquestions, + ) + assert with_llm(actual_result, "The answer says TSLA has the highest stock price.") diff --git a/tests/test_tools.py b/tests/test_tools.py index ac0a55e..2ba8b8e 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -1,15 +1,19 @@ +from pydantic import BaseModel, Field + +from openbb_agents.models import OpenBBFunctionDescription from openbb_agents.tools import ( - OpenBBFunctionDescription, + _get_flat_properties_from_pydantic_model_as_str, + build_vector_index_from_openbb_function_descriptions, get_valid_list_of_providers, get_valid_openbb_function_descriptions, get_valid_openbb_function_names, + make_vector_index_description, ) def test_get_valid_list_of_providers(mock_obb_user_credentials): actual_result = get_valid_list_of_providers() - expected_result = ["fmp", "intrinio"] - + expected_result = ["yfinance", "fmp", "intrinio"] assert actual_result == expected_result @@ -24,21 +28,82 @@ def test_get_valid_openbb_function_descriptions(mock_obb_coverage_command_schema expected_result = [ OpenBBFunctionDescription( name="function_a", - input="mock input model for a", - output="mock output model for a", - callable="mock callable for a", + input_model="mock input model for a", + output_model="mock output model for a", + callable="", ), OpenBBFunctionDescription( name="function_b", - input="mock input model for b", - output="mock output model for b", - callable="mock callable for b", + input_model="mock input model for b", + output_model="mock output model for b", + callable="", ), OpenBBFunctionDescription( name="function_c", - input="mock input model for c", - output="mock output model for c", - callable="mock callable for c", + input_model="mock input model for c", + output_model="mock output model for c", + callable="", 
), ] assert actual_result == expected_result + + +def test_get_flat_properties_from_pydantic_model_as_str(): + class TestModel(BaseModel): + first_property: str = Field(description="The first property") + second_property: int = Field(description="The second property") + third_property: list[float] = Field(description="The third property") + + actual_result = _get_flat_properties_from_pydantic_model_as_str(model=TestModel) + expected_result = """\ +first_property: The first property +second_property: The second property +third_property: The third property +""" + assert actual_result == expected_result + + +def test_make_vector_index_description( + mock_openbb_function_callable, mock_openbb_function_output_model +): + test_obb_function_description = OpenBBFunctionDescription( + name="Test Function", + input_model="", + output_model=mock_openbb_function_output_model, + callable=mock_openbb_function_callable, + ) + + actual_result = make_vector_index_description( + openbb_function_description=test_obb_function_description + ) + assert ( + "A callable test function that does nothing, but has a docstring." + in actual_result + ) + assert "first_property: The first property" in actual_result + assert "second_property: The second property" in actual_result + assert "third_property: The third property" in actual_result + + +def test_build_vector_index( + mock_openbb_function_output_model, mock_openbb_function_callable +): + test_openbb_function_descriptions = [ + OpenBBFunctionDescription( + name="function_a", + input_model="mock input model for a", + output_model=mock_openbb_function_output_model, + callable=mock_openbb_function_callable, + ), + OpenBBFunctionDescription( + name="function_b", + input_model="mock input model for b", + output_model=mock_openbb_function_output_model, + callable=mock_openbb_function_callable, + ), + ] + + actual_result = build_vector_index_from_openbb_function_descriptions( + openbb_function_descriptions=test_openbb_function_descriptions + ) + assert len(actual_result.docstore._dict) == 2 # type: ignore From 43c6201796e6677b3f89f6f456ae6f01f200f075 Mon Sep 17 00:00:00 2001 From: Michael Struwig Date: Thu, 16 May 2024 13:15:25 +0200 Subject: [PATCH 3/6] Bump magentic. 
--- poetry.lock | 22 +++++++++++----------- pyproject.toml | 2 +- tests/conftest.py | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/poetry.lock b/poetry.lock index 65d93f2..8100934 100644 --- a/poetry.lock +++ b/poetry.lock @@ -122,13 +122,13 @@ files = [ [[package]] name = "anthropic" -version = "0.25.8" +version = "0.25.9" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.7" files = [ - {file = "anthropic-0.25.8-py3-none-any.whl", hash = "sha256:c7a0091916eb22a5e0012b725f5492779eedfcad2da8dc906082e1db7596a65c"}, - {file = "anthropic-0.25.8.tar.gz", hash = "sha256:93f6063e96d5dbeaa172edc177762f630e55b2f81595cedb760278b95a2dd03e"}, + {file = "anthropic-0.25.9-py3-none-any.whl", hash = "sha256:d0b17d442160356a531593b237de55d3125cc6fa708f1268c214107e61c81c57"}, + {file = "anthropic-0.25.9.tar.gz", hash = "sha256:a4ec810b1cfbf3340af99b6f5bf599a83d66986e0f572a5f3bc4ebcab284f629"}, ] [package.dependencies] @@ -1826,13 +1826,13 @@ requests = ">=2,<3" [[package]] name = "litellm" -version = "1.37.0" +version = "1.37.12" description = "Library to easily interface with LLM API providers" optional = false python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" files = [ - {file = "litellm-1.37.0-py3-none-any.whl", hash = "sha256:74f7ccf8c10491aa393cd1a77aff4db9fd61bbffe52a7326c96811db823e1005"}, - {file = "litellm-1.37.0.tar.gz", hash = "sha256:441fbdc119f3336f1693d2022c1887d6d98453dbab32920318c823ad8ddf0d66"}, + {file = "litellm-1.37.12-py3-none-any.whl", hash = "sha256:5b42b3c98e329a24f48112f488dd1c5e4c635fb5b1a098b50c2cab789cb08172"}, + {file = "litellm-1.37.12.tar.gz", hash = "sha256:2188991e9aa903c9adcec9a265b58ad3b6d9acbe0b0f194b3321d69fa4102729"}, ] [package.dependencies] @@ -1848,7 +1848,7 @@ tokenizers = "*" [package.extras] extra-proxy = ["azure-identity (>=1.15.0,<2.0.0)", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "resend (>=0.8.0,<0.9.0)"] -proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "cryptography (>=42.0.5,<43.0.0)", "fastapi (>=0.109.1,<0.110.0)", "fastapi-sso (>=0.10.0,<0.11.0)", "gunicorn (>=22.0.0,<23.0.0)", "orjson (>=3.9.7,<4.0.0)", "python-multipart (>=0.0.9,<0.0.10)", "pyyaml (>=6.0.1,<7.0.0)", "rq", "uvicorn (>=0.22.0,<0.23.0)"] +proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "cryptography (>=42.0.5,<43.0.0)", "fastapi (>=0.111.0,<0.112.0)", "fastapi-sso (>=0.10.0,<0.11.0)", "gunicorn (>=22.0.0,<23.0.0)", "orjson (>=3.9.7,<4.0.0)", "python-multipart (>=0.0.9,<0.0.10)", "pyyaml (>=6.0.1,<7.0.0)", "rq", "uvicorn (>=0.22.0,<0.23.0)"] [[package]] name = "lxml" @@ -1945,13 +1945,13 @@ source = ["Cython (>=3.0.7)"] [[package]] name = "magentic" -version = "0.23.0" +version = "0.24.0" description = "Seamlessly integrate LLMs as Python functions" optional = false python-versions = "<4.0,>=3.10" files = [ - {file = "magentic-0.23.0-py3-none-any.whl", hash = "sha256:aa678546f8dee7794efdca99e7a509b942e20e7d8a100f57e7e1ec25f135737e"}, - {file = "magentic-0.23.0.tar.gz", hash = "sha256:8a4e7fcb4c3bddfe1f58da8c0dbdc2bcc9e3575ab88938dbdeed38f06ac7bedd"}, + {file = "magentic-0.24.0-py3-none-any.whl", hash = "sha256:0193d16f7eb322dfca1bbb6045e32587fa1bb252b7965fb6494c0f3d6eaf1ea8"}, + {file = "magentic-0.24.0.tar.gz", hash = "sha256:f4e47410c096337564e7dc693ad0ea95b67f22a76bb930bfc8753169fd2c2e60"}, ] [package.dependencies] @@ -5672,4 +5672,4 @@ 
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.11,<3.12" -content-hash = "6219a59550206675edb997d68463df8b765f9b18720f68559277a6c1eae6feeb" +content-hash = "ac95d59495c93995ea3a6c6f8d0920d3c973d1b2dad07586a41aef29e6c34e5e" diff --git a/pyproject.toml b/pyproject.toml index 2efb559..f6182a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ jupyterlab = "^4.0.9" sentence-transformers = "^2.2.2" tiktoken = "^0.5.1" faiss-cpu = "^1.7.4" -magentic = {extras = ["anthropic", "litellm"], version = "^0.23.0"} +magentic = {extras = ["anthropic", "litellm"], version = "^0.24.0"} pydantic = "2.7.1" openbb = "4.1.7" langchain = "^0.1.17" diff --git a/tests/conftest.py b/tests/conftest.py index 24772a8..2d89ee2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -22,7 +22,7 @@ class TestCredentials(BaseModel): intrinio_token="another-value", benzinga_api_key=None, ) - monkeypatch.setattr(obb.user, "credentials", mock_credentials) + monkeypatch.setattr(obb.user, "credentials", mock_credentials) # type: ignore @pytest.fixture From 9cea5bf1bc1182af3029e5b5382bb16d4f66eb44 Mon Sep 17 00:00:00 2001 From: Michael Struwig Date: Thu, 16 May 2024 13:17:00 +0200 Subject: [PATCH 4/6] Remove workaround for input validation bypass. --- openbb_agents/chains.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/openbb_agents/chains.py b/openbb_agents/chains.py index d417195..482deb6 100644 --- a/openbb_agents/chains.py +++ b/openbb_agents/chains.py @@ -274,20 +274,11 @@ def _build_messages_for_validation_error( val_err: ValidationError, ) -> list[Any]: logger.error(f"Input schema validation error: {val_err}") - # Sidestep `magentic`'s input validation, which will still - # occur when we pass `function_call` to `AssistantMessage` - # https://github.com/jackmpcollins/magentic/issues/211 - dummy = lambda *args, **kwargs: ... # noqa: E731 - dummy.__name__ = function_call.function.__name__ - new_function_call = FunctionCall( - function=dummy, - **function_call.arguments, - ) return [ - AssistantMessage(new_function_call), + AssistantMessage(function_call), FunctionResultMessage( content=str(val_err), - function_call=new_function_call, + function_call=function_call, ), ] From 17ff5456c46810bdfa5d35aa4d52d6a44a8d1ab8 Mon Sep 17 00:00:00 2001 From: Michael Struwig Date: Thu, 16 May 2024 13:18:08 +0200 Subject: [PATCH 5/6] Remove unnecessary error handling. 
--- openbb_agents/chains.py | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/openbb_agents/chains.py b/openbb_agents/chains.py index 482deb6..d236e39 100644 --- a/openbb_agents/chains.py +++ b/openbb_agents/chains.py @@ -16,7 +16,6 @@ prompt, prompt_chain, ) -from pydantic import ValidationError from openbb_agents.models import ( AnsweredSubQuestion, @@ -251,10 +250,6 @@ def _handle_function_call(function_call: FunctionCall) -> list[Any]: return _build_messages_for_function_call( function_call=function_call, result=result ) - except ValidationError as val_err: - return _build_messages_for_validation_error( - function_call=function_call, val_err=val_err - ) except Exception as err: return _build_messages_for_generic_error(function_call=function_call, err=err) @@ -269,20 +264,6 @@ def _build_messages_for_function_call( ] -def _build_messages_for_validation_error( - function_call: FunctionCall, - val_err: ValidationError, -) -> list[Any]: - logger.error(f"Input schema validation error: {val_err}") - return [ - AssistantMessage(function_call), - FunctionResultMessage( - content=str(val_err), - function_call=function_call, - ), - ] - - def _build_messages_for_generic_error( function_call: FunctionCall, err: Exception, From f964cc9b0b22f914f89da7289d2f1d3cc1497e04 Mon Sep 17 00:00:00 2001 From: Michael Struwig Date: Thu, 16 May 2024 13:24:41 +0200 Subject: [PATCH 6/6] Make test more robust. --- tests/test_chains.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/test_chains.py b/tests/test_chains.py index d75d867..0f281be 100644 --- a/tests/test_chains.py +++ b/tests/test_chains.py @@ -212,9 +212,6 @@ class StockPricePayload(BaseModel): tools=[_get_stock_price], ) assert isinstance(actual_result, AnsweredSubQuestion) - assert with_llm( - actual_result.answer, "The stock price could not be retrieved in EUR." - ) assert with_llm(actual_result.answer, "The stock price is 95 USD.") @@ -238,9 +235,6 @@ class StockPricePayload(BaseModel): tools=[_get_stock_price], ) assert isinstance(actual_result, AnsweredSubQuestion) - assert with_llm( - actual_result.answer, "The stock price could not be retrieved in EUR." - ) assert with_llm(actual_result.answer, "The stock price is 95 USD.")
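
The following minimal sketch is not part of any patch in this series; it only illustrates how the tool vector index introduced in openbb_agents/tools.py might be queried once the patches are applied. It assumes the openbb_agents package from this branch is installed, that an OPENAI_API_KEY is available to OpenAIEmbeddings (used to embed the index documents), and that at least one OpenBB provider credential is configured so that the set of valid function names is non-empty.

# Usage sketch only, under the assumptions stated above.
from openbb_agents.tools import build_openbb_tool_vector_index

# Build the FAISS index over descriptions of every OpenBB function the
# configured credentials can call (function name + docstring + flattened
# output-model fields, as assembled by make_vector_index_description).
vector_index = build_openbb_tool_vector_index()

# Retrieve the best-matching tools for a natural-language sub-question.
# Each returned document's metadata carries the bound OpenBB callable and
# its tool name, as set in build_vector_index_from_openbb_function_descriptions.
docs = vector_index.similarity_search("What is the stock price of AAPL?", k=4)
for doc in docs:
    print(doc.metadata["tool_name"], "->", doc.metadata["callable"])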