diff --git a/.github/workflows/platform-backend-ci.yml b/.github/workflows/platform-backend-ci.yml
index 8b168044c18d..be0fdca31f5a 100644
--- a/.github/workflows/platform-backend-ci.yml
+++ b/.github/workflows/platform-backend-ci.yml
@@ -6,11 +6,13 @@ on:
     paths:
       - ".github/workflows/platform-backend-ci.yml"
       - "autogpt_platform/backend/**"
+      - "autogpt_platform/autogpt_libs/**"
   pull_request:
     branches: [master, dev, release-*]
     paths:
       - ".github/workflows/platform-backend-ci.yml"
       - "autogpt_platform/backend/**"
+      - "autogpt_platform/autogpt_libs/**"
   merge_group:
 
 concurrency:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f9ef30e34dd2..724eb8ef1c52 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -98,6 +98,11 @@ repos:
         files: ^autogpt_platform/autogpt_libs/
         args: [--fix]
 
+      - id: ruff-format
+        name: Format (Ruff) - AutoGPT Platform - Libs
+        alias: ruff-lint-platform-libs
+        files: ^autogpt_platform/autogpt_libs/
+
   - repo: local
     # isort needs the context of which packages are installed to function, so we
     # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
@@ -140,7 +145,7 @@ repos:
   # everything in .gitignore, so it works fine without any config or arguments.
     hooks:
       - id: black
-        name: Lint (Black)
+        name: Format (Black)
 
   - repo: https://github.com/PyCQA/flake8
     rev: 7.0.0
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py
index d9f081e5cb4b..dde516c1d8fa 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py
@@ -72,7 +72,7 @@ def feature_flag(
     """
 
     def decorator(
-        func: Callable[P, Union[T, Awaitable[T]]]
+        func: Callable[P, Union[T, Awaitable[T]]],
    ) -> Callable[P, Union[T, Awaitable[T]]]:
        @wraps(func)
        async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py
index 523f6cf8ec87..10db444247c6 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py
@@ -23,7 +23,6 @@
 
 
 class LoggingConfig(BaseSettings):
-
     level: str = Field(
         default="INFO",
         description="Logging level",
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/logging/test_utils.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/test_utils.py
index b3682d42cf0d..24e20986549c 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/logging/test_utils.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/logging/test_utils.py
@@ -24,10 +24,10 @@
         ),
         ("", ""),
         ("hello", "hello"),
-        ("hello\x1B[31m world", "hello world"),
-        ("\x1B[36mHello,\x1B[32m World!", "Hello, World!"),
+        ("hello\x1b[31m world", "hello world"),
+        ("\x1b[36mHello,\x1b[32m World!", "Hello, World!"),
         (
-            "\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found",
+            "\x1b[1m\x1b[31mError:\x1b[0m\x1b[31m file not found",
             "Error: file not found",
         ),
     ],
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/config.py b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/config.py
new file mode 100644
index 000000000000..76c9abaa0729
--- /dev/null
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/config.py
@@ -0,0 +1,31 @@
+from pydantic import Field
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+class RateLimitSettings(BaseSettings):
+    redis_host: str = Field(
+        default="localhost",
+        description="Redis host",
+        validation_alias="REDIS_HOST",
+    )
+
+    redis_port: str = Field(
+        default="6379", description="Redis port", validation_alias="REDIS_PORT"
+    )
+
+    redis_password: str = Field(
+        default="password",
+        description="Redis password",
+        validation_alias="REDIS_PASSWORD",
+    )
+
+    requests_per_minute: int = Field(
+        default=60,
+        description="Maximum number of requests allowed per minute per API key",
+        validation_alias="RATE_LIMIT_REQUESTS_PER_MINUTE",
+    )
+
+    model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")
+
+
+RATE_LIMIT_SETTINGS = RateLimitSettings()
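For reference, a minimal sketch of how `RateLimitSettings` resolves values at runtime: as a pydantic `BaseSettings` subclass, each field can be overridden through the environment variable named by its `validation_alias`. The values below are hypothetical.

```python
import os

# Hypothetical overrides; names must match the validation_alias values exactly
# because the settings class sets case_sensitive=True.
os.environ["REDIS_HOST"] = "redis.internal"
os.environ["RATE_LIMIT_REQUESTS_PER_MINUTE"] = "120"

from autogpt_libs.rate_limit.config import RateLimitSettings

settings = RateLimitSettings()  # reads os.environ on instantiation
assert settings.redis_host == "redis.internal"
assert settings.requests_per_minute == 120  # pydantic coerces "120" to int
```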
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/limiter.py b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/limiter.py
new file mode 100644
index 000000000000..7ac122429c22
--- /dev/null
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/limiter.py
@@ -0,0 +1,51 @@
+import time
+from typing import Tuple
+
+from redis import Redis
+
+from .config import RATE_LIMIT_SETTINGS
+
+
+class RateLimiter:
+    def __init__(
+        self,
+        redis_host: str = RATE_LIMIT_SETTINGS.redis_host,
+        redis_port: str = RATE_LIMIT_SETTINGS.redis_port,
+        redis_password: str = RATE_LIMIT_SETTINGS.redis_password,
+        requests_per_minute: int = RATE_LIMIT_SETTINGS.requests_per_minute,
+    ):
+        self.redis = Redis(
+            host=redis_host,
+            port=int(redis_port),
+            password=redis_password,
+            decode_responses=True,
+        )
+        self.window = 60
+        self.max_requests = requests_per_minute
+
+    async def check_rate_limit(self, api_key_id: str) -> Tuple[bool, int, int]:
+        """
+        Check if request is within rate limits.
+
+        Args:
+            api_key_id: The API key identifier to check
+
+        Returns:
+            Tuple of (is_allowed, remaining_requests, reset_time)
+        """
+        now = time.time()
+        window_start = now - self.window
+        key = f"ratelimit:{api_key_id}:1min"
+
+        pipe = self.redis.pipeline()
+        pipe.zremrangebyscore(key, 0, window_start)
+        pipe.zadd(key, {str(now): now})
+        pipe.zcount(key, window_start, now)
+        pipe.expire(key, self.window)
+
+        _, _, request_count, _ = pipe.execute()
+
+        remaining = max(0, self.max_requests - request_count)
+        reset_time = int(now + self.window)
+
+        return request_count <= self.max_requests, remaining, reset_time
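A usage sketch for the limiter above, assuming a reachable Redis with the configured defaults (the API key id is made up). Each call trims entries older than the 60-second window from a per-key sorted set, records the current request, and counts what remains, i.e. a sliding-window counter:

```python
import asyncio

from autogpt_libs.rate_limit.limiter import RateLimiter


async def main() -> None:
    limiter = RateLimiter()
    # check_rate_limit is declared async, though the redis-py pipeline it runs
    # is synchronous under the hood.
    is_allowed, remaining, reset_time = await limiter.check_rate_limit("api-key-123")
    if not is_allowed:
        print(f"Throttled; window resets at {reset_time}")
    else:
        print(f"{remaining} requests left this minute")


asyncio.run(main())
```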
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/middleware.py b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/middleware.py
new file mode 100644
index 000000000000..496697d8b1e2
--- /dev/null
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/middleware.py
@@ -0,0 +1,32 @@
+from fastapi import HTTPException, Request
+from starlette.middleware.base import RequestResponseEndpoint
+
+from .limiter import RateLimiter
+
+
+async def rate_limit_middleware(request: Request, call_next: RequestResponseEndpoint):
+    """FastAPI middleware for rate limiting API requests."""
+    limiter = RateLimiter()
+
+    if not request.url.path.startswith("/api"):
+        return await call_next(request)
+
+    api_key = request.headers.get("Authorization")
+    if not api_key:
+        return await call_next(request)
+
+    api_key = api_key.replace("Bearer ", "")
+
+    is_allowed, remaining, reset_time = await limiter.check_rate_limit(api_key)
+
+    if not is_allowed:
+        raise HTTPException(
+            status_code=429, detail="Rate limit exceeded. Please try again later."
+        )
+
+    response = await call_next(request)
+    response.headers["X-RateLimit-Limit"] = str(limiter.max_requests)
+    response.headers["X-RateLimit-Remaining"] = str(remaining)
+    response.headers["X-RateLimit-Reset"] = str(reset_time)
+
+    return response
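One way this middleware could be wired into an application (a sketch; the app and route are invented). Note that it constructs a fresh `RateLimiter`, and therefore a fresh Redis client, on every request; a production setup would more likely share a single instance:

```python
from fastapi import FastAPI

from autogpt_libs.rate_limit.middleware import rate_limit_middleware

app = FastAPI()
# Runs on every request, but only enforces limits for /api paths
# that carry an Authorization header.
app.middleware("http")(rate_limit_middleware)


@app.get("/api/example")
async def example():
    return {"ok": True}
```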
"ruff-0.8.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b8a4f7385c2285c30f34b200ca5511fcc865f17578383db154e098150ce0a087"}, + {file = "ruff-0.8.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd054486da0c53e41e0086e1730eb77d1f698154f910e0cd9e0d64274979a209"}, + {file = "ruff-0.8.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2029b8c22da147c50ae577e621a5bfbc5d1fed75d86af53643d7a7aee1d23871"}, + {file = "ruff-0.8.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2666520828dee7dfc7e47ee4ea0d928f40de72056d929a7c5292d95071d881d1"}, + {file = "ruff-0.8.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:333c57013ef8c97a53892aa56042831c372e0bb1785ab7026187b7abd0135ad5"}, + {file = "ruff-0.8.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:288326162804f34088ac007139488dcb43de590a5ccfec3166396530b58fb89d"}, + {file = "ruff-0.8.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b12c39b9448632284561cbf4191aa1b005882acbc81900ffa9f9f471c8ff7e26"}, + {file = "ruff-0.8.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:364e6674450cbac8e998f7b30639040c99d81dfb5bbc6dfad69bc7a8f916b3d1"}, + {file = "ruff-0.8.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b22346f845fec132aa39cd29acb94451d030c10874408dbf776af3aaeb53284c"}, + {file = "ruff-0.8.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b2f2f7a7e7648a2bfe6ead4e0a16745db956da0e3a231ad443d2a66a105c04fa"}, + {file = "ruff-0.8.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:adf314fc458374c25c5c4a4a9270c3e8a6a807b1bec018cfa2813d6546215540"}, + {file = "ruff-0.8.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a885d68342a231b5ba4d30b8c6e1b1ee3a65cf37e3d29b3c74069cdf1ee1e3c9"}, + {file = "ruff-0.8.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d2c16e3508c8cc73e96aa5127d0df8913d2290098f776416a4b157657bee44c5"}, + {file = "ruff-0.8.1-py3-none-win32.whl", hash = "sha256:93335cd7c0eaedb44882d75a7acb7df4b77cd7cd0d2255c93b28791716e81790"}, + {file = "ruff-0.8.1-py3-none-win_amd64.whl", hash = "sha256:2954cdbe8dfd8ab359d4a30cd971b589d335a44d444b6ca2cb3d1da21b75e4b6"}, + {file = "ruff-0.8.1-py3-none-win_arm64.whl", hash = "sha256:55873cc1a473e5ac129d15eccb3c008c096b94809d693fc7053f588b67822737"}, + {file = "ruff-0.8.1.tar.gz", hash = "sha256:3583db9a6450364ed5ca3f3b4225958b24f78178908d5c4bc0f46251ccca898f"}, ] [[package]] @@ -1852,4 +1852,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<4.0" -content-hash = "54bf6e076ec4d09be2307f07240018459dd6594efdc55a2dc2dc1d673184587e" +content-hash = "0aef772b321a1a00163abdc6a88efb21874b84bf25c68e84992c3ae3af026a17" diff --git a/autogpt_platform/autogpt_libs/pyproject.toml b/autogpt_platform/autogpt_libs/pyproject.toml index e6d92933233d..0c59dd929e1e 100644 --- a/autogpt_platform/autogpt_libs/pyproject.toml +++ b/autogpt_platform/autogpt_libs/pyproject.toml @@ -21,7 +21,7 @@ supabase = "^2.10.0" [tool.poetry.group.dev.dependencies] redis = "^5.2.0" -ruff = "^0.8.0" +ruff = "^0.8.1" [build-system] requires = ["poetry-core"] diff --git a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py index dbe8d7220a40..e52e2ba5b245 100644 --- a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py +++ b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py @@ -1,7 +1,7 @@ import logging import time from 
diff --git a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py
index dbe8d7220a40..e52e2ba5b245 100644
--- a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py
+++ b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py
@@ -1,7 +1,7 @@
 import logging
 import time
 from enum import Enum
-from typing import Any, Dict
+from typing import Any
 
 import httpx
 
@@ -64,7 +64,7 @@ def __init__(self):
             },
         )
 
-    def _get_headers(self, api_key: str) -> Dict[str, str]:
+    def _get_headers(self, api_key: str) -> dict[str, str]:
         """Get headers for FAL API requests."""
         return {
             "Authorization": f"Key {api_key}",
@@ -72,8 +72,8 @@ def _get_headers(self, api_key: str) -> Dict[str, str]:
         }
 
     def _submit_request(
-        self, url: str, headers: Dict[str, str], data: Dict[str, Any]
-    ) -> Dict[str, Any]:
+        self, url: str, headers: dict[str, str], data: dict[str, Any]
+    ) -> dict[str, Any]:
         """Submit a request to the FAL API."""
         try:
             response = httpx.post(url, headers=headers, json=data)
@@ -83,7 +83,7 @@ def _submit_request(
             logger.error(f"FAL API request failed: {str(e)}")
             raise RuntimeError(f"Failed to submit request: {str(e)}")
 
-    def _poll_status(self, status_url: str, headers: Dict[str, str]) -> Dict[str, Any]:
+    def _poll_status(self, status_url: str, headers: dict[str, str]) -> dict[str, Any]:
         """Poll the status endpoint until completion or failure."""
         try:
             response = httpx.get(status_url, headers=headers)
diff --git a/autogpt_platform/backend/backend/blocks/github/triggers.py b/autogpt_platform/backend/backend/blocks/github/triggers.py
index ce24a649fe2f..938dce84faea 100644
--- a/autogpt_platform/backend/backend/blocks/github/triggers.py
+++ b/autogpt_platform/backend/backend/blocks/github/triggers.py
@@ -111,7 +111,9 @@ class Output(GitHubTriggerBase.Output):
     def __init__(self):
         from backend.integrations.webhooks.github import GithubWebhookType
 
-        example_payload = json.loads(self.EXAMPLE_PAYLOAD_FILE.read_text())
+        example_payload = json.loads(
+            self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
+        )
 
         super().__init__(
             id="6c60ec01-8128-419e-988f-96a063ee2fea",
diff --git a/autogpt_platform/backend/backend/data/model.py b/autogpt_platform/backend/backend/data/model.py
index f8b6781e0ce1..5940bf7ecc24 100644
--- a/autogpt_platform/backend/backend/data/model.py
+++ b/autogpt_platform/backend/backend/data/model.py
@@ -120,7 +120,7 @@ def SchemaField(
     title: Optional[str] = None,
     description: Optional[str] = None,
     placeholder: Optional[str] = None,
-    advanced: Optional[bool] = None,
+    advanced: Optional[bool] = False,
     secret: bool = False,
     exclude: bool = False,
     hidden: Optional[bool] = None,
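To illustrate the `SchemaField` change: fields declared without an explicit `advanced=` argument now default to `advanced=False` rather than leaving the flag unset, presumably so new fields surface as regular inputs unless authors opt them into the advanced section. A made-up model using only parameters visible in the signature above:

```python
from pydantic import BaseModel

from backend.data.model import SchemaField


class ExampleInput(BaseModel):
    prompt: str = SchemaField(description="Main prompt")  # advanced now defaults to False
    timeout: int = SchemaField(description="Request timeout in seconds", advanced=True)
```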
diff --git a/autogpt_platform/backend/linter.py b/autogpt_platform/backend/linter.py
index 9bba9d1963b6..a85fd6e9dc23 100644
--- a/autogpt_platform/backend/linter.py
+++ b/autogpt_platform/backend/linter.py
@@ -2,7 +2,10 @@
 import subprocess
 
 directory = os.path.dirname(os.path.realpath(__file__))
-target_dirs = ["../backend", "../autogpt_libs"]
+
+BACKEND_DIR = "."
+LIBS_DIR = "../autogpt_libs"
+TARGET_DIRS = [BACKEND_DIR, LIBS_DIR]
 
 
 def run(*command: str) -> None:
@@ -12,17 +15,19 @@ def run(*command: str) -> None:
 
 def lint():
     try:
-        run("ruff", "check", *target_dirs, "--exit-zero")
-        run("isort", "--diff", "--check", "--profile", "black", ".")
-        run("black", "--diff", "--check", ".")
-        run("pyright", *target_dirs)
+        run("ruff", "check", *TARGET_DIRS, "--exit-zero")
+        run("ruff", "format", "--diff", "--check", LIBS_DIR)
+        run("isort", "--diff", "--check", "--profile", "black", BACKEND_DIR)
+        run("black", "--diff", "--check", BACKEND_DIR)
+        run("pyright", *TARGET_DIRS)
     except subprocess.CalledProcessError as e:
         print("Lint failed, try running `poetry run format` to fix the issues: ", e)
         raise e
 
 
 def format():
-    run("ruff", "check", "--fix", *target_dirs)
-    run("isort", "--profile", "black", ".")
-    run("black", ".")
-    run("pyright", *target_dirs)
+    run("ruff", "check", "--fix", *TARGET_DIRS)
+    run("ruff", "format", LIBS_DIR)
+    run("isort", "--profile", "black", BACKEND_DIR)
+    run("black", BACKEND_DIR)
+    run("pyright", *TARGET_DIRS)