Retrieve hanchan (half-game) information and statistics #1

Merged: 8 commits, Feb 9, 2024
Changes from 5 commits
42 changes: 42 additions & 0 deletions .coderabbit.yaml
@@ -0,0 +1,42 @@
language: "jp"
early_access: false
reviews:
  request_changes_workflow: false
  high_level_summary: true
  poem: true
  review_status: true
  collapse_walkthrough: false
  path_filters:
    - "!**/.toml"
    - "!**/.yaml"
  path_instructions:
    - path: "**/*.py"
      instructions: |
        You are @coderabbitai (also known as github-actions[bot]), a language model trained by OpenAI.
        Your purpose is to act as a highly experienced software engineer, thoroughly reviewing
        pieces of code and suggesting code snippets that improve key areas such as:
        - Logic
        - Security
        - Performance
        - Data races
        - Consistency
        - Error handling
        - Maintainability
        - Modularity
        - Complexity
        - Optimization
        - Best practices: DRY, SOLID, KISS

        Do not comment on minor code-style issues or on missing comments/documentation.
        Aim to identify and resolve significant issues to improve overall code quality,
        but deliberately ignore minor issues.
  auto_review:
    enabled: true
    ignore_title_keywords:
      - "WIP"
      - "DO NOT MERGE"
    drafts: false
    base_branches:
      - "develop"
      - "feature/*"
chat:
  auto_reply: true
23 changes: 23 additions & 0 deletions .gcloudignore
@@ -0,0 +1,23 @@
# This file specifies files that are *not* uploaded to Google Cloud
# using gcloud. It follows the same syntax as .gitignore, with the addition of
# "#!include" directives (which insert the entries of the given .gitignore-style
# file at that point).
#
# For more information, run:
# $ gcloud topic gcloudignore
#
.gcloudignore
# If you would like to upload your .git directory, .gitignore file or files
# from your .gitignore file, remove the corresponding line
# below:
.git
.gitignore

# Python pycache:
__pycache__/
.mypy_cache/
env/
# Ignored by the build system
/setup.cfg
pyproject.toml
poetry.lock
14 changes: 14 additions & 0 deletions .github/pr-labeler.yml
@@ -0,0 +1,14 @@
feature:
  - 'feature/*'
  - 'feat/*'
refactor:
  - 'refactor/*'
bug:
  - 'fix/*'
minor:
  - 'release/*'
  - 'feature/*'
  - 'feat/*'
  - 'refactor/*'
chore:
  - 'chore/*'
31 changes: 31 additions & 0 deletions .github/release-drafter.yml
@@ -0,0 +1,31 @@
name-template: '$RESOLVED_VERSION'
tag-template: '$RESOLVED_VERSION'
categories:
  - title: '🚀 Features'
    labels:
      - 'feature'
      - 'enhancement'
  - title: '🐛 Bug Fixes'
    labels:
      - 'fix'
      - 'bugfix'
      - 'bug'
  - title: '🧰 Maintenance'
    label: 'chore'
change-template: '- $TITLE @$AUTHOR (#$NUMBER)'
change-title-escapes: '\<*_&' # You can add # and @ to disable mentions, and add ` to disable code blocks.
version-resolver:
  major:
    labels:
      - 'major'
  minor:
    labels:
      - 'minor'
  patch:
    labels:
      - 'patch'
  default: patch
template: |
  ## Changes

  $CHANGES
15 changes: 15 additions & 0 deletions .github/workflows/pr_label.yml
@@ -0,0 +1,15 @@
name: pull request label
on:
  pull_request:
    types: [opened]
permissions:
  pull-requests: write
  contents: read
jobs:
  pr-labeler:
    runs-on: ubuntu-latest
    steps:
      - uses: TimonVS/pr-labeler-action@v4
        name: make label
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
33 changes: 33 additions & 0 deletions .github/workflows/release.yml
@@ -0,0 +1,33 @@
name: make release
on:
  pull_request:
    types:
      - closed
    branches:
      - main
  push:
    tags:
      - '*'
  workflow_dispatch: {}
env:
  cache-unique-key: mahjong-rust-ai
permissions:
  id-token: write
  contents: read
jobs:
  release:
    permissions:
      # write permission is required to create a github release
      contents: write
      # write permission is required for autolabeler
      # otherwise, read permission is required at least
      pull-requests: read
    runs-on: ubuntu-latest
    if: ${{ github.event.pull_request.merged == true }}
    steps:
      - name: Create Release
        uses: release-drafter/release-drafter@v5
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          publish: true
Empty file added api/__init__.py
Empty file.
52 changes: 52 additions & 0 deletions api/auth.py
@@ -0,0 +1,52 @@
import os
from typing import Any, Dict

import jwt
from fastapi import Depends, HTTPException
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer

AUTH0_DOMAIN = os.environ.get("AUTH0_DOMAIN")
AUTH0_AUDIENCE = os.environ.get("AUTH0_AUDIENCE")
AUTH0_SCOPE = os.environ.get("AUTH0_SCOPE")

ALGORITHMS = ["RS256"]
CACHE_JWK_KEY = None

token_auth_scheme = HTTPBearer()
jwks_url = f"https://{AUTH0_DOMAIN}/.well-known/jwks.json"
jwks_client = jwt.PyJWKClient(jwks_url)


async def verify_token(
    token: HTTPAuthorizationCredentials = Depends(token_auth_scheme),
) -> Dict[str, Any]:
    if AUTH0_DOMAIN is None:
        return {}

    try:
        rsa_key = jwks_client.get_signing_key_from_jwt(token.credentials).key
    except jwt.exceptions.PyJWKClientError as e:
        raise HTTPException(status_code=400, detail=e.__str__())
    except jwt.exceptions.DecodeError as e:
        raise HTTPException(status_code=400, detail=e.__str__())

    try:
        payload = jwt.decode(
            token.credentials,
            rsa_key,
            algorithms=ALGORITHMS,
            audience=AUTH0_AUDIENCE,
            issuer=f"https://{AUTH0_DOMAIN}/",
        )

        if "scope" in payload:
            token_scopes = payload["scope"].split()

            if AUTH0_SCOPE not in token_scopes:
                raise Exception("permission denied")
        else:
            raise Exception("permission denied")
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))

    return payload
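
For context, the sketch below shows one way verify_token could be attached to a FastAPI route as a dependency. It is not part of this PR; the router, path, and response shape are illustrative assumptions only.

from typing import Any, Dict

from fastapi import APIRouter, Depends

from api.auth import verify_token

router = APIRouter()


@router.get("/me")
async def read_me(claims: Dict[str, Any] = Depends(verify_token)) -> Dict[str, Any]:
    # verify_token returns the decoded JWT claims ({} when AUTH0_DOMAIN is unset)
    # and raises HTTPException(status_code=400) for invalid or unauthorized tokens.
    return {"sub": claims.get("sub")}

Because the dependency raises HTTPException itself, the route body only runs for requests that passed verification.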
Empty file added api/cruds/__init__.py
Empty file.
11 changes: 11 additions & 0 deletions api/cruds/dataset.py
@@ -0,0 +1,11 @@
from typing import Iterator
from google.cloud import bigquery
from google.cloud.bigquery.dataset import DatasetListItem

import api.schemas.dataset as dataset_schema


async def get_datasets() -> list[dataset_schema.Dataset]:
    client = bigquery.Client()
    datasets: Iterator[DatasetListItem] = client.list_datasets()
    return [dataset_schema.Dataset(id=dataset.dataset_id, friendly_name=dataset.friendly_name) for dataset in datasets]
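
As a usage sketch (not included in this diff), get_datasets could back a simple listing endpoint; the route path and response_model wiring below are assumptions for illustration.

from fastapi import APIRouter

import api.cruds.dataset as dataset_crud
import api.schemas.dataset as dataset_schema

router = APIRouter()


@router.get("/datasets", response_model=list[dataset_schema.Dataset])
async def list_datasets() -> list[dataset_schema.Dataset]:
    # Each Dataset carries the BigQuery dataset_id and its friendly_name.
    return await dataset_crud.get_datasets()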
67 changes: 67 additions & 0 deletions api/cruds/game.py
@@ -0,0 +1,67 @@
from google.cloud import bigquery
from datetime import date

import api.schemas.game as game_schema


async def get_games_count(dataset_id: str, start_date: date, end_date: date) -> int:
    client = bigquery.Client()
    dataset = client.get_dataset(client.project + "." + dataset_id)
    table = f"{client.project}.{dataset.dataset_id}.games"
    query = f"""
    SELECT
        COUNT(id)
    FROM
        {table}
    WHERE
        dt BETWEEN @start_date AND @end_date
    """
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter("start_date", "DATE", start_date),
            bigquery.ScalarQueryParameter("end_date", "DATE", end_date),
        ]
    )
    query_job = client.query(query, job_config=job_config)
    rows = query_job.result()

    return 0 if rows is None else next(rows)[0]


async def get_games(
    dataset_id: str, start_date: date, end_date: date, limit: int, offset: int
) -> list[game_schema.Game]:
    client = bigquery.Client()
    dataset = client.get_dataset(client.project + "." + dataset_id)
    table = f"{client.project}.{dataset.dataset_id}.games"
    query = f"""
    SELECT
        id,
        tonpu,
        ariari,
        has_aka,
        demo,
        soku,
        level,
        started_at
    FROM
        {table}
    WHERE
        dt BETWEEN @start_date AND @end_date
    LIMIT @limit
    OFFSET @offset
    """
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter("start_date", "DATE", start_date),
            bigquery.ScalarQueryParameter("end_date", "DATE", end_date),
            bigquery.ScalarQueryParameter("limit", "INT64", limit),
            bigquery.ScalarQueryParameter("offset", "INT64", offset),
        ]
    )
    query_job = client.query(query, job_config=job_config)
    rows = query_job.result()
    games = []
    for row in rows:
        games.append(game_schema.Game(**row))
    return games
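
A hedged sketch of how the two functions might be combined into a paginated listing; the helper name, date range, and response dict below are hypothetical and not part of this PR.

from datetime import date

import api.cruds.game as game_crud


async def list_games_page(dataset_id: str, page: int, per_page: int = 50) -> dict:
    # Example date range; real callers would take these as request parameters.
    start, end = date(2024, 1, 1), date(2024, 1, 31)
    total = await game_crud.get_games_count(dataset_id, start, end)
    games = await game_crud.get_games(
        dataset_id, start, end, limit=per_page, offset=page * per_page
    )
    return {"total": total, "games": games}

Both queries bind start_date, end_date, limit, and offset as ScalarQueryParameter values, so only the table name is interpolated into the SQL string.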
88 changes: 88 additions & 0 deletions api/cruds/kyoku.py
@@ -0,0 +1,88 @@
from google.cloud import bigquery
from datetime import date

import api.schemas.kyoku as kyoku_schema


async def get_kyokus_count(
    dataset_id: str, start_date: date, end_date: date, game_id: str | None
) -> int:
    client = bigquery.Client()
    dataset = client.get_dataset(client.project + "." + dataset_id)
    table = f"{client.project}.{dataset.dataset_id}.kyokus"
    query = f"""
    SELECT
        COUNT(id)
    FROM
        {table}
    WHERE
        dt BETWEEN @start_date AND @end_date
    """

    params = [
        bigquery.ScalarQueryParameter("start_date", "DATE", start_date),
        bigquery.ScalarQueryParameter("end_date", "DATE", end_date),
    ]

    if game_id is not None:
        query += " AND game_id = @game_id"
        params.append(bigquery.ScalarQueryParameter("game_id", "STRING", game_id))

    job_config = bigquery.QueryJobConfig(query_parameters=params)
    query_job = client.query(query, job_config=job_config)
    rows = query_job.result()

    return 0 if rows is None else next(rows)[0]


async def get_kyokus(
    dataset_id: str,
    start_date: date,
    end_date: date,
    limit: int,
    offset: int,
    game_id: str | None,
) -> list[kyoku_schema.Kyoku]:
    client = bigquery.Client()
    dataset = client.get_dataset(client.project + "." + dataset_id)
    table = f"{client.project}.{dataset.dataset_id}.kyokus"

    params = [
        bigquery.ScalarQueryParameter("start_date", "DATE", start_date),
        bigquery.ScalarQueryParameter("end_date", "DATE", end_date),
        bigquery.ScalarQueryParameter("limit", "INT64", limit),
        bigquery.ScalarQueryParameter("offset", "INT64", offset),
    ]

    if game_id is not None:
        cond_game_id = " AND game_id = @game_id"
        params.append(bigquery.ScalarQueryParameter("game_id", "STRING", game_id))
    else:
        cond_game_id = ""

    query = f"""
    SELECT
        id,
        game_id,
        kyoku_num,
        honba,
        reachbou,
        scores,
        kazes
    FROM
        {table}
    WHERE
        dt BETWEEN @start_date AND @end_date
    {cond_game_id}
    LIMIT @limit
    OFFSET @offset
    """

    job_config = bigquery.QueryJobConfig(query_parameters=params)
    query_job = client.query(query, job_config=job_config)
    rows = query_job.result()
    kyokus = []
    for row in rows:
        kyokus.append(kyoku_schema.Kyoku(**row))

    return kyokus
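
Illustrative only: a small sketch of calling the kyoku helpers with the optional game_id filter. The dataset_id, game_id, and date range are placeholders, not values from this PR.

from datetime import date

import api.cruds.kyoku as kyoku_crud


async def kyokus_for_game(dataset_id: str, game_id: str) -> None:
    start, end = date(2024, 1, 1), date(2024, 12, 31)
    # Passing a non-None game_id adds the "AND game_id = @game_id" condition shown above.
    total = await kyoku_crud.get_kyokus_count(dataset_id, start, end, game_id)
    kyokus = await kyoku_crud.get_kyokus(
        dataset_id, start, end, limit=100, offset=0, game_id=game_id
    )
    print(f"{total} kyokus match; fetched {len(kyokus)}")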