-
Notifications
You must be signed in to change notification settings - Fork 44
/
pyproject.toml
119 lines (114 loc) · 3.87 KB
/
pyproject.toml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
# PEP 517 build configuration: build with Poetry's lightweight core backend.
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

# Package metadata (Poetry-native table; this project predates PEP 621 [project]).
[tool.poetry]
name = "pai_rag"
version = "0.1.0"
description = "Open source RAG framework built on Aliyun PAI"
authors = []
readme = "README.md"

[tool.poetry.dependencies]
python = ">=3.11.0,<3.12"
fastapi = "^0.110.1"
uvicorn = "^0.29.0"
# llama-index-core is pinned exactly; the llama-index-* plugin packages below
# use caret ranges against that pin — bump them together when upgrading.
llama-index-core = "0.10.62"
llama-index-embeddings-openai = "^0.1.7"
llama-index-embeddings-azure-openai = "^0.1.7"
llama-index-embeddings-dashscope = "^0.1.3"
llama-index-llms-openai = "^0.1.27"
llama-index-llms-azure-openai = "^0.1.6"
llama-index-llms-dashscope = "^0.1.2"
llama-index-readers-database = "^0.1.3"
llama-index-vector-stores-chroma = "^0.1.6"
llama-index-vector-stores-faiss = "^0.1.2"
llama-index-vector-stores-analyticdb = "^0.1.1"
llama-index-vector-stores-elasticsearch = "^0.2.0"
llama-index-vector-stores-milvus = "^0.1.10"
gradio = "3.41.0"
faiss-cpu = "^1.8.0"
hologres-vector = "^0.0.9"
dynaconf = "^3.2.5"
docx2txt = "^0.8"
click = "^8.1.7"
pydantic = "^2.7.0"
# NOTE(review): pytest / pytest-asyncio / pytest-cov / pre-commit / locust are
# dev/test tooling; consider moving them to [tool.poetry.group.dev.dependencies]
# so production installs stay lean — confirm nothing imports them at runtime.
pytest = "^8.1.1"
llama-index-retrievers-bm25 = "^0.1.3"
jieba = "^0.42.1"
llama-index-embeddings-huggingface = "^0.2.0"
llama-index-postprocessor-flag-embedding-reranker = "^0.1.3"
flagembedding = "^1.2.10"
sentencepiece = "^0.2.0"
oss2 = "^2.18.5"
asgi-correlation-id = "^4.3.1"
openinference-instrumentation-llama-index = "^2.2.1"
# CPU-only torch builds come from the explicit "pytorch_cpu" source declared in
# [[tool.poetry.source]]; there is no +cpu wheel for macOS, so darwin uses PyPI.
torch = [
    {version = "2.3.0+cpu", source = "pytorch_cpu", markers = "sys_platform != 'darwin'"},
    {version = "2.2.2", markers = "sys_platform == 'darwin'"},
]
torchvision = [
    {version = "0.18.0+cpu", source = "pytorch_cpu", markers = "sys_platform != 'darwin'"},
    {version = "0.17.2", markers = "sys_platform == 'darwin'"},
]
transformers = "4.40.0"
openpyxl = "^3.1.2"
pdf2image = "^1.17.0"
llama-index-storage-chat-store-redis = "^0.1.3"
python-bidi = "0.4.2"
easyocr = "^1.7.1"
opencv-python = "^4.6.0.66"
llama-parse = "0.4.2"
pypdf2 = "^3.0.1"
pdfplumber = "^0.11.0"
pdfminer-six = "^20231228"
openinference-semantic-conventions = "^0.1.9"
llama-index-tools-google = "^0.1.5"
llama-index-tools-duckduckgo = "^0.1.1"
openinference-instrumentation = "^0.1.12"
llama-index-llms-huggingface = "^0.2.0"
pytest-asyncio = "^0.23.7"
pytest-cov = "^5.0.0"
xlrd = "^2.0.1"
markdown = "^3.6"
chardet = "^5.2.0"
locust = "^2.29.0"
gunicorn = "^22.0.0"
umap-learn = "^0.5.6"
# Exact pin on the protobuf 3.x line; presumably required by a consumer that
# breaks on protobuf 4 — confirm before bumping.
protobuf = "3.20.2"
modelscope = "^1.16.0"
llama-index-multi-modal-llms-dashscope = "^0.1.2"
llama-index-vector-stores-alibabacloud-opensearch = "^0.1.0"
asyncpg = "^0.29.0"
pgvector = "^0.3.2"
pre-commit = "^3.8.0"
cn-clip = "^1.5.1"
llama-index-llms-paieas = "^0.1.0"
pymysql = "^1.1.1"
llama-index-experimental = "^0.2.0"
llama-index-readers-web = "^0.1.23"
milvus-lite = "^2.4.9"
rapidocr-onnxruntime = "^1.3.24"
rapid-table = "^0.1.3"
bs4 = "^0.0.2"
httpx = "0.27.0"
# Prebuilt detectron2 wheels hosted on OSS, selected by platform marker; the
# filenames show they are built against torch 2.3.0 (linux/win32) and
# 2.2.2 (macOS), matching the torch pins above — keep them in sync.
detectron2 = [
    {markers = "sys_platform == 'linux'", url = "https://pai-rag.oss-cn-hangzhou.aliyuncs.com/packages/python_wheels/detectron2-0.6%2B864913fpt2.3.0cpu-cp311-cp311-linux_x86_64.whl"},
    {markers = "sys_platform == 'win32'", url = "https://pai-rag.oss-cn-hangzhou.aliyuncs.com/packages/python_wheels/detectron2-0.6%2B864913fpt2.3.0cpu-cp311-cp311-win_amd64.whl"},
    {markers = "sys_platform != 'win32' and sys_platform != 'linux' ", url = "https://pai-rag.oss-cn-hangzhou.aliyuncs.com/packages/python_wheels/detectron2-0.6%2B864913fpt2.2.2cpu-cp311-cp311-macosx_10_9_universal2.whl"},
]
magic-pdf = {version = "0.7.0b1", extras = ["full"]}
llama-index-callbacks-arize-phoenix = "0.1.6"
peft = "^0.12.0"
duckduckgo-search = "6.2.12"
aliyun-bootstrap = "^1.0.1"
# NOTE(review): 'docx' is the legacy, unmaintained package; 'python-docx' is
# the maintained successor — confirm this exact package is intended.
docx = "^0.2.4"

# Console-script entry points installed with the package (command = "module:callable").
[tool.poetry.scripts]
pai_rag = "pai_rag.main:run"
load_data = "pai_rag.tools.load_data_tool:run"
load_model = "pai_rag.utils.download_models:load_models"
run_eval_exp = "pai_rag.evaluation.run_evaluation_experiments:run"
# Explicit-priority package source: consulted only by dependencies that opt in
# with `source = "pytorch_cpu"` (the torch/torchvision CPU-wheel entries above);
# it is never searched for other packages.
[[tool.poetry.source]]
name = "pytorch_cpu"
url = "https://download.pytorch.org/whl/cpu"
priority = "explicit"
# pytest-asyncio (declared above) in auto mode: `async def` tests are collected
# and awaited without requiring an explicit @pytest.mark.asyncio marker.
[tool.pytest.ini_options]
asyncio_mode = "auto"