forked from meta-llama/llama-recipes
-
Notifications
You must be signed in to change notification settings - Fork 0
/
pyproject.toml
46 lines (39 loc) · 1.39 KB
/
pyproject.toml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
# Build backend: hatchling, plus hatch-requirements-txt so the dynamic
# "dependencies" field declared in [project] is populated from requirements.txt.
[build-system]
requires = ["hatchling", "hatch-requirements-txt"]
build-backend = "hatchling.build"
# Core project metadata (PEP 621).
[project]
name = "llama-recipes"
version = "0.0.1"
authors = [
    { name = "Hamid Shojanazeri", email = "hamidnazeri@meta.com" },
    { name = "Matthias Reso", email = "mreso@meta.com" },
    { name = "Geeta Chauhan", email = "gchauhan@meta.com" },
]
# NOTE(review): fixed "It's goal" -> "Its goal" and dropped the stray trailing
# space inside the string; this text is shown verbatim on PyPI.
description = "Llama-recipes is a companion project to the Llama 2 model. Its goal is to provide examples to quickly get started with fine-tuning for domain adaptation and how to run inference for the fine-tuned models."
readme = "README.md"
requires-python = ">=3.8"
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: Other/Proprietary License",
    "Operating System :: OS Independent",
]
# "dependencies" is resolved at build time from requirements.txt by the
# hatch-requirements-txt metadata hook configured further down in this file.
dynamic = ["dependencies"]
# Optional extras, installable as e.g. `pip install llama-recipes[vllm]`.
[project.optional-dependencies]
vllm = ["vllm"]
tests = ["pytest-mock"]
auditnlg = ["auditnlg"]
[project.urls]
"Homepage" = "https://github.com/facebookresearch/llama-recipes/"
"Bug Tracker" = "https://github.com/facebookresearch/llama-recipes/issues"
# Keep previously built artifacts out of source distributions.
[tool.hatch.build]
exclude = [
"dist/*",
]
# src-layout: the wheel ships only the package under src/.
[tool.hatch.build.targets.wheel]
packages = ["src/llama_recipes"]
# Source file(s) for the dynamic "dependencies" field declared in [project].
[tool.hatch.metadata.hooks.requirements_txt]
files = ["requirements.txt"]
# Register the custom marker so pytest does not warn about unknown marks.
[tool.pytest.ini_options]
markers = [
"skip_missing_tokenizer: skip tests when we can not access meta-llama/Llama-2-7b-hf on huggingface hub (Log in with `huggingface-cli login` to unskip).",
]