diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..c1192b7
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "apex"]
+	path = apex
+	url = https://github.com/NVIDIA/apex.git
diff --git a/apex b/apex
new file mode 160000
index 0000000..a99e187
--- /dev/null
+++ b/apex
@@ -0,0 +1 @@
+Subproject commit a99e18758090e859238e531702915aeeaaaed8f6
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..fc2c20c
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+numpy
+pandas
+scikit-learn
+matplotlib
+unidecode
+seaborn
+sentencepiece
+emoji
diff --git a/setup.py b/setup.py
index 2745298..3f22108 100644
--- a/setup.py
+++ b/setup.py
@@ -1,48 +1,10 @@
 import os
-from setuptools import setup, find_packages
-import torch
+import sys
 
-curdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
-                      'apex_utils')
-os.chdir(curdir)
+# shameless hack, but it works conveniently
+os.system("pip install -r requirements.txt")
 
-if not torch.cuda.is_available():
-    print("Warning: Torch did not find available GPUs on this system.\n",
-          "If your intention is to cross-compile, this is not an error.")
+os.chdir("apex")
 
-print("torch.__version__ = ", torch.__version__)
-TORCH_MAJOR = int(torch.__version__.split('.')[0])
-TORCH_MINOR = int(torch.__version__.split('.')[1])
+os.system("python " + " ".join(sys.argv))
 
-if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
-    raise RuntimeError("APEx requires Pytorch 0.4 or newer.\n" +
-                       "The latest stable release can be obtained from https://pytorch.org/")
-
-print("Building module.")
-setup(
-    name='apex', version='0.1',
-# ext_modules=[cuda_ext,],
-    description='PyTorch Extensions written by NVIDIA',
-    packages=find_packages(where='.',
-                           exclude=(
-                               "build",
-                               "csrc",
-                               "include",
-                               "tests",
-                               "dist",
-                               "docs",
-                               "tests",
-                               "examples",
-                               "apex.egg-info",
-                           )),
-    install_requires=[
-        "numpy",
-        "pandas",
-        "scikit-learn",
-        "matplotlib",
-        "unidecode",
-        "seaborn",
-        "sentencepiece",
-        "emoji"
-    ]
-)