-
Notifications
You must be signed in to change notification settings - Fork 22
/
setup.py
281 lines (235 loc) · 8.3 KB
/
setup.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
#
"""The setup file"""
import os
import glob
import sys
import sysconfig
import multiprocessing
import subprocess
import distutils.command.clean
from os.path import dirname, join
from subprocess import PIPE, Popen
from setuptools import setup, find_packages
import setuptools.command.install
from torch.utils.cpp_extension import CppExtension # pylint: disable=C0411
from torch.utils.cpp_extension import BuildExtension as Build # pylint: disable=C0411
from tools.cuda_porting.cuda_porting import port_cuda
# Allow parallel compilation by default; honor an explicit MAX_JOBS override.
if os.getenv("MAX_JOBS") is None:
    os.environ["MAX_JOBS"] = str(multiprocessing.cpu_count())

# `python setup.py clean` must work without a configured build environment,
# so detect it up front before enforcing PYTORCH_REPO_PATH.
# (Idiomatic membership test replaces the manual enumerate loop, whose
# index variable was unused.)
CLEAN_MODE = "clean" in sys.argv

if not CLEAN_MODE:
    pytorch_root = os.getenv("PYTORCH_REPO_PATH", default="")
    if pytorch_root == "":
        raise RuntimeError(
            "Building error: PYTORCH_REPO_PATH must be set to"
            " PyTorch repository when building, but now it is empty!"
        )

# Make the bundled build helpers importable (they live in torch_musa/).
sys.path.append(join(dirname(__file__), "torch_musa"))
from setup_helpers.env import check_negative_env_flag, build_type
from setup_helpers.cmake import CMake

# Base version string, e.g. "1.0.0" (first line of version.txt).
with open("version.txt", "r", encoding="utf-8") as version_file:
    version = version_file.readlines()[0].strip()

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Set to True to force CMake to regenerate its cache on every build.
RERUN_CMAKE = False
class Install(setuptools.command.install.install):
    """
    Install class.

    Placeholder subclass of setuptools' ``install`` command. It adds no
    behavior of its own; it exists so the ``cmdclass`` mapping passed to
    ``setup()`` has a project-owned hook for future install-time logic.
    """
class Clean(distutils.command.clean.clean):
    """
    Custom ``clean`` command.

    Deletes every path matched by the ``.gitignore`` patterns that appear
    before the "BEGIN NOT-CLEAN-FILES" marker, then falls back to the
    stock distutils clean.
    """

    def run(self):
        # pylint: disable=import-outside-toplevel
        import re
        import shutil

        with open(".gitignore", "r", encoding="utf-8") as gitignore:
            raw_patterns = gitignore.read()
        comment_re = re.compile(r"^#( BEGIN NOT-CLEAN-FILES )?")
        for pattern in raw_patterns.split("\n"):
            if not pattern:
                continue
            found = comment_re.match(pattern)
            if found:
                if found.group(1):
                    # Marker reached: everything below it must survive cleaning.
                    break
                # Plain '#' comment line — nothing to delete.
                continue
            for path in glob.glob(pattern):
                try:
                    os.remove(path)
                except OSError:
                    # Not a plain file (or removal failed): treat as a directory.
                    shutil.rmtree(path, ignore_errors=True)
        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)
def get_pytorch_install_path():
    """
    Return the directory in which the installed ``torch`` package lives.

    Raises:
        RuntimeError: if ``torch`` cannot be imported (or its location
            cannot be resolved).
    """
    try:
        # pylint: disable=import-outside-toplevel
        import torch

        install_root = os.path.dirname(os.path.abspath(torch.__file__))
    except Exception as import_error:
        raise RuntimeError(
            "Building error: import torch failed when building!"
        ) from import_error
    return install_root
def get_mtgpu_arch():
    """
    Detect the Moore Threads GPU architecture via ``mthreads-gmi``.

    Returns:
        str: the MUSA arch code (e.g. "21" for MTT S80).

    Raises:
        RuntimeError: if ``mthreads-gmi`` cannot be run, its output has no
            "Product Name" field, or the reported GPU model is unknown.
    """
    mthreads_gmi = "mthreads-gmi"
    # Product name -> MUSA arch code.
    name_arches = {
        "MTT S2000": "11",
        "MTT S3000": "21",
        "MTT S80": "21",
        "MTT S80ES": "21",
        "MTT S4000": "22",
        "MTT S90": "22",
    }
    # Query GPU 0. With list-form Popen each flag must be its own argv
    # element; the previous single string "-q -i 0" was delivered to the
    # tool as one (invalid) argument.
    try:
        with Popen([mthreads_gmi, "-q", "-i", "0"], stdout=PIPE) as p:
            stdout, _ = p.communicate()
    except Exception as exception:
        raise RuntimeError("Unable to run the mthreads-gmi command") from exception
    output = stdout.decode("UTF-8")
    # splitlines() handles \n and \r\n alike, unlike os.linesep splitting.
    for line in output.splitlines():
        kvs = line.split(" : ")
        if len(kvs) != 2:
            continue
        if kvs[0].strip().startswith("Product Name"):
            name = kvs[1].strip()
            try:
                return name_arches[name]
            except KeyError as unknown:
                # Fail loudly with the model name instead of a bare KeyError.
                raise RuntimeError(
                    f"Unknown Moore Threads GPU model: {name!r}"
                ) from unknown
    raise RuntimeError(
        "Can not find Product Name in the output of 'mthreads-gmi -q -i 0'"
    )
def build_musa_lib():
    """
    Build the torch_musa native library through CMake.

    Generates the CUDA-porting compatibility sources on first run, exports
    the environment variables the CMake scripts read, then configures and
    builds the project.
    """
    build_dir = "build"
    porting_path = os.path.join(BASE_DIR, build_dir, "generated_cuda_compatible")
    codegen_path = os.path.join(BASE_DIR, build_dir, "torch_musa_codegen")

    # Generate code for CUDA porting; skipped when the output already exists.
    if not os.path.isdir(porting_path):
        port_cuda(pytorch_root, get_pytorch_install_path(), porting_path)

    os.environ["MUSA_ARCH"] = get_mtgpu_arch()

    cmake = CMake(build_dir, install_dir_prefix="torch_musa")
    # Use a reference to os.environ first, because newly added env vars may
    # be accessed in CMake.
    env = os.environ
    env["GENERATED_PORTING_DIR"] = porting_path
    env["CODE_GENERATED_DIR"] = codegen_path
    # Add `BUILD` prefix to avoid the env var being filtered.
    env["BUILD_PYTORCH_REPO_PATH"] = env["PYTORCH_REPO_PATH"]

    build_test = not check_negative_env_flag("BUILD_TEST")
    libdir = sysconfig.get_config_var("LIBDIR")
    soname = sysconfig.get_config_var("INSTSONAME")
    cmake_python_library = f"{libdir}/{soname}"
    # NOTE: `version` passed to cmake.generate won't take effect, because
    # CMakeLists.txt reads `version.txt` itself.
    cmake.generate(
        version, cmake_python_library, True, build_test, env.copy(), RERUN_CMAKE
    )
    cmake.build(env)
# Kick off the native CMake build at import time, unless we are only
# running `setup.py clean`.
if not CLEAN_MODE:
    build_musa_lib()
def configure_extension_build():
    """
    Assemble the list of C++ extension modules for ``setup()``.

    Returns:
        list[CppExtension] with the main ``_MUSAC`` stub and the ``_ext``
        kernels frontend, or ``None`` in clean mode (nothing to compile).
    """
    if CLEAN_MODE:
        return None

    link_args = []
    compile_args = [
        "-std=c++17",
        "-Wall",
        "-Wextra",
        "-Werror",
        "-fno-strict-aliasing",
        "-fstack-protector-all",
    ]
    if build_type.is_debug():
        compile_args += ["-O0", "-g"]
        link_args += ["-O0", "-g"]
    if build_type.is_rel_with_deb_info():
        compile_args += ["-g"]
        link_args += ["-g"]

    # Enable AddressSanitizer for any common truthy USE_ASAN value.
    if os.getenv("USE_ASAN", default="").upper() in ("ON", "1", "YES", "TRUE", "Y"):
        compile_args += ["-fsanitize=address"]
        link_args += ["-fsanitize=address"]

    main_ext = CppExtension(
        name="torch_musa._MUSAC",
        sources=glob.glob("torch_musa/csrc/stub.cpp"),
        libraries=["musa_python"],
        include_dirs=[],
        extra_compile_args=compile_args,
        library_dirs=[os.path.join(BASE_DIR, "torch_musa/lib")],
        # $ORIGIN/lib lets the module locate its shared libraries in-package.
        extra_link_args=link_args + ["-Wl,-rpath,$ORIGIN/lib"],
    )
    kernels_ext = CppExtension(
        name="torch_musa._ext",
        sources=glob.glob("torch_musa/csrc/extension/C_frontend.cpp"),
        libraries=["_ext_musa_kernels", "musa_python"],
        include_dirs=[],
        extra_compile_args={"cxx": ["-std=c++17"]},
        library_dirs=[os.path.join(BASE_DIR, "torch_musa/lib")],
        extra_link_args=link_args + ["-Wl,-rpath,$ORIGIN/lib"],
    )
    return [main_ext, kernels_ext]
# Runtime dependencies that pip must install alongside torch_musa.
install_requires = ["packaging"]
def package_files(directory):
    """
    Recursively collect every file path under *directory*.

    Each path is prefixed with ".." so the result can be used as
    package-data entries resolved relative to a package directory.
    """
    collected = []
    for root, _, filenames in os.walk(directory):
        collected.extend(os.path.join("..", root, name) for name in filenames)
    return collected
def dump_version():
    """
    Write ``torch_musa/version.py`` with the full version and git revision.

    The dumped ``__version__`` is "<version>+<short-sha>"; ``git_version``
    records the full commit hash.
    """
    target_path = os.path.join(BASE_DIR, "torch_musa", "version.py")
    git_sha = (
        subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=BASE_DIR)
        .decode("ascii")
        .strip()
    )
    full_version = f"{version}+{git_sha[:7]}"
    with open(target_path, "w", encoding="utf-8") as out:
        out.write(f"__version__ = '{full_version}'\n")
        out.write(f"git_version = {repr(git_sha)}\n")
# Setup
if __name__ == "__main__":
    # Record the exact version (base version + git sha) before packaging.
    dump_version()
    setup(
        name="torch_musa",
        version=version,
        description="A PyTorch backend extension for Moore Threads MUSA",
        url="https://github.mthreads.com/mthreads/torch_musa",
        author="Moore Threads PyTorch AI Dev Team",
        # Ship the package itself but exclude developer tooling.
        packages=find_packages(exclude=["tools", "tools*"]),
        # None in clean mode; the compiled _MUSAC/_ext modules otherwise.
        ext_modules=configure_extension_build(),
        include_package_data=True,
        install_requires=install_requires,
        extras_require={},
        entry_points={
            "console_scripts": ["musa-converter = torch_musa.utils.musa_converter:main"]
        },
        # Custom commands: torch's CMake-aware build_ext, gitignore-driven
        # clean, and the placeholder install hook.
        cmdclass={"build_ext": Build, "clean": Clean, "install": Install},
    )