[pytorch] Upgrade PyTorch to 1.11.0
Change-Id: I9ee7d61a1c3fa68df50da4cf9dc8f54f125b6b43
frankfliu committed Apr 19, 2022
1 parent ef49a74 commit 7ffe5a1
Showing 12 changed files with 598 additions and 48 deletions.
39 changes: 12 additions & 27 deletions .github/workflows/native_jni_s3_pytorch.yml
@@ -66,13 +66,14 @@ jobs:
- name: Install Environment
run: |
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get install -y locales cmake curl unzip software-properties-common gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
DEBIAN_FRONTEND=noninteractive apt-get install -y locales curl unzip software-properties-common
add-apt-repository -y ppa:deadsnakes/ppa
apt-get update
apt-get install -y python3 python3-distutils
curl -O https://bootstrap.pypa.io/pip/3.6/get-pip.py
python3 get-pip.py
pip3 install awscli --upgrade
pip3 install awscli cmake
ln -s /usr/local/bin/cmake /usr/bin/cmake
- name: Release JNI prep
run: |
PYTORCH_VERSION=${{ github.event.inputs.pt_version }}
@@ -84,8 +85,6 @@ jobs:
./gradlew :engines:pytorch:pytorch-native:compileJNI -Pcu10 -Ppt_version=$PYTORCH_VERSION
./gradlew :engines:pytorch:pytorch-native:cleanJNI
./gradlew :engines:pytorch:pytorch-native:compileJNI -Pcu11 -Ppt_version=$PYTORCH_VERSION
./gradlew :engines:pytorch:pytorch-native:cleanJNI
CXX=aarch64-linux-gnu-gcc ./gradlew :engines:pytorch:pytorch-native:compileJNI -Paarch64 -Ppt_version=$PYTORCH_VERSION
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
@@ -109,7 +108,6 @@ jobs:
yum -y update
yum -y install centos-release-scl-rh epel-release
yum -y install devtoolset-7 rh-git218 patch cmake3
yum -y install gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
ln -s /usr/bin/cmake3 /usr/bin/cmake
pip3 install awscli --upgrade
- uses: actions/checkout@v2
@@ -134,8 +132,6 @@ jobs:
./gradlew -Pjni -Ppt_version=$PYTORCH_VERSION :integration:test "-Dai.djl.default_engine=PyTorch"
./gradlew :engines:pytorch:pytorch-native:cleanJNI
./gradlew :engines:pytorch:pytorch-native:compileJNI -Pcu11 -Pprecxx11 -Ppt_version=$PYTORCH_VERSION
./gradlew :engines:pytorch:pytorch-native:cleanJNI
CXX=aarch64-linux-gnu-gcc ./gradlew :engines:pytorch:pytorch-native:compileJNI -Pprecxx11 -Paarch64 -Ppt_version=$PYTORCH_VERSION
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
@@ -164,33 +160,22 @@ jobs:
key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*') }}
restore-keys: |
${{ runner.os }}-gradle-
- name: Install CUDA 10.2
- name: Install CUDA 11.3
shell: cmd
run: |
curl.exe -L http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_441.22_windows.exe -o cuda102.exe
curl.exe -L https://developer.download.nvidia.com/compute/redist/cudnn/v7.6.4/cudnn-10.1-windows7-x64-v7.6.4.38.zip -o cudnn.zip
cuda102.exe -s
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
curl.exe -L https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe -o cuda.exe
curl.exe -L https://developer.download.nvidia.com/compute/redist/cudnn/v8.2.1/cudnn-11.3-windows-x64-v8.2.1.32.zip -o cudnn.zip
cuda.exe -s
mkdir cuda
unzip.exe cudnn.zip
cp.exe -a cuda/include cuda/lib cuda/bin "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.2/"
rm.exe -Rf cuda102.exe cuda.exe cudnn.zip cuda
- name: Release CPU JNI
shell: cmd
run: |
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
gradlew :engines:pytorch:pytorch-native:compileJNI -Ppt_version=${{ github.event.inputs.pt_version }}
- name: Release cuda10 JNI
shell: cmd
run: |
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v10.2"
set "PATH=%CUDA_PATH%\bin;%CUDA_PATH%\libnvvp;%PATH%"
gradlew :engines:pytorch:pytorch-native:cleanJNI :engines:pytorch:pytorch-native:compileJNI -Pcu10 -Ppt_version=${{ github.event.inputs.pt_version }}
cp.exe -a cuda/include cuda/lib cuda/bin "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.3/"
rm.exe -Rf cuda.exe cuda.exe cudnn.zip cuda
- name: Release cuda11 JNI
shell: cmd
run: |
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v10.2"
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.3"
set "PATH=%CUDA_PATH%\bin;%CUDA_PATH%\libnvvp;%PATH%"
gradlew :engines:pytorch:pytorch-native:cleanJNI :engines:pytorch:pytorch-native:compileJNI -Pcu11 -Ppt_version=${{ github.event.inputs.pt_version }}
- name: Configure AWS Credentials
4 changes: 2 additions & 2 deletions .github/workflows/nightly_publish.yml
@@ -108,8 +108,8 @@ jobs:
- name: Publish to snapshot repository
if: ${{ github.event.inputs.mode == '' || github.event.inputs.mode == 'snapshot' }}
run: |
./gradlew clean engines:pytorch:pytorch-jni:publish -Ppt_version=1.8.1 -Psnapshot
./gradlew clean engines:pytorch:pytorch-jni:publish -Ppt_version=1.9.1 -Psnapshot
./gradlew clean engines:pytorch:pytorch-jni:publish -Ppt_version=1.10.0 -Psnapshot
./gradlew clean publish -Psnapshot
cd bom
./gradlew publish -Psnapshot
@@ -121,8 +121,8 @@ jobs:
- name: Publish to staging repository
if: ${{ github.event.inputs.mode == 'staging' }}
run: |
./gradlew clean engines:pytorch:pytorch-jni:publish -Ppt_version=1.8.1 -P${{ github.event.inputs.mode }}
./gradlew clean engines:pytorch:pytorch-jni:publish -Ppt_version=1.9.1 -P${{ github.event.inputs.mode }}
./gradlew clean engines:pytorch:pytorch-jni:publish -Ppt_version=1.10.0 -P${{ github.event.inputs.mode }}
./gradlew clean publish -P${{ github.event.inputs.mode }}
cd bom
./gradlew publish -P${{ github.event.inputs.mode }}
@@ -17,13 +17,19 @@
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.Shape;
import java.util.Arrays;

import org.testng.SkipException;
import org.testng.annotations.Test;

/** The file is for testing PyTorch MKLDNN functionalities. */
public class MkldnnTest {

@Test
public void testMkldnn() {
if (!"amd64".equals(System.getProperty("os.arch"))) {
throw new SkipException("MKLDNN Test requires x86_64 arch.");
}

System.setProperty("ai.djl.pytorch.use_mkldnn", "true");
try (NDManager manager = NDManager.newBaseManager()) {
NDArray[] arrays = {
7 changes: 6 additions & 1 deletion engines/pytorch/pytorch-jni/build.gradle
@@ -26,7 +26,12 @@ processResources {
"win-x86_64/cpu/djl_torch.dll",
"win-x86_64/cu102/djl_torch.dll"
]
if (ptVersion.startsWith("1.10.")) {
if (ptVersion.startsWith("1.11.")) {
files.add("linux-aarch64/cpu/libdjl_torch.so")
files.add("linux-x86_64/cu113/libdjl_torch.so")
files.add("linux-x86_64/cu113-precxx11/libdjl_torch.so")
files.add("win-x86_64/cu113/djl_torch.dll")
} else if (ptVersion.startsWith("1.10.")) {
files.add("linux-x86_64/cu113/libdjl_torch.so")
files.add("linux-x86_64/cu113-precxx11/libdjl_torch.so")
files.add("win-x86_64/cu113/djl_torch.dll")
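With the new 1.11 branch above, the packaged JNI resources now also include the linux-aarch64 CPU library. A hedged illustration, reusing the publish invocation that appears in nightly_publish.yml in this commit (the explicit 1.11.0 value is an assumption, since 1.11.0 is presumably the new default version):

    # builds and publishes the pytorch-jni jar for 1.11.0; the processResources branch
    # above adds linux-aarch64/cpu/libdjl_torch.so plus the cu113 variants to the jar
    ./gradlew clean engines:pytorch:pytorch-jni:publish -Ppt_version=1.11.0 -Psnapshot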
4 changes: 4 additions & 0 deletions engines/pytorch/pytorch-native/CMakeLists.txt
@@ -48,6 +48,10 @@ set(SOURCE_FILES
"src/main/native/ai_djl_pytorch_jni_cache.h"
"src/main/native/ai_djl_pytorch_jni_cache.cc")

if(PT_OLD_VERSION)
add_compile_definitions(V1_10_X)
endif()

add_library(djl_torch SHARED ${SOURCE_FILES})
# build host
if(NOT BUILD_ANDROID)
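The new PT_OLD_VERSION switch is driven from build.sh and build.cmd later in this commit; configuring by hand works the same way. A minimal sketch, assuming the out-of-tree build directory those scripts create:

    # define V1_10_X when compiling the JNI against PyTorch 1.9.x / 1.10.x
    cmake -DCMAKE_PREFIX_PATH=libtorch -DPT_OLD_VERSION=1 ..
    cmake --build . --config Release

    # for 1.11.0 the variable is left empty, so if(PT_OLD_VERSION) is false
    # and V1_10_X is never defined
    cmake -DCMAKE_PREFIX_PATH=libtorch -DPT_OLD_VERSION= ..
    cmake --build . --config Release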
14 changes: 12 additions & 2 deletions engines/pytorch/pytorch-native/build.cmd
@@ -5,7 +5,7 @@
@rem choco install jdk8 -y

set FILEPATH="libtorch"
set VERSION="%1"
set VERSION=%1
if "%2" == "cpu" (
set DOWNLOAD_URL="https://download.pytorch.org/libtorch/cpu/libtorch-win-shared-with-deps-%VERSION%%%2Bcpu.zip"
) else if "%2" == "cu102" (
@@ -28,9 +28,19 @@ if exist %FILEPATH% (
echo Finished downloading libtorch
)

if "%VERSION%" == "1.11.0" (
copy /y src\main\patch\cuda.cmake libtorch\share\cmake\Caffe2\public\
)
if "%VERSION%" == "1.10.0" (
set PT_OLD_VERSION=1
)
if "%VERSION%" == "1.9.1" (
set PT_OLD_VERSION=1
)

if exist build rd /q /s build
md build\classes
cd build
javac -sourcepath ..\..\pytorch-engine\src\main\java\ ..\..\pytorch-engine\src\main\java\ai\djl\pytorch\jni\PyTorchLibrary.java -h include -d classes
cmake -DCMAKE_PREFIX_PATH=libtorch ..
cmake -DCMAKE_PREFIX_PATH=libtorch -DPT_OLD_VERSION=%PT_OLD_VERSION% ..
cmake --build . --config Release
16 changes: 7 additions & 9 deletions engines/pytorch/pytorch-native/build.gradle
@@ -12,7 +12,7 @@ if (project.hasProperty("pt_version") && project.property("pt_version") != "") {
}
boolean isRelease = project.hasProperty("release") || project.hasProperty("staging")
boolean isPrecxx11 = project.hasProperty("precxx11")
boolean isAarch64 = project.hasProperty("aarch64")
boolean isAarch64 = project.hasProperty("aarch64") || System.properties["os.arch"] == "aarch64"

String FLAVOR = "cpu"
if (project.hasProperty("cu10")) {
@@ -97,8 +97,7 @@ def prepareNativeLib(String binaryRoot, String ver) {
]

def aarch64Files = [
"${ver}/libtorch-cxx11-shared-with-deps-${ver}-aarch64.zip": "cpu/linux-aarch64",
"${ver}/libtorch-shared-with-deps-${ver}-aarch64.zip" : "cpu-precxx11/linux-aarch64"
"${ver}/libtorch-cxx11-shared-with-deps-${ver}-aarch64.zip": "cpu/linux-aarch64"
]

copyNativeLibToOutputDir(files, binaryRoot, officialPytorchUrl)
@@ -256,8 +255,7 @@ task uploadS3 {
"${BINARY_ROOT}/cu113/linux-x86_64/native/lib/",
"${BINARY_ROOT}/cu113/win-x86_64/native/lib/",
"${BINARY_ROOT}/cu113-precxx11/linux-x86_64/native/lib/",
"${BINARY_ROOT}/cpu/linux-aarch64/native/lib/",
"${BINARY_ROOT}/cpu-precxx11/linux-aarch64/native/lib/"
"${BINARY_ROOT}/cpu/linux-aarch64/native/lib/"
]
uploadDirs.each { item ->
fileTree(item).files.name.each {
@@ -315,11 +313,11 @@ flavorNames.each { flavor ->
libstd.text = new URL("https://publish.djl.ai/extra/THIRD-PARTY-LICENSES_qHnMKgbdWa.txt").text
}
}
from ("${BINARY_ROOT}/${flavor}/${osName}/native/lib") {
into ("pytorch/${flavor}/${osName}")
from("${BINARY_ROOT}/${flavor}/${osName}/native/lib") {
into("pytorch/${flavor}/${osName}")
}
from ("${BINARY_ROOT}/pytorch.properties") {
into ("native/lib")
from("${BINARY_ROOT}/pytorch.properties") {
into("native/lib")
}
from "src/main/resources"
archiveClassifier = "${osName}"
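Because isAarch64 is now also derived from os.arch, the aarch64 JNI can be compiled natively on an Arm host, consistent with the CXX=aarch64-linux-gnu-gcc cross-compile steps being dropped from the workflow above. A hedged sketch (task and flags taken from commands elsewhere in this commit; running on a linux-aarch64 machine is the assumption):

    # on an aarch64 host the -Paarch64 property is implied by os.arch,
    # so the plain compileJNI invocation picks the aarch64 libtorch
    ./gradlew :engines:pytorch:pytorch-native:compileJNI -Ppt_version=1.11.0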
7 changes: 5 additions & 2 deletions engines/pytorch/pytorch-native/build.sh
@@ -42,17 +42,20 @@ if [[ ! -d "libtorch" ]]; then
fi
fi

if [[ "$VERSION" =~ ^1\.10\..*|^1\.9\..* ]]; then
PT_OLD_VERSION=1
fi
pushd .

rm -rf build
mkdir build && cd build
mkdir classes
javac -sourcepath ../../pytorch-engine/src/main/java/ ../../pytorch-engine/src/main/java/ai/djl/pytorch/jni/PyTorchLibrary.java -h include -d classes
cmake -DCMAKE_PREFIX_PATH=libtorch ..
cmake -DCMAKE_PREFIX_PATH=libtorch -DPT_OLD_VERSION=${PT_OLD_VERSION} ..
cmake --build . --config Release -- -j "${NUM_PROC}"

if [[ $PLATFORM == 'darwin' ]]; then
install_name_tool -add_rpath @loader_path libdjl_torch.dylib
fi

popd
popd
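A hedged usage sketch for the version gate added above; the argument order mirrors build.cmd's %1/%2 handling and is an assumption, not something this commit documents:

    # 1.11.0 (new default): the regex does not match, PT_OLD_VERSION stays unset,
    # and cmake configures without the V1_10_X compile definition
    ./build.sh 1.11.0 cpu

    # 1.10.x / 1.9.x: PT_OLD_VERSION=1 is forwarded to cmake, which defines V1_10_X
    # so the legacy torch::jit::CodeTemplate code paths are compiled
    ./build.sh 1.10.0 cpu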
@@ -12,10 +12,12 @@
*/
#include <torch/torch.h>
// clang-format off
//#include <torch/csrc/jit/frontend/code_template.h>
#include <ATen/code_template.h>
#ifdef V1_10_X
#include <torch/csrc/jit/frontend/code_template.h>
#else
#include <ATen/code_template.h>
#endif
#include <ATen/core/jit_type.h>

// clang-format on

#include <sstream>
@@ -165,8 +167,22 @@ inline std::string FormatMemory(int64_t bytes) {
return oss.str();
}

// the code snippet is copied from torch/csrc/autograd/profiler.cpp
static at::jit::CodeTemplate event_template(R"(
// the code snippet is copied from torch/csrc/autograd/profiler_legacy.cpp
#ifdef V1_10_X
static torch::jit::CodeTemplate event_template(R"(
{
"name": "${name}",
"ph": "X",
"ts": ${ts},
"dur": ${dur},
"tid": ${tid},
"pid": "CPU Functions",
"shape": ${shape},
"cpu mem": "${cpu_mem}",
"args": {}
})");
#else
static const at::jit::CodeTemplate event_template(R"(
{
"name": "${name}",
"ph": "X",
@@ -178,6 +194,7 @@ static at::jit::CodeTemplate event_template(R"(
"cpu mem": "${cpu_mem}",
"args": {}
})");
#endif

// The function doesn't support GPU yet
// You can refer to
@@ -230,7 +247,11 @@ void WriteProfilerEventsToStream(std::ostream& out, const std::vector<std::vecto
LegacyEvent* start = it->second;
int64_t memory_usage = mem_it->second;

#ifdef V1_10_X
torch::jit::TemplateEnv env;
#else
at::jit::TemplateEnv env;
#endif
env.s("name", start->name());
env.d("ts", profiler_start->cpuElapsedUs(*start));
env.d("dur", start->cpuElapsedUs(*evt));
@@ -30,10 +30,16 @@ namespace utils {

#if !defined(__ANDROID__)
// for image interpolation
#ifdef V1_10_X
typedef torch::variant<torch::enumtype::kNearest, torch::enumtype::kLinear, torch::enumtype::kBilinear,
torch::enumtype::kBicubic, torch::enumtype::kTrilinear, torch::enumtype::kArea>
mode_t;
#else
typedef torch::variant<torch::enumtype::kNearest, torch::enumtype::kLinear, torch::enumtype::kBilinear,
torch::enumtype::kBicubic, torch::enumtype::kTrilinear, torch::enumtype::kArea, torch::enumtype::kNearestExact>
mode_t;
#endif
#endif

inline jint GetDTypeFromScalarType(const torch::ScalarType& type) {
if (torch::kFloat32 == type) {
@@ -109,7 +115,9 @@ inline mode_t GetInterpolationMode(jint jmode) {
case 5:
return torch::kArea;
case 6:
#ifndef V1_10_X
return torch::kNearestExact;
#endif
default:
throw;
}