Fix Torch-MLIR LTC Backend based off latest PyTorch master (#723)
* Changes as a result of the LTC TS backend decoupling

* Fix bugs in BackendImpl and codegen

* Fix based on latest PyTorch master
antoniojkim committed Jul 7, 2022
1 parent 4ffcbb4 commit 0109b69
Showing 16 changed files with 218 additions and 737 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -26,7 +26,7 @@ bazel-*
 # Autogenerated files
 /generated_native_functions.yaml
 /generated_backend.hash
-/python/torch_mlir/csrc/backend/LazyLazyIr.h
+/python/torch_mlir/csrc/backend/LazyIr.h
 /python/torch_mlir/csrc/backend/LazyNativeFunctions.cpp
 /python/torch_mlir/csrc/backend/LazyNativeFunctions.h
 /python/torch_mlir/csrc/backend/GenLazyShapeInference.cpp
58 changes: 44 additions & 14 deletions build_tools/autogen_ltc_backend.py
@@ -24,6 +24,10 @@
 from codegen.model import NativeFunctionsGroup
 
 
+def isOptionalCType(arg):
+    return str(type(arg)) == "<class 'tools.codegen.api.types.OptionalCType'>"
+
+
 def generate_native_functions(
     config_path: Path, torch_ops_file: Path, out_file: Path
 ):
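Note: a hypothetical illustration (not part of the commit) of what the new isOptionalCType helper does: it matches on the type's fully qualified printed name rather than importing OptionalCType from tools.codegen.api.types for an isinstance check. The _FakeOptional class below is invented for the demo.

def isOptionalCType(arg):
    return str(type(arg)) == "<class 'tools.codegen.api.types.OptionalCType'>"

class _FakeOptional:  # invented stand-in; its qualified name does not match
    pass

assert not isOptionalCType(_FakeOptional())
assert not isOptionalCType(None)  # str(type(None)) == "<class 'NoneType'>"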
@@ -98,7 +102,7 @@ def get_native_function_name(f):
     yaml.dump(
         {
             "backend": "Lazy",
-            "cpp_namespace": "torch_lazy_tensors",
+            "cpp_namespace": "torch::lazy",
             "full_codegen": opnames,
             "supported": sorted(supported_ops),
         },
@@ -120,21 +124,46 @@ def get_native_function_name(f):
 
 @dataclass(frozen=True)
 class MlirLazyIr(codegen.gen_lazy_tensor.dest.LazyIR):
-    lowering_function_type: str = "torch::lazy::MlirFunction"
-    lowering_context_type: str = "torch::lazy::MlirLoweringContext*"
-    lowering_return_type: str = "torch::lazy::MlirOpVector"
-
-    def lowering_body(self, f):
+    def lowering_function(self, f):
         func = (
             f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
         )
         schema = LazyIrSchema(func)
 
+        emplace_arguments = []
+        for arg in schema.positional_args:
+            if arg.is_lazy_value:
+                if isOptionalCType(arg.lazy_type):
+                    emplace_arguments.append(f"has_{arg.name} ? loctx->GetOutputOp(operand(i++)) : nullptr")
+                    continue
+                emplace_arguments.append('loctx->GetOutputOp(operand(i++))')
+                continue
+            emplace_arguments.append(f'"{arg.name}", {arg.name}')
+
+        emplace_arguments_str = "\n        ".join(
+            [f"arguments.emplace_back({a});" for a in emplace_arguments])
+        emplace_kwarg_values = [f'"{t.name}", loctx->GetOutputOp(operand(i++))' for t in schema.keyword_values]
+        emplace_kwarg_scalars = [f'"{t.name}", {t.name}' for t in schema.keyword_scalars]
+        emplace_kwarguments = "\n        ".join(
+            [f"kwarguments.emplace_back({a});" for a in emplace_kwarg_values + emplace_kwarg_scalars])
+
         return f"""
-        UNIMPLEMENTED_ERROR(
-            "'{func}' lowering not yet implemented"
-        );
-        """.rstrip()
+    TorchMlirOpVector Lower(TorchMlirFunction function, TorchMlirLoweringContext* loctx) const override {{
+        PRINT_FUNCTION();
+        std::vector<torch::jit::NamedValue> arguments;
+        std::vector<torch::jit::NamedValue> kwarguments;
+        arguments.reserve({len(emplace_arguments)});
+        kwarguments.reserve({len(emplace_kwarg_values + emplace_kwarg_scalars)});
+        size_t i = 0;
+        {emplace_arguments_str}
+        {emplace_kwarguments}
+        torch::lazy::TorchMlirOpVector {schema.aten_name}_out = torch::lazy::LowerTorchMlirBuiltin(function, op().op, arguments, kwarguments);
+        CHECK_EQ({schema.aten_name}_out.size(), {len(func.returns)});
+        return {schema.aten_name}_out;
+    }}
+    """.strip()
 
 
 def generate_backend(
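Note: the new lowering_function splits the op schema into three kinds of positional arguments before templating the C++ Lower method: optional lazy values (guarded by a has_<name> flag and lowered to nullptr when absent), plain lazy values (consumed in order via operand(i++)), and scalars (passed through as named values). A runnable Python sketch of that classification, using an invented Arg record and a hypothetical signature my_op(Tensor self, Tensor? weight, Scalar alpha) in place of LazyIrSchema:

from dataclasses import dataclass

@dataclass
class Arg:  # invented stand-in for the codegen's argument model
    name: str
    is_lazy_value: bool
    is_optional: bool = False

positional_args = [
    Arg("self", is_lazy_value=True),
    Arg("weight", is_lazy_value=True, is_optional=True),
    Arg("alpha", is_lazy_value=False),
]

emplace_arguments = []
for arg in positional_args:
    if arg.is_lazy_value:
        if arg.is_optional:
            # Optional lazy tensors lower to nullptr when absent.
            emplace_arguments.append(
                f"has_{arg.name} ? loctx->GetOutputOp(operand(i++)) : nullptr")
        else:
            emplace_arguments.append("loctx->GetOutputOp(operand(i++))")
    else:
        # Scalars become named jit::NamedValue pairs.
        emplace_arguments.append(f'"{arg.name}", {arg.name}')

print("\n".join(f"arguments.emplace_back({a});" for a in emplace_arguments))
# arguments.emplace_back(loctx->GetOutputOp(operand(i++)));
# arguments.emplace_back(has_weight ? loctx->GetOutputOp(operand(i++)) : nullptr);
# arguments.emplace_back("alpha", alpha);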
@@ -151,14 +180,14 @@ def gen_fallback_code(*args, **kwargs):

     codegen.dest.lazy_ir.gen_fallback_code = gen_fallback_code
 
-    codegen.gen_lazy_tensor.run(
+    codegen.gen_lazy_tensor.run_gen_lazy_tensor(
+        backend_name="TorchMlir",
         aten_path=str(TORCH_DIR.joinpath("aten", "src", "ATen")),
         source_yaml=str(source_yaml),
         output_dir=str(backend_path),
         dry_run=False,
-        impl_path=str(backend_path.joinpath("aten_ltc_mlir_type.cpp")),
-        gen_ts_lowerings=False,
-        node_base="torch::lazy::MlirNode",
+        impl_path=str(backend_path.joinpath("mlir_native_functions.cpp")),
+        node_base="torch::lazy::TorchMlirNode",
         node_base_hdr=str(backend_path.joinpath("mlir_node.h")),
         tensor_class="torch::lazy::LazyTensor",
         tensor_class_hdr="torch/csrc/lazy/core/tensor.h",
@@ -298,7 +327,6 @@ def main(args):
     new_hash = m.hexdigest().strip()
 
     if args.force or new_hash != prev_hash:
-        hash_file.write_text(new_hash)
         parsed_yaml, grouped_native_functions = generate_native_functions(
             config_path, torch_ops_file, native_functions
         )
@@ -310,6 +338,8 @@
             grouped_native_functions,
         )
 
+        hash_file.write_text(new_hash)
+
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
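Note: moving hash_file.write_text(new_hash) below the generation calls reads as an ordering bug fix: previously the new hash was recorded before codegen ran, so a failure partway through generation would still mark the outputs as up to date and the next invocation would silently skip regeneration. A minimal sketch of the fixed pattern (run_codegen is an invented stand-in for the generate_* calls):

from pathlib import Path

def run_codegen() -> None:
    pass  # placeholder for generate_native_functions() + generate_backend()

def regenerate_if_stale(hash_file: Path, new_hash: str, force: bool = False) -> None:
    prev_hash = hash_file.read_text().strip() if hash_file.exists() else None
    if force or new_hash != prev_hash:
        run_codegen()                   # may raise on failure
        hash_file.write_text(new_hash)  # recorded only after codegen succeeds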
4 changes: 2 additions & 2 deletions build_tools/autogen_ltc_backend.yaml
@@ -37,10 +37,9 @@ supported:
 - empty
 - expand
 - fill_
-# - native_batch_norm_backward
 - native_batch_norm
+# - native_batch_norm_backward
 - permute
-- repeat
 - squeeze
 - t
 - unsqueeze
@@ -50,3 +49,4 @@ additional_ops:
 # Additional ops to support that are not supported by Torch-MLIR explicitly
 - _copy_from
 - _copy_from_and_resize
+- native_batch_norm_backward
4 changes: 1 addition & 3 deletions python/torch_mlir/csrc/CMakeLists.txt
@@ -20,16 +20,14 @@ link_directories("${TORCH_INSTALL_PREFIX}/lib")
 
 
 add_library(torch_mlir_ltc_backend SHARED
-  backend/aten_eager_fallback.cpp
-  backend/aten_ltc_mlir_type.cpp
   backend/backend_impl.cpp
   backend/LazyNativeFunctions.cpp
   backend/LazyShapeInference.cpp
   backend/GenLazyShapeInference.cpp
   backend/mlir_lowering_context.cpp
+  backend/mlir_native_functions.cpp
   backend/mlir_node.cpp
   backend/RegisterLazy.cpp
-  tensor_aten_ops.cpp
 )
 
 target_link_libraries(torch_mlir_ltc_backend
1 change: 1 addition & 0 deletions python/torch_mlir/csrc/backend/LazyShapeInference.h
@@ -69,6 +69,7 @@ TORCH_API std::vector<Shape> compute_shape_new_zeros(const at::Tensor & self, at
 TORCH_API std::vector<Shape> compute_shape_rand_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
 TORCH_API std::vector<Shape> compute_shape_relu(const at::Tensor & self);
 TORCH_API std::vector<Shape> compute_shape_relu_(at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
 TORCH_API std::vector<Shape> compute_shape_reshape(const at::Tensor & self, at::IntArrayRef shape);
 TORCH_API std::vector<Shape> compute_shape_rsub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha);
 TORCH_API std::vector<Shape> compute_shape_select(const at::Tensor & self, int64_t dim, int64_t index);
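Note: repeat also left the hand-written "supported" list in autogen_ltc_backend.yaml, so the codegen path now needs this shape function. The commit shows only the declaration; as an assumption about the semantics the definition must encode (mirroring at::Tensor::repeat), a Python sketch:

def repeat_output_shape(input_shape, repeats):
    # repeats must cover every dim; missing leading dims act as size 1,
    # then each dim is scaled elementwise.
    assert len(repeats) >= len(input_shape)
    pad = len(repeats) - len(input_shape)
    padded = [1] * pad + list(input_shape)
    return [r * d for r, d in zip(repeats, padded)]

# torch.ones(2, 3).repeat(2, 1, 4).shape == (2, 2, 12)
assert repeat_output_shape([2, 3], [2, 1, 4]) == [2, 2, 12]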
58 changes: 0 additions & 58 deletions python/torch_mlir/csrc/backend/aten_eager_fallback.cpp

This file was deleted.

27 changes: 0 additions & 27 deletions python/torch_mlir/csrc/backend/aten_eager_fallback.h

This file was deleted.

