Skip to content

Commit

Permalink
Enable shim v2 build
Browse files Browse the repository at this point in the history
Summary:
Follow-up to pytorch#125087.

This diff creates the shim v2 header and .cpp files and the corresponding build rules.

Differential Revision: D56617546
  • Loading branch information
hl475 authored and facebook-github-bot committed Apr 30, 2024
1 parent 8320b77 commit 25fb0a0
Show file tree
Hide file tree
Showing 3 changed files with 26 additions and 0 deletions.
13 changes: 13 additions & 0 deletions buckbuild.bzl
Expand Up @@ -385,6 +385,8 @@ def get_aten_generated_files(enabled_backends):
"core/TensorMethods.cpp",
"core/aten_interned_strings.h",
"core/enum_tag.h",
"torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.cpp",
"torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.h",
] + get_aten_derived_type_srcs(enabled_backends)

# This is tiresome. A better strategy would be to unconditionally
Expand Down Expand Up @@ -469,6 +471,7 @@ def gen_aten_files(
cmd = "$(exe {}torchgen:gen) ".format(ROOT_PATH) + " ".join([
"--source-path $(location {}:aten_src_path)/aten/src/ATen".format(ROOT),
"--install_dir $OUT",
"--aoti_install_dir $OUT/torch/csrc/inductor/aoti_torch/generated"
] + extra_params),
visibility = visibility,
compatible_with = compatible_with,
Expand Down Expand Up @@ -1008,6 +1011,16 @@ def define_buck_targets(
visibility = ["PUBLIC"],
)

# Exposes the torchgen-generated AOTInductor CPU shim header
# (c_shim_cpu.h) as a public header-only library. The header is an
# output of the :gen_aten genrule (torchgen is invoked with
# --aoti_install_dir, which places it under
# torch/csrc/inductor/aoti_torch/generated).
# NOTE(review): header_namespace is empty, presumably so dependents
# include the header by its full repo-relative path — confirm against
# how other generated-header targets in this file are consumed.
fb_xplat_cxx_library(
    name = "generated-aoti-cpu-headers",
    header_namespace = "",
    exported_headers = {
        "torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.h": ":gen_aten[torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.h]",
    },
    labels = labels,
    visibility = ["PUBLIC"],
)

fb_xplat_cxx_library(
name = "generated-version-header",
header_namespace = "torch",
Expand Down
12 changes: 12 additions & 0 deletions build.bzl
Expand Up @@ -73,6 +73,7 @@ def define_targets(rules):
"$(execpath //torchgen:gen)",
"--install_dir=$(RULEDIR)",
"--source-path aten/src/ATen",
"--aoti_install_dir=$(RULEDIR)/torch/csrc/inductor/aoti_torch/generated"
] + (["--static_dispatch_backend CPU"] if rules.is_cpu_static_dispatch_build() else []))

gen_aten_outs_cuda = (
Expand All @@ -83,6 +84,7 @@ def define_targets(rules):
gen_aten_outs = (
GENERATED_H + GENERATED_H_CORE +
GENERATED_CPP + GENERATED_CPP_CORE +
GENERATED_AOTI_H + GENERATED_AOTI_CPP +
aten_ufunc_generated_cpu_sources() +
aten_ufunc_generated_cpu_kernel_sources() + [
"Declarations.yaml",
Expand Down Expand Up @@ -316,3 +318,13 @@ GENERATED_AUTOGRAD_CPP = [
"torch/csrc/lazy/generated/RegisterAutogradLazy.cpp",
"torch/csrc/lazy/generated/RegisterLazy.cpp",
] + _GENERATED_AUTOGRAD_CPP_HEADERS + GENERATED_LAZY_H

# Headers for the torchgen-generated AOTInductor C shims, one per
# device backend (produced by gen.py via --aoti_install_dir).
GENERATED_AOTI_H = [
    "torch/csrc/inductor/aoti_torch/generated/c_shim_{}.h".format(backend)
    for backend in ["cpu", "cuda"]
]

# Implementation files for the torchgen-generated AOTInductor C shims,
# one per device backend (produced by gen.py via --aoti_install_dir).
GENERATED_AOTI_CPP = [
    "torch/csrc/inductor/aoti_torch/generated/c_shim_{}.cpp".format(backend)
    for backend in ["cpu", "cuda"]
]
1 change: 1 addition & 0 deletions build_variables.bzl
Expand Up @@ -471,6 +471,7 @@ inductor_core_resources = [
"torch/csrc/inductor/aoti_torch/shim_common.cpp",
"torch/csrc/inductor/aoti_torch/tensor_converter.cpp",
"torch/csrc/inductor/inductor_ops.cpp",
":gen_aten[torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.cpp]",
]

libtorch_core_sources = sorted(
Expand Down

0 comments on commit 25fb0a0

Please sign in to comment.