Skip to content

Commit

Permalink
python-onnxruntime: split C++ and Python libraries & some other changes
Browse files Browse the repository at this point in the history
* Rename python-onnxruntime-cuda to onnxruntime-cuda - this split
  package actually does not contain Python libraries.
* Move optdepends to the correct package
* Drop the flatbuffers fix after upstream added the compatibility back [1]
* Improve the onednn patch - fall back to the bundled onednn if the system
  one is missing & wrap other usages of `DNNL_DLL_PATH`
* Add CUDA architectures introduced in CUDA 11.8. See [2]
* Refresh patches for 1.13

[1] google/flatbuffers#7499
[2] archlinux/svntogit-community@54642de
  • Loading branch information
Chih-Hsuan Yen authored and yan12125 committed Oct 25, 2022
1 parent bf89d3e commit c3dd234
Show file tree
Hide file tree
Showing 3 changed files with 76 additions and 56 deletions.
76 changes: 48 additions & 28 deletions archlinuxcn/python-onnxruntime/PKGBUILD
Expand Up @@ -5,28 +5,16 @@ _ENABLE_CUDA=1
pkgbase=python-onnxruntime
# Do not split the DNNL EP into another package, as it's needed unconditionally at runtime if built at compile time
# https://github.com/microsoft/onnxruntime/blob/v1.9.1/onnxruntime/python/onnxruntime_pybind_state.cc#L533
pkgname=(python-onnxruntime)
pkgver=1.12.1
pkgname=(onnxruntime python-onnxruntime)
pkgver=1.13.1
pkgdesc='Cross-platform, high performance scoring engine for ML models'
pkgrel=1
arch=(x86_64)
url='https://github.com/microsoft/onnxruntime'
license=(MIT)
depends=(nsync re2 openmpi onednn libprotobuf-lite.so
python-coloredlogs python-flatbuffers python-numpy python-packaging python-protobuf python-sympy)
makedepends=(git cmake pybind11 python-setuptools nlohmann-json chrono-date boost eigen flatbuffers)
optdepends=(
# https://github.com/microsoft/onnxruntime/pull/9969
'python-onnx: for the backend API, quantization, orttraining, transformers and various tools'
'python-psutil: for transformers'
'python-py-cpuinfo: for transformers'
'python-py3nvml: for transformers'
'python-transformers: for transformers'
'python-scipy: for transformers and various tools'
'python-pytorch: for transformers, orttraining and various tools'
'python-cerberus: for orttraining'
'python-h5py: for orttraining'
)
depends=(nsync re2 openmpi libprotobuf-lite.so)
makedepends=(git cmake pybind11 python-setuptools nlohmann-json chrono-date boost eigen flatbuffers onednn
python-coloredlogs python-flatbuffers python-numpy python-packaging python-protobuf python-sympy)
# not de-vendored libraries
# onnx: needs shared libonnx (https://github.com/onnx/onnx/issues/3030)
source=("git+https://github.com/microsoft/onnxruntime#tag=v$pkgver"
Expand All @@ -46,19 +34,19 @@ sha512sums=('SKIP'
'SKIP'
'SKIP'
'SKIP'
'6ad02636e2cba8f2e82f5ac733856eee3016750e809ee18a41c4edc78bca761f30ac174d7d683f9b14b9f72310dd654811b1ecc9dda514e12bac6b7440c449c2'
'8f0bd7ae59f86f002c88368a8c2852b9613363771aae61f91a90bfc13dcd3173e43d7988a59ccef86657cf6abfcc53837bbf445c216a7994a765a7e0770d0f5f'
'7d55b0d4232183a81c20a5049f259872150536eed799d81a15e7f10b5c8b5279b443ba96d7b97c0e4338e95fc18c9d6f088e348fc7002256ee7170d25b27d80d'
'6735c7aca2ba2f1f2a5286eb064125bf7f2c68a575d572dd157769d15778ff3e717b3a53d696c767748229f23ee6c3a7c82679df1d86283d7c4dd0ec9103ae08')
'ab48d27be98a88d3c361e1d0aac3b1e078096c0902ba7a543261a1c24faed0f1f44947a1b7ea1f264434cd2199b9d563d2447c14b6afbdf9900e68a65f7d2619')
# CUDA seems not working with LTO
options+=('!lto')

if [[ $_ENABLE_CUDA = 1 ]]; then
pkgname+=(python-onnxruntime-cuda)
pkgname+=(onnxruntime-cuda)
makedepends+=(cuda cudnn nccl gcc11)
fi

# Check PKGBUILDs of python-pytorch and tensorflow for CUDA architectures built by official packages
_CUDA_ARCHITECTURES="52-real;53-real;60-real;61-real;62-real;70-real;72-real;75-real;80-real;86-real;86-virtual"
_CUDA_ARCHITECTURES="52-real;53-real;60-real;61-real;62-real;70-real;72-real;75-real;80-real;86-real;87-real;89-real;90-real;90-virtual"

prepare() {
cd onnxruntime
Expand Down Expand Up @@ -86,13 +74,16 @@ build() {
export CUDAHOSTCXX=$CXX
fi

# Use -Donnxruntime_ENABLE_LAZY_TENSOR=OFF as it requires patched python-pytorch
# See: https://github.com/microsoft/onnxruntime/pull/10460 https://github.com/pytorch/pytorch/pulls/wschin
local cmake_args=(
-DCMAKE_INSTALL_PREFIX=/usr
-Donnxruntime_ENABLE_PYTHON=ON
-Donnxruntime_PREFER_SYSTEM_LIB=ON
-Donnxruntime_BUILD_SHARED_LIB=ON
-Donnxruntime_BUILD_UNIT_TESTS=OFF
-Donnxruntime_ENABLE_TRAINING=ON
-Donnxruntime_ENABLE_LAZY_TENSOR=OFF
-Donnxruntime_USE_MPI=ON
-Donnxruntime_USE_PREINSTALLED_EIGEN=ON
-Donnxruntime_USE_DNNL=ON
Expand Down Expand Up @@ -131,31 +122,60 @@ build() {
python ../setup.py build
}

package_python-onnxruntime() {
package_onnxruntime() {
depends+=(onednn)

cd onnxruntime/build

DESTDIR="$pkgdir" cmake --install .

install -Ddm755 "$pkgdir"/usr/share/licenses
for f in LICENSE ThirdPartyNotices.txt ; do
install -Dm644 ../$f -t "$pkgdir"/usr/share/licenses/$pkgname
done

# installed as split packages
rm -vf "$pkgdir"/usr/lib/libonnxruntime_providers_cuda.so
}

package_python-onnxruntime() {
depends+=(onnxruntime python-coloredlogs python-flatbuffers python-numpy python-packaging python-protobuf python-sympy)
optdepends=(
# https://github.com/microsoft/onnxruntime/pull/9969
'python-onnx: for the backend API, quantization, orttraining, transformers and various tools'
'python-psutil: for transformers'
'python-py-cpuinfo: for transformers'
'python-py3nvml: for transformers'
'python-transformers: for transformers'
'python-scipy: for transformers and various tools'
'python-pytorch: for transformers, orttraining and various tools'
'python-cerberus: for orttraining'
'python-h5py: for orttraining'
)

cd onnxruntime/build

python ../setup.py install --root="$pkgdir" --skip-build --optimize=1

PY_ORT_DIR="$(python -c 'import site; print(site.getsitepackages()[0])')/onnxruntime"
install -Ddm755 "$pkgdir"/usr/share/licenses/$pkgname
for f in LICENSE ThirdPartyNotices.txt ; do
ln -s "$PY_ORT_DIR/$f" "$pkgdir"/usr/share/licenses/$pkgname/$f
done
# already installed by `cmake --install`, and not useful as this path is not looked up by the linker
rm -vf "$pkgdir/$PY_ORT_DIR"/capi/libonnxruntime_providers_*

install -Ddm755 "$pkgdir"/usr/share/licenses
ln -s onnxruntime "$pkgdir"/usr/share/licenses/$pkgname

# installed as split packages
rm -vf "$pkgdir"/usr/lib/libonnxruntime_providers_cuda.so
}

package_python-onnxruntime-cuda() {
package_onnxruntime-cuda() {
depends=(cuda cudnn nccl openmpi nsync)
conflicts=('python-onnxruntime-cuda')
replaces=('python-onnxruntime-cuda')
pkgdesc+=' (CUDA execution provider)'

cd onnxruntime/build
install -Dm755 libonnxruntime_providers_cuda.so -t "$pkgdir"/usr/lib
install -Ddm755 "$pkgdir"/usr/share/licenses
ln -s python-onnxruntime "$pkgdir"/usr/share/licenses/$pkgname
ln -s onnxruntime "$pkgdir"/usr/share/licenses/$pkgname
}
21 changes: 2 additions & 19 deletions archlinuxcn/python-onnxruntime/build-fixes.patch
Expand Up @@ -25,29 +25,12 @@ index a027c69e0..eb7608518 100644
message("Use re2 from preinstalled system lib")
else()
add_subdirectory(external/re2 EXCLUDE_FROM_ALL)
@@ -1421,11 +1421,11 @@
#Dependencies end. In the next we'll enable "treat warning as error"

if (onnxruntime_PREFER_SYSTEM_LIB)
- find_package(Flatbuffers)
+ find_package(FlatBuffers)
@@ -1421,7 +1421,7 @@
endif()
-if (Flatbuffers_FOUND)
+if (FlatBuffers_FOUND)
if (Flatbuffers_FOUND)
message("Use flatbuffers from preinstalled system lib")
- add_library(flatbuffers ALIAS flatbuffers::flatbuffers)
+ add_library(flatbuffers ALIAS flatbuffers::flatbuffers_shared)
else()
message("Use flatbuffers from submodule")
# We do not need to build flatc for iOS or Android Cross Compile
--- a/setup.py 2022-07-22 17:00:19.638893453 +0800
+++ b/setup.py 2022-07-22 17:02:00.686317628 +0800
@@ -16,7 +16,7 @@

from setuptools import Extension, setup
from setuptools.command.install import install as InstallCommandBase
-from wheel.vendored.packaging.tags import sys_tags
+from packaging.tags import sys_tags

nightly_build = False
package_name = "onnxruntime"
35 changes: 26 additions & 9 deletions archlinuxcn/python-onnxruntime/system-dnnl.diff
@@ -1,40 +1,57 @@
diff --git a/cmake/external/dnnl.cmake b/cmake/external/dnnl.cmake
index 6a51a3d5d..a89635210 100644
index 5c42216..bc0a63f 100644
--- a/cmake/external/dnnl.cmake
+++ b/cmake/external/dnnl.cmake
@@ -26,6 +26,13 @@ elseif(onnxruntime_USE_DNNL AND onnxruntime_DNNL_GPU_RUNTIME STREQUAL "ocl" AND
@@ -26,6 +26,17 @@ elseif(onnxruntime_USE_DNNL AND onnxruntime_DNNL_GPU_RUNTIME STREQUAL "ocl" AND
endif()

if (onnxruntime_USE_DNNL)
+if (onnxruntime_PREFER_SYSTEM_LIB)
+ # https://oneapi-src.github.io/oneDNN/dev_guide_transition_to_dnnl.html
+ find_package(dnnl CONFIG REQUIRED)
+ find_package(dnnl CONFIG)
+endif ()
+if (TARGET DNNL::dnnl)
+ message("Use DNNL from preinstalled system lib")
+ add_library(project_dnnl INTERFACE)
+ add_library(dnnl INTERFACE)
+ target_link_libraries(dnnl INTERFACE DNNL::dnnl)
+else ()
+ message("Use DNNL from submodule")
set(DNNL_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/dnnl/src/dnnl/src)
set(DNNL_INSTALL ${CMAKE_CURRENT_BINARY_DIR}/dnnl/install)
set(DNNL_LIB_DIR ${DNNL_INSTALL}/${CMAKE_INSTALL_LIBDIR})
@@ -55,3 +62,4 @@ if (onnxruntime_USE_DNNL)
@@ -55,3 +66,4 @@ if (onnxruntime_USE_DNNL)
)
link_directories(${DNNL_LIB_DIR})
endif()
+endif()
diff --git a/cmake/onnxruntime_python.cmake b/cmake/onnxruntime_python.cmake
index b23d731..13ef7c3 100644
--- a/cmake/onnxruntime_python.cmake
+++ b/cmake/onnxruntime_python.cmake
@@ -752,7 +752,7 @@ if (onnxruntime_ENABLE_TRAINING)
endif()
endif()

-if (onnxruntime_USE_DNNL)
+if (onnxruntime_USE_DNNL AND NOT TARGET DNNL::dnnl)
add_custom_command(
TARGET onnxruntime_pybind11_state POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
diff --git a/cmake/onnxruntime_unittests.cmake b/cmake/onnxruntime_unittests.cmake
index 6bdb2d03c..514faa375 100644
index c5e1dfe..a9cb311 100644
--- a/cmake/onnxruntime_unittests.cmake
+++ b/cmake/onnxruntime_unittests.cmake
@@ -744,10 +744,12 @@ add_custom_command(
if (NOT onnxruntime_ENABLE_TRAINING_TORCH_INTEROP)
@@ -836,10 +836,12 @@ if (NOT onnxruntime_ENABLE_TRAINING_TORCH_INTEROP)
endif()
if (onnxruntime_USE_DNNL)
list(APPEND onnx_test_libs dnnl)
+ if (NOT onnxruntime_PREFER_SYSTEM_LIB)
+ if (NOT TARGET DNNL::dnnl)
add_custom_command(
TARGET ${test_data_target} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${DNNL_DLL_PATH} $<TARGET_FILE_DIR:${test_data_target}>
)
+ endif()
endif()
if(WIN32)
if (onnxruntime_USE_TVM)
if (onnxruntime_USE_NUPHAR_TVM)

0 comments on commit c3dd234

Please sign in to comment.