New package: py-onnxmltools and dependencies (#28023)

* New package: py-onnxmltools and dependencies

* Small fix

* Changes from review

* Apply suggestions from code review

Co-authored-by: Adam J. Stewart <ajstewart426@gmail.com>

* Update recipe following review

Co-authored-by: Adam J. Stewart <ajstewart426@gmail.com>
This commit is contained in:
iarspider 2021-12-21 18:15:13 +01:00 committed by GitHub
parent 43042c14e0
commit 8ebad6963b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 395 additions and 4 deletions

View File

@ -0,0 +1,146 @@
diff -ur a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
--- a/cmake/CMakeLists.txt 2021-12-14 22:34:21.754062247 +0100
+++ b/cmake/CMakeLists.txt 2021-12-14 22:36:18.094061909 +0100
@@ -240,7 +240,7 @@
if (onnxruntime_MINIMAL_BUILD_CUSTOM_OPS)
add_compile_definitions(ORT_MINIMAL_BUILD_CUSTOM_OPS)
endif()
-
+
set(onnxruntime_REDUCED_OPS_BUILD ON)
if (NOT onnxruntime_ENABLE_PYTHON)
@@ -568,7 +568,7 @@
endif()
endif()
-if(NOT WIN32 AND NOT onnxruntime_PREFER_SYSTEM_LIB)
+if(NOT WIN32)
add_subdirectory(${PROJECT_SOURCE_DIR}/external/nsync EXCLUDE_FROM_ALL)
endif()
# External dependencies
@@ -596,7 +596,7 @@
if(NOT Protobuf_USE_STATIC_LIBS)
#Indeed here should be a warning, not a fatal error. ONNX Runtime itself can work in such a
#setting but it may cause compatibility issue when ONNX Runtime is integrated with the other ONNX ecosystem softwares.
- message(FATAL_ERROR "Please enable Protobuf_USE_STATIC_LIBS")
+ message(WARNING "Please enable Protobuf_USE_STATIC_LIBS")
endif()
else()
set(PROTOBUF_LIB protobuf::libprotobuf-lite)
diff -ur a/include/onnxruntime/core/platform/ort_mutex.h b/include/onnxruntime/core/platform/ort_mutex.h
--- a/include/onnxruntime/core/platform/ort_mutex.h 2021-12-14 22:34:21.784062247 +0100
+++ b/include/onnxruntime/core/platform/ort_mutex.h 2021-12-14 22:36:18.164061909 +0100
@@ -101,7 +101,7 @@
return steady_clock::now() - steady_now < rel_time ? std::cv_status::no_timeout : std::cv_status::timeout;
}
} // namespace onnxruntime
-#else
+#elif !defined(__aarch64__)
#include "nsync.h"
#include <mutex> //for unique_lock
#include <condition_variable> //for cv_status
@@ -186,4 +186,11 @@
return steady_clock::now() - steady_now < rel_time ? std::cv_status::no_timeout : std::cv_status::timeout;
}
}; // namespace onnxruntime
+#else
+#include <mutex>
+#include <condition_variable>
+namespace onnxruntime {
+using OrtMutex = std::mutex;
+using OrtCondVar = std::condition_variable;
+} // namespace onnxruntime
#endif
diff -ur a/include/onnxruntime/core/session/onnxruntime_cxx_api.h b/include/onnxruntime/core/session/onnxruntime_cxx_api.h
--- a/include/onnxruntime/core/session/onnxruntime_cxx_api.h 2021-12-14 22:34:21.784062247 +0100
+++ b/include/onnxruntime/core/session/onnxruntime_cxx_api.h 2021-12-14 22:36:18.164061909 +0100
@@ -345,8 +345,8 @@
struct Session : Base<OrtSession> {
explicit Session(std::nullptr_t) {}
- Session(Env& env, const ORTCHAR_T* model_path, const SessionOptions& options);
- Session(Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options);
+ Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options);
+ Session(const Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options);
// Run that will allocate the output values
std::vector<Value> Run(const RunOptions& run_options, const char* const* input_names, const Value* input_values, size_t input_count,
diff -ur a/include/onnxruntime/core/session/onnxruntime_cxx_inline.h b/include/onnxruntime/core/session/onnxruntime_cxx_inline.h
--- a/include/onnxruntime/core/session/onnxruntime_cxx_inline.h 2021-12-14 22:34:21.784062247 +0100
+++ b/include/onnxruntime/core/session/onnxruntime_cxx_inline.h 2021-12-14 22:36:18.164061909 +0100
@@ -500,11 +500,11 @@
return *this;
}
-inline Session::Session(Env& env, const ORTCHAR_T* model_path, const SessionOptions& options) {
+inline Session::Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options) {
ThrowOnError(GetApi().CreateSession(env, model_path, options, &p_));
}
-inline Session::Session(Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options) {
+inline Session::Session(const Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options) {
ThrowOnError(GetApi().CreateSessionFromArray(env, model_data, model_data_length, options, &p_));
}
diff -ur a/onnxruntime/core/mlas/lib/platform.cpp b/onnxruntime/core/mlas/lib/platform.cpp
--- a/onnxruntime/core/mlas/lib/platform.cpp 2021-12-14 22:34:21.864062247 +0100
+++ b/onnxruntime/core/mlas/lib/platform.cpp 2021-12-14 22:36:18.244061908 +0100
@@ -16,6 +16,7 @@
--*/
#include "mlasi.h"
+#include <string>
//
// Stores the platform information.
@@ -170,8 +171,11 @@
//
uint64_t xcr0 = MlasReadExtendedControlRegister(_XCR_XFEATURE_ENABLED_MASK);
+ const char *cpu_opt = std::getenv("MLAS_DYNAMIC_CPU_ARCH");
+ if (cpu_opt == nullptr) cpu_opt = "99";
+ auto opt = std::stoi(cpu_opt);
- if ((xcr0 & 0x6) == 0x6) {
+ if (opt > 0 && (xcr0 & 0x6) == 0x6) {
this->GemmFloatKernel = MlasGemmFloatKernelAvx;
@@ -204,7 +208,7 @@
__cpuid_count(7, 0, Cpuid7[0], Cpuid7[1], Cpuid7[2], Cpuid7[3]);
#endif
- if (((Cpuid1[2] & 0x1000) != 0) && ((Cpuid7[1] & 0x20) != 0)) {
+ if (opt > 1 && ((Cpuid1[2] & 0x1000) != 0) && ((Cpuid7[1] & 0x20) != 0)) {
this->GemmU8S8Operation = MlasGemmU8X8Operation<MLAS_GEMM_U8S8_KERNEL_AVX2>;
this->GemmU8S8PackedOperation = MlasGemmU8X8PackedOperation<MLAS_GEMM_U8S8_KERNEL_AVX2>;
@@ -264,7 +268,7 @@
// operating system supports saving AVX512F state.
//
- if (((Cpuid7[1] & 0x10000) != 0) && ((xcr0 & 0xE0) == 0xE0)) {
+ if (opt > 2 && ((Cpuid7[1] & 0x10000) != 0) && ((xcr0 & 0xE0) == 0xE0)) {
this->GemmFloatKernel = MlasGemmFloatKernelAvx512F;
this->GemmDoubleKernel = MlasGemmDoubleKernelAvx512F;
diff -ur a/onnxruntime/core/platform/posix/ort_mutex.cc b/onnxruntime/core/platform/posix/ort_mutex.cc
--- a/onnxruntime/core/platform/posix/ort_mutex.cc 2021-12-14 22:34:21.874062247 +0100
+++ b/onnxruntime/core/platform/posix/ort_mutex.cc 2021-12-14 22:36:18.254061908 +0100
@@ -1,6 +1,7 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
+#if !defined(__aarch64__)
#include "core/common/common.h"
#include "core/platform/ort_mutex.h"
#include <assert.h>
@@ -40,4 +41,5 @@
nsync::nsync_cv_wait(&native_cv_object, lk.mutex()->native_handle());
}
-} // namespace onnxruntime
\ No newline at end of file
+} // namespace onnxruntime
+#endif

View File

@ -0,0 +1,36 @@
From de4089f8cbe0baffe56a363cc3a41595cc8f0809 Mon Sep 17 00:00:00 2001
From: ankurverma85 <31362771+ankurverma85@users.noreply.github.com>
Date: Mon, 10 May 2021 12:50:08 -0700
Subject: [PATCH] GCC11/Libstdc++11 Compilation fixes (#7599)
Authored-by: Ankur Verma <ankurv@microsoft.com>
---
include/onnxruntime/core/graph/graph_nodes.h | 2 +-
onnxruntime/test/providers/cpu/controlflow/loop_test.cc | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/onnxruntime/core/graph/graph_nodes.h b/include/onnxruntime/core/graph/graph_nodes.h
index 422fe9538ea..aec603f7942 100644
--- a/include/onnxruntime/core/graph/graph_nodes.h
+++ b/include/onnxruntime/core/graph/graph_nodes.h
@@ -100,7 +100,7 @@ class ValidNodes {
using const_reference = const T&;
/** Construct a NodeInterator and move to the first valid node. */
- NodeIterator<TIterator>(const TIterator current, const TIterator end, const NodeFilterFunc& filter_fn) noexcept
+ NodeIterator(const TIterator current, const TIterator end, const NodeFilterFunc& filter_fn) noexcept
: current_{current}, end_{end}, apply_filter_{filter_fn != nullptr}, filter_func_{&filter_fn} {
// skip to next valid node, stopping at end if none are found
while (current_ < end && (*current_ == nullptr ||
diff --git a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc
index b058e9a16c7..3cf147e997c 100644
--- a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc
+++ b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc
@@ -2,6 +2,7 @@
// Licensed under the MIT License.
#include <future>
+#include <thread>
#include "gtest/gtest.h"
#include "gmock/gmock.h"

View File

@ -0,0 +1,42 @@
--- cmake/onnxruntime.cmake.orig 2021-08-06 12:36:32.720081500 +0200
+++ cmake/onnxruntime.cmake 2021-08-13 18:53:30.310868700 +0200
@@ -120,7 +120,8 @@
onnxruntime_common
onnxruntime_mlas
onnxruntime_flatbuffers
- ${onnxruntime_EXTERNAL_LIBRARIES})
+ ${onnxruntime_EXTERNAL_LIBRARIES}
+ -liconv)
if (onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS)
target_link_libraries(onnxruntime PRIVATE onnxruntime_language_interop onnxruntime_pyop)
--- cmake/onnxruntime_python.cmake.orig 2021-08-06 12:36:32.725148600 +0200
+++ cmake/onnxruntime_python.cmake 2021-08-13 18:54:37.085622000 +0200
@@ -106,6 +106,7 @@
onnxruntime_mlas
onnxruntime_flatbuffers
${pybind11_lib}
+ -liconv
)
if (onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS)
--- cmake/onnxruntime_unittests.cmake.orig 2021-08-13 19:11:58.645461300 +0200
+++ cmake/onnxruntime_unittests.cmake 2021-08-13 19:14:18.373814800 +0200
@@ -603,7 +603,7 @@
AddTest(
TARGET onnxruntime_test_all
SOURCES ${all_tests} ${onnxruntime_unittest_main_src}
- LIBS onnx_test_runner_common ${onnxruntime_test_providers_libs} ${onnxruntime_test_common_libs} re2::re2 onnx_test_data_proto
+ LIBS onnx_test_runner_common ${onnxruntime_test_providers_libs} ${onnxruntime_test_common_libs} re2::re2 onnx_test_data_proto -liconv
DEPENDS ${all_dependencies}
)
@@ -723,7 +723,7 @@
target_compile_options(onnx_test_runner PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:SHELL:--compiler-options /utf-8>"
"$<$<NOT:$<COMPILE_LANGUAGE:CUDA>>:/utf-8>")
endif()
-target_link_libraries(onnx_test_runner PRIVATE onnx_test_runner_common ${GETOPT_LIB_WIDE} ${onnx_test_libs})
+target_link_libraries(onnx_test_runner PRIVATE onnx_test_runner_common ${GETOPT_LIB_WIDE} ${onnx_test_libs} -liconv)
target_include_directories(onnx_test_runner PRIVATE ${ONNXRUNTIME_ROOT})
set_target_properties(onnx_test_runner PROPERTIES FOLDER "ONNXRuntimeTest")

View File

@ -0,0 +1,121 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyOnnxRuntime(CMakePackage, PythonPackage):
    """ONNX Runtime is a performance-focused complete scoring
    engine for Open Neural Network Exchange (ONNX) models, with
    an open extensible architecture to continually address the
    latest developments in AI and Deep Learning. ONNX Runtime
    stays up to date with the ONNX standard with complete
    implementation of all ONNX operators, and supports all
    ONNX releases (1.2+) with both future and backwards
    compatibility."""

    homepage = "https://github.com/microsoft/onnxruntime"
    git = "https://github.com/microsoft/onnxruntime.git"

    version('1.7.2', tag='v1.7.2', submodules=True)

    variant('cuda', default=False, description='Build with CUDA support')

    depends_on('cmake@3.1:', type='build')
    depends_on('ninja', type='build')
    depends_on('python', type=('build', 'run'))
    depends_on('protobuf')
    depends_on('py-protobuf', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy@1.16.6:', type=('build', 'run'))
    depends_on('py-wheel', type='build')
    depends_on('py-onnx', type=('build', 'run'))
    depends_on('zlib')
    depends_on('libpng')
    depends_on('py-pybind11', type='build')
    depends_on('cuda', when='+cuda')
    depends_on('cudnn', when='+cuda')
    depends_on('iconv', type=('build', 'link', 'run'))
    depends_on('re2+shared')
    extends('python')

    # Adopted from CMS experiment's fork of onnxruntime
    # https://github.com/cms-externals/onnxruntime/compare/5bc92df...d594f80
    patch('cms.patch', level=1, when='@1.7.2')
    # https://github.com/microsoft/onnxruntime/issues/4234#issuecomment-698077636
    patch('libiconv.patch', level=0, when='@1.7.2')
    # https://github.com/microsoft/onnxruntime/commit/de4089f8cbe0baffe56a363cc3a41595cc8f0809.patch
    patch('gcc11.patch', level=1, when='@1.7.2')

    # Order matters: the index of the selected value in this tuple is the
    # integer level the patched MLAS dispatcher (cms.patch) reads from the
    # MLAS_DYNAMIC_CPU_ARCH environment variable (0 = NOAVX ... 3 = AVX512).
    dynamic_cpu_arch_values = ('NOAVX', 'AVX', 'AVX2', 'AVX512')

    variant('dynamic_cpu_arch', default='AVX512',
            values=dynamic_cpu_arch_values, multi=False,
            description='AVX support level')

    generator = 'Ninja'
    root_cmakelists_dir = 'cmake'

    def _mlas_cpu_arch_level(self):
        """Map the dynamic_cpu_arch variant value to its MLAS integer level."""
        value = self.spec.variants['dynamic_cpu_arch'].value
        return self.dynamic_cpu_arch_values.index(value)

    def setup_build_environment(self, env):
        # cms.patch makes MLAS choose its CPU kernels at run time based on
        # this variable instead of unconditionally enabling AVX* paths.
        env.set('MLAS_DYNAMIC_CPU_ARCH', str(self._mlas_cpu_arch_level()))

    def setup_run_environment(self, env):
        # Must match the level used at build time (see setup_build_environment).
        env.set('MLAS_DYNAMIC_CPU_ARCH', str(self._mlas_cpu_arch_level()))

    def cmake_args(self):
        """Assemble the CMake command-line definitions for onnxruntime."""
        define = self.define
        define_from_variant = self.define_from_variant
        args = [define('onnxruntime_ENABLE_PYTHON', True),
                define('onnxruntime_BUILD_SHARED_LIB', True),
                define_from_variant('onnxruntime_USE_CUDA', 'cuda'),
                define('onnxruntime_BUILD_CSHARP', False),
                define('onnxruntime_USE_EIGEN_FOR_BLAS', True),
                define('onnxruntime_USE_OPENBLAS', False),
                define("onnxruntime_USE_MKLML", False),
                define("onnxruntime_USE_NGRAPH", False),
                define("onnxruntime_USE_OPENMP", False),
                define("onnxruntime_USE_TVM", False),
                define("onnxruntime_USE_LLVM", False),
                define("onnxruntime_ENABLE_MICROSOFT_INTERNAL", False),
                define("onnxruntime_USE_BRAINSLICE", False),
                define("onnxruntime_USE_NUPHAR", False),
                define("onnxruntime_USE_TENSORRT", False),
                define("onnxruntime_CROSS_COMPILING", False),
                define("onnxruntime_USE_FULL_PROTOBUF", True),
                define("onnxruntime_DISABLE_CONTRIB_OPS", False),
                define("onnxruntime_USE_PREINSTALLED_PROTOBUF", True),
                define("onnxruntime_PREFER_SYSTEM_LIB", True)]

        if self.spec.satisfies('+cuda'):
            args.extend((
                define('onnxruntime_CUDA_VERSION', str(self.spec['cuda'].version)),
                define('onnxruntime_CUDA_HOME', self.spec['cuda'].prefix),
                define('onnxruntime_CUDNN_HOME', self.spec['cudnn'].prefix),
                define('CMAKE_CUDA_FLAGS', '-cudart shared'),
                define('CMAKE_CUDA_RUNTIME_LIBRARY', 'Shared'),
                # BUGFIX: was 'DCMAKE_TRY_COMPILE_PLATFORM_VARIABLES'. define()
                # already prepends '-D', so the extra 'D' produced the bogus
                # flag '-DDCMAKE_TRY_COMPILE_PLATFORM_VARIABLES' and the
                # variable was never forwarded to CMake's try_compile step.
                define('CMAKE_TRY_COMPILE_PLATFORM_VARIABLES',
                       'CMAKE_CUDA_RUNTIME_LIBRARY')
            ))
        return args

    def setup_file(self):
        """Path to setup.py; the Python build runs from the source tree."""
        return join_path(self.stage.source_path, 'setup.py')

    @run_after('build')
    def build_python(self):
        """Build everything needed to install."""
        with working_dir(self.stage.source_path):
            PythonPackage.build(self, self.spec, self.prefix)

    @run_after('install')
    def install_python(self):
        """Install the Python bindings after the CMake install step."""
        with working_dir(self.stage.source_path):
            PythonPackage.install(self, self.spec, self.prefix)

View File

@ -23,13 +23,15 @@ class PyOnnx(PythonPackage):
version('1.5.0', sha256='1a584a4ef62a6db178c257fffb06a9d8e61b41c0a80bfd8bcd8a253d72c4b0b4')
depends_on('py-setuptools', type='build')
depends_on('protobuf')
depends_on('py-protobuf+cpp', type=('build', 'run'))
# Protobuf version limit is due to https://github.com/protocolbuffers/protobuf/pull/8794
depends_on('protobuf@:3.17')
depends_on('py-protobuf+cpp@:3.17', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
depends_on('py-typing@3.6.4:', when='^python@:3.4', type=('build', 'run'))
depends_on('py-typing-extensions@3.6.4:', type=('build', 'run'))
depends_on('py-typing-extensions@3.6.2.1:', type=('build', 'run'))
depends_on('cmake@3.1:', type='build')
depends_on('py-pytest-runner', type='build')
# 'python_out' does not recognize dllexport_decl.
patch('remove_dllexport_decl.patch', when='@:1.6.0')

View File

@ -12,7 +12,7 @@ class PyOnnxconverterCommon(PythonPackage):
homepage = "https://github.com/microsoft/onnxconverter-common"
url = "https://github.com/microsoft/onnxconverter-common/archive/refs/tags/v1.9.0.tar.gz"
version('1.9.0', sha256='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
version('1.9.0', sha256='32315bcc844a8203092f3117a4a092ac6cf03d6a20145477e284f1172557d6f9')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))

View File

@ -0,0 +1,21 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyOnnxmltools(PythonPackage):
    """Converts Machine Learning models to ONNX"""

    homepage = "https://github.com/onnx/onnxmltools"
    pypi = "onnxmltools/onnxmltools-1.10.0.tar.gz"

    version('1.10.0', sha256='4eb4605f18ed66553fc17438ac8cf5406d66dcc624bedd76d8067e1b08e6c75d')

    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-onnx', type=('build', 'run'))
    # Converter front-end for scikit-learn models.
    depends_on('py-skl2onnx', type=('build', 'run'))
    # NOTE(review): spec name must match the runtime recipe's package name
    # ('py-onnx-runtime' for class PyOnnxRuntime) -- confirm it resolves.
    depends_on('py-onnx-runtime', type=('build', 'run'))

View File

@ -0,0 +1,23 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySkl2onnx(PythonPackage):
    """Convert scikit-learn models to ONNX"""

    homepage = "https://github.com/onnx/sklearn-onnx"
    pypi = "skl2onnx/skl2onnx-1.10.3.tar.gz"

    version('1.10.3', sha256='798933378145412b9876ab3ff2c1dd5f241a7296406d786262000afa8d329628')

    # Version bounds below mirror skl2onnx's own install_requires.
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy@1.15:', type=('build', 'run'))
    depends_on('py-scipy@1.0:', type=('build', 'run'))
    depends_on('py-protobuf', type=('build', 'run'))
    depends_on('py-onnx@1.2.1:', type=('build', 'run'))
    depends_on('py-scikit-learn@0.19:', type=('build', 'run'))
    depends_on('py-onnxconverter-common@1.7.0:', type=('build', 'run'))