py-pytorch-lightning: add +extra variant (#35121)

* py-pytorch-lightning: add +extra variant

* Update dependencies

* py-fsspec: add v2023.1.0
Adam J. Stewart, 2023-01-25 14:58:26 -07:00, committed by GitHub
parent 5845750a10
commit 80f3888cc8
17 changed files with 696 additions and 16 deletions

View File

@@ -278,7 +278,7 @@ class Acts(CMakePackage, CudaPackage):
depends_on("python", when="+python")
depends_on("python@3.8:", when="+python @19.11:19")
depends_on("python@3.8:", when="+python @21:")
depends_on("py-onnx-runtime", when="+onnx")
depends_on("py-onnxruntime", when="+onnx")
depends_on("py-pybind11 @2.6.2:", when="+python @18:")
depends_on("py-pytest", when="+python +unit_tests")
depends_on("root @6.10: cxxstd=14", when="+tgeo @:0.8.0")

View File

@@ -0,0 +1,18 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyDocstringParser(PythonPackage):
"""Parse Python docstrings in reST, Google and Numpydoc format."""
homepage = "https://github.com/rr-/docstring_parser"
pypi = "docstring-parser/docstring_parser-0.15.tar.gz"
version("0.15", sha256="48ddc093e8b1865899956fcc03b03e66bb7240c310fac5af81814580c55bf682")
depends_on("python@3.6:3", type=("build", "run"))
depends_on("py-poetry-core@1:", type="build")

View File

@@ -12,6 +12,7 @@ class PyFsspec(PythonPackage):
homepage = "https://github.com/intake/filesystem_spec"
pypi = "fsspec/fsspec-0.4.4.tar.gz"
version("2023.1.0", sha256="fbae7f20ff801eb5f7d0bedf81f25c787c0dfac5e982d98fa3884a9cde2b5411")
version("2022.11.0", sha256="259d5fd5c8e756ff2ea72f42e7613c32667dc2049a4ac3d84364a7ca034acb8b")
version("2021.7.0", sha256="792ebd3b54de0b30f1ce73f0ba0a8bcc864724f2d9f248cb8d0ece47db0cbde8")
version("2021.4.0", sha256="8b1a69884855d1a8c038574292e8b861894c3373282d9a469697a2b41d5289a6")
@@ -20,13 +21,8 @@ class PyFsspec(PythonPackage):
version("0.7.3", sha256="1b540552c93b47e83c568e87507d6e02993e6d1b30bc7285f2336c81c5014103")
version("0.4.4", sha256="97697a46e8bf8be34461c2520d6fc4bfca0ed749b22bb2b7c21939fd450a7d63")
variant("http", default=False, description="HTTPFileSystem support (Requires version 0.8.1+)")
variant("http", default=False, description="HTTPFileSystem support", when="@0.8.1:")
conflicts("+http", when="@:0.8.0", msg="Only available in 0.8.1+")
depends_on("python@3.5:", type=("build", "run"))
depends_on("python@3.6:", type=("build", "run"), when="@0.6.3:")
depends_on("python@3.7:", type=("build", "run"), when="@2022.11.0:")
depends_on("py-setuptools", type="build")
depends_on("py-requests", type=("build", "run"), when="+http")
depends_on("py-aiohttp", type=("build", "run"), when="+http")

View File

@@ -0,0 +1,24 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyGcsfs(PythonPackage):
"""Pythonic file-system for Google Cloud Storage."""
homepage = "https://github.com/fsspec/gcsfs"
pypi = "gcsfs/gcsfs-2023.1.0.tar.gz"
version("2023.1.0", sha256="0a7b7ca8c1affa126a14ba35d7b7dff81c49e2aaceedda9732c7f159a4837a26")
depends_on("py-setuptools", type="build")
depends_on("py-aiohttp", type=("build", "run"))
depends_on("py-decorator@4.1.3:", type=("build", "run"))
depends_on("py-fsspec@2023.1.0", type=("build", "run"))
depends_on("py-google-auth@1.2:", type=("build", "run"))
depends_on("py-google-auth-oauthlib", type=("build", "run"))
depends_on("py-google-cloud-storage", type=("build", "run"))
depends_on("py-requests", type=("build", "run"))

View File

@@ -0,0 +1,21 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyHydraCore(PythonPackage):
"""A framework for elegantly configuring complex applications."""
homepage = "https://github.com/facebookresearch/hydra"
pypi = "hydra-core/hydra-core-1.3.1.tar.gz"
version("1.3.1", sha256="8dd42d551befc43dfca0c612cbd58c4f3e273dbd97a87214c1a030ba557d238b")
depends_on("py-setuptools", type="build")
depends_on("py-omegaconf@2.2:2.3", type=("build", "run"))
depends_on("py-antlr4-python3-runtime@4.9", type=("build", "run"))
depends_on("py-importlib-resources", when="^python@:3.8", type=("build", "run"))
depends_on("py-packaging", type=("build", "run"))

View File

@@ -0,0 +1,27 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyJsonargparse(PythonPackage):
"""An extension to python's argparse which simplifies parsing of configuration options from
command line arguments, json configuration files (yaml or jsonnet supersets), environment
variables and hard-coded defaults.
"""
homepage = "https://github.com/omni-us/jsonargparse"
pypi = "jsonargparse/jsonargparse-4.19.0.tar.gz"
version("4.19.0", sha256="63aa3c7bbdb219d0f254a5ae86f3d54384ebc1ffa905e776cc19283bc843787b")
variant("signatures", default=False, description="Enable signature features")
depends_on("py-setuptools", type="build")
depends_on("py-pyyaml@3.13:", type=("build", "run"))
with when("+signatures"):
depends_on("py-docstring-parser@0.15:", type=("build", "run"))
depends_on("py-typeshed-client@2.1:", type=("build", "run"))

View File

@@ -13,15 +13,16 @@ class PyOmegaconf(PythonPackage):
"""
homepage = "https://github.com/omry/omegaconf"
url = "https://github.com/omry/omegaconf/archive/refs/tags/v2.1.0.tar.gz"
pypi = "omegaconf/omegaconf-2.3.0.tar.gz"
maintainers = ["calebrob6"]
version("2.1.0", sha256="0168f962822b7059c7553c4346541596ea48c0b542628d41a348a12eeaf971ff")
version("2.3.0", sha256="d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7")
version("2.1.0", sha256="a08aec03a63c66449b550b85d70238f4dee9c6c4a0541d6a98845dcfeb12439d")
depends_on("python@3.6:", type=("build", "run"))
depends_on("py-setuptools", type="build")
depends_on("py-pytest-runner", type="build")
depends_on("py-antlr4-python3-runtime@4.8", type=("build", "run"))
depends_on("py-pytest-runner", when="@2.1", type="build")
depends_on("py-antlr4-python3-runtime@4.9", when="@2.3:", type=("build", "run"))
depends_on("py-antlr4-python3-runtime@4.8", when="@2.1", type=("build", "run"))
depends_on("py-pyyaml@5.1.0:", type=("build", "run"))
depends_on("java", type="build")

View File

@@ -14,13 +14,16 @@ class PyOnnxRuntime(CMakePackage, PythonExtension):
stays up to date with the ONNX standard with complete
implementation of all ONNX operators, and supports all
ONNX releases (1.2+) with both future and backwards
compatibility."""
compatibility.
Deprecated in favor of py-onnxruntime.
"""
homepage = "https://github.com/microsoft/onnxruntime"
git = "https://github.com/microsoft/onnxruntime.git"
version("1.10.0", tag="v1.10.0", submodules=True)
version("1.7.2", tag="v1.7.2", submodules=True)
version("1.10.0", tag="v1.10.0", submodules=True, deprecated=True)
version("1.7.2", tag="v1.7.2", submodules=True, deprecated=True)
variant("cuda", default=False, description="Build with CUDA support")

View File

@@ -26,4 +26,4 @@ class PyOnnxmltools(PythonPackage):
depends_on("py-numpy", type=("build", "run"))
depends_on("py-onnx", type=("build", "run"))
depends_on("py-skl2onnx", type=("build", "run"))
depends_on("py-onnx-runtime", type=("build", "run"))
depends_on("py-onnxruntime", type=("build", "run"))

View File

@@ -0,0 +1,146 @@
diff -ur a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
--- a/cmake/CMakeLists.txt 2021-12-14 22:34:21.754062247 +0100
+++ b/cmake/CMakeLists.txt 2021-12-14 22:36:18.094061909 +0100
@@ -240,7 +240,7 @@
if (onnxruntime_MINIMAL_BUILD_CUSTOM_OPS)
add_compile_definitions(ORT_MINIMAL_BUILD_CUSTOM_OPS)
endif()
-
+
set(onnxruntime_REDUCED_OPS_BUILD ON)
if (NOT onnxruntime_ENABLE_PYTHON)
@@ -568,7 +568,7 @@
endif()
endif()
-if(NOT WIN32 AND NOT onnxruntime_PREFER_SYSTEM_LIB)
+if(NOT WIN32)
add_subdirectory(${PROJECT_SOURCE_DIR}/external/nsync EXCLUDE_FROM_ALL)
endif()
# External dependencies
@@ -596,7 +596,7 @@
if(NOT Protobuf_USE_STATIC_LIBS)
#Indeed here should be a warning, not a fatal error. ONNX Runtime itself can work in such a
#setting but it may cause compatibility issue when ONNX Runtime is integrated with the other ONNX ecosystem softwares.
- message(FATAL_ERROR "Please enable Protobuf_USE_STATIC_LIBS")
+ message(WARNING "Please enable Protobuf_USE_STATIC_LIBS")
endif()
else()
set(PROTOBUF_LIB protobuf::libprotobuf-lite)
diff -ur a/include/onnxruntime/core/platform/ort_mutex.h b/include/onnxruntime/core/platform/ort_mutex.h
--- a/include/onnxruntime/core/platform/ort_mutex.h 2021-12-14 22:34:21.784062247 +0100
+++ b/include/onnxruntime/core/platform/ort_mutex.h 2021-12-14 22:36:18.164061909 +0100
@@ -101,7 +101,7 @@
return steady_clock::now() - steady_now < rel_time ? std::cv_status::no_timeout : std::cv_status::timeout;
}
} // namespace onnxruntime
-#else
+#elif !defined(__aarch64__)
#include "nsync.h"
#include <mutex> //for unique_lock
#include <condition_variable> //for cv_status
@@ -186,4 +186,11 @@
return steady_clock::now() - steady_now < rel_time ? std::cv_status::no_timeout : std::cv_status::timeout;
}
}; // namespace onnxruntime
+#else
+#include <mutex>
+#include <condition_variable>
+namespace onnxruntime {
+using OrtMutex = std::mutex;
+using OrtCondVar = std::condition_variable;
+} // namespace onnxruntime
#endif
diff -ur a/include/onnxruntime/core/session/onnxruntime_cxx_api.h b/include/onnxruntime/core/session/onnxruntime_cxx_api.h
--- a/include/onnxruntime/core/session/onnxruntime_cxx_api.h 2021-12-14 22:34:21.784062247 +0100
+++ b/include/onnxruntime/core/session/onnxruntime_cxx_api.h 2021-12-14 22:36:18.164061909 +0100
@@ -345,8 +345,8 @@
struct Session : Base<OrtSession> {
explicit Session(std::nullptr_t) {}
- Session(Env& env, const ORTCHAR_T* model_path, const SessionOptions& options);
- Session(Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options);
+ Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options);
+ Session(const Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options);
// Run that will allocate the output values
std::vector<Value> Run(const RunOptions& run_options, const char* const* input_names, const Value* input_values, size_t input_count,
diff -ur a/include/onnxruntime/core/session/onnxruntime_cxx_inline.h b/include/onnxruntime/core/session/onnxruntime_cxx_inline.h
--- a/include/onnxruntime/core/session/onnxruntime_cxx_inline.h 2021-12-14 22:34:21.784062247 +0100
+++ b/include/onnxruntime/core/session/onnxruntime_cxx_inline.h 2021-12-14 22:36:18.164061909 +0100
@@ -500,11 +500,11 @@
return *this;
}
-inline Session::Session(Env& env, const ORTCHAR_T* model_path, const SessionOptions& options) {
+inline Session::Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options) {
ThrowOnError(GetApi().CreateSession(env, model_path, options, &p_));
}
-inline Session::Session(Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options) {
+inline Session::Session(const Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options) {
ThrowOnError(GetApi().CreateSessionFromArray(env, model_data, model_data_length, options, &p_));
}
diff -ur a/onnxruntime/core/mlas/lib/platform.cpp b/onnxruntime/core/mlas/lib/platform.cpp
--- a/onnxruntime/core/mlas/lib/platform.cpp 2021-12-14 22:34:21.864062247 +0100
+++ b/onnxruntime/core/mlas/lib/platform.cpp 2021-12-14 22:36:18.244061908 +0100
@@ -16,6 +16,7 @@
--*/
#include "mlasi.h"
+#include <string>
//
// Stores the platform information.
@@ -170,8 +171,11 @@
//
uint64_t xcr0 = MlasReadExtendedControlRegister(_XCR_XFEATURE_ENABLED_MASK);
+ const char *cpu_opt = std::getenv("MLAS_DYNAMIC_CPU_ARCH");
+ if (cpu_opt == nullptr) cpu_opt = "99";
+ auto opt = std::stoi(cpu_opt);
- if ((xcr0 & 0x6) == 0x6) {
+ if (opt > 0 && (xcr0 & 0x6) == 0x6) {
this->GemmFloatKernel = MlasGemmFloatKernelAvx;
@@ -204,7 +208,7 @@
__cpuid_count(7, 0, Cpuid7[0], Cpuid7[1], Cpuid7[2], Cpuid7[3]);
#endif
- if (((Cpuid1[2] & 0x1000) != 0) && ((Cpuid7[1] & 0x20) != 0)) {
+ if (opt > 1 && ((Cpuid1[2] & 0x1000) != 0) && ((Cpuid7[1] & 0x20) != 0)) {
this->GemmU8S8Operation = MlasGemmU8X8Operation<MLAS_GEMM_U8S8_KERNEL_AVX2>;
this->GemmU8S8PackedOperation = MlasGemmU8X8PackedOperation<MLAS_GEMM_U8S8_KERNEL_AVX2>;
@@ -264,7 +268,7 @@
// operating system supports saving AVX512F state.
//
- if (((Cpuid7[1] & 0x10000) != 0) && ((xcr0 & 0xE0) == 0xE0)) {
+ if (opt > 2 && ((Cpuid7[1] & 0x10000) != 0) && ((xcr0 & 0xE0) == 0xE0)) {
this->GemmFloatKernel = MlasGemmFloatKernelAvx512F;
this->GemmDoubleKernel = MlasGemmDoubleKernelAvx512F;
diff -ur a/onnxruntime/core/platform/posix/ort_mutex.cc b/onnxruntime/core/platform/posix/ort_mutex.cc
--- a/onnxruntime/core/platform/posix/ort_mutex.cc 2021-12-14 22:34:21.874062247 +0100
+++ b/onnxruntime/core/platform/posix/ort_mutex.cc 2021-12-14 22:36:18.254061908 +0100
@@ -1,6 +1,7 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
+#if !defined(__aarch64__)
#include "core/common/common.h"
#include "core/platform/ort_mutex.h"
#include <assert.h>
@@ -40,4 +41,5 @@
nsync::nsync_cv_wait(&native_cv_object, lk.mutex()->native_handle());
}
-} // namespace onnxruntime
\ No newline at end of file
+} // namespace onnxruntime
+#endif

View File

@@ -0,0 +1,142 @@
diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index acbde7f56a8..eb9f7bb9fbf 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -718,7 +718,7 @@ if (onnxruntime_BUILD_BENCHMARKS)
endif()
endif()
-if (NOT WIN32 AND NOT onnxruntime_PREFER_SYSTEM_LIB)
+if (NOT WIN32)
add_subdirectory(${PROJECT_SOURCE_DIR}/external/nsync EXCLUDE_FROM_ALL)
endif()
# External dependencies
diff --git a/include/onnxruntime/core/platform/ort_mutex.h b/include/onnxruntime/core/platform/ort_mutex.h
index e24665f5142..ddc11953fbc 100644
--- a/include/onnxruntime/core/platform/ort_mutex.h
+++ b/include/onnxruntime/core/platform/ort_mutex.h
@@ -101,7 +101,7 @@ std::cv_status OrtCondVar::wait_for(std::unique_lock<OrtMutex>& cond_mutex,
return steady_clock::now() - steady_now < rel_time ? std::cv_status::no_timeout : std::cv_status::timeout;
}
} // namespace onnxruntime
-#else
+#elif !defined(__aarch64__)
#include "nsync.h"
#include <mutex> //for unique_lock
#include <condition_variable> //for cv_status
@@ -186,4 +186,11 @@ std::cv_status OrtCondVar::wait_for(std::unique_lock<OrtMutex>& cond_mutex,
return steady_clock::now() - steady_now < rel_time ? std::cv_status::no_timeout : std::cv_status::timeout;
}
}; // namespace onnxruntime
+#else
+#include <mutex>
+#include <condition_variable>
+namespace onnxruntime {
+using OrtMutex = std::mutex;
+using OrtCondVar = std::condition_variable;
+} // namespace onnxruntime
#endif
diff --git a/include/onnxruntime/core/session/onnxruntime_cxx_api.h b/include/onnxruntime/core/session/onnxruntime_cxx_api.h
index 048421099bd..4430185d496 100644
--- a/include/onnxruntime/core/session/onnxruntime_cxx_api.h
+++ b/include/onnxruntime/core/session/onnxruntime_cxx_api.h
@@ -379,9 +379,9 @@ struct ModelMetadata : Base<OrtModelMetadata> {
*/
struct Session : Base<OrtSession> {
explicit Session(std::nullptr_t) {} ///< Create an empty Session object, must be assigned a valid one to be used
- Session(Env& env, const ORTCHAR_T* model_path, const SessionOptions& options); ///< Wraps OrtApi::CreateSession
- Session(Env& env, const ORTCHAR_T* model_path, const SessionOptions& options, OrtPrepackedWeightsContainer* prepacked_weights_container); ///< Wraps OrtApi::CreateSessionWithPrepackedWeightsContainer
- Session(Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options); ///< Wraps OrtApi::CreateSessionFromArray
+ Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options); ///< Wraps OrtApi::CreateSession
+ Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options, OrtPrepackedWeightsContainer* prepacked_weights_container); ///< Wraps OrtApi::CreateSessionWithPrepackedWeightsContainer
+ Session(const Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options); ///< Wraps OrtApi::CreateSessionFromArray
/** \brief Run the model returning results in an Ort allocated vector.
*
diff --git a/include/onnxruntime/core/session/onnxruntime_cxx_inline.h b/include/onnxruntime/core/session/onnxruntime_cxx_inline.h
index 1f31dffca87..b9d2cdfc475 100644
--- a/include/onnxruntime/core/session/onnxruntime_cxx_inline.h
+++ b/include/onnxruntime/core/session/onnxruntime_cxx_inline.h
@@ -538,16 +538,16 @@ inline SessionOptions& SessionOptions::AppendExecutionProvider_OpenVINO(const Or
return *this;
}
-inline Session::Session(Env& env, const ORTCHAR_T* model_path, const SessionOptions& options) {
+inline Session::Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options) {
ThrowOnError(GetApi().CreateSession(env, model_path, options, &p_));
}
-inline Session::Session(Env& env, const ORTCHAR_T* model_path, const SessionOptions& options,
+inline Session::Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options,
OrtPrepackedWeightsContainer* prepacked_weights_container) {
ThrowOnError(GetApi().CreateSessionWithPrepackedWeightsContainer(env, model_path, options, prepacked_weights_container, &p_));
}
-inline Session::Session(Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options) {
+inline Session::Session(const Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options) {
ThrowOnError(GetApi().CreateSessionFromArray(env, model_data, model_data_length, options, &p_));
}
diff --git a/onnxruntime/core/mlas/lib/platform.cpp b/onnxruntime/core/mlas/lib/platform.cpp
index de7fee8c07a..6d97cf07a05 100644
--- a/onnxruntime/core/mlas/lib/platform.cpp
+++ b/onnxruntime/core/mlas/lib/platform.cpp
@@ -16,6 +16,7 @@ Module Name:
--*/
#include "mlasi.h"
+#include <string>
#if defined(MLAS_TARGET_POWER) && defined(__linux__)
#include <sys/auxv.h>
@@ -197,8 +198,11 @@ Return Value:
//
uint64_t xcr0 = MlasReadExtendedControlRegister(_XCR_XFEATURE_ENABLED_MASK);
+ const char *cpu_opt = std::getenv("MLAS_DYNAMIC_CPU_ARCH");
+ if (cpu_opt == nullptr) cpu_opt = "99";
+ auto opt = std::stoi(cpu_opt);
- if ((xcr0 & 0x6) == 0x6) {
+ if (opt > 0 && (xcr0 & 0x6) == 0x6) {
this->GemmFloatKernel = MlasGemmFloatKernelAvx;
@@ -231,7 +235,7 @@ Return Value:
__cpuid_count(7, 0, Cpuid7[0], Cpuid7[1], Cpuid7[2], Cpuid7[3]);
#endif
- if (((Cpuid1[2] & 0x1000) != 0) && ((Cpuid7[1] & 0x20) != 0)) {
+ if (opt > 1 && ((Cpuid1[2] & 0x1000) != 0) && ((Cpuid7[1] & 0x20) != 0)) {
this->GemmU8S8Dispatch = &MlasGemmU8S8DispatchAvx2;
this->GemmU8S8Kernel = MlasGemmU8S8KernelAvx2;
@@ -290,7 +294,7 @@ Return Value:
// operating system supports saving AVX512F state.
//
- if (((Cpuid7[1] & 0x10000) != 0) && ((xcr0 & 0xE0) == 0xE0)) {
+ if (opt > 2 && ((Cpuid7[1] & 0x10000) != 0) && ((xcr0 & 0xE0) == 0xE0)) {
this->GemmFloatKernel = MlasGemmFloatKernelAvx512F;
this->GemmDoubleKernel = MlasGemmDoubleKernelAvx512F;
diff --git a/onnxruntime/core/platform/posix/ort_mutex.cc b/onnxruntime/core/platform/posix/ort_mutex.cc
index 8a5d41eb360..89111c9daa5 100644
--- a/onnxruntime/core/platform/posix/ort_mutex.cc
+++ b/onnxruntime/core/platform/posix/ort_mutex.cc
@@ -1,6 +1,7 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
+#if !defined(__aarch64__)
#include "core/common/common.h"
#include "core/platform/ort_mutex.h"
#include <assert.h>
@@ -40,4 +41,5 @@ void OrtCondVar::wait(std::unique_lock<OrtMutex>& lk) {
nsync::nsync_cv_wait(&native_cv_object, lk.mutex()->native_handle());
}
-} // namespace onnxruntime
\ No newline at end of file
+} // namespace onnxruntime
+#endif

View File

@@ -0,0 +1,36 @@
From de4089f8cbe0baffe56a363cc3a41595cc8f0809 Mon Sep 17 00:00:00 2001
From: ankurverma85 <31362771+ankurverma85@users.noreply.github.com>
Date: Mon, 10 May 2021 12:50:08 -0700
Subject: [PATCH] GCC11/Libstdc++11 Compilation fixes (#7599)
Authored-by: Ankur Verma <ankurv@microsoft.com>
---
include/onnxruntime/core/graph/graph_nodes.h | 2 +-
onnxruntime/test/providers/cpu/controlflow/loop_test.cc | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/onnxruntime/core/graph/graph_nodes.h b/include/onnxruntime/core/graph/graph_nodes.h
index 422fe9538ea..aec603f7942 100644
--- a/include/onnxruntime/core/graph/graph_nodes.h
+++ b/include/onnxruntime/core/graph/graph_nodes.h
@@ -100,7 +100,7 @@ class ValidNodes {
using const_reference = const T&;
/** Construct a NodeInterator and move to the first valid node. */
- NodeIterator<TIterator>(const TIterator current, const TIterator end, const NodeFilterFunc& filter_fn) noexcept
+ NodeIterator(const TIterator current, const TIterator end, const NodeFilterFunc& filter_fn) noexcept
: current_{current}, end_{end}, apply_filter_{filter_fn != nullptr}, filter_func_{&filter_fn} {
// skip to next valid node, stopping at end if none are found
while (current_ < end && (*current_ == nullptr ||
diff --git a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc
index b058e9a16c7..3cf147e997c 100644
--- a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc
+++ b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc
@@ -2,6 +2,7 @@
// Licensed under the MIT License.
#include <future>
+#include <thread>
#include "gtest/gtest.h"
#include "gmock/gmock.h"

View File

@@ -0,0 +1,40 @@
--- cmake/onnxruntime.cmake.orig 2022-10-07 16:39:05.935164330 +0200
+++ cmake/onnxruntime.cmake 2022-10-07 16:39:56.702750071 +0200
@@ -195,6 +195,7 @@
target_link_libraries(onnxruntime PRIVATE
${onnxruntime_INTERNAL_LIBRARIES}
${onnxruntime_EXTERNAL_LIBRARIES}
+ -liconv
)
set_property(TARGET onnxruntime APPEND_STRING PROPERTY LINK_FLAGS ${ONNXRUNTIME_SO_LINK_FLAG} ${onnxruntime_DELAYLOAD_FLAGS})
--- cmake/onnxruntime_python.cmake.orig 2022-10-07 16:40:36.134428297 +0200
+++ cmake/onnxruntime_python.cmake 2022-10-07 16:40:46.916340311 +0200
@@ -156,6 +156,7 @@
onnxruntime_common
onnxruntime_flatbuffers
${pybind11_lib}
+ -liconv
)
if (onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS)
--- cmake/onnxruntime_unittests.cmake.orig 2022-10-07 16:41:33.413960900 +0200
+++ cmake/onnxruntime_unittests.cmake 2022-10-07 16:42:35.780451977 +0200
@@ -664,7 +664,7 @@
SOURCES ${all_tests} ${onnxruntime_unittest_main_src}
LIBS
onnx_test_runner_common ${onnxruntime_test_providers_libs} ${onnxruntime_test_common_libs}
- onnx_test_data_proto nlohmann_json::nlohmann_json
+ onnx_test_data_proto nlohmann_json::nlohmann_json -liconv
DEPENDS ${all_dependencies}
)
if(NOT MSVC)
@@ -790,7 +790,7 @@
)
endif()
-target_link_libraries(onnx_test_runner PRIVATE onnx_test_runner_common ${GETOPT_LIB_WIDE} ${onnx_test_libs})
+target_link_libraries(onnx_test_runner PRIVATE onnx_test_runner_common ${GETOPT_LIB_WIDE} ${onnx_test_libs} -liconv)
target_include_directories(onnx_test_runner PRIVATE ${ONNXRUNTIME_ROOT})
if (onnxruntime_USE_ROCM)
target_include_directories(onnx_test_runner PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/amdgpu/onnxruntime ${CMAKE_CURRENT_BINARY_DIR}/amdgpu/orttraining)

View File

@@ -0,0 +1,42 @@
--- cmake/onnxruntime.cmake.orig 2021-08-06 12:36:32.720081500 +0200
+++ cmake/onnxruntime.cmake 2021-08-13 18:53:30.310868700 +0200
@@ -120,7 +120,8 @@
onnxruntime_common
onnxruntime_mlas
onnxruntime_flatbuffers
- ${onnxruntime_EXTERNAL_LIBRARIES})
+ ${onnxruntime_EXTERNAL_LIBRARIES}
+ -liconv)
if (onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS)
target_link_libraries(onnxruntime PRIVATE onnxruntime_language_interop onnxruntime_pyop)
--- cmake/onnxruntime_python.cmake.orig 2021-08-06 12:36:32.725148600 +0200
+++ cmake/onnxruntime_python.cmake 2021-08-13 18:54:37.085622000 +0200
@@ -106,6 +106,7 @@
onnxruntime_mlas
onnxruntime_flatbuffers
${pybind11_lib}
+ -liconv
)
if (onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS)
--- cmake/onnxruntime_unittests.cmake.orig 2021-08-13 19:11:58.645461300 +0200
+++ cmake/onnxruntime_unittests.cmake 2021-08-13 19:14:18.373814800 +0200
@@ -603,7 +603,7 @@
AddTest(
TARGET onnxruntime_test_all
SOURCES ${all_tests} ${onnxruntime_unittest_main_src}
- LIBS onnx_test_runner_common ${onnxruntime_test_providers_libs} ${onnxruntime_test_common_libs} re2::re2 onnx_test_data_proto
+ LIBS onnx_test_runner_common ${onnxruntime_test_providers_libs} ${onnxruntime_test_common_libs} re2::re2 onnx_test_data_proto -liconv
DEPENDS ${all_dependencies}
)
@@ -723,7 +723,7 @@
target_compile_options(onnx_test_runner PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:SHELL:--compiler-options /utf-8>"
"$<$<NOT:$<COMPILE_LANGUAGE:CUDA>>:/utf-8>")
endif()
-target_link_libraries(onnx_test_runner PRIVATE onnx_test_runner_common ${GETOPT_LIB_WIDE} ${onnx_test_libs})
+target_link_libraries(onnx_test_runner PRIVATE onnx_test_runner_common ${GETOPT_LIB_WIDE} ${onnx_test_libs} -liconv)
target_include_directories(onnx_test_runner PRIVATE ${ONNXRUNTIME_ROOT})
set_target_properties(onnx_test_runner PROPERTIES FOLDER "ONNXRuntimeTest")

View File

@@ -0,0 +1,134 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyOnnxruntime(CMakePackage, PythonExtension):
"""ONNX Runtime is a performance-focused complete scoring
engine for Open Neural Network Exchange (ONNX) models, with
an open extensible architecture to continually address the
latest developments in AI and Deep Learning. ONNX Runtime
stays up to date with the ONNX standard with complete
implementation of all ONNX operators, and supports all
ONNX releases (1.2+) with both future and backwards
compatibility."""
homepage = "https://github.com/microsoft/onnxruntime"
git = "https://github.com/microsoft/onnxruntime.git"
version("1.10.0", tag="v1.10.0", submodules=True)
version("1.7.2", tag="v1.7.2", submodules=True)
variant("cuda", default=False, description="Build with CUDA support")
depends_on("cmake@3.1:", type="build")
depends_on("ninja", type="build")
depends_on("python", type=("build", "run"))
depends_on("py-pip", type="build")
depends_on("protobuf")
# https://github.com/microsoft/onnxruntime/pull/11639
depends_on("protobuf@:3.19", when="@:1.11")
depends_on("py-protobuf", type=("build", "run"))
depends_on("py-setuptools", type="build")
depends_on("py-numpy@1.16.6:", type=("build", "run"))
depends_on("py-sympy@1.1:", type=("build", "run"))
depends_on("py-packaging", type=("build", "run"))
depends_on("py-cerberus", type=("build", "run"))
depends_on("py-wheel", type="build")
depends_on("py-onnx", type=("build", "run"))
depends_on("py-flatbuffers", type=("build", "run"))
depends_on("zlib")
depends_on("libpng")
depends_on("py-pybind11", type="build")
depends_on("cuda", when="+cuda")
depends_on("cudnn", when="+cuda")
depends_on("iconv", type=("build", "link", "run"))
depends_on("re2+shared")
extends("python")
# Adopted from CMS experiment's fork of onnxruntime
# https://github.com/cms-externals/onnxruntime/compare/5bc92df...d594f80
patch("cms.patch", level=1, when="@1.7.2")
# https://github.com/cms-externals/onnxruntime/compare/0d9030e...7a6355a
patch("cms_1_10.patch", whe="@1.10")
# https://github.com/microsoft/onnxruntime/issues/4234#issuecomment-698077636
# only needed when iconv is provided by libiconv
patch("libiconv.patch", level=0, when="@1.7.2 ^libiconv")
patch("libiconv-1.10.patch", level=0, when="@1.10.0 ^libiconv")
# https://github.com/microsoft/onnxruntime/commit/de4089f8cbe0baffe56a363cc3a41595cc8f0809.patch
patch("gcc11.patch", level=1, when="@1.7.2")
dynamic_cpu_arch_values = ("NOAVX", "AVX", "AVX2", "AVX512")
variant(
"dynamic_cpu_arch",
default="AVX512",
values=dynamic_cpu_arch_values,
multi=False,
description="AVX support level",
)
generator = "Ninja"
root_cmakelists_dir = "cmake"
build_directory = "."
def setup_build_environment(self, env):
value = self.spec.variants["dynamic_cpu_arch"].value
value = self.dynamic_cpu_arch_values.index(value)
env.set("MLAS_DYNAMIC_CPU_ARCH", str(value))
def setup_run_environment(self, env):
value = self.spec.variants["dynamic_cpu_arch"].value
value = self.dynamic_cpu_arch_values.index(value)
env.set("MLAS_DYNAMIC_CPU_ARCH", str(value))
def cmake_args(self):
define = self.define
define_from_variant = self.define_from_variant
args = [
define("onnxruntime_ENABLE_PYTHON", True),
define("onnxruntime_BUILD_SHARED_LIB", True),
define_from_variant("onnxruntime_USE_CUDA", "cuda"),
define("onnxruntime_BUILD_CSHARP", False),
define("onnxruntime_USE_EIGEN_FOR_BLAS", True),
define("onnxruntime_USE_OPENBLAS", False),
define("onnxruntime_USE_MKLML", False),
define("onnxruntime_USE_NGRAPH", False),
define("onnxruntime_USE_OPENMP", False),
define("onnxruntime_USE_TVM", False),
define("onnxruntime_USE_LLVM", False),
define("onnxruntime_ENABLE_MICROSOFT_INTERNAL", False),
define("onnxruntime_USE_BRAINSLICE", False),
define("onnxruntime_USE_NUPHAR", False),
define("onnxruntime_USE_TENSORRT", False),
define("onnxruntime_CROSS_COMPILING", False),
define("onnxruntime_USE_FULL_PROTOBUF", True),
define("onnxruntime_DISABLE_CONTRIB_OPS", False),
define("onnxruntime_USE_PREINSTALLED_PROTOBUF", True),
define("onnxruntime_PREFER_SYSTEM_LIB", True),
]
if self.spec.satisfies("+cuda"):
args.extend(
(
define("onnxruntime_CUDA_VERSION", str(self.spec["cuda"].version)),
define("onnxruntime_CUDA_HOME", self.spec["cuda"].prefix),
define("onnxruntime_CUDNN_HOME", self.spec["cudnn"].prefix),
define("CMAKE_CUDA_FLAGS", "-cudart shared"),
define("CMAKE_CUDA_RUNTIME_LIBRARY", "Shared"),
define("DCMAKE_TRY_COMPILE_PLATFORM_VARIABLES", "CMAKE_CUDA_RUNTIME_LIBRARY"),
)
)
return args
@run_after("install")
def install_python(self):
"""Install everything from build directory."""
args = std_pip_args + ["--prefix=" + prefix, "."]
with working_dir(self.build_directory):
pip(*args)
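The dynamic_cpu_arch variant above works together with the cms patches earlier in this commit: setup_build_environment() and setup_run_environment() export the variant's position in dynamic_cpu_arch_values as MLAS_DYNAMIC_CPU_ARCH, and the patched platform.cpp compares that number against thresholds to decide whether the AVX (opt > 0), AVX2 (opt > 1), and AVX512 (opt > 2) kernels may be used; when the variable is unset, the patch falls back to 99, enabling everything the CPU supports. A small standalone sketch of the mapping, for illustration only:

# The index of the chosen variant value becomes the MLAS_DYNAMIC_CPU_ARCH setting:
# NOAVX -> 0, AVX -> 1, AVX2 -> 2, AVX512 -> 3.
dynamic_cpu_arch_values = ("NOAVX", "AVX", "AVX2", "AVX512")
mapping = {name: index for index, name in enumerate(dynamic_cpu_arch_values)}
print(mapping)  # {'NOAVX': 0, 'AVX': 1, 'AVX2': 2, 'AVX512': 3}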

View File

@@ -42,6 +42,10 @@ class PyPytorchLightning(PythonPackage):
version("1.3.8", sha256="60b0a3e464d394864dae4c8d251afa7aa453644a19bb7672f5ee400343cdf7b0")
version("1.2.10", sha256="2d8365e30ded0c20e73ce6e5b6028478ae460b8fd33727df2275666df005a301")
variant(
"extra", default=False, description="Install extra dependencies for full functionality"
)
# src/pytorch_lightning/__setup__.py
depends_on("python@3.7:", when="@1.6:", type=("build", "run"))
depends_on("python@3.6:", when="@:1.5", type=("build", "run"))
@@ -78,6 +82,26 @@ class PyPytorchLightning(PythonPackage):
depends_on("py-lightning-utilities@0.3,0.4.1:", when="@1.8.4:1.8", type=("build", "run"))
depends_on("py-lightning-utilities@0.3:", when="@1.8.0:1.8.3", type=("build", "run"))
# requirements/pytorch/extra.txt
with when("+extra"):
depends_on("py-matplotlib@3.2:", type=("build", "run"))
depends_on("py-omegaconf@2.0.5:", when="@1.5:", type=("build", "run"))
depends_on("py-omegaconf@2.0.1:", type=("build", "run"))
depends_on("py-hydra-core@1.0.5:", when="@1.5:", type=("build", "run"))
depends_on("py-hydra-core@1:", type=("build", "run"))
depends_on("py-jsonargparse@4.18:+signatures", when="@1.9:", type=("build", "run"))
depends_on("py-jsonargparse@4.15.2:+signatures", when="@1.8:", type=("build", "run"))
depends_on("py-jsonargparse@4.12:+signatures", when="@1.7:", type=("build", "run"))
depends_on("py-jsonargparse@4.7.1:+signatures", when="@1.6.2:", type=("build", "run"))
depends_on("py-jsonargparse@4.6:+signatures", when="@1.6.1:", type=("build", "run"))
depends_on("py-jsonargparse@4.3:+signatures", when="@1.6:", type=("build", "run"))
depends_on("py-jsonargparse@3.19.3:+signatures", when="@1.5:", type=("build", "run"))
depends_on("py-jsonargparse@3.17:+signatures", when="@1.4:", type=("build", "run"))
depends_on("py-jsonargparse@3.13.1:+signatures", when="@1.3:", type=("build", "run"))
depends_on("py-rich@10.14:", when="@1.7:", type=("build", "run"))
depends_on("py-rich@10.2.2:", when="@1.5:", type=("build", "run"))
depends_on("py-tensorboardx@2.2:", when="@1.9:", type=("build", "run"))
# Historical dependencies
depends_on("py-lightning-lite@1.8.0", when="@1.8.0", type=("build", "run"))
depends_on("py-future@0.17.1:", when="@:1.5", type=("build", "run"))
@@ -91,6 +115,14 @@ class PyPytorchLightning(PythonPackage):
depends_on("py-tensorboard@2.9.1:", when="@1.7:1.8.2", type=("build", "run"))
depends_on("py-tensorboard@2.2.0:", when="@1.5:1.6", type=("build", "run"))
depends_on("py-tensorboard@2.2.0:2.4,2.5.1:", when="@:1.4", type=("build", "run"))
depends_on("py-gcsfs@2021.5:", when="@1.4:1.7+extra", type=("build", "run"))
depends_on("py-horovod@0.21.2:0.23,0.24.1:", when="@:1.6.3+extra", type=("build", "run"))
depends_on("py-onnx@1.7:", when="@1.5+extra", type=("build", "run"))
depends_on("py-onnxruntime@1.3:", when="@:1.5+extra", type=("build", "run"))
depends_on("py-torchtext@0.10:", when="@1.7+extra", type=("build", "run"))
depends_on("py-torchtext@0.9:", when="@1.6+extra", type=("build", "run"))
depends_on("py-torchtext@0.7:", when="@1.5+extra", type=("build", "run"))
depends_on("py-torchtext@0.5:", when="@:1.4+extra", type=("build", "run"))
# https://github.com/Lightning-AI/lightning/issues/15494
conflicts("^py-torch~distributed", when="@1.8.0")

View File

@@ -0,0 +1,18 @@
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyTypeshedClient(PythonPackage):
"""A library for accessing stubs in typeshed."""
homepage = "https://github.com/JelleZijlstra/typeshed_client"
pypi = "typeshed-client/typeshed_client-2.1.0.tar.gz"
version("2.1.0", sha256="da1969ec48c342197ddec655c873100ece38fd93e6827a1e6377793a16526f28")
depends_on("py-setuptools@42:", type="build")
depends_on("py-importlib-resources@1.4:", type=("build", "run"))