diff --git a/mlx/backend/cpu/compiled.cpp b/mlx/backend/cpu/compiled.cpp
index 9da9c14e8..e389e0df5 100644
--- a/mlx/backend/cpu/compiled.cpp
+++ b/mlx/backend/cpu/compiled.cpp
@@ -40,7 +40,10 @@ struct CompilerCache {
   std::shared_mutex mtx;
 };
 
-static CompilerCache cache{};
+static CompilerCache& cache() {
+  static CompilerCache cache_;
+  return cache_;
+};
 
 // GPU compile is always available if the GPU is available and since we are in
 // this file CPU compile is also available.
@@ -56,14 +59,16 @@ void* compile(
     const std::string& kernel_name,
     const std::function<std::string(void)>& source_builder) {
   {
-    std::shared_lock lock(cache.mtx);
-    if (auto it = cache.kernels.find(kernel_name); it != cache.kernels.end()) {
+    std::shared_lock lock(cache().mtx);
+    if (auto it = cache().kernels.find(kernel_name);
+        it != cache().kernels.end()) {
       return it->second;
     }
   }
 
-  std::unique_lock lock(cache.mtx);
-  if (auto it = cache.kernels.find(kernel_name); it != cache.kernels.end()) {
+  std::unique_lock lock(cache().mtx);
+  if (auto it = cache().kernels.find(kernel_name);
+      it != cache().kernels.end()) {
     return it->second;
   }
   std::string source_code = source_builder();
@@ -120,10 +125,10 @@ void* compile(
   }
 
   // load library
-  cache.libs.emplace_back(shared_lib_path);
+  cache().libs.emplace_back(shared_lib_path);
 
   // Load function
-  void* fun = dlsym(cache.libs.back().lib, kernel_name.c_str());
+  void* fun = dlsym(cache().libs.back().lib, kernel_name.c_str());
   if (!fun) {
     std::ostringstream msg;
     msg << "[Compile::eval_cpu] Failed to load compiled function "
@@ -131,7 +136,7 @@ void* compile(
         << dlerror();
     throw std::runtime_error(msg.str());
   }
-  cache.kernels.insert({kernel_name, fun});
+  cache().kernels.insert({kernel_name, fun});
   return fun;
 }
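
Note: the pattern this diff applies is a function-local static accessor (a Meyers singleton). Constructing the CompilerCache lazily inside cache() instead of at namespace scope avoids static initialization and destruction order issues across translation units, and C++11 guarantees the first-call initialization is thread-safe. Below is a minimal standalone sketch of that pattern only, not the MLX implementation; the Registry type and its fields are invented for illustration.

    #include <iostream>
    #include <map>
    #include <shared_mutex>
    #include <string>

    // Hypothetical cache type standing in for CompilerCache.
    struct Registry {
      std::map<std::string, int> entries;
      std::shared_mutex mtx;
    };

    // Function-local static: constructed on first use rather than during
    // static initialization, so its construction order relative to other
    // globals is well defined, and the initialization is thread-safe (C++11).
    static Registry& registry() {
      static Registry instance;
      return instance;
    }

    int main() {
      {
        // Writers take an exclusive lock, mirroring the unique_lock path above.
        std::unique_lock lock(registry().mtx);
        registry().entries.emplace("answer", 42);
      }
      // Readers take a shared lock, mirroring the shared_lock fast path above.
      std::shared_lock lock(registry().mtx);
      std::cout << registry().entries.at("answer") << "\n";
      return 0;
    }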