diff --git a/mlx/backend/cpu/compiled.cpp b/mlx/backend/cpu/compiled.cpp
index 53d6c47e3..296390a32 100644
--- a/mlx/backend/cpu/compiled.cpp
+++ b/mlx/backend/cpu/compiled.cpp
@@ -49,7 +49,7 @@ static CompilerCache& cache() {
 // GPU compile is always available if the GPU is available and since we are in
 // this file CPU compile is also available.
 namespace detail {
-bool compile_available_for_device(const Device& device) {
+bool compile_available_for_device(const Device& /* device */) {
   return true;
 }
 
@@ -168,7 +168,7 @@ inline void build_kernel(
   // Add the input arguments
   int cnt = 0;
   int strides_index = 1;
-  for (size_t i = 0; i < inputs.size(); ++i) {
+  for (int i = 0; i < std::ssize(inputs); ++i) {
     // Skip constants from the input list
     if (is_constant(i)) {
       continue;
@@ -238,7 +238,7 @@ inline void build_kernel(
   } else {
     os << x.primitive().name();
     os << "()(";
-    for (int i = 0; i < x.inputs().size() - 1; i++) {
+    for (int i = 0; i < std::ssize(x.inputs()) - 1; i++) {
       os << "tmp_" << namer.get_name(x.inputs()[i]) << ", ";
     }
     os << "tmp_" << namer.get_name(x.inputs().back()) << ");" << std::endl;
diff --git a/mlx/backend/cpu/inverse.cpp b/mlx/backend/cpu/inverse.cpp
index ddc979daa..3da657cbe 100644
--- a/mlx/backend/cpu/inverse.cpp
+++ b/mlx/backend/cpu/inverse.cpp
@@ -122,7 +122,7 @@ void inverse_impl(
       stream);
 
   const int N = a.shape(-1);
-  const size_t num_matrices = a.size() / (N * N);
+  const int64_t num_matrices = a.size() / (N * N);
 
   auto& encoder = cpu::get_command_encoder(stream);
   encoder.set_output_array(inv);
@@ -130,13 +130,13 @@
   auto inv_ptr = inv.data();
   if (tri) {
     encoder.dispatch([inv_ptr, N, num_matrices, upper]() {
-      for (int i = 0; i < num_matrices; i++) {
+      for (int64_t i = 0; i < num_matrices; i++) {
         tri_inv(inv_ptr + N * N * i, N, upper);
       }
     });
   } else {
     encoder.dispatch([inv_ptr, N, num_matrices]() {
-      for (int i = 0; i < num_matrices; i++) {
+      for (int64_t i = 0; i < num_matrices; i++) {
         general_inv(inv_ptr + N * N * i, N);
       }
     });
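
Note on the idiom above (not part of the patch): the hunks make loop counters and their bounds agree in signedness, using std::ssize (C++20) in compiled.cpp and int64_t counters in inverse.cpp, which avoids signed/unsigned comparison warnings and keeps large offsets in signed 64-bit math. A minimal standalone sketch of the same idiom, with hypothetical names (process, num_blocks) used only for illustration:

#include <cstdint>
#include <iterator>
#include <vector>

// Hypothetical helper standing in for per-element work such as the
// build_kernel loop body in compiled.cpp.
void process(std::int64_t /* index */) {}

int main() {
  std::vector<int> inputs = {1, 2, 3};

  // std::ssize returns a signed value (std::ptrdiff_t here), so an int
  // counter compares against it without a signed/unsigned mismatch.
  for (int i = 0; i < std::ssize(inputs); ++i) {
    process(i);
  }

  // Deriving a count as int64_t, as inverse.cpp now does for num_matrices,
  // keeps the counter and any derived pointer offsets in signed 64-bit math.
  const std::int64_t num_blocks = static_cast<std::int64_t>(inputs.size());
  for (std::int64_t i = 0; i < num_blocks; ++i) {
    process(i);
  }
  return 0;
}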