// Copyright © 2023-2024 Apple Inc.

#include <algorithm>

#include "mlx/backend/metal/copy.h"
#include "mlx/backend/metal/device.h"
#include "mlx/backend/metal/kernels.h"
#include "mlx/backend/metal/kernels/defines.h"
#include "mlx/backend/metal/utils.h"
#include "mlx/primitives.h"

namespace mlx::core {
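
// Rows longer than this many elements are dispatched to the "looped" softmax
// kernel; shorter rows use the "block" kernel.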
constexpr int SOFTMAX_LOOPED_LIMIT = 4096;
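
// Computes softmax over the last axis of the single input on the GPU.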
void Softmax::eval_gpu(const std::vector<array>& inputs, array& out) {
  assert(inputs.size() == 1);
  if (!issubdtype(out.dtype(), floating)) {
    throw std::runtime_error(
        "[softmax] Does not support non-floating point types.");
  }
  auto& s = stream();
  auto& d = metal::device(s.device);

  // Make sure that the last dimension is contiguous
  std::vector<array> copies;
  auto check_input = [&copies, &s](const array& x) -> const array& {
    bool no_copy = x.flags().contiguous && x.strides()[x.ndim() - 1] == 1;
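    // For multi-dimensional inputs, additionally require packed rows: the
    // stride of the second-to-last axis must equal the row length (or be 0,
    // e.g. for broadcasted inputs).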
    if (no_copy && x.ndim() > 1) {
      auto s = x.strides()[x.ndim() - 2];
      no_copy &= (s == 0 || s == x.shape().back());
    }
    if (no_copy) {
      return x;
    } else {
      copies.push_back(array(x.shape(), x.dtype(), nullptr, {}));
      copy_gpu(x, copies.back(), CopyType::General, s);
      return copies.back();
    }
  };
  const array& in = check_input(inputs[0]);
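
  // Donate the input buffer to the output when possible; otherwise allocate
  // output storage with the same data size, strides, and flags as the input.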
  if (in.is_donatable()) {
    out.move_shared_buffer(in);
  } else {
    out.set_data(
        allocator::malloc_or_wait(in.data_size() * in.itemsize()),
        in.data_size(),
        in.strides(),
        in.flags());
  }
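
  // Treat the data as n_rows contiguous rows of axis_size elements each.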
  int axis_size = in.shape().back();
  int n_rows = in.data_size() / axis_size;
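
  // Launch configuration constants: simd_size matches the 32-thread
  // simdgroup width on Apple GPUs, and n_reads is the number of row elements
  // each thread handles in the block kernel.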
  const int simd_size = 32;
  const int n_reads = SOFTMAX_N_READS;
  const int looped_limit = SOFTMAX_LOOPED_LIMIT;
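
  // Build the kernel name from the variant ("looped_" or "block_"), an
  // optional "precise_" tag for non-float32 inputs when precise_ is set, and
  // the output dtype.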
  std::string kernel_name = (axis_size > looped_limit) ? "looped_" : "block_";
  kernel_name += "softmax_";
  if (in.dtype() != float32 && precise_) {
    kernel_name += "precise_";
  }
  kernel_name += type_to_name(out);

  auto kernel = get_softmax_kernel(d, kernel_name, precise_, out);
  auto& compute_encoder = d.get_command_encoder(s.index);
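
  // Compute dispatch geometry. The block kernel uses just enough simdgroups
  // so that threadgroup_size * n_reads covers one row; the looped kernel uses
  // the maximum threadgroup size and (presumably) iterates over the row in
  // chunks.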
  {
    MTL::Size grid_dims, group_dims;
    if (axis_size <= looped_limit) {
      size_t threadgroup_needed = (axis_size + n_reads - 1) / n_reads;
      size_t simds_needed = (threadgroup_needed + simd_size - 1) / simd_size;
      size_t threadgroup_size = simd_size * simds_needed;
      assert(threadgroup_size <= kernel->maxTotalThreadsPerThreadgroup());
      size_t n_threads = n_rows * threadgroup_size;
      grid_dims = MTL::Size(n_threads, 1, 1);
      group_dims = MTL::Size(threadgroup_size, 1, 1);
    } else {
      size_t threadgroup_size = kernel->maxTotalThreadsPerThreadgroup();
      size_t n_threads = n_rows * threadgroup_size;
      grid_dims = MTL::Size(n_threads, 1, 1);
      group_dims = MTL::Size(threadgroup_size, 1, 1);
    }
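
    // Bind the pipeline and arguments: the input at buffer index 0 (the
    // output buffer if the input was donated), the output at index 1, and the
    // row length at index 2, then launch one threadgroup per row.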
    compute_encoder.set_compute_pipeline_state(kernel);
    compute_encoder.set_input_array(
        in.data_shared_ptr() == nullptr ? out : in, 0);
    compute_encoder.set_output_array(out, 1);
    compute_encoder.set_bytes(axis_size, 2);
    compute_encoder.dispatch_threads(grid_dims, group_dims);
  }
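
  // Hand the temporary copies to the device so they are kept alive until the
  // enqueued command buffer has finished using them.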
  d.add_temporaries(std::move(copies), s.index);
}

} // namespace mlx::core