Mirror of https://github.com/ml-explore/mlx.git (synced 2025-12-16 01:49:05 +08:00)
Faster Metal unary and binary for general case (#1431)

* faster unary and binary for general case
* update ternary + jit fix
* fix jit
* unary work per thread
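The idea behind the "work per thread" change, sketched below as a simplified Metal kernel. This is a hypothetical illustration, not the kernel added by this commit: it uses flat indexing and a fixed float/bool select, whereas the real general-case kernels compute per-input strided offsets from shape and strides. Each thread writes WORK_PER_THREAD consecutive outputs, so the host can shrink the launched grid's fastest dimension by the same factor.

#include <metal_stdlib>
using namespace metal;

constant int WORK_PER_THREAD = 4;

// Each thread produces WORK_PER_THREAD consecutive outputs; the host launches
// roughly size / WORK_PER_THREAD threads (rounded up) instead of size threads.
kernel void ternary_sketch_n4(
    device const bool* a [[buffer(0)]],
    device const float* b [[buffer(1)]],
    device const float* c [[buffer(2)]],
    device float* out [[buffer(3)]],
    constant const size_t& size [[buffer(4)]],
    uint index [[thread_position_in_grid]]) {
  size_t base = size_t(index) * WORK_PER_THREAD;
  for (int i = 0; i < WORK_PER_THREAD; ++i) {
    size_t elem = base + i;
    if (elem < size) {  // guard the ragged tail when size is not a multiple of 4
      out[elem] = a[elem] ? b[elem] : c[elem];
    }
  }
}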
@@ -8,8 +8,6 @@
 
 namespace mlx::core {
 
-constexpr int MAX_TERNARY_SPECIALIZED_DIMS = 3;
-
 void ternary_op_gpu_inplace(
     const std::vector<array>& inputs,
     array& out,
@@ -43,13 +41,18 @@ void ternary_op_gpu_inplace(
   auto [shape, strides_a, strides_b, strides_c, strides_out] = maybe_collapse();
 
   bool use_2d = out.data_size() > UINT_MAX;
+  auto ndim = shape.size();
+  int work_per_thread =
+      (topt == TernaryOpType::General && shape[ndim - 1] > 4) ? 4 : 1;
   std::string kernel_name;
   {
     std::ostringstream kname;
     if (topt == TernaryOpType::General) {
       kname << "g";
-      if (shape.size() <= MAX_TERNARY_SPECIALIZED_DIMS) {
+      if (shape.size() <= 3) {
         kname << shape.size();
+      } else if (work_per_thread > 1) {
+        kname << "n" << work_per_thread;
       }
     } else if (use_2d) {
       kname << "v2";
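For illustration, a standalone C++ sketch (hypothetical helper, not MLX API) of the selection logic in the hunk above: work_per_thread becomes 4 only for general kernels whose innermost extent exceeds 4, and the name ends up as "g1".."g3" for rank <= 3, "gn4" for the work-per-thread path, or plain "g" otherwise; "v2" covers the large contiguous case, and the remaining contiguous case is written as "v" here as an assumption.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

std::string ternary_kernel_name(
    const std::vector<int>& shape, bool general, bool use_2d) {
  size_t ndim = shape.size();
  int work_per_thread = (general && shape[ndim - 1] > 4) ? 4 : 1;
  std::string name;
  if (general) {
    name = "g";
    if (ndim <= 3) {
      name += std::to_string(ndim);          // small-rank specialized kernels
    } else if (work_per_thread > 1) {
      name += "n" + std::to_string(work_per_thread);  // new work-per-thread kernel
    }
  } else if (use_2d) {
    name = "v2";
  } else {
    name = "v";  // assumed name for the small contiguous case
  }
  return name;
}

int main() {
  std::cout << ternary_kernel_name({16, 16, 8}, true, false) << "\n";    // g3
  std::cout << ternary_kernel_name({2, 3, 4, 128}, true, false) << "\n"; // gn4
  std::cout << ternary_kernel_name({4096}, false, true) << "\n";         // v2
}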
@@ -75,16 +78,19 @@ void ternary_op_gpu_inplace(
   compute_encoder.set_output_array(out, 3);
 
   if (topt == TernaryOpType::General) {
-    auto ndim = shape.size();
+    // Launch up to 3D grid of threads
+    size_t dim0 = ndim > 0 ? shape[ndim - 1] : 1;
+    size_t dim1 = ndim > 1 ? shape[ndim - 2] : 1;
+    size_t rest = out.size() / (dim0 * dim1);
+
     if (ndim > 3) {
       compute_encoder->setBytes(shape.data(), ndim * sizeof(int), 4);
       compute_encoder->setBytes(strides_a.data(), ndim * sizeof(size_t), 5);
       compute_encoder->setBytes(strides_b.data(), ndim * sizeof(size_t), 6);
       compute_encoder->setBytes(strides_c.data(), ndim * sizeof(size_t), 7);
 
-      if (ndim > MAX_TERNARY_SPECIALIZED_DIMS) {
-        compute_encoder->setBytes(&ndim, sizeof(int), 8);
-      }
+      compute_encoder->setBytes(&ndim, sizeof(int), 8);
+      dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
     } else {
       // The shape is implicit in the grid for <= 3D
       compute_encoder->setBytes(strides_a.data(), ndim * sizeof(size_t), 4);
@@ -92,10 +98,6 @@ void ternary_op_gpu_inplace(
       compute_encoder->setBytes(strides_b.data(), ndim * sizeof(size_t), 5);
       compute_encoder->setBytes(strides_c.data(), ndim * sizeof(size_t), 6);
     }
 
-    // Launch up to 3D grid of threads
-    size_t dim0 = ndim > 0 ? shape[ndim - 1] : 1;
-    size_t dim1 = ndim > 1 ? shape[ndim - 2] : 1;
-    size_t rest = out.size() / (dim0 * dim1);
     NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
    if (thread_group_size != 1024) {
      throw std::runtime_error("[Metal::ternary] Must use 1024 sized block");
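The dispatch-side effect of the change, as a small standalone C++ sketch with a hypothetical collapsed shape (not MLX code): the innermost grid dimension is divided by work_per_thread with ceiling division, as in the `dim0 = (dim0 + work_per_thread - 1) / work_per_thread;` line above, and the remaining dimensions fold into `rest`.

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> shape = {7, 5, 6, 1000};  // example collapsed shape
  int work_per_thread = 4;                   // value chosen for the general case
  size_t ndim = shape.size();

  size_t dim0 = ndim > 0 ? shape[ndim - 1] : 1;  // fastest-moving axis
  size_t dim1 = ndim > 1 ? shape[ndim - 2] : 1;
  size_t size = 1;
  for (int d : shape) size *= d;
  size_t rest = size / (dim0 * dim1);

  // Each thread now covers work_per_thread elements along dim0 (ceil division).
  dim0 = (dim0 + work_per_thread - 1) / work_per_thread;

  std::printf("grid = (%zu, %zu, %zu)\n", dim0, dim1, rest);  // grid = (250, 6, 35)
  return 0;
}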