From 884af42da21dbabdb8828f12eb3f22845f52f66f Mon Sep 17 00:00:00 2001
From: Awni Hannun
Date: Wed, 30 Oct 2024 16:25:12 -0700
Subject: [PATCH] Fix thread group for large arrays (#1543)

* fix thread group for large arrays

* comment

* one more
---
 mlx/backend/metal/binary.cpp   |  8 +++-----
 mlx/backend/metal/compiled.cpp |  5 +++--
 mlx/backend/metal/copy.cpp     | 13 ++++++-------
 mlx/backend/metal/ternary.cpp  |  7 +++----
 mlx/backend/metal/unary.cpp    |  6 +++---
 mlx/backend/metal/utils.cpp    |  6 ++++++
 6 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/mlx/backend/metal/binary.cpp b/mlx/backend/metal/binary.cpp
index c87f98272..f70595e56 100644
--- a/mlx/backend/metal/binary.cpp
+++ b/mlx/backend/metal/binary.cpp
@@ -1,5 +1,4 @@
 // Copyright © 2024 Apple Inc.
-
 #include "mlx/backend/common/binary.h"
 #include "mlx/backend/metal/device.h"
 #include "mlx/backend/metal/kernels.h"
@@ -110,6 +109,7 @@ void binary_op_gpu_inplace(
     compute_encoder.set_output_array(outputs[1], arg_idx++);
   }
 
+  auto thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
   if (bopt == BinaryOpType::General) {
     // Launch up to 3D grid of threads
     size_t dim0 = ndim > 0 ? shape[ndim - 1] : 1;
@@ -132,7 +132,6 @@
           strides_b.data(), ndim * sizeof(size_t), arg_idx++);
     }
 
-    NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
     if (thread_group_size != 1024) {
       throw std::runtime_error("[Metal::binary] Must use 1024 sized block");
     }
@@ -142,13 +141,12 @@
   } else {
     // Launch a 1D or 2D grid of threads
     size_t nthreads = out.data_size();
-    MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
-                                 : MTL::Size(nthreads, 1, 1);
-    NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
     if (thread_group_size > nthreads) {
       thread_group_size = nthreads;
     }
     MTL::Size group_dims = MTL::Size(thread_group_size, 1, 1);
+    MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
+                                 : MTL::Size(nthreads, 1, 1);
     compute_encoder.dispatchThreads(grid_dims, group_dims);
   }
 }
diff --git a/mlx/backend/metal/compiled.cpp b/mlx/backend/metal/compiled.cpp
index add0592f2..af9b8c872 100644
--- a/mlx/backend/metal/compiled.cpp
+++ b/mlx/backend/metal/compiled.cpp
@@ -421,11 +421,12 @@ void Compiled::eval_gpu(
   // Launch the kernel
   if (contiguous) {
     size_t nthreads = outputs[0].data_size();
+    MTL::Size group_dims(
+        std::min(nthreads, kernel->maxTotalThreadsPerThreadgroup()), 1, 1);
+
     MTL::Size grid_dims = use_2d
         ? get_2d_grid_dims(outputs[0].shape(), outputs[0].strides())
         : MTL::Size(nthreads, 1, 1);
-    MTL::Size group_dims(
-        std::min(nthreads, kernel->maxTotalThreadsPerThreadgroup()), 1, 1);
     compute_encoder.dispatchThreads(grid_dims, group_dims);
   } else {
     size_t dim0 = ndim > 0 ? shape[ndim - 1] : 1;
diff --git a/mlx/backend/metal/copy.cpp b/mlx/backend/metal/copy.cpp
index 49a09483a..897eadb1c 100644
--- a/mlx/backend/metal/copy.cpp
+++ b/mlx/backend/metal/copy.cpp
@@ -120,6 +120,7 @@ void copy_gpu_inplace(
   compute_encoder.set_input_array(donate_in ? out : in, 0, inp_offset);
   compute_encoder.set_output_array(out, 1, out_offset);
 
+  auto thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
   if (ctype == CopyType::General || ctype == CopyType::GeneralGeneral) {
     std::vector<size_t> strides_in{strides_in_.begin(), strides_in_.end()};
     std::vector<size_t> strides_out{strides_out_.begin(), strides_out_.end()};
@@ -145,7 +146,6 @@
     }
 
     // NB assuming thread_group_size is a power of 2 larger than 32 x 32
-    NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
     if (thread_group_size != 1024) {
       throw std::runtime_error("[Metal::copy] Must use 1024 sized block");
     }
@@ -155,13 +155,12 @@
     compute_encoder.dispatchThreads(grid_dims, group_dims);
   } else {
     size_t nthreads = out.data_size();
-    MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
-                                 : MTL::Size(nthreads, 1, 1);
-    NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
    if (thread_group_size > nthreads) {
      thread_group_size = nthreads;
    }
    MTL::Size group_dims = MTL::Size(thread_group_size, 1, 1);
+    MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
+                                 : MTL::Size(nthreads, 1, 1);
     compute_encoder.dispatchThreads(grid_dims, group_dims);
   }
 }
@@ -205,14 +204,14 @@ void fill_gpu(const array& val, array& out, const Stream& s) {
   compute_encoder.set_input_array(val, 0);
   compute_encoder.set_output_array(out, 1);
 
+  auto thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
   size_t nthreads = out.data_size();
-  MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
-                               : MTL::Size(nthreads, 1, 1);
-  NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
   if (thread_group_size > nthreads) {
     thread_group_size = nthreads;
   }
   MTL::Size group_dims = MTL::Size(thread_group_size, 1, 1);
+  MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
+                               : MTL::Size(nthreads, 1, 1);
   compute_encoder.dispatchThreads(grid_dims, group_dims);
 }
 
diff --git a/mlx/backend/metal/ternary.cpp b/mlx/backend/metal/ternary.cpp
index d353dda5e..0f82f9894 100644
--- a/mlx/backend/metal/ternary.cpp
+++ b/mlx/backend/metal/ternary.cpp
@@ -72,6 +72,7 @@ void ternary_op_gpu_inplace(
   compute_encoder.set_input_array(donate_c ? out : c, 2);
   compute_encoder.set_output_array(out, 3);
 
+  auto thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
   if (topt == TernaryOpType::General) {
     // Launch up to 3D grid of threads
     size_t dim0 = ndim > 0 ? shape[ndim - 1] : 1;
@@ -93,7 +94,6 @@
       compute_encoder->setBytes(strides_c.data(), ndim * sizeof(size_t), 6);
     }
 
-    NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
     if (thread_group_size != 1024) {
       throw std::runtime_error("[Metal::ternary] Must use 1024 sized block");
     }
@@ -103,13 +103,12 @@
   } else {
     // Launch a 1D or 2D grid of threads
     size_t nthreads = out.data_size();
-    MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
-                                 : MTL::Size(nthreads, 1, 1);
-    NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
     if (thread_group_size > nthreads) {
       thread_group_size = nthreads;
     }
     MTL::Size group_dims = MTL::Size(thread_group_size, 1, 1);
+    MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
+                                 : MTL::Size(nthreads, 1, 1);
     compute_encoder.dispatchThreads(grid_dims, group_dims);
   }
 }
diff --git a/mlx/backend/metal/unary.cpp b/mlx/backend/metal/unary.cpp
index acb469f15..8f061a3b7 100644
--- a/mlx/backend/metal/unary.cpp
+++ b/mlx/backend/metal/unary.cpp
@@ -47,9 +47,7 @@
   kernel_name += "_" + op + type_to_name(in) + type_to_name(out);
   auto kernel = get_unary_kernel(d, kernel_name, in.dtype(), out.dtype(), op);
 
-  MTL::Size grid_dims = use_2d ? get_2d_grid_dims(in.shape(), in.strides())
-                               : MTL::Size(nthreads, 1, 1);
-  NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
+  auto thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
   auto& compute_encoder = d.get_command_encoder(s.index);
   compute_encoder->setComputePipelineState(kernel);
   compute_encoder.set_input_array(
@@ -75,6 +73,8 @@
     thread_group_size = nthreads;
   }
   MTL::Size group_dims = MTL::Size(thread_group_size, 1, 1);
+  MTL::Size grid_dims = use_2d ? get_2d_grid_dims(out.shape(), out.strides())
+                               : MTL::Size(nthreads, 1, 1);
   compute_encoder.dispatchThreads(grid_dims, group_dims);
   }
 }
diff --git a/mlx/backend/metal/utils.cpp b/mlx/backend/metal/utils.cpp
index d15e221dd..deff629eb 100644
--- a/mlx/backend/metal/utils.cpp
+++ b/mlx/backend/metal/utils.cpp
@@ -103,6 +103,9 @@ MTL::Size get_2d_grid_dims(
   if (grid_y > UINT32_MAX || grid_x > UINT32_MAX) {
     throw std::runtime_error("Unable to safely factor shape.");
   }
+  if (grid_y > grid_x) {
+    std::swap(grid_x, grid_y);
+  }
   return MTL::Size(
       static_cast<uint32_t>(grid_x), static_cast<uint32_t>(grid_y), 1);
 }
@@ -145,6 +148,9 @@ MTL::Size get_2d_grid_dims(
   if (grid_y > UINT32_MAX || grid_x > UINT32_MAX || divisor > 1) {
     throw std::runtime_error("Unable to safely factor shape.");
   }
+  if (grid_y > grid_x) {
+    std::swap(grid_x, grid_y);
+  }
   return MTL::Size(
       static_cast<uint32_t>(grid_x), static_cast<uint32_t>(grid_y), 1);
 }
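
The pattern every launch path converges on in this patch is: read maxTotalThreadsPerThreadgroup() once, clamp it to the element count, and only then pick grid dimensions, with get_2d_grid_dims() now guaranteeing grid_x >= grid_y. The following is a minimal standalone sketch of that logic in plain C++ with no Metal dependency; the factoring is deliberately simplified (the real get_2d_grid_dims() works from the array's shape and strides), and the helper names, Size3 struct, and the hard-coded 1024 are illustrative stand-ins, not MLX's implementation.

// Standalone sketch of the dispatch-size pattern in this patch.
// Plain C++; names mirror the MLX helpers but are illustrative only.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <utility>

struct Size3 {
  uint32_t x, y, z; // stands in for MTL::Size
};

// Factor nthreads into a 2D grid whose dimensions both fit in 32 bits,
// mirroring the idea behind get_2d_grid_dims. After factoring, the larger
// extent is moved to x (the patch's std::swap fix) so the (tgs, 1, 1)
// thread group lines up with the long axis of the grid.
Size3 grid_dims_2d(uint64_t nthreads) {
  uint64_t grid_x = nthreads;
  uint64_t grid_y = 1;
  // Peel factors of two off x into y until x fits in 32 bits.
  while (grid_x > UINT32_MAX && grid_x % 2 == 0) {
    grid_x /= 2;
    grid_y *= 2;
  }
  if (grid_x > UINT32_MAX || grid_y > UINT32_MAX) {
    throw std::runtime_error("Unable to safely factor shape.");
  }
  if (grid_y > grid_x) {
    std::swap(grid_x, grid_y);
  }
  return {static_cast<uint32_t>(grid_x), static_cast<uint32_t>(grid_y), 1};
}

int main() {
  uint64_t nthreads = uint64_t(1) << 33; // too many elements for a 1D grid
  // Clamp the thread group first (the ordering the patch enforces), then
  // pick grid dimensions; 1024 stands in for maxTotalThreadsPerThreadgroup().
  uint64_t thread_group_size = std::min<uint64_t>(1024, nthreads);
  Size3 grid = grid_dims_2d(nthreads);
  std::printf("group = (%llu, 1, 1), grid = (%u, %u, %u)\n",
              (unsigned long long)thread_group_size, grid.x, grid.y, grid.z);
  return 0;
}

For 2^33 elements this prints group = (1024, 1, 1) and grid = (2147483648, 4, 1). Before the std::swap change a 2D grid could come back with the huge factor on y while the threadgroup spanned only x; keeping the larger factor on x, which appears to be the point of the swap, keeps the (1024, 1, 1) threadgroup tiling the long axis of the grid, so dispatches over very large arrays stay well formed.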