From 6bb0b254fd0685386f4783169d68627ef90d4b04 Mon Sep 17 00:00:00 2001
From: Awni Hannun
Date: Fri, 20 Jun 2025 13:01:27 -0700
Subject: [PATCH] format

---
 mlx/backend/cuda/matmul.cpp | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/mlx/backend/cuda/matmul.cpp b/mlx/backend/cuda/matmul.cpp
index 5b86961da..c32cecc03 100644
--- a/mlx/backend/cuda/matmul.cpp
+++ b/mlx/backend/cuda/matmul.cpp
@@ -162,14 +162,14 @@ class MatMul {
     }
   }

-  void *workspace_ptr = nullptr;
+  void* workspace_ptr = nullptr;
   if (heuristic_.workspaceSize > 0) {
-      array workspace(
-          allocator::malloc(heuristic_.workspaceSize),
-          {static_cast(heuristic_.workspaceSize)},
-          int8);
-      encoder.add_temporary(workspace);
-      workspace_ptr = workspace.data();
+    array workspace(
+        allocator::malloc(heuristic_.workspaceSize),
+        {static_cast(heuristic_.workspaceSize)},
+        int8);
+    encoder.add_temporary(workspace);
+    workspace_ptr = workspace.data();
   }

   encoder.launch_kernel([&](cudaStream_t stream) {
@@ -464,7 +464,14 @@ void AddMM::eval_gpu(const std::vector<array>& inputs, array& out) {
   auto nbatch = batch_count / batch_shape.back();
   if (nbatch == 1) {
-    matmul.run(encoder, out.data(), a.data(), b.data(), c.data(), alpha_, beta_);
+    matmul.run(
+        encoder,
+        out.data(),
+        a.data(),
+        b.data(),
+        c.data(),
+        alpha_,
+        beta_);
     return;
   }