Mirror of https://github.com/ml-explore/mlx.git
Bug fix in metal binary kernel dispatch for large arrays (#125)
* bug fix
* format
commit 71d1fff90a
parent 0cfbfc9904
@@ -84,9 +84,9 @@ void binary_op(
   }
 
   // Launch up to 3D grid of threads
-  int dim0 = ndim > 0 ? shape[ndim - 1] : 1;
-  int dim1 = ndim > 1 ? shape[ndim - 2] : 1;
-  int rest = out.size() / (dim0 * dim1);
+  size_t dim0 = ndim > 0 ? shape[ndim - 1] : 1;
+  size_t dim1 = ndim > 1 ? shape[ndim - 2] : 1;
+  size_t rest = out.size() / (dim0 * dim1);
   NS::UInteger thread_group_size = kernel->maxTotalThreadsPerThreadgroup();
   if (thread_group_size != 1024) {
     throw std::runtime_error("[Metal::binary] Must use 1024 sized block");
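The crux of the fix: the launch grid for the binary kernel was computed in 32-bit int, so dim0 * dim1 (and rest = out.size() / (dim0 * dim1)) overflows once the output carries more than 2^31 - 1 elements; size_t keeps the arithmetic in 64 bits. Below is a minimal standalone sketch of the failure mode, not MLX code, using the shapes from the regression test added later in this commit.

// Standalone sketch (not MLX code): why a 32-bit grid computation breaks
// for large outputs. The shapes mirror the regression test in this commit.
#include <cstdint>
#include <cstdio>

int main() {
  size_t dim0 = 2147484; // innermost axis, shape[ndim - 1]
  size_t dim1 = 1000;    // next axis,      shape[ndim - 2]

  // 64-bit arithmetic: 2,147,484,000 grid cells, as intended.
  size_t ok = dim0 * dim1;

  // 32-bit arithmetic: the product exceeds INT32_MAX (2,147,483,647) and
  // wraps to a negative value, corrupting the dispatch dimensions and the
  // computation of rest = out.size() / (dim0 * dim1).
  int32_t wrapped = static_cast<int32_t>(
      static_cast<uint32_t>(dim0) * static_cast<uint32_t>(dim1));

  std::printf("size_t product: %zu\n", ok);      // 2147484000
  std::printf("int32  product: %d\n", wrapped);  // -2147483296
  return 0;
}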
@@ -24,7 +24,6 @@ def sigmoid(x):
     return mx.sigmoid(x)
 
 
-
 def relu(x):
     """Applies the Rectified Linear Unit.
 
@@ -89,10 +88,12 @@ def gelu_fast_approx(x):
     """
     return x * mx.sigmoid(1.773 * x)
 
+
 @_make_activation_module
 class Sigmoid(Module):
     pass
 
+
 @_make_activation_module(relu)
 class ReLU(Module):
     pass
@@ -1305,6 +1305,11 @@ class TestOps(mlx_tests.MLXTestCase):
         d_np = np.take(b_mx, np.arange(kth), axis=axis)
         self.assertTrue(np.all(d_np <= c_mx))
 
+    def test_large_binary(self):
+        a = mx.ones([1000, 2147484], mx.int8)
+        b = mx.ones([2147484], mx.int8)
+        self.assertEqual((a + b)[0, 0].item(), 2)
+
 
 if __name__ == "__main__":
     unittest.main()
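The test shapes are what make this a regression test for the dispatch fix: a + b broadcasts to 1000 x 2147484 = 2,147,484,000 elements, just past INT32_MAX (2,147,483,647), so the old int-based dim0 * dim1 product would overflow, while the size_t version dispatches the full grid and every output element is 1 + 1 = 2.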