mirror of https://github.com/ml-explore/mlx.git
synced 2025-12-16 01:49:05 +08:00

Compare commits: 3e885f583a ... 9a742090ae (3 commits)

| SHA1 |
|---|
| 9a742090ae |
| aca7fac9ef |
| 8b15773206 |
@@ -325,7 +325,15 @@ struct MaxReduce {
   };
 
   template <int N, typename T>
-  T operator()(simd::Simd<T, N> x) {
+  std::enable_if_t<std::is_integral_v<T>, T> operator()(simd::Simd<T, N> x) {
+    return simd::max(x);
+  };
+
+  template <int N, typename T>
+  std::enable_if_t<!std::is_integral_v<T>, T> operator()(simd::Simd<T, N> x) {
+    if (simd::any(x != x)) {
+      return static_cast<T>(NAN);
+    }
     return simd::max(x);
   };
 };
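This hunk splits `MaxReduce::operator()` into two SFINAE overloads: integral types keep the plain SIMD max, while floating-point types first check `simd::any(x != x)` and return NaN when any lane is NaN, since NaN is the only value that compares unequal to itself. A minimal Python sketch of the intended user-visible behavior, assuming the change makes the CPU `max` reduction propagate NaN the way NumPy does:

```python
# Hedged sketch: the NaN-propagation semantics this hunk targets.
import numpy as np
import mlx.core as mx

x = mx.array([1.0, float("nan"), 3.0])

# NumPy's max propagates NaN; after this change the CPU backend should match it.
print(np.max(np.array(x)))   # nan
print(mx.max(x))             # expected: array(nan, dtype=float32)

# The NaN test used in the C++ overload, expressed in Python:
# NaN never equals itself, so `x != x` marks exactly the NaN entries.
print(x != x)                # array([False, True, False], dtype=bool)
```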
@@ -527,10 +535,10 @@ void Reduce::eval_cpu(const std::vector<array>& inputs, array& out) {
       reduce_dispatch_min_max<uint64_t>(in, out, reduce_type_, axes_);
       break;
     case int8:
-      reduce_dispatch_min_max<uint8_t>(in, out, reduce_type_, axes_);
+      reduce_dispatch_min_max<int8_t>(in, out, reduce_type_, axes_);
       break;
     case int16:
-      reduce_dispatch_min_max<uint16_t>(in, out, reduce_type_, axes_);
+      reduce_dispatch_min_max<int16_t>(in, out, reduce_type_, axes_);
       break;
     case int32:
       reduce_dispatch_min_max<int32_t>(in, out, reduce_type_, axes_);
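The second hunk routes the `int8` and `int16` cases through the signed `int8_t`/`int16_t` dispatch instead of their unsigned counterparts. Reinterpreting signed lanes as unsigned orders negative values above positive ones, so a min/max reduction could pick the wrong element. A small illustrative sketch (the printed results assume the corrected signed dispatch):

```python
# Hedged sketch: the ordering problem an unsigned dispatch would cause for int8 data.
import numpy as np
import mlx.core as mx

x = mx.array([-3, -1, 7, -128], dtype=mx.int8)

# Correct signed results: max is 7, min is -128.
print(mx.max(x), mx.min(x))

# Reinterpreted as uint8, -1 becomes 255 and -128 becomes 128, so an unsigned
# comparison would rank the negative values above 7.
print(np.array(x).view(np.uint8))   # [253 255   7 128]
```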
@@ -3,6 +3,8 @@ cuda_skip = {
     "TestLayers.test_quantized_embedding",
     "TestOps.test_dynamic_slicing",
     "TestReduce.test_dtypes",
+    "TestReduce.test_nanpropagation",
+    "TestReduce.test_nanpropagation_complex64",
     # Block masked matmul NYI
     "TestBlas.test_block_masked_matmul",
     # Gather matmul NYI
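The two new NaN-propagation tests are added to the CUDA skip list alongside the other tests the CUDA backend does not cover yet. Purely as an illustration (this is not mlx's actual test harness), a name-based skip set in this `Class.method` format could be applied to a `unittest` suite like so:

```python
# Hypothetical sketch: filtering a unittest suite with a "Class.method" skip set.
import unittest

cuda_skip = {
    "TestReduce.test_nanpropagation",
    "TestReduce.test_nanpropagation_complex64",
}

def filter_suite(suite, skips):
    """Return a new suite without tests whose 'Class.method' name is in skips."""
    kept = unittest.TestSuite()
    for item in suite:
        if isinstance(item, unittest.TestSuite):
            kept.addTest(filter_suite(item, skips))
        else:
            # item.id() looks like 'module.Class.method'; keep the last two parts.
            name = ".".join(item.id().split(".")[-2:])
            if name not in skips:
                kept.addTest(item)
    return kept

# Example usage:
#   suite = unittest.TestLoader().discover(".")
#   unittest.TextTestRunner().run(filter_suite(suite, cuda_skip))
```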
@@ -167,10 +167,10 @@ class TestReduce(mlx_tests.MLXTestCase):
 
         for dtype in dtypes:
             with self.subTest(dtype=dtype):
-                x = (mx.random.normal((4, 4))).astype(getattr(mx, dtype))
+                x = (mx.random.normal((4, 4)) * 10).astype(getattr(mx, dtype))
                 indices = mx.random.randint(0, 4, shape=(6,)).reshape(3, 2)
                 for idx in indices:
-                    x[*idx] = mx.nan
+                    x[idx[0], idx[1]] = mx.nan
                 x_np = np.array(x)
 
                 for op in ["max"]:
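The test tweaks are small: scaling the standard-normal samples by 10 presumably keeps distinguishable nonzero values after casting to integer dtypes, and `x[idx[0], idx[1]]` replaces `x[*idx]`, whose starred-subscript syntax requires Python 3.11 or newer. A hedged sketch mirroring the amended loop body for a single dtype:

```python
# Hedged sketch of the amended test body (single dtype; the real test loops over several).
import numpy as np
import mlx.core as mx

dtype = "float32"
x = (mx.random.normal((4, 4)) * 10).astype(getattr(mx, dtype))
indices = mx.random.randint(0, 4, shape=(6,)).reshape(3, 2)
for idx in indices:
    x[idx[0], idx[1]] = mx.nan   # plant NaNs at a few positions
x_np = np.array(x)

# On a build that includes this change, max should propagate NaN like NumPy does.
assert bool(mx.isnan(mx.max(x))) == bool(np.isnan(np.max(x_np)))
```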