std and expm1 (#973)

* std and expm1

* actually add expm1

* fix linux

* fix vjp

* relax tol for linux test

* Add it to the compilable primitives

---------

Co-authored-by: Angelos Katharopoulos <a_katharopoulos@apple.com>
Awni Hannun
2024-04-08 14:26:01 -07:00
committed by GitHub
parent 76e63212ff
commit 42afe27e12
19 changed files with 332 additions and 6 deletions


@@ -772,6 +772,25 @@ void init_ops(nb::module_& m) {
        Returns:
            array: The exponential of ``a``.
      )pbdoc");
  m.def(
      "expm1",
      &mlx::core::expm1,
      nb::arg(),
      nb::kw_only(),
      "stream"_a = nb::none(),
      nb::sig(
          "def expm1(a: array, /, *, stream: Union[None, Stream, Device] = None) -> array"),
      R"pbdoc(
        Element-wise exponential minus 1.

        Computes ``exp(x) - 1`` with greater precision for small ``x``.

        Args:
            a (array): Input array.

        Returns:
            array: The exponential minus 1 of ``a``.
      )pbdoc");
  m.def(
      "erf",
      &mlx::core::erf,
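
As a quick illustration of the new Python API (not part of the diff; a minimal sketch assuming the usual import mlx.core as mx):

    import mlx.core as mx

    a = mx.array([0.0, 0.5, -0.5, 5.0])
    y = mx.expm1(a)       # element-wise exp(a) - 1
    z = mx.exp(a) - 1.0   # agrees for moderate inputs, but loses precision near zero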
@@ -2150,6 +2169,40 @@ void init_ops(nb::module_& m) {
        Returns:
            array: The output array of variances.
      )pbdoc");
  m.def(
      "std",
      [](const array& a,
         const IntOrVec& axis,
         bool keepdims,
         int ddof,
         StreamOrDevice s) {
        return mlx::core::std(
            a, get_reduce_axes(axis, a.ndim()), keepdims, ddof, s);
      },
      nb::arg(),
      "axis"_a = nb::none(),
      "keepdims"_a = false,
      "ddof"_a = 0,
      nb::kw_only(),
      "stream"_a = nb::none(),
      nb::sig(
          "def std(a: array, /, axis: Union[None, int, Sequence[int]] = None, keepdims: bool = False, ddof: int = 0, *, stream: Union[None, Stream, Device] = None) -> array"),
      R"pbdoc(
        Compute the standard deviation(s) over the given axes.

        Args:
            a (array): Input array.
            axis (int or list(int), optional): Optional axis or
              axes to reduce over. If unspecified this defaults
              to reducing over the entire array.
            keepdims (bool, optional): Keep reduced axes as
              singleton dimensions, defaults to ``False``.
            ddof (int, optional): The divisor to compute the standard
              deviation is ``N - ddof``, defaults to 0.

        Returns:
            array: The output array of standard deviations.
      )pbdoc");
  m.def(
      "split",
      [](const array& a,
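
A short usage sketch for the new std binding (illustrative only, following the signature above and assuming import mlx.core as mx):

    import mlx.core as mx

    x = mx.random.uniform(shape=(4, 3))
    s_all = mx.std(x)                          # reduce over the entire array
    s_col = mx.std(x, axis=0)                  # per-column standard deviation
    s_unb = mx.std(x, axis=0, ddof=1)          # divisor is N - ddof = N - 1
    s_keep = mx.std(x, axis=1, keepdims=True)  # result has shape (4, 1)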


@@ -725,6 +725,11 @@ class TestOps(mlx_tests.MLXTestCase):
        out = mx.var(x, ddof=3)
        self.assertEqual(out.item(), float("inf"))

    def test_std(self):
        x = mx.random.uniform(shape=(5, 5))
        x_np = np.array(x)
        self.assertAlmostEqual(mx.std(x).item(), x_np.std().item(), places=6)

    def test_abs(self):
        a = mx.array([-1.0, 1.0, -2.0, 3.0])
        result = mx.abs(a)
@@ -839,6 +844,13 @@ class TestOps(mlx_tests.MLXTestCase):
        self.assertTrue(np.allclose(result, expected))

    def test_expm1(self):
        a = mx.array([0, 0.5, -0.5, 5])
        result = mx.expm1(a)
        expected = np.expm1(a, dtype=np.float32)
        self.assertTrue(np.allclose(result, expected, rtol=1e-5, atol=1e-5))

    def test_erf(self):
        inputs = [-5, 0.0, 0.5, 1.0, 2.0, 10.0]
        x = mx.array(inputs)
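
As an aside on why expm1 is worth a dedicated op (an illustrative sketch, not part of this test file): in float32, exp(x) rounds to 1.0 for very small x, so the naive difference collapses to zero while expm1 retains the leading term:

    import mlx.core as mx

    tiny = mx.array([1e-10], dtype=mx.float32)
    naive = mx.exp(tiny) - 1.0   # exp(1e-10) rounds to 1.0 in float32, so this is 0.0
    better = mx.expm1(tiny)      # approximately 1e-10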