diff --git a/docs/build/html/.buildinfo b/docs/build/html/.buildinfo
index 763d8a0c2..1062ede52 100644
--- a/docs/build/html/.buildinfo
+++ b/docs/build/html/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 2f6ec3ca18b17f6d0ad6bba5887f704c
+config: 8e5a9f3fa6ac8cf3fa3d36c262106632
tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/build/html/_sources/dev/extensions.rst b/docs/build/html/_sources/dev/extensions.rst
index b8c3a4995..2aef28f99 100644
--- a/docs/build/html/_sources/dev/extensions.rst
+++ b/docs/build/html/_sources/dev/extensions.rst
@@ -93,9 +93,9 @@ Primitives
^^^^^^^^^^^
A :class:`Primitive` is part of the computation graph of an :class:`array`. It
-defines how to create outputs arrays given a input arrays. Further, a
+defines how to create output arrays given input arrays. Further, a
:class:`Primitive` has methods to run on the CPU or GPU and for function
-transformations such as ``vjp`` and ``jvp``. Lets go back to our example to be
+transformations such as ``vjp`` and ``jvp``. Let's go back to our example to be
more concrete:
.. code-block:: C++
@@ -128,7 +128,7 @@ more concrete:
/** The vector-Jacobian product. */
   std::vector<array> vjp(
       const std::vector<array>& primals,
-      const array& cotan,
+      const std::vector<array>& cotangents,
       const std::vector<int>& argnums,
       const std::vector<array>& outputs) override;
@@ -469,7 +469,7 @@ one we just defined:
       const std::vector<array>& tangents,
       const std::vector<int>& argnums) {
// Forward mode diff that pushes along the tangents
- // The jvp transform on the primitive can built with ops
+ // The jvp transform on the primitive can be built with ops
// that are scheduled on the same stream as the primitive
// If argnums = {0}, we only push along x in which case the
@@ -481,7 +481,7 @@ one we just defined:
auto scale_arr = array(scale, tangents[0].dtype());
return {multiply(scale_arr, tangents[0], stream())};
}
- // If, argnums = {0, 1}, we take contributions from both
+ // If argnums = {0, 1}, we take contributions from both
// which gives us jvp = tangent_x * alpha + tangent_y * beta
else {
return {axpby(tangents[0], tangents[1], alpha_, beta_, stream())};
@@ -735,7 +735,7 @@ Let's look at a simple script and its results:
print(f"c shape: {c.shape}")
print(f"c dtype: {c.dtype}")
- print(f"c correct: {mx.all(c == 6.0).item()}")
+ print(f"c is correct: {mx.all(c == 6.0).item()}")
Output:
@@ -743,7 +743,7 @@ Output:
c shape: [3, 4]
c dtype: float32
- c correctness: True
+ c is correct: True
Results
^^^^^^^
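The signature change above (the single ``cotan`` array becoming a vector of ``cotangents``) mirrors the convention already used by the Python-level transforms. As a rough, hedged sketch of that convention using ``mx.vjp`` and ``mx.jvp`` on an ordinary Python function rather than the custom ``axpby`` extension (the function ``f`` below is a stand-in, not part of this diff):

.. code-block:: python

   import mlx.core as mx

   def f(x, y):
       # Stand-in for the extension op: 2 * x + 3 * y
       return 2.0 * x + 3.0 * y

   primals = [mx.ones((3,)), mx.ones((3,))]

   # Reverse mode: cotangents are passed as a list with one entry per
   # output, matching the `std::vector<array>& cotangents` argument above.
   outputs, vjps = mx.vjp(f, primals, [mx.ones((3,))])

   # Forward mode: tangents are passed as a list with one entry per primal.
   outputs, jvps = mx.jvp(f, primals, [mx.ones((3,)), mx.ones((3,))])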
diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.async_eval.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.async_eval.rst
new file mode 100644
index 000000000..f44f1c49e
--- /dev/null
+++ b/docs/build/html/_sources/python/_autosummary/mlx.core.async_eval.rst
@@ -0,0 +1,6 @@
+mlx.core.async\_eval
+====================
+
+.. currentmodule:: mlx.core
+
+.. autofunction:: async_eval
\ No newline at end of file
diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.broadcast_arrays.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.broadcast_arrays.rst
new file mode 100644
index 000000000..5482dafe5
--- /dev/null
+++ b/docs/build/html/_sources/python/_autosummary/mlx.core.broadcast_arrays.rst
@@ -0,0 +1,6 @@
+mlx.core.broadcast\_arrays
+==========================
+
+.. currentmodule:: mlx.core
+
+.. autofunction:: broadcast_arrays
\ No newline at end of file
diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.contiguous.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.contiguous.rst
new file mode 100644
index 000000000..5b06a91ac
--- /dev/null
+++ b/docs/build/html/_sources/python/_autosummary/mlx.core.contiguous.rst
@@ -0,0 +1,6 @@
+mlx.core.contiguous
+===================
+
+.. currentmodule:: mlx.core
+
+.. autofunction:: contiguous
\ No newline at end of file
diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.pinv.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.pinv.rst
new file mode 100644
index 000000000..5b6af868d
--- /dev/null
+++ b/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.pinv.rst
@@ -0,0 +1,6 @@
+mlx.core.linalg.pinv
+====================
+
+.. currentmodule:: mlx.core.linalg
+
+.. autofunction:: pinv
\ No newline at end of file
diff --git a/docs/build/html/_sources/python/linalg.rst b/docs/build/html/_sources/python/linalg.rst
index 769f4bbb1..b01f74117 100644
--- a/docs/build/html/_sources/python/linalg.rst
+++ b/docs/build/html/_sources/python/linalg.rst
@@ -20,5 +20,6 @@ Linear Algebra
eigh
lu
lu_factor
+ pinv
solve
solve_triangular
diff --git a/docs/build/html/_sources/python/ops.rst b/docs/build/html/_sources/python/ops.rst
index c0d098b21..66c5764ed 100644
--- a/docs/build/html/_sources/python/ops.rst
+++ b/docs/build/html/_sources/python/ops.rst
@@ -36,10 +36,12 @@ Operations
bitwise_or
bitwise_xor
block_masked_mm
+ broadcast_arrays
broadcast_to
ceil
clip
concatenate
+ contiguous
conj
conjugate
convolve
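``broadcast_arrays`` and ``contiguous`` are new entries in this table and are not demonstrated elsewhere in the diff. Below is a minimal sketch of the expected NumPy-style behaviour of ``broadcast_arrays``; the exact Python signature is an assumption here, inferred from ``np.broadcast_arrays``:

.. code-block:: python

   import mlx.core as mx

   a = mx.zeros((3, 1))
   b = mx.arange(4)  # shape (4,)

   # Broadcast both inputs against each other; under the assumed
   # semantics both results have shape (3, 4).
   a_b, b_b = mx.broadcast_arrays(a, b)
   print(a_b.shape, b_b.shape)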
diff --git a/docs/build/html/_sources/python/transforms.rst b/docs/build/html/_sources/python/transforms.rst
index fbdfd4f08..23f86720b 100644
--- a/docs/build/html/_sources/python/transforms.rst
+++ b/docs/build/html/_sources/python/transforms.rst
@@ -9,6 +9,7 @@ Transforms
:toctree: _autosummary
eval
+ async_eval
compile
custom_function
disable_compile
diff --git a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js
index 77faecbeb..ae734fbf9 100644
--- a/docs/build/html/_static/documentation_options.js
+++ b/docs/build/html/_static/documentation_options.js
@@ -1,5 +1,5 @@
const DOCUMENTATION_OPTIONS = {
- VERSION: '0.24.1',
+ VERSION: '0.24.2',
LANGUAGE: 'en',
COLLAPSE_INDEX: false,
BUILDER: 'html',
diff --git a/docs/build/html/cpp/ops.html b/docs/build/html/cpp/ops.html
index c95d17758..795da9057 100644
--- a/docs/build/html/cpp/ops.html
+++ b/docs/build/html/cpp/ops.html
@@ -8,7 +8,7 @@
- Operations — MLX 0.24.1 documentation
+ Operations — MLX 0.24.2 documentation
@@ -291,10 +291,12 @@
A Primitive is part of the computation graph of an array. It
-defines how to create outputs arrays given a input arrays. Further, a
+defines how to create output arrays given input arrays. Further, a
Primitive has methods to run on the CPU or GPU and for function
-transformations such as vjp and jvp. Lets go back to our example to be
+transformations such as vjp and jvp. Let’s go back to our example to be
more concrete:
class Axpby : public Primitive {
 public:
@@ -1040,7 +1044,7 @@ more concrete:
   /** The vector-Jacobian product. */
   std::vector<array> vjp(
       const std::vector<array>& primals,
-      const array& cotan,
+      const std::vector<array>& cotangents,
       const std::vector<int>& argnums,
       const std::vector<array>& outputs) override;
@@ -1360,7 +1364,7 @@ one we just defined:
      const std::vector<array>& tangents,
      const std::vector<int>& argnums) {
    // Forward mode diff that pushes along the tangents
-   // The jvp transform on the primitive can built with ops
+   // The jvp transform on the primitive can be built with ops
    // that are scheduled on the same stream as the primitive
    // If argnums = {0}, we only push along x in which case the
@@ -1372,7 +1376,7 @@ one we just defined:
      auto scale_arr = array(scale, tangents[0].dtype());
      return {multiply(scale_arr, tangents[0], stream())};
    }
-   // If, argnums = {0, 1}, we take contributions from both
+   // If argnums = {0, 1}, we take contributions from both
    // which gives us jvp = tangent_x * alpha + tangent_y * beta
    else {
      return {axpby(tangents[0], tangents[1], alpha_, beta_, stream())};
@@ -1608,13 +1612,13 @@ import the Python package and play with it as you would any other MLX operation.
print(f"c shape: {c.shape}")print(f"c dtype: {c.dtype}")
-print(f"c correct: {mx.all(c==6.0).item()}")
+print(f"c is correct: {mx.all(c==6.0).item()}")
Rendered reference entry added for ``mlx.core.async_eval``:

  Asynchronously evaluate an array or tree of arrays.

  Note: This is an experimental API and may change in future versions.

  Parameters:
      *args (arrays or trees of arrays) – Each argument can be a single array
      or a tree of arrays. If a tree is given the nodes can be a Python
      list, tuple or dict. Leaves which are not arrays are ignored.
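A hedged sketch of how ``async_eval`` is typically used to overlap evaluation of one step with graph construction for the next; the ``step`` function and its inputs below are placeholders, not part of this diff, and the API is experimental as noted above:

.. code-block:: python

   import mlx.core as mx

   def step(x):
       # Placeholder for a real computation, e.g. a model forward pass.
       return mx.exp(x) * 2.0

   xs = [mx.random.normal((1024,)) for _ in range(4)]

   outputs = step(xs[0])
   for x in xs[1:]:
       # Start evaluating the previous result without blocking ...
       mx.async_eval(outputs)
       # ... build the next step's graph while it runs ...
       next_outputs = step(x)
       # ... then block only when the previous result is actually needed.
       mx.eval(outputs)
       outputs = next_outputs
   mx.eval(outputs)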
Rendered reference entry added for ``mlx.core.linalg.pinv``:

  Compute the (Moore-Penrose) pseudo-inverse of a matrix.

  This function calculates a generalized inverse of a matrix using its
  singular-value decomposition. It supports arrays with at least 2 dimensions:
  when the input has more than two dimensions, the inverse is computed for
  each matrix in the last two dimensions of ``a``.
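To make the pseudo-inverse entry concrete, a small usage sketch; it assumes ``mx.linalg.pinv`` accepts a ``stream`` keyword like the other ``mlx.core.linalg`` routines, and the CPU stream is passed explicitly since several linalg routines currently require it:

.. code-block:: python

   import mlx.core as mx

   a = mx.random.normal((5, 3))  # a non-square matrix
   a_pinv = mx.linalg.pinv(a, stream=mx.cpu)

   # Defining Moore-Penrose property: a @ pinv(a) @ a == a, up to
   # floating point error.
   err = mx.abs(a @ a_pinv @ a - a).max()
   print(err.item())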