Mirror of https://github.com/ml-explore/mlx.git, synced 2025-09-17 17:28:10 +08:00

docs update

committed by CircleCI Docs
parent 3e724a7c98
commit c620a28b16
docs/build/html/_sources/dev/extensions.rst (vendored): 16 changes
@@ -1,5 +1,5 @@
-Developer Documentation
-=======================
+Custom Extensions in MLX
+========================
 
 You can extend MLX with custom operations on the CPU or GPU. This guide
 explains how to do that with a simple example.
@@ -494,7 +494,7 @@ below.
   auto kernel = d.get_kernel(kname.str(), "mlx_ext");
 
   // Prepare to encode kernel
-  auto compute_encoder = d.get_command_encoder(s.index);
+  auto& compute_encoder = d.get_command_encoder(s.index);
   compute_encoder->setComputePipelineState(kernel);
 
   // Kernel parameters are registered with buffer indices corresponding to
@@ -503,11 +503,11 @@ below.
   size_t nelem = out.size();
 
   // Encode input arrays to kernel
-  set_array_buffer(compute_encoder, x, 0);
-  set_array_buffer(compute_encoder, y, 1);
+  compute_encoder.set_input_array(x, 0);
+  compute_encoder.set_input_array(y, 1);
 
   // Encode output arrays to kernel
-  set_array_buffer(compute_encoder, out, 2);
+  compute_encoder.set_output_array(out, 2);
 
   // Encode alpha and beta
   compute_encoder->setBytes(&alpha_, sizeof(float), 3);
@@ -531,7 +531,7 @@ below.
 
   // Launch the grid with the given number of threads divided among
   // the given threadgroups
-  compute_encoder->dispatchThreads(grid_dims, group_dims);
+  compute_encoder.dispatchThreads(grid_dims, group_dims);
 }
 
 We can now call the :meth:`axpby` operation on both the CPU and the GPU!
@@ -825,7 +825,7 @@ Let's look at a simple script and its results:
 
   print(f"c shape: {c.shape}")
  print(f"c dtype: {c.dtype}")
-  print(f"c correctness: {mx.all(c == 6.0).item()}")
+  print(f"c correct: {mx.all(c == 6.0).item()}")
 
 Output:
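The script excerpted in that last hunk corresponds to end-to-end usage roughly like the sketch below. The import path mlx_sample_extensions and the axpby(x, y, alpha, beta) signature follow the extensions tutorial and are assumptions here, not something this diff shows.

.. code-block:: python

   import mlx.core as mx
   # Assumed package name from the extensions tutorial; the extension must be built first.
   from mlx_sample_extensions import axpby

   a = mx.ones((3, 4))
   b = mx.ones((3, 4))

   # axpby computes alpha * x + beta * y, so 4 * 1 + 2 * 1 == 6 everywhere
   c = axpby(a, b, 4.0, 2.0, stream=mx.cpu)

   print(f"c shape: {c.shape}")
   print(f"c dtype: {c.dtype}")
   print(f"c correct: {mx.all(c == 6.0).item()}")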
docs/build/html/_sources/install.rst (vendored): 22 changes
@@ -153,6 +153,8 @@ should point to the path to the built metal library.
      - OFF
    * - MLX_BUILD_METAL
      - ON
+   * - MLX_BUILD_CPU
+     - ON
    * - MLX_BUILD_PYTHON_BINDINGS
      - OFF
    * - MLX_METAL_DEBUG
@@ -179,10 +181,28 @@ should point to the path to the built metal library.
 
     xcrun -sdk macosx --show-sdk-version
 
+Binary Size Minimization
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+To produce a smaller binary use the CMake flags `CMAKE_BUILD_TYPE=MinSizeRel`
+and `BUILD_SHARED_LIBS=ON`.
+
+The MLX CMake build has several additional options to make smaller binaries.
+For example, if you don't need the CPU backend or support for safetensors and
+GGUF, you can do:
+
+.. code-block:: shell
+
+   cmake .. \
+     -DCMAKE_BUILD_TYPE=MinSizeRel \
+     -DBUILD_SHARED_LIBS=ON \
+     -DMLX_BUILD_CPU=OFF \
+     -DMLX_BUILD_SAFETENSORS=OFF \
+     -DMLX_BUILD_GGUF=OFF
+
 Troubleshooting
 ^^^^^^^^^^^^^^^
 
 
 Metal not found
 ~~~~~~~~~~~~~~~
docs/build/html/_sources/python/_autosummary/mlx.core.addmm.rst (new file, vendored): 6 additions
@@ -0,0 +1,6 @@
+mlx.core.addmm
+==============
+
+.. currentmodule:: mlx.core
+
+.. autofunction:: addmm
docs/build/html/_sources/python/_autosummary/mlx.core.as_strided.rst (new file, vendored): 6 additions
@@ -0,0 +1,6 @@
+mlx.core.as\_strided
+====================
+
+.. currentmodule:: mlx.core
+
+.. autofunction:: as_strided
docs/build/html/_sources/python/_autosummary/mlx.core.linalg.cholesky.rst (new file, vendored): 6 additions
@@ -0,0 +1,6 @@
+mlx.core.linalg.cholesky
+========================
+
+.. currentmodule:: mlx.core.linalg
+
+.. autofunction:: cholesky
docs/build/html/_sources/python/_autosummary/mlx.core.linalg.inv.rst (new file, vendored): 6 additions
@@ -0,0 +1,6 @@
+mlx.core.linalg.inv
+===================
+
+.. currentmodule:: mlx.core.linalg
+
+.. autofunction:: inv
docs/build/html/_sources/python/_autosummary/mlx.core.linalg.svd.rst (new file, vendored): 6 additions
@@ -0,0 +1,6 @@
+mlx.core.linalg.svd
+===================
+
+.. currentmodule:: mlx.core.linalg
+
+.. autofunction:: svd
docs/build/html/_sources/python/_autosummary/mlx.core.power.rst (new file, vendored): 6 additions
@@ -0,0 +1,6 @@
+mlx.core.power
+==============
+
+.. currentmodule:: mlx.core
+
+.. autofunction:: power
docs/build/html/_sources/python/_autosummary/mlx.core.remainder.rst (new file, vendored): 6 additions
@@ -0,0 +1,6 @@
+mlx.core.remainder
+==================
+
+.. currentmodule:: mlx.core
+
+.. autofunction:: remainder
docs/build/html/_sources/python/linalg.rst (vendored): 3 changes
@@ -8,5 +8,8 @@ Linear Algebra
 .. autosummary::
    :toctree: _autosummary
 
+   inv
    norm
+   cholesky
    qr
+   svd
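The three routines added to this toctree can be exercised together. The following is a minimal sketch, assuming mlx.core.linalg exposes inv, cholesky, and svd with NumPy-like behavior; the explicit stream=mx.cpu is a precaution, since some MLX versions implement these ops only on the CPU.

.. code-block:: python

   import mlx.core as mx

   # A small symmetric positive-definite matrix
   a = mx.array([[4.0, 2.0], [2.0, 3.0]])

   # Inverse: a @ a_inv should be close to the identity
   a_inv = mx.linalg.inv(a, stream=mx.cpu)
   print(mx.allclose(a @ a_inv, mx.eye(2), atol=1e-5).item())

   # Cholesky factor (lower triangular by default): a == L @ L.T
   L = mx.linalg.cholesky(a, stream=mx.cpu)
   print(mx.allclose(L @ L.T, a, atol=1e-5).item())

   # Singular value decomposition: a == U @ diag(S) @ Vt
   u, s, vt = mx.linalg.svd(a, stream=mx.cpu)
   print(mx.allclose(u @ mx.diag(s) @ vt, a, atol=1e-5).item())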
docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Conv3d.rst (new file, vendored): 16 additions
@@ -0,0 +1,16 @@
+mlx.nn.Conv3d
+=============
+
+.. currentmodule:: mlx.nn
+
+.. autoclass:: Conv3d
+
+
+
+
+   .. rubric:: Methods
+
+   .. autosummary::
+
+
+
docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Embedding.rst (vendored)
@@ -13,5 +13,6 @@
    .. autosummary::
 
       ~Embedding.as_linear
+      ~Embedding.to_quantized
 
 
docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Linear.rst (vendored)
@@ -12,5 +12,6 @@
 
    .. autosummary::
 
+      ~Linear.to_quantized
 
 
docs/build/html/_sources/python/nn/layers.rst (vendored)
@@ -15,6 +15,7 @@ Layers
    BatchNorm
    Conv1d
    Conv2d
+   Conv3d
    Dropout
    Dropout2d
    Dropout3d
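A minimal sketch of the newly documented Conv3d layer and the to_quantized helpers listed above. The shapes, group_size, and bits values are illustrative, and the channels-last (N, D, H, W, C) input layout and the to_quantized(group_size, bits) signature are assumptions based on the rest of the mlx.nn API.

.. code-block:: python

   import mlx.core as mx
   import mlx.nn as nn

   # 3D convolution: 4 input channels, 8 output channels, 3x3x3 kernel.
   # Input layout assumed channels-last: (batch, depth, height, width, channels).
   conv = nn.Conv3d(4, 8, 3)
   x = mx.random.normal((2, 10, 10, 10, 4))
   print(conv(x).shape)  # expected (2, 8, 8, 8, 8) with stride 1 and no padding

   # to_quantized swaps a layer for its quantized counterpart
   linear = nn.Linear(64, 64)
   qlinear = linear.to_quantized(group_size=64, bits=4)
   print(type(qlinear).__name__)  # expected: QuantizedLinear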
docs/build/html/_sources/python/ops.rst (vendored): 5 changes
@@ -10,6 +10,7 @@ Operations
 
    abs
    add
+   addmm
    all
    allclose
    any
@@ -26,6 +27,7 @@ Operations
    argpartition
    argsort
    array_equal
+   as_strided
    atleast_1d
    atleast_2d
    atleast_3d
@@ -76,6 +78,7 @@ Operations
    isnan
    isneginf
    isposinf
+   issubdtype
    left_shift
    less
    less_equal
@@ -106,11 +109,13 @@ Operations
    outer
    partition
    pad
+   power
    prod
    quantize
    quantized_matmul
    radians
    reciprocal
+   remainder
    repeat
    reshape
    right_shift
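A short sketch of a few of the operations added to this listing. It assumes addmm computes alpha * (a @ b) + beta * c and that power and remainder are elementwise, per their MLX docstrings.

.. code-block:: python

   import mlx.core as mx

   a = mx.ones((2, 3))
   b = mx.ones((3, 4))
   c = mx.zeros((2, 4))

   # addmm fuses the matmul and the scaled addition: alpha * (a @ b) + beta * c
   d = mx.addmm(c, a, b, alpha=2.0, beta=1.0)
   print(mx.all(d == 6.0).item())  # each entry is 2 * 3 + 0

   # Elementwise power and remainder
   print(mx.power(mx.array([2, 3]), mx.array([3, 2])))      # expected [8, 9]
   print(mx.remainder(mx.array([7, 8]), mx.array([3, 3])))  # expected [1, 2]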