mirror of https://github.com/ml-explore/mlx.git
angelos's commit files
docs/.clang-format (new file, 2 lines)
@@ -0,0 +1,2 @@
DisableFormat: true
SortIncludes: Never
docs/Makefile (new file, 18 lines)
@@ -0,0 +1,18 @@
# Minimal makefile for Sphinx documentation

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = src
BUILDDIR = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
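
With this Makefile, running ``make html`` from the ``docs`` directory should route the ``html`` target through the catch-all rule, i.e. invoke ``sphinx-build -M html src build`` and place the rendered pages under ``build/html`` (assuming Sphinx is installed).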
docs/src/examples/mlp.rst (new file, 131 lines)
@@ -0,0 +1,131 @@
.. _mlp:

Multi-Layer Perceptron
----------------------

In this example we'll learn to use ``mlx.nn`` by implementing a simple
multi-layer perceptron to classify MNIST.

As a first step import the MLX packages we need:

.. code-block:: python

    import mlx.core as mx
    import mlx.nn as nn
    import mlx.optimizers as optim

    import numpy as np


The model is defined as the ``MLP`` class which inherits from
:class:`mlx.nn.Module`. We follow the standard idiom to make a new module:

1. Define an ``__init__`` where the parameters and/or submodules are set up. See
   the :ref:`Module class docs<module_class>` for more information on how
   :class:`mlx.nn.Module` registers parameters.
2. Define a ``__call__`` where the computation is implemented.

.. code-block:: python

    class MLP(nn.Module):
        def __init__(
            self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int
        ):
            super().__init__()
            layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim]
            self.layers = [
                nn.Linear(idim, odim)
                for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:])
            ]

        def __call__(self, x):
            for l in self.layers[:-1]:
                x = mx.maximum(l(x), 0.0)
            return self.layers[-1](x)
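
As a quick sanity check (a sketch added here, not part of the original
example), we can instantiate a small model and confirm the output shape:

.. code-block:: python

    # A tiny MLP mapping 4 input features to 3 outputs for a batch of 5.
    mlp = MLP(num_layers=2, input_dim=4, hidden_dim=8, output_dim=3)
    out = mlp(mx.random.normal((5, 4)))
    print(out.shape)  # 5 examples, 3 outputs per example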

We define the loss function which takes the mean of the per-example cross
entropy loss. The ``mlx.nn.losses`` sub-package has implementations of some
commonly used loss functions.

.. code-block:: python

    def loss_fn(model, X, y):
        return mx.mean(nn.losses.cross_entropy(model(X), y))
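
To make the expected shapes concrete, here is a small sketch (added for
illustration, with made-up values): the logits have one row per example and
one column per class, and the targets are integer class indices.

.. code-block:: python

    logits = mx.zeros((4, 10))          # batch of 4, 10 classes
    targets = mx.array([1, 3, 5, 7])    # one class index per example
    per_example = nn.losses.cross_entropy(logits, targets)
    print(per_example.shape)            # one loss value per example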

We also need a function to compute the accuracy of the model on the validation
set:

.. code-block:: python

    def eval_fn(model, X, y):
        return mx.mean(mx.argmax(model(X), axis=1) == y)

Next, set up the problem parameters and load the data:

.. code-block:: python

    num_layers = 2
    hidden_dim = 32
    num_classes = 10
    batch_size = 256
    num_epochs = 10
    learning_rate = 1e-1

    # Load the data
    import mnist
    train_images, train_labels, test_images, test_labels = map(
        mx.array, mnist.mnist()
    )
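
Note that ``mnist`` here is the small data-loading helper that ships with the
linked full example rather than a package installed from PyPI; ``mnist.mnist()``
is expected to return the train and test images and labels, which are then
converted to MLX arrays.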

Since we're using SGD, we need an iterator which shuffles and constructs
minibatches of examples in the training set:

.. code-block:: python

    def batch_iterate(batch_size, X, y):
        perm = mx.array(np.random.permutation(y.size))
        for s in range(0, y.size, batch_size):
            ids = perm[s : s + batch_size]
            yield X[ids], y[ids]

Finally, we put it all together by instantiating the model, the
:class:`mlx.optimizers.SGD` optimizer, and running the training loop:

.. code-block:: python

    # Load the model
    model = MLP(num_layers, train_images.shape[-1], hidden_dim, num_classes)
    mx.eval(model.parameters())

    # Get a function which gives the loss and gradient of the
    # loss with respect to the model's trainable parameters
    loss_and_grad_fn = nn.value_and_grad(model, loss_fn)

    # Instantiate the optimizer
    optimizer = optim.SGD(learning_rate=learning_rate)

    for e in range(num_epochs):
        for X, y in batch_iterate(batch_size, train_images, train_labels):
            loss, grads = loss_and_grad_fn(model, X, y)

            # Update the optimizer state and model parameters
            # in a single call
            optimizer.update(model, grads)

            # Force a graph evaluation
            mx.eval(model.parameters(), optimizer.state)

        accuracy = eval_fn(model, test_images, test_labels)
        print(f"Epoch {e}: Test accuracy {accuracy.item():.3f}")

.. note::
   The :func:`mlx.nn.value_and_grad` function is a convenience function to get
   the gradient of a loss with respect to the trainable parameters of a model.
   This should not be confused with :func:`mlx.core.value_and_grad`.
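
To make the distinction concrete, here is a minimal sketch (not part of the
original example) using both variants; ``X`` and ``y`` are a batch as in the
training loop above:

.. code-block:: python

    # mlx.core.value_and_grad differentiates a plain function with respect
    # to its array argument:
    def f(w):
        return mx.sum(w * w)

    w = mx.array([1.0, 2.0, 3.0])
    value, dfdw = mx.value_and_grad(f)(w)  # dfdw has the same shape as w

    # mlx.nn.value_and_grad differentiates with respect to the model's
    # trainable parameters; the gradients come back as a tree of arrays
    # matching model.parameters():
    loss_and_grad_fn = nn.value_and_grad(model, loss_fn)
    loss, grads = loss_and_grad_fn(model, X, y)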

The model should train to a decent accuracy (about 95%) after just a few passes
over the training set. The `full example <https://github.com/ml-explore/mlx-examples/tree/main/mlp>`_
is available in the MLX GitHub repo.
docs/src/python/array.rst (new file, 45 lines)
@@ -0,0 +1,45 @@
.. _array:

Array
=====

.. currentmodule:: mlx.core

.. autosummary::
   :toctree: _autosummary

   array
   array.astype
   array.item
   array.tolist
   array.dtype
   array.ndim
   array.shape
   array.size
   Dtype
   array.abs
   array.all
   array.any
   array.argmax
   array.argmin
   array.cos
   array.dtype
   array.exp
   array.log
   array.log1p
   array.logsumexp
   array.max
   array.mean
   array.min
   array.prod
   array.reciprocal
   array.reshape
   array.rsqrt
   array.sin
   array.split
   array.sqrt
   array.square
   array.sum
   array.transpose
   array.T
   array.var
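
As a quick illustration (a sketch added here, not part of the autosummary
above), a few of these attributes and methods in use:

.. code-block:: python

    import mlx.core as mx

    a = mx.array([[1.0, 2.0], [3.0, 4.0]])
    print(a.shape, a.ndim, a.dtype)      # dimensions and element type
    print(a.sum().item())                # 10.0
    print(a.mean(axis=0))                # column means
    b = a.reshape(4).astype(mx.float16)  # flatten, then cast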
docs/src/python/ops.rst (new file, 94 lines)
@@ -0,0 +1,94 @@
.. _ops:

Operations
==========

.. currentmodule:: mlx.core

.. autosummary::
   :toctree: _autosummary

   abs
   add
   all
   allclose
   any
   arange
   arccos
   arccosh
   arcsin
   arcsinh
   arctan
   arctanh
   argmax
   argmin
   argpartition
   argsort
   array_equal
   broadcast_to
   concatenate
   convolve
   conv1d
   conv2d
   cos
   cosh
   divide
   equal
   erf
   erfinv
   exp
   expand_dims
   full
   greater
   greater_equal
   less
   less_equal
   load
   log
   log2
   log10
   log1p
   logaddexp
   logical_not
   logsumexp
   matmul
   max
   maximum
   mean
   min
   minimum
   multiply
   negative
   ones
   ones_like
   partition
   pad
   prod
   reciprocal
   reshape
   rsqrt
   save
   savez
   savez_compressed
   sigmoid
   sign
   sin
   sinh
   softmax
   sort
   split
   sqrt
   square
   squeeze
   stop_gradient
   subtract
   sum
   take
   take_along_axis
   tan
   tanh
   transpose
   var
   where
   zeros
   zeros_like
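
A short sketch (added here for illustration, not part of the listing above)
using a few of these operations:

.. code-block:: python

    import mlx.core as mx

    x = mx.arange(6.0)                    # 0 through 5
    y = mx.reshape(x, (2, 3))
    print(mx.sum(y, axis=1))              # row sums
    print(mx.maximum(y, mx.array(2.0)))   # elementwise maximum with a scalar
    print(mx.where(y > 2, y, mx.zeros_like(y)))
    print(mx.softmax(y, axis=-1))         # each row sums to one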