docs update

Awni Hannun
2024-05-20 09:40:17 -07:00
committed by CircleCI Docs
parent 3e724a7c98
commit c620a28b16
528 changed files with 17198 additions and 4162 deletions


@@ -8,7 +8,7 @@
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /><meta name="generator" content="Docutils 0.18.1: http://docutils.sourceforge.net/" />
<title>Layers &#8212; MLX 0.13.0 documentation</title>
<title>Layers &#8212; MLX 0.13.1 documentation</title>
@@ -36,7 +36,7 @@
<link rel="preload" as="script" href="../../_static/scripts/pydata-sphinx-theme.js?digest=5b4479735964841361fd" />
<script src="../../_static/vendor/fontawesome/6.1.2/js/all.min.js?digest=5b4479735964841361fd"></script>
<script src="../../_static/documentation_options.js?v=1539091c"></script>
<script src="../../_static/documentation_options.js?v=aec58b01"></script>
<script src="../../_static/doctools.js?v=888ff710"></script>
<script src="../../_static/sphinx_highlight.js?v=dc90522c"></script>
<script src="../../_static/scripts/sphinx-book-theme.js?v=efea14e4"></script>
@@ -131,8 +131,8 @@
<img src="../../_static/mlx_logo.png" class="logo__image only-light" alt="MLX 0.13.0 documentation - Home"/>
<script>document.write(`<img src="../../_static/mlx_logo_dark.png" class="logo__image only-dark" alt="MLX 0.13.0 documentation - Home"/>`);</script>
<img src="../../_static/mlx_logo.png" class="logo__image only-light" alt="MLX 0.13.1 documentation - Home"/>
<script>document.write(`<img src="../../_static/mlx_logo_dark.png" class="logo__image only-dark" alt="MLX 0.13.1 documentation - Home"/>`);</script>
</a></div>
@@ -246,6 +246,7 @@
<li class="toctree-l1 has-children"><a class="reference internal" href="../ops.html">Operations</a><input class="toctree-checkbox" id="toctree-checkbox-4" name="toctree-checkbox-4" type="checkbox"/><label class="toctree-toggle" for="toctree-checkbox-4"><i class="fa-solid fa-chevron-down"></i></label><ul>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.abs.html">mlx.core.abs</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.add.html">mlx.core.add</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.addmm.html">mlx.core.addmm</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.all.html">mlx.core.all</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.allclose.html">mlx.core.allclose</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.any.html">mlx.core.any</a></li>
@@ -262,6 +263,7 @@
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.argpartition.html">mlx.core.argpartition</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.argsort.html">mlx.core.argsort</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.array_equal.html">mlx.core.array_equal</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.as_strided.html">mlx.core.as_strided</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.atleast_1d.html">mlx.core.atleast_1d</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.atleast_2d.html">mlx.core.atleast_2d</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.atleast_3d.html">mlx.core.atleast_3d</a></li>
@@ -312,6 +314,7 @@
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.isnan.html">mlx.core.isnan</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.isneginf.html">mlx.core.isneginf</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.isposinf.html">mlx.core.isposinf</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.issubdtype.html">mlx.core.issubdtype</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.left_shift.html">mlx.core.left_shift</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.less.html">mlx.core.less</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.less_equal.html">mlx.core.less_equal</a></li>
@@ -342,11 +345,13 @@
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.outer.html">mlx.core.outer</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.partition.html">mlx.core.partition</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.pad.html">mlx.core.pad</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.power.html">mlx.core.power</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.prod.html">mlx.core.prod</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.quantize.html">mlx.core.quantize</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.quantized_matmul.html">mlx.core.quantized_matmul</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.radians.html">mlx.core.radians</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.reciprocal.html">mlx.core.reciprocal</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.remainder.html">mlx.core.remainder</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.repeat.html">mlx.core.repeat</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.reshape.html">mlx.core.reshape</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.right_shift.html">mlx.core.right_shift</a></li>
@@ -439,8 +444,11 @@
</ul>
</li>
<li class="toctree-l1 has-children"><a class="reference internal" href="../linalg.html">Linear Algebra</a><input class="toctree-checkbox" id="toctree-checkbox-9" name="toctree-checkbox-9" type="checkbox"/><label class="toctree-toggle" for="toctree-checkbox-9"><i class="fa-solid fa-chevron-down"></i></label><ul>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.linalg.inv.html">mlx.core.linalg.inv</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.linalg.norm.html">mlx.core.linalg.norm</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.linalg.cholesky.html">mlx.core.linalg.cholesky</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.linalg.qr.html">mlx.core.linalg.qr</a></li>
<li class="toctree-l2"><a class="reference internal" href="../_autosummary/mlx.core.linalg.svd.html">mlx.core.linalg.svd</a></li>
</ul>
</li>
<li class="toctree-l1 has-children"><a class="reference internal" href="../metal.html">Metal</a><input class="toctree-checkbox" id="toctree-checkbox-10" name="toctree-checkbox-10" type="checkbox"/><label class="toctree-toggle" for="toctree-checkbox-10"><i class="fa-solid fa-chevron-down"></i></label><ul>
@@ -490,6 +498,7 @@
<li class="toctree-l3"><a class="reference internal" href="_autosummary/mlx.nn.BatchNorm.html">mlx.nn.BatchNorm</a></li>
<li class="toctree-l3"><a class="reference internal" href="_autosummary/mlx.nn.Conv1d.html">mlx.nn.Conv1d</a></li>
<li class="toctree-l3"><a class="reference internal" href="_autosummary/mlx.nn.Conv2d.html">mlx.nn.Conv2d</a></li>
<li class="toctree-l3"><a class="reference internal" href="_autosummary/mlx.nn.Conv3d.html">mlx.nn.Conv3d</a></li>
<li class="toctree-l3"><a class="reference internal" href="_autosummary/mlx.nn.Dropout.html">mlx.nn.Dropout</a></li>
<li class="toctree-l3"><a class="reference internal" href="_autosummary/mlx.nn.Dropout2d.html">mlx.nn.Dropout2d</a></li>
<li class="toctree-l3"><a class="reference internal" href="_autosummary/mlx.nn.Dropout3d.html">mlx.nn.Dropout3d</a></li>
@@ -622,7 +631,7 @@
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Further Reading</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../../dev/extensions.html">Developer Documentation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../dev/extensions.html">Custom Extensions in MLX</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../dev/metal_debugger.html">Metal Debugger</a></li>
</ul>
@@ -811,94 +820,97 @@ document.write(`
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Conv2d.html#mlx.nn.Conv2d" title="mlx.nn.Conv2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Conv2d</span></code></a>(in_channels, out_channels, kernel_size)</p></td>
<td><p>Applies a 2-dimensional convolution over the multi-channel input image.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Dropout.html#mlx.nn.Dropout" title="mlx.nn.Dropout"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Dropout</span></code></a>([p])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Conv3d.html#mlx.nn.Conv3d" title="mlx.nn.Conv3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Conv3d</span></code></a>(in_channels, out_channels, kernel_size)</p></td>
<td><p>Applies a 3-dimensional convolution over the multi-channel input image.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Dropout.html#mlx.nn.Dropout" title="mlx.nn.Dropout"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Dropout</span></code></a>([p])</p></td>
<td><p>Randomly zero a portion of the elements during training.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Dropout2d.html#mlx.nn.Dropout2d" title="mlx.nn.Dropout2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Dropout2d</span></code></a>([p])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Dropout2d.html#mlx.nn.Dropout2d" title="mlx.nn.Dropout2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Dropout2d</span></code></a>([p])</p></td>
<td><p>Apply 2D channel-wise dropout during training.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Dropout3d.html#mlx.nn.Dropout3d" title="mlx.nn.Dropout3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Dropout3d</span></code></a>([p])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Dropout3d.html#mlx.nn.Dropout3d" title="mlx.nn.Dropout3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Dropout3d</span></code></a>([p])</p></td>
<td><p>Apply 3D channel-wise dropout during training.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Embedding.html#mlx.nn.Embedding" title="mlx.nn.Embedding"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Embedding</span></code></a>(num_embeddings, dims)</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Embedding.html#mlx.nn.Embedding" title="mlx.nn.Embedding"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Embedding</span></code></a>(num_embeddings, dims)</p></td>
<td><p>Implements a simple lookup table that maps each input integer to a high-dimensional vector.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.GELU.html#mlx.nn.GELU" title="mlx.nn.GELU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">GELU</span></code></a>([approx])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.GELU.html#mlx.nn.GELU" title="mlx.nn.GELU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">GELU</span></code></a>([approx])</p></td>
<td><p>Applies the Gaussian Error Linear Units.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.GroupNorm.html#mlx.nn.GroupNorm" title="mlx.nn.GroupNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">GroupNorm</span></code></a>(num_groups, dims[, eps, affine, ...])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.GroupNorm.html#mlx.nn.GroupNorm" title="mlx.nn.GroupNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">GroupNorm</span></code></a>(num_groups, dims[, eps, affine, ...])</p></td>
<td><p>Applies Group Normalization [1] to the inputs.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.GRU.html#mlx.nn.GRU" title="mlx.nn.GRU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">GRU</span></code></a>(input_size, hidden_size[, bias])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.GRU.html#mlx.nn.GRU" title="mlx.nn.GRU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">GRU</span></code></a>(input_size, hidden_size[, bias])</p></td>
<td><p>A gated recurrent unit (GRU) RNN layer.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.InstanceNorm.html#mlx.nn.InstanceNorm" title="mlx.nn.InstanceNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">InstanceNorm</span></code></a>(dims[, eps, affine])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.InstanceNorm.html#mlx.nn.InstanceNorm" title="mlx.nn.InstanceNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">InstanceNorm</span></code></a>(dims[, eps, affine])</p></td>
<td><p>Applies instance normalization [1] on the inputs.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.LayerNorm.html#mlx.nn.LayerNorm" title="mlx.nn.LayerNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">LayerNorm</span></code></a>(dims[, eps, affine, bias])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.LayerNorm.html#mlx.nn.LayerNorm" title="mlx.nn.LayerNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">LayerNorm</span></code></a>(dims[, eps, affine, bias])</p></td>
<td><p>Applies layer normalization [1] on the inputs.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Linear.html#mlx.nn.Linear" title="mlx.nn.Linear"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Linear</span></code></a>(input_dims, output_dims[, bias])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Linear.html#mlx.nn.Linear" title="mlx.nn.Linear"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Linear</span></code></a>(input_dims, output_dims[, bias])</p></td>
<td><p>Applies an affine transformation to the input.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.LSTM.html#mlx.nn.LSTM" title="mlx.nn.LSTM"><code class="xref py py-obj docutils literal notranslate"><span class="pre">LSTM</span></code></a>(input_size, hidden_size[, bias])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.LSTM.html#mlx.nn.LSTM" title="mlx.nn.LSTM"><code class="xref py py-obj docutils literal notranslate"><span class="pre">LSTM</span></code></a>(input_size, hidden_size[, bias])</p></td>
<td><p>An LSTM recurrent layer.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.MaxPool1d.html#mlx.nn.MaxPool1d" title="mlx.nn.MaxPool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">MaxPool1d</span></code></a>(kernel_size[, stride, padding])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.MaxPool1d.html#mlx.nn.MaxPool1d" title="mlx.nn.MaxPool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">MaxPool1d</span></code></a>(kernel_size[, stride, padding])</p></td>
<td><p>Applies 1-dimensional max pooling.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.MaxPool2d.html#mlx.nn.MaxPool2d" title="mlx.nn.MaxPool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">MaxPool2d</span></code></a>(kernel_size[, stride, padding])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.MaxPool2d.html#mlx.nn.MaxPool2d" title="mlx.nn.MaxPool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">MaxPool2d</span></code></a>(kernel_size[, stride, padding])</p></td>
<td><p>Applies 2-dimensional max pooling.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Mish.html#mlx.nn.Mish" title="mlx.nn.Mish"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Mish</span></code></a>()</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Mish.html#mlx.nn.Mish" title="mlx.nn.Mish"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Mish</span></code></a>()</p></td>
<td><p>Applies the Mish function, element-wise.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.MultiHeadAttention.html#mlx.nn.MultiHeadAttention" title="mlx.nn.MultiHeadAttention"><code class="xref py py-obj docutils literal notranslate"><span class="pre">MultiHeadAttention</span></code></a>(dims, num_heads[, ...])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.MultiHeadAttention.html#mlx.nn.MultiHeadAttention" title="mlx.nn.MultiHeadAttention"><code class="xref py py-obj docutils literal notranslate"><span class="pre">MultiHeadAttention</span></code></a>(dims, num_heads[, ...])</p></td>
<td><p>Implements the scaled dot product attention with multiple heads.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.PReLU.html#mlx.nn.PReLU" title="mlx.nn.PReLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">PReLU</span></code></a>([num_parameters, init])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.PReLU.html#mlx.nn.PReLU" title="mlx.nn.PReLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">PReLU</span></code></a>([num_parameters, init])</p></td>
<td><p>Applies the element-wise parametric ReLU.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.QuantizedEmbedding.html#mlx.nn.QuantizedEmbedding" title="mlx.nn.QuantizedEmbedding"><code class="xref py py-obj docutils literal notranslate"><span class="pre">QuantizedEmbedding</span></code></a>(num_embeddings, dims[, ...])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.QuantizedEmbedding.html#mlx.nn.QuantizedEmbedding" title="mlx.nn.QuantizedEmbedding"><code class="xref py py-obj docutils literal notranslate"><span class="pre">QuantizedEmbedding</span></code></a>(num_embeddings, dims[, ...])</p></td>
<td><p>The same as <a class="reference internal" href="_autosummary/mlx.nn.Embedding.html#mlx.nn.Embedding" title="mlx.nn.Embedding"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Embedding</span></code></a> but with a quantized weight matrix.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.QuantizedLinear.html#mlx.nn.QuantizedLinear" title="mlx.nn.QuantizedLinear"><code class="xref py py-obj docutils literal notranslate"><span class="pre">QuantizedLinear</span></code></a>(input_dims, output_dims[, ...])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.QuantizedLinear.html#mlx.nn.QuantizedLinear" title="mlx.nn.QuantizedLinear"><code class="xref py py-obj docutils literal notranslate"><span class="pre">QuantizedLinear</span></code></a>(input_dims, output_dims[, ...])</p></td>
<td><p>Applies an affine transformation to the input using a quantized weight matrix.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.RMSNorm.html#mlx.nn.RMSNorm" title="mlx.nn.RMSNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">RMSNorm</span></code></a>(dims[, eps])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.RMSNorm.html#mlx.nn.RMSNorm" title="mlx.nn.RMSNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">RMSNorm</span></code></a>(dims[, eps])</p></td>
<td><p>Applies Root Mean Square normalization [1] to the inputs.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.ReLU.html#mlx.nn.ReLU" title="mlx.nn.ReLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">ReLU</span></code></a>()</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.ReLU.html#mlx.nn.ReLU" title="mlx.nn.ReLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">ReLU</span></code></a>()</p></td>
<td><p>Applies the Rectified Linear Unit.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.RNN.html#mlx.nn.RNN" title="mlx.nn.RNN"><code class="xref py py-obj docutils literal notranslate"><span class="pre">RNN</span></code></a>(input_size, hidden_size[, bias, ...])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.RNN.html#mlx.nn.RNN" title="mlx.nn.RNN"><code class="xref py py-obj docutils literal notranslate"><span class="pre">RNN</span></code></a>(input_size, hidden_size[, bias, ...])</p></td>
<td><p>An Elman recurrent layer.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.RoPE.html#mlx.nn.RoPE" title="mlx.nn.RoPE"><code class="xref py py-obj docutils literal notranslate"><span class="pre">RoPE</span></code></a>(dims[, traditional, base, scale])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.RoPE.html#mlx.nn.RoPE" title="mlx.nn.RoPE"><code class="xref py py-obj docutils literal notranslate"><span class="pre">RoPE</span></code></a>(dims[, traditional, base, scale])</p></td>
<td><p>Implements the rotary positional encoding.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.SELU.html#mlx.nn.SELU" title="mlx.nn.SELU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">SELU</span></code></a>()</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.SELU.html#mlx.nn.SELU" title="mlx.nn.SELU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">SELU</span></code></a>()</p></td>
<td><p>Applies the Scaled Exponential Linear Unit.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Sequential.html#mlx.nn.Sequential" title="mlx.nn.Sequential"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Sequential</span></code></a>(*modules)</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Sequential.html#mlx.nn.Sequential" title="mlx.nn.Sequential"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Sequential</span></code></a>(*modules)</p></td>
<td><p>A layer that calls the passed callables in order.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.SiLU.html#mlx.nn.SiLU" title="mlx.nn.SiLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">SiLU</span></code></a>()</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.SiLU.html#mlx.nn.SiLU" title="mlx.nn.SiLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">SiLU</span></code></a>()</p></td>
<td><p>Applies the Sigmoid Linear Unit.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.SinusoidalPositionalEncoding.html#mlx.nn.SinusoidalPositionalEncoding" title="mlx.nn.SinusoidalPositionalEncoding"><code class="xref py py-obj docutils literal notranslate"><span class="pre">SinusoidalPositionalEncoding</span></code></a>(dims[, ...])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.SinusoidalPositionalEncoding.html#mlx.nn.SinusoidalPositionalEncoding" title="mlx.nn.SinusoidalPositionalEncoding"><code class="xref py py-obj docutils literal notranslate"><span class="pre">SinusoidalPositionalEncoding</span></code></a>(dims[, ...])</p></td>
<td><p>Implements sinusoidal positional encoding.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Softshrink.html#mlx.nn.Softshrink" title="mlx.nn.Softshrink"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Softshrink</span></code></a>([lambd])</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Softshrink.html#mlx.nn.Softshrink" title="mlx.nn.Softshrink"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Softshrink</span></code></a>([lambd])</p></td>
<td><p>Applies the Softshrink function.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Step.html#mlx.nn.Step" title="mlx.nn.Step"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Step</span></code></a>([threshold])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Step.html#mlx.nn.Step" title="mlx.nn.Step"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Step</span></code></a>([threshold])</p></td>
<td><p>Applies the Step Activation Function.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Transformer.html#mlx.nn.Transformer" title="mlx.nn.Transformer"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Transformer</span></code></a>(dims, num_heads, ...)</p></td>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Transformer.html#mlx.nn.Transformer" title="mlx.nn.Transformer"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Transformer</span></code></a>(dims, num_heads, ...)</p></td>
<td><p>Implements a standard Transformer model.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Upsample.html#mlx.nn.Upsample" title="mlx.nn.Upsample"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Upsample</span></code></a>(scale_factor[, mode, align_corners])</p></td>
<tr class="row-odd"><td><p><a class="reference internal" href="_autosummary/mlx.nn.Upsample.html#mlx.nn.Upsample" title="mlx.nn.Upsample"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Upsample</span></code></a>(scale_factor[, mode, align_corners])</p></td>
<td><p>Upsample the input signal spatially.</p></td>
</tr>
</tbody>
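The layers table above now includes mlx.nn.Conv3d alongside Conv1d and Conv2d. A brief usage sketch, assuming MLX's channels-last layout so a volumetric batch is shaped (N, D, H, W, C):

```python
import mlx.core as mx
import mlx.nn as nn

# Sketch of the newly documented Conv3d layer with channels-last input.
x = mx.random.normal(shape=(2, 16, 16, 16, 3))   # (N, D, H, W, C)
conv = nn.Conv3d(in_channels=3, out_channels=8, kernel_size=3)
y = conv(x)
print(y.shape)  # (2, 14, 14, 14, 8) with stride 1 and no padding
```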