From e9e600eb2cf23f6db0ef7790abe05ecca6fe1ce0 Mon Sep 17 00:00:00 2001 From: Awni Hannun Date: Mon, 20 May 2024 09:40:17 -0700 Subject: [PATCH] docs update --- docs/build/html/.buildinfo | 2 +- docs/build/html/_sources/dev/extensions.rst | 16 +- docs/build/html/_sources/install.rst | 22 +- .../python/_autosummary/mlx.core.addmm.rst | 6 + .../_autosummary/mlx.core.as_strided.rst | 6 + .../_autosummary/mlx.core.linalg.cholesky.rst | 6 + .../_autosummary/mlx.core.linalg.inv.rst | 6 + .../_autosummary/mlx.core.linalg.svd.rst | 6 + .../python/_autosummary/mlx.core.power.rst | 6 + .../_autosummary/mlx.core.remainder.rst | 6 + docs/build/html/_sources/python/linalg.rst | 3 + .../python/nn/_autosummary/mlx.nn.Conv3d.rst | 16 + .../nn/_autosummary/mlx.nn.Embedding.rst | 1 + .../python/nn/_autosummary/mlx.nn.Linear.rst | 1 + docs/build/html/_sources/python/nn/layers.rst | 1 + docs/build/html/_sources/python/ops.rst | 5 + .../html/_static/documentation_options.js | 2 +- docs/build/html/annotated.html | 172 +- ...ckend_2metal_2kernels_2bf16_8h_source.html | 4 +- ...ls_2steel_2gemm_2transforms_8h_source.html | 114 +- docs/build/html/bf16__math_8h_source.html | 2 +- docs/build/html/classes.html | 4 +- ..._1core_1_1_block_sparse_q_m_m-members.html | 115 ++ ...assmlx_1_1core_1_1_block_sparse_q_m_m.html | 447 ++++++ ...lassmlx_1_1core_1_1_block_sparse_q_m_m.png | Bin 0 -> 981 bytes ...classmlx_1_1core_1_1_cholesky-members.html | 115 ++ .../html/classmlx_1_1core_1_1_cholesky.html | 327 ++++ .../html/classmlx_1_1core_1_1_cholesky.png | Bin 0 -> 909 bytes .../html/classmlx_1_1core_1_1_primitive.html | 10 +- .../classmlx_1_1core_1_1_unary_primitive.html | 136 +- .../classmlx_1_1core_1_1_unary_primitive.png | Bin 31591 -> 32408 bytes docs/build/html/compiled_8h_source.html | 2 +- docs/build/html/cpp/ops.html | 39 +- docs/build/html/dev/extensions.html | 37 +- docs/build/html/dev/metal_debugger.html | 23 +- docs/build/html/doxygen_crawl.html | 4 + .../html/examples/linear_regression.html | 19 +- docs/build/html/examples/llama-inference.html | 19 +- docs/build/html/examples/mlp.html | 19 +- docs/build/html/functions_a.html | 4 +- docs/build/html/functions_b.html | 1 + docs/build/html/functions_c.html | 1 + docs/build/html/functions_e.html | 6 +- docs/build/html/functions_func_a.html | 4 +- docs/build/html/functions_func_b.html | 1 + docs/build/html/functions_func_c.html | 1 + docs/build/html/functions_func_e.html | 4 +- docs/build/html/functions_func_i.html | 2 +- docs/build/html/functions_func_j.html | 2 +- docs/build/html/functions_func_p.html | 2 +- docs/build/html/functions_func_v.html | 4 +- docs/build/html/functions_i.html | 2 +- docs/build/html/functions_j.html | 2 +- docs/build/html/functions_p.html | 2 +- docs/build/html/functions_t.html | 2 +- docs/build/html/functions_v.html | 4 +- docs/build/html/functions_vars_t.html | 2 +- docs/build/html/genindex.html | 47 +- docs/build/html/group__ops.html | 117 ++ docs/build/html/hierarchy.html | 132 +- docs/build/html/index.html | 21 +- docs/build/html/install.html | 64 +- .../kernels_2steel_2gemm_2gemm_8h_source.html | 2 +- docs/build/html/linalg_8h.html | 2 + docs/build/html/linalg_8h_source.html | 5 +- docs/build/html/mma_8h_source.html | 229 ++- docs/build/html/namespacemembers_b.html | 1 + docs/build/html/namespacemembers_c.html | 2 + docs/build/html/namespacemembers_func_b.html | 1 + docs/build/html/namespacemembers_func_c.html | 2 + docs/build/html/namespacemlx_1_1core.html | 10 + .../html/namespacemlx_1_1core_1_1linalg.html | 28 + 
docs/build/html/namespaces.html | 172 +- docs/build/html/objects.inv | Bin 23657 -> 24021 bytes docs/build/html/ops_8h.html | 6 + docs/build/html/ops_8h_source.html | 251 +-- docs/build/html/primitives_8h.html | 4 + docs/build/html/primitives_8h_source.html | 1416 +++++++++-------- .../python/_autosummary/mlx.core.Device.html | 19 +- .../python/_autosummary/mlx.core.Dtype.html | 19 +- .../_autosummary/mlx.core.DtypeCategory.html | 19 +- .../python/_autosummary/mlx.core.Stream.html | 19 +- .../python/_autosummary/mlx.core.abs.html | 19 +- .../python/_autosummary/mlx.core.add.html | 25 +- .../python/_autosummary/mlx.core.addmm.html | 940 +++++++++++ .../python/_autosummary/mlx.core.all.html | 25 +- .../_autosummary/mlx.core.allclose.html | 19 +- .../python/_autosummary/mlx.core.any.html | 19 +- .../python/_autosummary/mlx.core.arange.html | 19 +- .../python/_autosummary/mlx.core.arccos.html | 19 +- .../python/_autosummary/mlx.core.arccosh.html | 19 +- .../python/_autosummary/mlx.core.arcsin.html | 19 +- .../python/_autosummary/mlx.core.arcsinh.html | 19 +- .../python/_autosummary/mlx.core.arctan.html | 19 +- .../python/_autosummary/mlx.core.arctan2.html | 19 +- .../python/_autosummary/mlx.core.arctanh.html | 19 +- .../python/_autosummary/mlx.core.argmax.html | 19 +- .../python/_autosummary/mlx.core.argmin.html | 19 +- .../_autosummary/mlx.core.argpartition.html | 19 +- .../python/_autosummary/mlx.core.argsort.html | 19 +- .../python/_autosummary/mlx.core.array.T.html | 19 +- .../_autosummary/mlx.core.array.abs.html | 19 +- .../_autosummary/mlx.core.array.all.html | 19 +- .../_autosummary/mlx.core.array.any.html | 19 +- .../_autosummary/mlx.core.array.argmax.html | 19 +- .../_autosummary/mlx.core.array.argmin.html | 19 +- .../_autosummary/mlx.core.array.astype.html | 19 +- .../_autosummary/mlx.core.array.at.html | 19 +- .../_autosummary/mlx.core.array.cos.html | 19 +- .../_autosummary/mlx.core.array.cummax.html | 19 +- .../_autosummary/mlx.core.array.cummin.html | 19 +- .../_autosummary/mlx.core.array.cumprod.html | 19 +- .../_autosummary/mlx.core.array.cumsum.html | 19 +- .../_autosummary/mlx.core.array.diag.html | 19 +- .../_autosummary/mlx.core.array.diagonal.html | 19 +- .../_autosummary/mlx.core.array.dtype.html | 19 +- .../_autosummary/mlx.core.array.exp.html | 19 +- .../_autosummary/mlx.core.array.flatten.html | 19 +- .../python/_autosummary/mlx.core.array.html | 19 +- .../_autosummary/mlx.core.array.item.html | 19 +- .../_autosummary/mlx.core.array.itemsize.html | 19 +- .../_autosummary/mlx.core.array.log.html | 19 +- .../_autosummary/mlx.core.array.log10.html | 19 +- .../_autosummary/mlx.core.array.log1p.html | 19 +- .../_autosummary/mlx.core.array.log2.html | 19 +- .../mlx.core.array.logsumexp.html | 19 +- .../_autosummary/mlx.core.array.max.html | 19 +- .../_autosummary/mlx.core.array.mean.html | 19 +- .../_autosummary/mlx.core.array.min.html | 19 +- .../_autosummary/mlx.core.array.moveaxis.html | 19 +- .../_autosummary/mlx.core.array.nbytes.html | 19 +- .../_autosummary/mlx.core.array.ndim.html | 19 +- .../_autosummary/mlx.core.array.prod.html | 19 +- .../mlx.core.array.reciprocal.html | 19 +- .../_autosummary/mlx.core.array.reshape.html | 19 +- .../_autosummary/mlx.core.array.round.html | 19 +- .../_autosummary/mlx.core.array.rsqrt.html | 19 +- .../_autosummary/mlx.core.array.shape.html | 19 +- .../_autosummary/mlx.core.array.sin.html | 19 +- .../_autosummary/mlx.core.array.size.html | 19 +- .../_autosummary/mlx.core.array.split.html | 19 +- 
.../_autosummary/mlx.core.array.sqrt.html | 19 +- .../_autosummary/mlx.core.array.square.html | 19 +- .../_autosummary/mlx.core.array.squeeze.html | 19 +- .../_autosummary/mlx.core.array.sum.html | 19 +- .../_autosummary/mlx.core.array.swapaxes.html | 19 +- .../_autosummary/mlx.core.array.tolist.html | 19 +- .../mlx.core.array.transpose.html | 19 +- .../_autosummary/mlx.core.array.var.html | 19 +- .../_autosummary/mlx.core.array_equal.html | 25 +- .../_autosummary/mlx.core.as_strided.html | 950 +++++++++++ .../_autosummary/mlx.core.atleast_1d.html | 25 +- .../_autosummary/mlx.core.atleast_2d.html | 19 +- .../_autosummary/mlx.core.atleast_3d.html | 19 +- .../_autosummary/mlx.core.bitwise_and.html | 19 +- .../_autosummary/mlx.core.bitwise_or.html | 19 +- .../_autosummary/mlx.core.bitwise_xor.html | 19 +- .../mlx.core.block_masked_mm.html | 19 +- .../mlx.core.block_sparse_mm.html | 21 +- .../_autosummary/mlx.core.broadcast_to.html | 19 +- .../python/_autosummary/mlx.core.ceil.html | 19 +- .../python/_autosummary/mlx.core.clip.html | 19 +- .../python/_autosummary/mlx.core.compile.html | 19 +- .../_autosummary/mlx.core.concatenate.html | 19 +- .../python/_autosummary/mlx.core.conj.html | 19 +- .../_autosummary/mlx.core.conjugate.html | 19 +- .../python/_autosummary/mlx.core.conv1d.html | 19 +- .../python/_autosummary/mlx.core.conv2d.html | 19 +- .../_autosummary/mlx.core.conv_general.html | 19 +- .../_autosummary/mlx.core.convolve.html | 19 +- .../python/_autosummary/mlx.core.cos.html | 19 +- .../python/_autosummary/mlx.core.cosh.html | 19 +- .../python/_autosummary/mlx.core.cummax.html | 19 +- .../python/_autosummary/mlx.core.cummin.html | 19 +- .../python/_autosummary/mlx.core.cumprod.html | 19 +- .../python/_autosummary/mlx.core.cumsum.html | 19 +- .../_autosummary/mlx.core.default_device.html | 19 +- .../_autosummary/mlx.core.default_stream.html | 19 +- .../python/_autosummary/mlx.core.degrees.html | 19 +- .../_autosummary/mlx.core.dequantize.html | 21 +- .../python/_autosummary/mlx.core.diag.html | 19 +- .../_autosummary/mlx.core.diagonal.html | 19 +- .../mlx.core.disable_compile.html | 19 +- .../python/_autosummary/mlx.core.divide.html | 19 +- .../python/_autosummary/mlx.core.divmod.html | 19 +- .../_autosummary/mlx.core.enable_compile.html | 19 +- .../python/_autosummary/mlx.core.equal.html | 19 +- .../python/_autosummary/mlx.core.erf.html | 19 +- .../python/_autosummary/mlx.core.erfinv.html | 19 +- .../python/_autosummary/mlx.core.eval.html | 19 +- .../python/_autosummary/mlx.core.exp.html | 19 +- .../_autosummary/mlx.core.expand_dims.html | 19 +- .../python/_autosummary/mlx.core.expm1.html | 19 +- .../python/_autosummary/mlx.core.eye.html | 19 +- .../mlx.core.fast.layer_norm.html | 19 +- .../_autosummary/mlx.core.fast.rms_norm.html | 19 +- .../_autosummary/mlx.core.fast.rope.html | 19 +- ...ore.fast.scaled_dot_product_attention.html | 19 +- .../python/_autosummary/mlx.core.fft.fft.html | 19 +- .../_autosummary/mlx.core.fft.fft2.html | 19 +- .../_autosummary/mlx.core.fft.fftn.html | 19 +- .../_autosummary/mlx.core.fft.ifft.html | 19 +- .../_autosummary/mlx.core.fft.ifft2.html | 19 +- .../_autosummary/mlx.core.fft.ifftn.html | 19 +- .../_autosummary/mlx.core.fft.irfft.html | 19 +- .../_autosummary/mlx.core.fft.irfft2.html | 19 +- .../_autosummary/mlx.core.fft.irfftn.html | 19 +- .../_autosummary/mlx.core.fft.rfft.html | 19 +- .../_autosummary/mlx.core.fft.rfft2.html | 19 +- .../_autosummary/mlx.core.fft.rfftn.html | 19 +- .../python/_autosummary/mlx.core.flatten.html | 19 +- 
.../python/_autosummary/mlx.core.floor.html | 19 +- .../_autosummary/mlx.core.floor_divide.html | 19 +- .../python/_autosummary/mlx.core.full.html | 19 +- .../python/_autosummary/mlx.core.grad.html | 19 +- .../python/_autosummary/mlx.core.greater.html | 19 +- .../_autosummary/mlx.core.greater_equal.html | 19 +- .../_autosummary/mlx.core.identity.html | 19 +- .../python/_autosummary/mlx.core.inner.html | 21 +- .../python/_autosummary/mlx.core.isclose.html | 19 +- .../python/_autosummary/mlx.core.isinf.html | 19 +- .../python/_autosummary/mlx.core.isnan.html | 19 +- .../_autosummary/mlx.core.isneginf.html | 19 +- .../_autosummary/mlx.core.isposinf.html | 19 +- .../_autosummary/mlx.core.issubdtype.html | 21 +- .../python/_autosummary/mlx.core.jvp.html | 19 +- .../_autosummary/mlx.core.left_shift.html | 19 +- .../python/_autosummary/mlx.core.less.html | 19 +- .../_autosummary/mlx.core.less_equal.html | 19 +- .../mlx.core.linalg.cholesky.html | 943 +++++++++++ .../_autosummary/mlx.core.linalg.inv.html | 938 +++++++++++ .../_autosummary/mlx.core.linalg.norm.html | 31 +- .../_autosummary/mlx.core.linalg.qr.html | 31 +- .../_autosummary/mlx.core.linalg.svd.html | 939 +++++++++++ .../_autosummary/mlx.core.linspace.html | 19 +- .../python/_autosummary/mlx.core.load.html | 21 +- .../python/_autosummary/mlx.core.log.html | 19 +- .../python/_autosummary/mlx.core.log10.html | 19 +- .../python/_autosummary/mlx.core.log1p.html | 19 +- .../python/_autosummary/mlx.core.log2.html | 19 +- .../_autosummary/mlx.core.logaddexp.html | 19 +- .../_autosummary/mlx.core.logical_and.html | 19 +- .../_autosummary/mlx.core.logical_not.html | 19 +- .../_autosummary/mlx.core.logical_or.html | 19 +- .../_autosummary/mlx.core.logsumexp.html | 19 +- .../python/_autosummary/mlx.core.matmul.html | 19 +- .../python/_autosummary/mlx.core.max.html | 19 +- .../python/_autosummary/mlx.core.maximum.html | 19 +- .../python/_autosummary/mlx.core.mean.html | 19 +- .../_autosummary/mlx.core.meshgrid.html | 19 +- .../mlx.core.metal.clear_cache.html | 19 +- .../mlx.core.metal.device_info.html | 19 +- .../mlx.core.metal.get_active_memory.html | 19 +- .../mlx.core.metal.get_cache_memory.html | 19 +- .../mlx.core.metal.get_peak_memory.html | 19 +- .../mlx.core.metal.is_available.html | 19 +- .../mlx.core.metal.reset_peak_memory.html | 19 +- .../mlx.core.metal.set_cache_limit.html | 19 +- .../mlx.core.metal.set_memory_limit.html | 19 +- .../mlx.core.metal.start_capture.html | 19 +- .../mlx.core.metal.stop_capture.html | 19 +- .../python/_autosummary/mlx.core.min.html | 19 +- .../python/_autosummary/mlx.core.minimum.html | 19 +- .../_autosummary/mlx.core.moveaxis.html | 19 +- .../_autosummary/mlx.core.multiply.html | 19 +- .../_autosummary/mlx.core.negative.html | 19 +- .../_autosummary/mlx.core.new_stream.html | 19 +- .../_autosummary/mlx.core.not_equal.html | 19 +- .../python/_autosummary/mlx.core.ones.html | 19 +- .../_autosummary/mlx.core.ones_like.html | 19 +- .../python/_autosummary/mlx.core.outer.html | 21 +- .../python/_autosummary/mlx.core.pad.html | 25 +- .../_autosummary/mlx.core.partition.html | 19 +- .../python/_autosummary/mlx.core.power.html | 936 +++++++++++ .../python/_autosummary/mlx.core.prod.html | 25 +- .../_autosummary/mlx.core.quantize.html | 25 +- .../mlx.core.quantized_matmul.html | 21 +- .../python/_autosummary/mlx.core.radians.html | 19 +- .../mlx.core.random.bernoulli.html | 19 +- .../mlx.core.random.categorical.html | 19 +- .../_autosummary/mlx.core.random.gumbel.html | 19 +- 
.../_autosummary/mlx.core.random.key.html | 19 +- .../mlx.core.random.multivariate_normal.html | 19 +- .../_autosummary/mlx.core.random.normal.html | 19 +- .../_autosummary/mlx.core.random.randint.html | 19 +- .../_autosummary/mlx.core.random.seed.html | 19 +- .../_autosummary/mlx.core.random.split.html | 19 +- .../mlx.core.random.truncated_normal.html | 19 +- .../_autosummary/mlx.core.random.uniform.html | 19 +- .../_autosummary/mlx.core.reciprocal.html | 25 +- .../_autosummary/mlx.core.remainder.html | 937 +++++++++++ .../python/_autosummary/mlx.core.repeat.html | 25 +- .../python/_autosummary/mlx.core.reshape.html | 19 +- .../_autosummary/mlx.core.right_shift.html | 19 +- .../python/_autosummary/mlx.core.round.html | 21 +- .../python/_autosummary/mlx.core.rsqrt.html | 19 +- .../python/_autosummary/mlx.core.save.html | 19 +- .../_autosummary/mlx.core.save_gguf.html | 19 +- .../mlx.core.save_safetensors.html | 19 +- .../python/_autosummary/mlx.core.savez.html | 19 +- .../mlx.core.savez_compressed.html | 19 +- .../mlx.core.set_default_device.html | 19 +- .../mlx.core.set_default_stream.html | 19 +- .../python/_autosummary/mlx.core.sigmoid.html | 19 +- .../python/_autosummary/mlx.core.sign.html | 19 +- .../python/_autosummary/mlx.core.sin.html | 19 +- .../python/_autosummary/mlx.core.sinh.html | 19 +- .../python/_autosummary/mlx.core.softmax.html | 19 +- .../python/_autosummary/mlx.core.sort.html | 19 +- .../python/_autosummary/mlx.core.split.html | 19 +- .../python/_autosummary/mlx.core.sqrt.html | 19 +- .../python/_autosummary/mlx.core.square.html | 19 +- .../python/_autosummary/mlx.core.squeeze.html | 19 +- .../python/_autosummary/mlx.core.stack.html | 19 +- .../python/_autosummary/mlx.core.std.html | 19 +- .../_autosummary/mlx.core.stop_gradient.html | 19 +- .../_autosummary/mlx.core.subtract.html | 19 +- .../python/_autosummary/mlx.core.sum.html | 19 +- .../_autosummary/mlx.core.swapaxes.html | 19 +- .../_autosummary/mlx.core.synchronize.html | 19 +- .../python/_autosummary/mlx.core.take.html | 19 +- .../mlx.core.take_along_axis.html | 19 +- .../python/_autosummary/mlx.core.tan.html | 19 +- .../python/_autosummary/mlx.core.tanh.html | 19 +- .../_autosummary/mlx.core.tensordot.html | 21 +- .../python/_autosummary/mlx.core.tile.html | 21 +- .../python/_autosummary/mlx.core.topk.html | 19 +- .../_autosummary/mlx.core.transpose.html | 19 +- .../python/_autosummary/mlx.core.tri.html | 19 +- .../python/_autosummary/mlx.core.tril.html | 19 +- .../python/_autosummary/mlx.core.triu.html | 19 +- .../_autosummary/mlx.core.value_and_grad.html | 19 +- .../python/_autosummary/mlx.core.var.html | 19 +- .../python/_autosummary/mlx.core.vjp.html | 19 +- .../python/_autosummary/mlx.core.vmap.html | 19 +- .../python/_autosummary/mlx.core.where.html | 21 +- .../python/_autosummary/mlx.core.zeros.html | 19 +- .../_autosummary/mlx.core.zeros_like.html | 19 +- .../python/_autosummary/mlx.nn.quantize.html | 27 +- .../_autosummary/mlx.nn.value_and_grad.html | 19 +- .../mlx.optimizers.clip_grad_norm.html | 19 +- .../_autosummary/mlx.utils.tree_flatten.html | 19 +- .../_autosummary/mlx.utils.tree_map.html | 19 +- .../mlx.utils.tree_map_with_path.html | 19 +- .../_autosummary/mlx.utils.tree_reduce.html | 19 +- .../mlx.utils.tree_unflatten.html | 19 +- .../python/_autosummary/stream_class.html | 19 +- docs/build/html/python/array.html | 19 +- docs/build/html/python/data_types.html | 19 +- .../html/python/devices_and_streams.html | 19 +- docs/build/html/python/fast.html | 19 +- docs/build/html/python/fft.html | 19 
+- docs/build/html/python/linalg.html | 36 +- docs/build/html/python/metal.html | 25 +- docs/build/html/python/nn.html | 23 +- .../python/nn/_autosummary/mlx.nn.ALiBi.html | 19 +- .../nn/_autosummary/mlx.nn.AvgPool1d.html | 19 +- .../nn/_autosummary/mlx.nn.AvgPool2d.html | 19 +- .../nn/_autosummary/mlx.nn.BatchNorm.html | 19 +- .../python/nn/_autosummary/mlx.nn.Conv1d.html | 25 +- .../python/nn/_autosummary/mlx.nn.Conv2d.html | 31 +- .../python/nn/_autosummary/mlx.nn.Conv3d.html | 948 +++++++++++ .../nn/_autosummary/mlx.nn.Dropout.html | 25 +- .../nn/_autosummary/mlx.nn.Dropout2d.html | 19 +- .../nn/_autosummary/mlx.nn.Dropout3d.html | 19 +- .../nn/_autosummary/mlx.nn.Embedding.html | 22 +- .../python/nn/_autosummary/mlx.nn.GELU.html | 19 +- .../python/nn/_autosummary/mlx.nn.GRU.html | 19 +- .../nn/_autosummary/mlx.nn.GroupNorm.html | 19 +- .../nn/_autosummary/mlx.nn.InstanceNorm.html | 19 +- .../python/nn/_autosummary/mlx.nn.LSTM.html | 19 +- .../nn/_autosummary/mlx.nn.LayerNorm.html | 19 +- .../python/nn/_autosummary/mlx.nn.Linear.html | 22 +- .../nn/_autosummary/mlx.nn.MaxPool1d.html | 19 +- .../nn/_autosummary/mlx.nn.MaxPool2d.html | 19 +- .../python/nn/_autosummary/mlx.nn.Mish.html | 19 +- .../nn/_autosummary/mlx.nn.Module.apply.html | 19 +- .../mlx.nn.Module.apply_to_modules.html | 19 +- .../_autosummary/mlx.nn.Module.children.html | 19 +- .../nn/_autosummary/mlx.nn.Module.eval.html | 19 +- .../mlx.nn.Module.filter_and_map.html | 19 +- .../nn/_autosummary/mlx.nn.Module.freeze.html | 19 +- .../mlx.nn.Module.leaf_modules.html | 19 +- .../mlx.nn.Module.load_weights.html | 19 +- .../_autosummary/mlx.nn.Module.modules.html | 19 +- .../mlx.nn.Module.named_modules.html | 19 +- .../mlx.nn.Module.parameters.html | 19 +- .../mlx.nn.Module.save_weights.html | 19 +- .../_autosummary/mlx.nn.Module.set_dtype.html | 19 +- .../nn/_autosummary/mlx.nn.Module.state.html | 19 +- .../nn/_autosummary/mlx.nn.Module.train.html | 19 +- .../mlx.nn.Module.trainable_parameters.html | 19 +- .../_autosummary/mlx.nn.Module.training.html | 19 +- .../_autosummary/mlx.nn.Module.unfreeze.html | 19 +- .../nn/_autosummary/mlx.nn.Module.update.html | 19 +- .../mlx.nn.Module.update_modules.html | 19 +- .../mlx.nn.MultiHeadAttention.html | 19 +- .../python/nn/_autosummary/mlx.nn.PReLU.html | 19 +- .../mlx.nn.QuantizedEmbedding.html | 19 +- .../_autosummary/mlx.nn.QuantizedLinear.html | 19 +- .../nn/_autosummary/mlx.nn.RMSNorm.html | 19 +- .../python/nn/_autosummary/mlx.nn.RNN.html | 19 +- .../python/nn/_autosummary/mlx.nn.ReLU.html | 19 +- .../python/nn/_autosummary/mlx.nn.RoPE.html | 19 +- .../python/nn/_autosummary/mlx.nn.SELU.html | 19 +- .../nn/_autosummary/mlx.nn.Sequential.html | 19 +- .../python/nn/_autosummary/mlx.nn.SiLU.html | 19 +- .../mlx.nn.SinusoidalPositionalEncoding.html | 19 +- .../nn/_autosummary/mlx.nn.Softshrink.html | 19 +- .../python/nn/_autosummary/mlx.nn.Step.html | 19 +- .../nn/_autosummary/mlx.nn.Transformer.html | 19 +- .../nn/_autosummary/mlx.nn.Upsample.html | 19 +- .../nn/_autosummary/mlx.nn.init.constant.html | 19 +- .../mlx.nn.init.glorot_normal.html | 19 +- .../mlx.nn.init.glorot_uniform.html | 19 +- .../_autosummary/mlx.nn.init.he_normal.html | 19 +- .../_autosummary/mlx.nn.init.he_uniform.html | 19 +- .../nn/_autosummary/mlx.nn.init.identity.html | 19 +- .../nn/_autosummary/mlx.nn.init.normal.html | 19 +- .../nn/_autosummary/mlx.nn.init.uniform.html | 19 +- .../nn/_autosummary_functions/mlx.nn.elu.html | 19 +- .../_autosummary_functions/mlx.nn.gelu.html | 19 +- .../mlx.nn.gelu_approx.html 
| 19 +- .../mlx.nn.gelu_fast_approx.html | 19 +- .../nn/_autosummary_functions/mlx.nn.glu.html | 19 +- .../mlx.nn.hardswish.html | 19 +- .../mlx.nn.leaky_relu.html | 19 +- .../mlx.nn.log_sigmoid.html | 19 +- .../mlx.nn.log_softmax.html | 19 +- .../mlx.nn.losses.binary_cross_entropy.html | 19 +- .../mlx.nn.losses.cosine_similarity_loss.html | 19 +- .../mlx.nn.losses.cross_entropy.html | 19 +- .../mlx.nn.losses.gaussian_nll_loss.html | 19 +- .../mlx.nn.losses.hinge_loss.html | 19 +- .../mlx.nn.losses.huber_loss.html | 19 +- .../mlx.nn.losses.kl_div_loss.html | 19 +- .../mlx.nn.losses.l1_loss.html | 19 +- .../mlx.nn.losses.log_cosh_loss.html | 19 +- .../mlx.nn.losses.margin_ranking_loss.html | 19 +- .../mlx.nn.losses.mse_loss.html | 19 +- .../mlx.nn.losses.nll_loss.html | 19 +- .../mlx.nn.losses.smooth_l1_loss.html | 19 +- .../mlx.nn.losses.triplet_loss.html | 19 +- .../_autosummary_functions/mlx.nn.mish.html | 19 +- .../_autosummary_functions/mlx.nn.prelu.html | 19 +- .../_autosummary_functions/mlx.nn.relu.html | 19 +- .../_autosummary_functions/mlx.nn.relu6.html | 19 +- .../_autosummary_functions/mlx.nn.selu.html | 19 +- .../mlx.nn.sigmoid.html | 19 +- .../_autosummary_functions/mlx.nn.silu.html | 19 +- .../mlx.nn.softmax.html | 19 +- .../mlx.nn.softplus.html | 19 +- .../mlx.nn.softshrink.html | 19 +- .../_autosummary_functions/mlx.nn.step.html | 19 +- .../_autosummary_functions/mlx.nn.tanh.html | 19 +- docs/build/html/python/nn/functions.html | 19 +- docs/build/html/python/nn/init.html | 19 +- docs/build/html/python/nn/layers.html | 82 +- docs/build/html/python/nn/losses.html | 19 +- docs/build/html/python/nn/module.html | 19 +- docs/build/html/python/ops.html | 210 +-- docs/build/html/python/optimizers.html | 19 +- .../_autosummary/mlx.optimizers.AdaDelta.html | 19 +- .../mlx.optimizers.Adafactor.html | 19 +- .../_autosummary/mlx.optimizers.Adagrad.html | 19 +- .../_autosummary/mlx.optimizers.Adam.html | 19 +- .../_autosummary/mlx.optimizers.AdamW.html | 19 +- .../_autosummary/mlx.optimizers.Adamax.html | 19 +- .../_autosummary/mlx.optimizers.Lion.html | 19 +- ....optimizers.Optimizer.apply_gradients.html | 19 +- .../mlx.optimizers.Optimizer.init.html | 19 +- .../mlx.optimizers.Optimizer.state.html | 19 +- .../mlx.optimizers.Optimizer.update.html | 19 +- .../_autosummary/mlx.optimizers.RMSprop.html | 19 +- .../_autosummary/mlx.optimizers.SGD.html | 19 +- .../mlx.optimizers.cosine_decay.html | 23 +- .../mlx.optimizers.exponential_decay.html | 19 +- .../mlx.optimizers.join_schedules.html | 19 +- .../mlx.optimizers.linear_schedule.html | 19 +- .../mlx.optimizers.step_decay.html | 19 +- .../python/optimizers/common_optimizers.html | 19 +- .../html/python/optimizers/optimizer.html | 19 +- .../html/python/optimizers/schedulers.html | 21 +- docs/build/html/python/random.html | 19 +- docs/build/html/python/transforms.html | 19 +- docs/build/html/python/tree_utils.html | 19 +- docs/build/html/reduce__inst_8h.html | 18 +- docs/build/html/reduce__inst_8h_source.html | 139 +- docs/build/html/search.html | 19 +- docs/build/html/search/all_1.js | 18 +- docs/build/html/search/all_10.js | 115 +- docs/build/html/search/all_14.js | 4 +- docs/build/html/search/all_16.js | 4 +- docs/build/html/search/all_2.js | 41 +- docs/build/html/search/all_3.js | 171 +- docs/build/html/search/all_4.js | 106 +- docs/build/html/search/all_5.js | 47 +- docs/build/html/search/all_9.js | 2 +- docs/build/html/search/all_a.js | 2 +- docs/build/html/search/all_d.js | 160 +- docs/build/html/search/classes_2.js | 9 +- 
docs/build/html/search/classes_3.js | 59 +- docs/build/html/search/functions_1.js | 92 +- docs/build/html/search/functions_10.js | 6 +- docs/build/html/search/functions_16.js | 4 +- docs/build/html/search/functions_2.js | 24 +- docs/build/html/search/functions_3.js | 109 +- docs/build/html/search/functions_4.js | 8 +- docs/build/html/search/functions_5.js | 8 +- docs/build/html/search/functions_9.js | 2 +- docs/build/html/search/functions_a.js | 2 +- docs/build/html/search/variables_12.js | 4 +- docs/build/html/searchindex.js | 2 +- ...tmlx_1_1steel_1_1_block_m_m_a-members.html | 6 +- .../structmlx_1_1steel_1_1_block_m_m_a.html | 97 ++ ...lx_1_1steel_1_1_transform_add-members.html | 5 +- .../structmlx_1_1steel_1_1_transform_add.html | 31 +- ..._1_1steel_1_1_transform_axpby-members.html | 7 +- ...tructmlx_1_1steel_1_1_transform_axpby.html | 35 +- docs/build/html/usage/compile.html | 19 +- .../build/html/usage/function_transforms.html | 19 +- docs/build/html/usage/indexing.html | 19 +- docs/build/html/usage/lazy_evaluation.html | 19 +- docs/build/html/usage/numpy.html | 19 +- docs/build/html/usage/quick_start.html | 19 +- docs/build/html/usage/saving_and_loading.html | 19 +- docs/build/html/usage/unified_memory.html | 19 +- docs/build/html/usage/using_streams.html | 19 +- 528 files changed, 17198 insertions(+), 4162 deletions(-) create mode 100644 docs/build/html/_sources/python/_autosummary/mlx.core.addmm.rst create mode 100644 docs/build/html/_sources/python/_autosummary/mlx.core.as_strided.rst create mode 100644 docs/build/html/_sources/python/_autosummary/mlx.core.linalg.cholesky.rst create mode 100644 docs/build/html/_sources/python/_autosummary/mlx.core.linalg.inv.rst create mode 100644 docs/build/html/_sources/python/_autosummary/mlx.core.linalg.svd.rst create mode 100644 docs/build/html/_sources/python/_autosummary/mlx.core.power.rst create mode 100644 docs/build/html/_sources/python/_autosummary/mlx.core.remainder.rst create mode 100644 docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Conv3d.rst create mode 100644 docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m-members.html create mode 100644 docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m.html create mode 100644 docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m.png create mode 100644 docs/build/html/classmlx_1_1core_1_1_cholesky-members.html create mode 100644 docs/build/html/classmlx_1_1core_1_1_cholesky.html create mode 100644 docs/build/html/classmlx_1_1core_1_1_cholesky.png create mode 100644 docs/build/html/python/_autosummary/mlx.core.addmm.html create mode 100644 docs/build/html/python/_autosummary/mlx.core.as_strided.html create mode 100644 docs/build/html/python/_autosummary/mlx.core.linalg.cholesky.html create mode 100644 docs/build/html/python/_autosummary/mlx.core.linalg.inv.html create mode 100644 docs/build/html/python/_autosummary/mlx.core.linalg.svd.html create mode 100644 docs/build/html/python/_autosummary/mlx.core.power.html create mode 100644 docs/build/html/python/_autosummary/mlx.core.remainder.html create mode 100644 docs/build/html/python/nn/_autosummary/mlx.nn.Conv3d.html diff --git a/docs/build/html/.buildinfo b/docs/build/html/.buildinfo index 4cd5f8315..e0cff859d 100644 --- a/docs/build/html/.buildinfo +++ b/docs/build/html/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
-config: 6d31d3d7850f7f8959377483b35af018 +config: c0e0bb3fe4816a1bf9a98909252329c4 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/build/html/_sources/dev/extensions.rst b/docs/build/html/_sources/dev/extensions.rst index acf41a773..9a2be90cd 100644 --- a/docs/build/html/_sources/dev/extensions.rst +++ b/docs/build/html/_sources/dev/extensions.rst @@ -1,5 +1,5 @@ -Developer Documentation -======================= +Custom Extensions in MLX +======================== You can extend MLX with custom operations on the CPU or GPU. This guide explains how to do that with a simple example. @@ -494,7 +494,7 @@ below. auto kernel = d.get_kernel(kname.str(), "mlx_ext"); // Prepare to encode kernel - auto compute_encoder = d.get_command_encoder(s.index); + auto& compute_encoder = d.get_command_encoder(s.index); compute_encoder->setComputePipelineState(kernel); // Kernel parameters are registered with buffer indices corresponding to @@ -503,11 +503,11 @@ below. size_t nelem = out.size(); // Encode input arrays to kernel - set_array_buffer(compute_encoder, x, 0); - set_array_buffer(compute_encoder, y, 1); + compute_encoder.set_input_array(x, 0); + compute_encoder.set_input_array(y, 1); // Encode output arrays to kernel - set_array_buffer(compute_encoder, out, 2); + compute_encoder.set_output_array(out, 2); // Encode alpha and beta compute_encoder->setBytes(&alpha_, sizeof(float), 3); @@ -531,7 +531,7 @@ below. // Launch the grid with the given number of threads divided among // the given threadgroups - compute_encoder->dispatchThreads(grid_dims, group_dims); + compute_encoder.dispatchThreads(grid_dims, group_dims); } We can now call the :meth:`axpby` operation on both the CPU and the GPU! @@ -825,7 +825,7 @@ Let's look at a simple script and its results: print(f"c shape: {c.shape}") print(f"c dtype: {c.dtype}") - print(f"c correctness: {mx.all(c == 6.0).item()}") + print(f"c correct: {mx.all(c == 6.0).item()}") Output: diff --git a/docs/build/html/_sources/install.rst b/docs/build/html/_sources/install.rst index 252b234e6..693385e2c 100644 --- a/docs/build/html/_sources/install.rst +++ b/docs/build/html/_sources/install.rst @@ -153,6 +153,8 @@ should point to the path to the built metal library. - OFF * - MLX_BUILD_METAL - ON + * - MLX_BUILD_CPU + - ON * - MLX_BUILD_PYTHON_BINDINGS - OFF * - MLX_METAL_DEBUG @@ -179,10 +181,28 @@ should point to the path to the built metal library. xcrun -sdk macosx --show-sdk-version +Binary Size Minimization +~~~~~~~~~~~~~~~~~~~~~~~~ + +To produce a smaller binary use the CMake flags `CMAKE_BUILD_TYPE=MinSizeRel` +and `BUILD_SHARED_LIBS=ON`. + +The MLX CMake build has several additional options to make smaller binaries. +For example, if you don't need the CPU backend or support for safetensors and +GGUF, you can do: + +.. code-block:: shell + + cmake .. + -DCMAKE_BUILD_TYPE=MinSizeRel \ + -DBUILD_SHARED_LIBS=ON \ + -DMLX_BUILD_CPU=ON \ + -DMLX_BUILD_SAFETENSORS=OFF \ + -DMLX_BUILD_GGUF=OFF + Troubleshooting ^^^^^^^^^^^^^^^ - Metal not found ~~~~~~~~~~~~~~~ diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.addmm.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.addmm.rst new file mode 100644 index 000000000..2f513993b --- /dev/null +++ b/docs/build/html/_sources/python/_autosummary/mlx.core.addmm.rst @@ -0,0 +1,6 @@ +mlx.core.addmm +============== + +.. currentmodule:: mlx.core + +.. 
autofunction:: addmm \ No newline at end of file diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.as_strided.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.as_strided.rst new file mode 100644 index 000000000..5ed10ae88 --- /dev/null +++ b/docs/build/html/_sources/python/_autosummary/mlx.core.as_strided.rst @@ -0,0 +1,6 @@ +mlx.core.as\_strided +==================== + +.. currentmodule:: mlx.core + +.. autofunction:: as_strided \ No newline at end of file diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.cholesky.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.cholesky.rst new file mode 100644 index 000000000..3e2dfd367 --- /dev/null +++ b/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.cholesky.rst @@ -0,0 +1,6 @@ +mlx.core.linalg.cholesky +======================== + +.. currentmodule:: mlx.core.linalg + +.. autofunction:: cholesky \ No newline at end of file diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.inv.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.inv.rst new file mode 100644 index 000000000..0e030237b --- /dev/null +++ b/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.inv.rst @@ -0,0 +1,6 @@ +mlx.core.linalg.inv +=================== + +.. currentmodule:: mlx.core.linalg + +.. autofunction:: inv \ No newline at end of file diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.svd.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.svd.rst new file mode 100644 index 000000000..4f0ddf4be --- /dev/null +++ b/docs/build/html/_sources/python/_autosummary/mlx.core.linalg.svd.rst @@ -0,0 +1,6 @@ +mlx.core.linalg.svd +=================== + +.. currentmodule:: mlx.core.linalg + +.. autofunction:: svd \ No newline at end of file diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.power.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.power.rst new file mode 100644 index 000000000..1ef907bd8 --- /dev/null +++ b/docs/build/html/_sources/python/_autosummary/mlx.core.power.rst @@ -0,0 +1,6 @@ +mlx.core.power +============== + +.. currentmodule:: mlx.core + +.. autofunction:: power \ No newline at end of file diff --git a/docs/build/html/_sources/python/_autosummary/mlx.core.remainder.rst b/docs/build/html/_sources/python/_autosummary/mlx.core.remainder.rst new file mode 100644 index 000000000..2b333363e --- /dev/null +++ b/docs/build/html/_sources/python/_autosummary/mlx.core.remainder.rst @@ -0,0 +1,6 @@ +mlx.core.remainder +================== + +.. currentmodule:: mlx.core + +.. autofunction:: remainder \ No newline at end of file diff --git a/docs/build/html/_sources/python/linalg.rst b/docs/build/html/_sources/python/linalg.rst index 0ac559f5e..3c34cb3f7 100644 --- a/docs/build/html/_sources/python/linalg.rst +++ b/docs/build/html/_sources/python/linalg.rst @@ -8,5 +8,8 @@ Linear Algebra .. autosummary:: :toctree: _autosummary + inv norm + cholesky qr + svd diff --git a/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Conv3d.rst b/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Conv3d.rst new file mode 100644 index 000000000..5442dcdbf --- /dev/null +++ b/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Conv3d.rst @@ -0,0 +1,16 @@ +mlx.nn.Conv3d +============= + +.. currentmodule:: mlx.nn + +.. autoclass:: Conv3d + + + + + .. rubric:: Methods + + .. 
autosummary:: + + + diff --git a/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Embedding.rst b/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Embedding.rst index 598179428..9c433bfb5 100644 --- a/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Embedding.rst +++ b/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Embedding.rst @@ -13,5 +13,6 @@ .. autosummary:: ~Embedding.as_linear + ~Embedding.to_quantized diff --git a/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Linear.rst b/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Linear.rst index f19fc1994..70850e9d8 100644 --- a/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Linear.rst +++ b/docs/build/html/_sources/python/nn/_autosummary/mlx.nn.Linear.rst @@ -12,5 +12,6 @@ .. autosummary:: + ~Linear.to_quantized diff --git a/docs/build/html/_sources/python/nn/layers.rst b/docs/build/html/_sources/python/nn/layers.rst index 6fb624d54..cbbbb5c3b 100644 --- a/docs/build/html/_sources/python/nn/layers.rst +++ b/docs/build/html/_sources/python/nn/layers.rst @@ -15,6 +15,7 @@ Layers BatchNorm Conv1d Conv2d + Conv3d Dropout Dropout2d Dropout3d diff --git a/docs/build/html/_sources/python/ops.rst b/docs/build/html/_sources/python/ops.rst index 177332c49..c88885101 100644 --- a/docs/build/html/_sources/python/ops.rst +++ b/docs/build/html/_sources/python/ops.rst @@ -10,6 +10,7 @@ Operations abs add + addmm all allclose any @@ -26,6 +27,7 @@ Operations argpartition argsort array_equal + as_strided atleast_1d atleast_2d atleast_3d @@ -76,6 +78,7 @@ Operations isnan isneginf isposinf + issubdtype left_shift less less_equal @@ -106,11 +109,13 @@ Operations outer partition pad + power prod quantize quantized_matmul radians reciprocal + remainder repeat reshape right_shift diff --git a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js index 607aaea4c..4b6f897c5 100644 --- a/docs/build/html/_static/documentation_options.js +++ b/docs/build/html/_static/documentation_options.js @@ -1,5 +1,5 @@ const DOCUMENTATION_OPTIONS = { - VERSION: '0.13.0', + VERSION: '0.13.1', LANGUAGE: 'en', COLLAPSE_INDEX: false, BUILDER: 'html', diff --git a/docs/build/html/annotated.html b/docs/build/html/annotated.html index 220efdb5b..d2c356c14 100644 --- a/docs/build/html/annotated.html +++ b/docs/build/html/annotated.html @@ -192,91 +192,93 @@ $(function() {  CBitwiseBinary  CBlockMaskedMM  CBlockSparseMM - CBroadcast - CCeil - CCompiled - Ccomplex128_t - Ccomplex64_t - CConcatenate - CConjugate - CConvolution - CCopy - CCos - CCosh - CCustomVJP - CDepends - CDevice - CDivide - CDivMod - CDtype - CEqual - CErf - CErfInv - CEvent - CExp - CExpm1 - CFFT - CFloor - CFull - CGather - CGreater - CGreaterEqual - CInverse - CLess - CLessEqual - CLoad - CLog - CLog1p - CLogAddExp - CLogicalAnd - CLogicalNot - CLogicalOr - CMatmul - CMaximum - CMinimum - CMultiply - CNegative - CNodeNamer - CNotEqual - CNumberOfElements - CPad - CPartition - CPower - CPrimitive - CPrintFormatter - CQRF - CQuantizedMatmul - CRandomBits - CReduce - CReductionPlan - CRemainder - CReshape - CRound - CScan - CScatter - CSelect - CSigmoid - CSign - CSin - CSinh - CSlice - CSliceUpdate - CSoftmax - CSort - CSplit - CSqrt - CSquare - CStopGradient - CStream - CStreamContext - CSubtract - CSVD - CTan - CTanh - CTranspose - CTypeToDtype - CUnaryPrimitive - CUniform + CBlockSparseQMM + CBroadcast + CCeil + CCholesky + CCompiled + Ccomplex128_t + Ccomplex64_t + CConcatenate + CConjugate + CConvolution + CCopy + CCos 
+ CCosh + CCustomVJP + CDepends + CDevice + CDivide + CDivMod + CDtype + CEqual + CErf + CErfInv + CEvent + CExp + CExpm1 + CFFT + CFloor + CFull + CGather + CGreater + CGreaterEqual + CInverse + CLess + CLessEqual + CLoad + CLog + CLog1p + CLogAddExp + CLogicalAnd + CLogicalNot + CLogicalOr + CMatmul + CMaximum + CMinimum + CMultiply + CNegative + CNodeNamer + CNotEqual + CNumberOfElements + CPad + CPartition + CPower + CPrimitive + CPrintFormatter + CQRF + CQuantizedMatmul + CRandomBits + CReduce + CReductionPlan + CRemainder + CReshape + CRound + CScan + CScatter + CSelect + CSigmoid + CSign + CSin + CSinh + CSlice + CSliceUpdate + CSoftmax + CSort + CSplit + CSqrt + CSquare + CStopGradient + CStream + CStreamContext + CSubtract + CSVD + CTan + CTanh + CTranspose + CTypeToDtype + CUnaryPrimitive + CUniform  Nsteel  CAccumHelper  CBlockLoader diff --git a/docs/build/html/backend_2metal_2kernels_2bf16_8h_source.html b/docs/build/html/backend_2metal_2kernels_2bf16_8h_source.html index 83ebc44f4..a266656b8 100644 --- a/docs/build/html/backend_2metal_2kernels_2bf16_8h_source.html +++ b/docs/build/html/backend_2metal_2kernels_2bf16_8h_source.html @@ -91,7 +91,7 @@ $(function() { codefold.init(0); });
  6
  7  using namespace metal;
  8
- 9  #if defined(__HAVE_BFLOAT__)
+ 9  #if defined METAL_3_1 || (__METAL_VERSION__ >= 310)
 10
 11  typedef bfloat bfloat16_t;
 12
@@ -445,7 +445,7 @@
 312
 313  #pragma METAL internals : disable
 314
-315  #endif // defined(__HAVE_BFLOAT__)
+315  #endif
 316
 317  #include "mlx/backend/metal/kernels/bf16_math.h"
constexpr METAL_FUNC uint16_t float_to_bfloat_bits(float x)
Definition bf16.h:19
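The guard that changed above selects between the hardware bfloat type and the software emulation defined earlier in bf16.h. A minimal sketch of the pattern, assuming the fallback typedef uses the _MLX_BFloat16 struct (this is an illustration, not a verbatim copy of the header):

.. code-block:: cpp

   // Metal 3.1 (or an explicit METAL_3_1 define) provides a native bfloat
   // type; older Metal versions fall back to the bit-level emulation whose
   // helpers (e.g. float_to_bfloat_bits) appear above.
   #if defined METAL_3_1 || (__METAL_VERSION__ >= 310)
   typedef bfloat bfloat16_t;                // hardware bfloat16
   #else
   typedef struct _MLX_BFloat16 bfloat16_t;  // software emulation (assumed name)
   #endif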
diff --git a/docs/build/html/backend_2metal_2kernels_2steel_2gemm_2transforms_8h_source.html b/docs/build/html/backend_2metal_2kernels_2steel_2gemm_2transforms_8h_source.html index f9ed6a5f8..13d88e7f2 100644 --- a/docs/build/html/backend_2metal_2kernels_2steel_2gemm_2transforms_8h_source.html +++ b/docs/build/html/backend_2metal_2kernels_2steel_2gemm_2transforms_8h_source.html @@ -117,69 +117,83 @@ $(function() { codefold.init(0); });
(Doxygen source view of mlx/backend/metal/kernels/steel/gemm/transforms.h as rendered after this patch; the unary apply overloads are new and the remaining line anchors shift accordingly. Tooltip markup omitted.)
 27   TransformAdd(const float, const float) {}
 28
 29   static METAL_FUNC OutT apply(InT x) {
 30     return static_cast<OutT>(x);
 31   }
 32
 33   static METAL_FUNC OutT apply(InT x, OutT c) {
 34     return static_cast<OutT>(x) + c;
 35   }
 36 };
 37
 38 template <typename OutT, typename InT>
 39 struct TransformAxpby {
 40   const float alpha;
 41   const float beta;
 42
 43   TransformAxpby(const float alpha_, const float beta_)
 44       : alpha(alpha_), beta(beta_) {}
 45
 46   static METAL_FUNC OutT apply(InT x) {
 47     return static_cast<OutT>(x);
 48   }
 49
 50   METAL_FUNC OutT apply(InT x, OutT c) const {
 51     return static_cast<OutT>(x * alpha + (beta * c));
 52   }
 53 };
 54
 55 template <typename T>
 56 struct AccumHelper {
 57   typedef float accum_type;
 58 };
 59
 60 struct BlockSwizzle {
 61   static METAL_FUNC int2
 62   swizzle(uint3 tid [[threadgroup_position_in_grid]], const int swizzle_log) {
 63     const int tid_x = (tid.x) >> swizzle_log;
 64     const int tid_y =
 65         ((tid.y) << swizzle_log) + ((tid.x) & ((1 << swizzle_log) - 1));
 66     return int2(tid_x, tid_y);
 67   }
 68 };
 69
 70 } // namespace steel
 71 } // namespace mlx
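The new unary apply(InT x) overloads let the same epilogue type be used whether or not a C operand is fused into the GEMM. A rough illustration of the call pattern (plain C++; write_tile and its arguments are invented for this sketch and are not the actual steel kernel code):

.. code-block:: cpp

   // Epilogue is meant to be something like TransformAdd<OutT, InT> or
   // TransformAxpby<OutT, InT> from the listing above.
   template <typename OutT, typename InT, typename Epilogue>
   void write_tile(const InT* acc, const OutT* c, OutT* out, int n, bool has_c) {
     Epilogue epilogue(/* alpha = */ 1.0f, /* beta = */ 1.0f);
     for (int i = 0; i < n; ++i) {
       // Binary overload when a C tile is fused, unary overload otherwise.
       out[i] = has_c ? epilogue.apply(acc[i], c[i]) : epilogue.apply(acc[i]);
     }
   }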
diff --git a/docs/build/html/bf16__math_8h_source.html b/docs/build/html/bf16__math_8h_source.html index 217403ab0..f8c7bfe2d 100644 --- a/docs/build/html/bf16__math_8h_source.html +++ b/docs/build/html/bf16__math_8h_source.html @@ -458,7 +458,7 @@ $(function() { codefold.init(0); });
 370  }
 371
-372  #if defined(__HAVE_BFLOAT__)
+372  #if defined METAL_3_1 || (__METAL_VERSION__ >= 310)
 373
 374  #define bfloat16_to_uint16(x) as_type<uint16_t>(x)
375#define uint16_to_bfloat16(x) as_type<bfloat16_t>(x)
diff --git a/docs/build/html/classes.html b/docs/build/html/classes.html index e2a481f31..be7b8f1de 100644 --- a/docs/build/html/classes.html +++ b/docs/build/html/classes.html @@ -81,10 +81,10 @@ $(function() {
Abs
Abs (mlx::core)
Abs (mlx::core::detail)
AccumHelper (mlx::steel)
Add
Add (mlx::core)
Add (mlx::core::detail)
add_vec (pocketfft::detail)
add_vec< cmplx< T > > (pocketfft::detail)
AddMM (mlx::core)
aligned_allocator (pocketfft::detail::threading)
Allocator (mlx::core::allocator)
And
Arange (mlx::core)
ArcCos
ArcCos (mlx::core)
ArcCos (mlx::core::detail)
ArcCosh
ArcCosh (mlx::core)
ArcCosh (mlx::core::detail)
ArcSin
ArcSin (mlx::core)
ArcSin (mlx::core::detail)
ArcSinh
ArcSinh (mlx::core)
ArcSinh (mlx::core::detail)
ArcTan
ArcTan (mlx::core)
ArcTan (mlx::core::detail)
ArcTan2
ArcTan2 (mlx::core)
ArcTan2 (mlx::core::detail)
ArcTanh
ArcTanh (mlx::core)
ArcTanh (mlx::core::detail)
ArgPartition (mlx::core)
ArgReduce (mlx::core)
ArgSort (mlx::core)
arr (pocketfft::detail)
arr_info (pocketfft::detail)
array (mlx::core)
array::ArrayIterator (mlx::core)
AsStrided (mlx::core)
AsType (mlx::core)
B
-
_MLX_BFloat16::bits_to_bfloat_struct
BitwiseAnd
BitwiseAnd (mlx::core::detail)
BitwiseBinary (mlx::core)
BitwiseOr
BitwiseOr (mlx::core::detail)
BitwiseXor
BitwiseXor (mlx::core::detail)
BlockLoader (mlx::steel)
BlockMaskedMM (mlx::core)
BlockMMA (mlx::steel)
BlockSparseMM (mlx::core)
BlockSwizzle (mlx::steel)
bool4_or_uint
Broadcast (mlx::core)
Buffer (mlx::core::allocator)
+
_MLX_BFloat16::bits_to_bfloat_struct
BitwiseAnd
BitwiseAnd (mlx::core::detail)
BitwiseBinary (mlx::core)
BitwiseOr
BitwiseOr (mlx::core::detail)
BitwiseXor
BitwiseXor (mlx::core::detail)
BlockLoader (mlx::steel)
BlockMaskedMM (mlx::core)
BlockMMA (mlx::steel)
BlockSparseMM (mlx::core)
BlockSparseQMM (mlx::core)
BlockSwizzle (mlx::steel)
bool4_or_uint
Broadcast (mlx::core)
Buffer (mlx::core::allocator)
C
-
Ceil
Ceil (mlx::core)
Ceil (mlx::core::detail)
cfftp (pocketfft::detail)
ChannelHelper (mlx::steel)
ChannelHelper< 1 > (mlx::steel)
ChannelHelper< 2 > (mlx::steel)
ChannelHelper< 3 > (mlx::steel)
ChannelHelper< 4 > (mlx::steel)
cmplx (pocketfft::detail)
cndarr (pocketfft::detail)
CommandEncoder (mlx::core::metal)
CommonAllocator (mlx::core::allocator)
Compiled (mlx::core)
complex128_t (mlx::core)
complex64_t
complex64_t (mlx::core)
Concatenate (mlx::core)
concurrent_queue (pocketfft::detail::threading)
CommandEncoder::ConcurrentContext (mlx::core::metal)
Conjugate
Conjugate (mlx::core)
Conjugate (mlx::core::detail)
Conv2DGeneralBaseInfo (mlx::steel)
Conv2DGeneralJumpParams (mlx::steel)
Conv2DInputBlockLoaderGeneral (mlx::steel)
Conv2DInputBlockLoaderLargeFilter (mlx::steel)
Conv2DInputBlockLoaderSmallChannels (mlx::steel)
Conv2DInputBlockLoaderSmallFilter (mlx::steel)
Conv2DWeightBlockLoader (mlx::steel)
Conv2DWeightBlockLoaderGeneral (mlx::steel)
Conv2DWeightBlockLoaderSmallChannels (mlx::steel)
Convolution (mlx::core)
Copy (mlx::core)
Cos
Cos (mlx::core)
Cos (mlx::core::detail)
Cosh
Cosh (mlx::core)
Cosh (mlx::core::detail)
Custom (mlx::core::fast)
CustomVJP (mlx::core)
+
Ceil
Ceil (mlx::core)
Ceil (mlx::core::detail)
cfftp (pocketfft::detail)
ChannelHelper (mlx::steel)
ChannelHelper< 1 > (mlx::steel)
ChannelHelper< 2 > (mlx::steel)
ChannelHelper< 3 > (mlx::steel)
ChannelHelper< 4 > (mlx::steel)
Cholesky (mlx::core)
cmplx (pocketfft::detail)
cndarr (pocketfft::detail)
CommandEncoder (mlx::core::metal)
CommonAllocator (mlx::core::allocator)
Compiled (mlx::core)
complex128_t (mlx::core)
complex64_t
complex64_t (mlx::core)
Concatenate (mlx::core)
concurrent_queue (pocketfft::detail::threading)
CommandEncoder::ConcurrentContext (mlx::core::metal)
Conjugate
Conjugate (mlx::core)
Conjugate (mlx::core::detail)
Conv2DGeneralBaseInfo (mlx::steel)
Conv2DGeneralJumpParams (mlx::steel)
Conv2DInputBlockLoaderGeneral (mlx::steel)
Conv2DInputBlockLoaderLargeFilter (mlx::steel)
Conv2DInputBlockLoaderSmallChannels (mlx::steel)
Conv2DInputBlockLoaderSmallFilter (mlx::steel)
Conv2DWeightBlockLoader (mlx::steel)
Conv2DWeightBlockLoaderGeneral (mlx::steel)
Conv2DWeightBlockLoaderSmallChannels (mlx::steel)
Convolution (mlx::core)
Copy (mlx::core)
Cos
Cos (mlx::core)
Cos (mlx::core::detail)
Cosh
Cosh (mlx::core)
Cosh (mlx::core::detail)
Custom (mlx::core::fast)
CustomVJP (mlx::core)
D
array::Data (mlx::core)
Depends (mlx::core)
Device (mlx::core)
Device (mlx::core::metal)
Divide
Divide (mlx::core::detail)
Divide (mlx::core)
DivMod (mlx::core)
Dtype (mlx::core)
diff --git a/docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m-members.html b/docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m-members.html new file mode 100644 index 000000000..342d7880e --- /dev/null +++ b/docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m-members.html @@ -0,0 +1,115 @@ + + + + + + + +MLX: Member List + + + + + + + + + + + +
+
mlx::core::BlockSparseQMM Member List
+
+
+ +

This is the complete list of members for mlx::core::BlockSparseQMM, including all inherited members.

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
BlockSparseQMM(Stream stream, int group_size, int bits, bool transpose) | mlx::core::BlockSparseQMM | inline explicit
device() | mlx::core::Primitive | inline
eval_cpu(const std::vector< array > &inputs, array &out) override | mlx::core::BlockSparseQMM | virtual
mlx::core::UnaryPrimitive::eval_cpu(const std::vector< array > &inputs, std::vector< array > &outputs) override | mlx::core::UnaryPrimitive | inline virtual
eval_gpu(const std::vector< array > &inputs, array &out) override | mlx::core::BlockSparseQMM | virtual
mlx::core::UnaryPrimitive::eval_gpu(const std::vector< array > &inputs, std::vector< array > &outputs) override | mlx::core::UnaryPrimitive | inline virtual
is_equivalent(const Primitive &other) const override | mlx::core::BlockSparseQMM | virtual
jvp(const std::vector< array > &primals, const std::vector< array > &tangents, const std::vector< int > &argnums) override | mlx::core::BlockSparseQMM | virtual
operator=(const UnaryPrimitive &other)=delete | mlx::core::UnaryPrimitive |
operator=(UnaryPrimitive &&other)=delete | mlx::core::UnaryPrimitive |
mlx::core::Primitive::operator=(const Primitive &other)=delete | mlx::core::Primitive |
mlx::core::Primitive::operator=(Primitive &&other)=delete | mlx::core::Primitive |
output_shapes(const std::vector< array > &inputs) | mlx::core::Primitive | virtual
Primitive(Stream stream) | mlx::core::Primitive | inline explicit
Primitive(const Primitive &other)=delete | mlx::core::Primitive |
Primitive(Primitive &&other)=delete | mlx::core::Primitive |
print(std::ostream &os) override | mlx::core::BlockSparseQMM | inline virtual
stream() | mlx::core::Primitive | inline
UnaryPrimitive(Stream stream) | mlx::core::UnaryPrimitive | inline explicit
UnaryPrimitive(const UnaryPrimitive &other)=delete | mlx::core::UnaryPrimitive |
UnaryPrimitive(UnaryPrimitive &&other)=delete | mlx::core::UnaryPrimitive |
vjp(const std::vector< array > &primals, const std::vector< array > &cotangents, const std::vector< int > &argnums, const std::vector< array > &outputs) override | mlx::core::BlockSparseQMM | virtual
vmap(const std::vector< array > &inputs, const std::vector< int > &axes) override | mlx::core::BlockSparseQMM | virtual
~Primitive()=default | mlx::core::Primitive | virtual
~UnaryPrimitive()=default | mlx::core::UnaryPrimitive | virtual
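Read back as a declaration, the members above correspond to a class shaped roughly like the following. This is a sketch reconstructed from the member list, not the verbatim primitives.h code; the private fields and the print body are assumptions:

.. code-block:: cpp

   class BlockSparseQMM : public UnaryPrimitive {
    public:
     explicit BlockSparseQMM(Stream stream, int group_size, int bits, bool transpose)
         : UnaryPrimitive(stream),
           group_size_(group_size),
           bits_(bits),
           transpose_(transpose) {}

     void eval_cpu(const std::vector<array>& inputs, array& out) override;
     void eval_gpu(const std::vector<array>& inputs, array& out) override;

     std::pair<std::vector<array>, std::vector<int>> vmap(
         const std::vector<array>& inputs,
         const std::vector<int>& axes) override;
     std::vector<array> jvp(
         const std::vector<array>& primals,
         const std::vector<array>& tangents,
         const std::vector<int>& argnums) override;
     std::vector<array> vjp(
         const std::vector<array>& primals,
         const std::vector<array>& cotangents,
         const std::vector<int>& argnums,
         const std::vector<array>& outputs) override;

     bool is_equivalent(const Primitive& other) const override;
     void print(std::ostream& os) override { os << "BlockSparseQMM"; }

    private:
     int group_size_;   // assumed storage for the constructor arguments
     int bits_;
     bool transpose_;
   };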
+ + + + diff --git a/docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m.html b/docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m.html new file mode 100644 index 000000000..1cbeda629 --- /dev/null +++ b/docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m.html @@ -0,0 +1,447 @@ + + + + + + + +MLX: mlx::core::BlockSparseQMM Class Reference + + + + + + + + + + + +
+ +
mlx::core::BlockSparseQMM Class Reference
+
+
+ +

#include <primitives.h>

+
+Inheritance diagram for mlx::core::BlockSparseQMM:
+
+
+ + +mlx::core::UnaryPrimitive +mlx::core::Primitive + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+Public Member Functions

 BlockSparseQMM (Stream stream, int group_size, int bits, bool transpose)
 
void eval_cpu (const std::vector< array > &inputs, array &out) override
 
void eval_gpu (const std::vector< array > &inputs, array &out) override
 
virtual std::pair< std::vector< array >, std::vector< int > > vmap (const std::vector< array > &inputs, const std::vector< int > &axes) override
 The primitive must know how to vectorize itself across the given axes.
 
std::vector< arrayjvp (const std::vector< array > &primals, const std::vector< array > &tangents, const std::vector< int > &argnums) override
 The Jacobian-vector product.
 
std::vector< arrayvjp (const std::vector< array > &primals, const std::vector< array > &cotangents, const std::vector< int > &argnums, const std::vector< array > &outputs) override
 The vector-Jacobian product.
 
void print (std::ostream &os) override
 Print the primitive.
 
bool is_equivalent (const Primitive &other) const override
 Equivalence check defaults to false unless overridden by the primitive.
 
- Public Member Functions inherited from mlx::core::UnaryPrimitive
 UnaryPrimitive (Stream stream)
 An abstract base class for a primitive with a single output.
 
void eval_cpu (const std::vector< array > &inputs, std::vector< array > &outputs) override
 A primitive must know how to evaluate itself on the CPU/GPU for the given inputs and populate the output arrays.
 
void eval_gpu (const std::vector< array > &inputs, std::vector< array > &outputs) override
 
virtual ~UnaryPrimitive ()=default
 
 UnaryPrimitive (const UnaryPrimitive &other)=delete
 
 UnaryPrimitive (UnaryPrimitive &&other)=delete
 
UnaryPrimitiveoperator= (const UnaryPrimitive &other)=delete
 
UnaryPrimitiveoperator= (UnaryPrimitive &&other)=delete
 
- Public Member Functions inherited from mlx::core::Primitive
 Primitive (Stream stream)
 
const Devicedevice ()
 The device the primitive will run on.
 
const Streamstream ()
 The stream the primitive will run on.
 
virtual std::vector< std::vector< int > > output_shapes (const std::vector< array > &inputs)
 Get the output shapes of the primitive.
 
virtual ~Primitive ()=default
 
 Primitive (const Primitive &other)=delete
 
 Primitive (Primitive &&other)=delete
 
Primitiveoperator= (const Primitive &other)=delete
 
Primitiveoperator= (Primitive &&other)=delete
 
+

Constructor & Destructor Documentation

+ +

◆ BlockSparseQMM()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + +
mlx::core::BlockSparseQMM::BlockSparseQMM (Stream stream,
int group_size,
int bits,
bool transpose )
+
+inlineexplicit
+
+ +
+
+

Member Function Documentation

+ +

◆ eval_cpu()

+ +
+
+ + + + + +
+ + + + + + + + + + + +
void mlx::core::BlockSparseQMM::eval_cpu (const std::vector< array > & inputs,
array & out )
+
+overridevirtual
+
+ +

Implements mlx::core::UnaryPrimitive.

+ +
+
+ +

◆ eval_gpu()

+ +
+
+ + + + + +
+ + + + + + + + + + + +
void mlx::core::BlockSparseQMM::eval_gpu (const std::vector< array > & inputs,
array & out )
+
+overridevirtual
+
+ +

Implements mlx::core::UnaryPrimitive.

+ +
+
+ +

◆ is_equivalent()

+ +
+
+ + + + + +
+ + + + + + + +
bool mlx::core::BlockSparseQMM::is_equivalent (const Primitive & other) const
+
+overridevirtual
+
+ +

Equivalence check defaults to false unless overridden by the primitive.

+ +

Reimplemented from mlx::core::Primitive.
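A primitive with constructor parameters typically overrides this check by comparing those parameters. A hedged sketch for this class (it assumes the group_size_/bits_/transpose_ fields from the constructor; it is not the actual MLX implementation):

.. code-block:: cpp

   bool BlockSparseQMM::is_equivalent(const Primitive& other) const {
     // The caller only compares primitives of the same type, so the direct
     // cast below is a sketch-level simplification.
     const auto& o = static_cast<const BlockSparseQMM&>(other);
     return group_size_ == o.group_size_ && bits_ == o.bits_ &&
         transpose_ == o.transpose_;
   }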

+ +
+
+ +

◆ jvp()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + +
std::vector< array > mlx::core::BlockSparseQMM::jvp (const std::vector< array > & primals,
const std::vector< array > & tangents,
const std::vector< int > & argnums )
+
+overridevirtual
+
+ +

The Jacobian-vector product.

+ +

Reimplemented from mlx::core::Primitive.

+ +
+
+ +

◆ print()

+ +
+
+ + + + + +
+ + + + + + + +
void mlx::core::BlockSparseQMM::print (std::ostream & os)
+
+inlineoverridevirtual
+
+ +

Print the primitive.

+ +

Implements mlx::core::Primitive.

+ +
+
◆ vjp()

std::vector< array > mlx::core::BlockSparseQMM::vjp (const std::vector< array > & primals, const std::vector< array > & cotangents, const std::vector< int > & argnums, const std::vector< array > & outputs)

override, virtual

The vector-Jacobian product.

Reimplemented from mlx::core::Primitive.
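A vjp override returns one cotangent per entry in argnums, in the same order. The sketch below shows only that calling convention, with a pass-through "gradient" purely for illustration; it is not the actual BlockSparseQMM gradient. It continues the hypothetical MyQMM sketch from above.

    namespace mlx::core {

    // Illustrative calling convention for vjp: one returned array per
    // requested argument index.
    std::vector<array> MyQMM::vjp(
        const std::vector<array>& primals,
        const std::vector<array>& cotangents,
        const std::vector<int>& argnums,
        const std::vector<array>& outputs) {
      std::vector<array> vjps;
      vjps.reserve(argnums.size());
      for (size_t i = 0; i < argnums.size(); ++i) {
        // A real implementation would branch on argnums[i] and compute the
        // gradient with respect to that primal; here we just pass the single
        // output's cotangent through.
        vjps.push_back(cotangents[0]);
      }
      return vjps;
    }

    } // namespace mlx::core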
◆ vmap()

virtual std::pair< std::vector< array >, std::vector< int > > mlx::core::BlockSparseQMM::vmap (const std::vector< array > & inputs, const std::vector< int > & axes)

override, virtual

The primitive must know how to vectorize itself across the given axes.

The output is a pair containing the output arrays representing the vectorized computation and the axes which correspond to the vectorized dimensions of each output.

Reimplemented from mlx::core::Primitive.
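The pair returned by vmap is (vectorized outputs, output batch axes). A toy override for the hypothetical MyQMM sketch, showing only the shape of that contract rather than a real batched quantized matmul:

    namespace mlx::core {

    // Illustrative only: return the batched outputs together with the axis of
    // each output that carries the batch dimension.
    std::pair<std::vector<array>, std::vector<int>> MyQMM::vmap(
        const std::vector<array>& inputs,
        const std::vector<int>& axes) {
      std::vector<array> outputs = {inputs[0]};  // placeholder "result"
      std::vector<int> out_axes = {axes[0]};     // batch axis of that output
      return {outputs, out_axes};
    }

    } // namespace mlx::core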
The documentation for this class was generated from the following file: primitives.h

diff --git a/docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m.png b/docs/build/html/classmlx_1_1core_1_1_block_sparse_q_m_m.png
new file mode 100644
index 0000000000000000000000000000000000000000..3c033d8f85ff4fdaeda5dbdd9234fd18efac0e02
GIT binary patch
literal 981
(binary PNG data for the inheritance diagram omitted)

diff --git a/docs/build/html/classmlx_1_1core_1_1_cholesky-members.html b/docs/build/html/classmlx_1_1core_1_1_cholesky-members.html
new file mode 100644
mlx::core::Cholesky Member List
This is the complete list of members for mlx::core::Cholesky, including all inherited members.

Member    Defined in    Attributes
Cholesky(Stream stream, bool upper)    mlx::core::Cholesky    [inline, explicit]
device()    mlx::core::Primitive    [inline]
eval_cpu(const std::vector< array > &inputs, array &out) override    mlx::core::Cholesky    [virtual]
mlx::core::UnaryPrimitive::eval_cpu(const std::vector< array > &inputs, std::vector< array > &outputs) override    mlx::core::UnaryPrimitive    [inline, virtual]
eval_gpu(const std::vector< array > &inputs, array &out) override    mlx::core::Cholesky    [virtual]
mlx::core::UnaryPrimitive::eval_gpu(const std::vector< array > &inputs, std::vector< array > &outputs) override    mlx::core::UnaryPrimitive    [inline, virtual]
is_equivalent(const Primitive &other) const    mlx::core::Primitive    [inline, virtual]
jvp(const std::vector< array > &primals, const std::vector< array > &tangents, const std::vector< int > &argnums)    mlx::core::Primitive    [virtual]
operator=(const UnaryPrimitive &other)=delete    mlx::core::UnaryPrimitive
operator=(UnaryPrimitive &&other)=delete    mlx::core::UnaryPrimitive
mlx::core::Primitive::operator=(const Primitive &other)=delete    mlx::core::Primitive
mlx::core::Primitive::operator=(Primitive &&other)=delete    mlx::core::Primitive
output_shapes(const std::vector< array > &inputs)    mlx::core::Primitive    [virtual]
Primitive(Stream stream)    mlx::core::Primitive    [inline, explicit]
Primitive(const Primitive &other)=delete    mlx::core::Primitive
Primitive(Primitive &&other)=delete    mlx::core::Primitive
print(std::ostream &os) override    mlx::core::Cholesky    [inline, virtual]
stream()    mlx::core::Primitive    [inline]
UnaryPrimitive(Stream stream)    mlx::core::UnaryPrimitive    [inline, explicit]
UnaryPrimitive(const UnaryPrimitive &other)=delete    mlx::core::UnaryPrimitive
UnaryPrimitive(UnaryPrimitive &&other)=delete    mlx::core::UnaryPrimitive
vjp(const std::vector< array > &primals, const std::vector< array > &cotangents, const std::vector< int > &argnums, const std::vector< array > &outputs)    mlx::core::Primitive    [virtual]
vmap(const std::vector< array > &inputs, const std::vector< int > &axes) override    mlx::core::Cholesky    [virtual]
~Primitive()=default    mlx::core::Primitive    [virtual]
~UnaryPrimitive()=default    mlx::core::UnaryPrimitive    [virtual]
diff --git a/docs/build/html/classmlx_1_1core_1_1_cholesky.html b/docs/build/html/classmlx_1_1core_1_1_cholesky.html
new file mode 100644
index 000000000..ece661cf6
--- /dev/null
+++ b/docs/build/html/classmlx_1_1core_1_1_cholesky.html
@@ -0,0 +1,327 @@
mlx::core::Cholesky Class Reference

#include <primitives.h>

Inheritance diagram for mlx::core::Cholesky:
mlx::core::Primitive <- mlx::core::UnaryPrimitive <- mlx::core::Cholesky

Public Member Functions

 Cholesky (Stream stream, bool upper)
 
void eval_cpu (const std::vector< array > &inputs, array &out) override
 
void eval_gpu (const std::vector< array > &inputs, array &out) override
 
virtual std::pair< std::vector< array >, std::vector< int > > vmap (const std::vector< array > &inputs, const std::vector< int > &axes) override
 The primitive must know how to vectorize itself across the given axes.
 
void print (std::ostream &os) override
 Print the primitive.
 
- Public Member Functions inherited from mlx::core::UnaryPrimitive
 UnaryPrimitive (Stream stream)
 An abstract base class for a primitive with a single output.
 
void eval_cpu (const std::vector< array > &inputs, std::vector< array > &outputs) override
 A primitive must know how to evaluate itself on the CPU/GPU for the given inputs and populate the output arrays.
 
void eval_gpu (const std::vector< array > &inputs, std::vector< array > &outputs) override
 
virtual ~UnaryPrimitive ()=default
 
 UnaryPrimitive (const UnaryPrimitive &other)=delete
 
 UnaryPrimitive (UnaryPrimitive &&other)=delete
 
UnaryPrimitive & operator= (const UnaryPrimitive &other)=delete
 
UnaryPrimitive & operator= (UnaryPrimitive &&other)=delete
 
- Public Member Functions inherited from mlx::core::Primitive
 Primitive (Stream stream)
 
const Device & device ()
 The device the primitive will run on.
 
const Stream & stream ()
 The stream the primitive will run on.
 
virtual std::vector< array > jvp (const std::vector< array > &primals, const std::vector< array > &tangents, const std::vector< int > &argnums)
 The Jacobian-vector product.
 
virtual std::vector< array > vjp (const std::vector< array > &primals, const std::vector< array > &cotangents, const std::vector< int > &argnums, const std::vector< array > &outputs)
 The vector-Jacobian product.
 
virtual bool is_equivalent (const Primitive &other) const
 Equivalence check defaults to false unless overridden by the primitive.
 
virtual std::vector< std::vector< int > > output_shapes (const std::vector< array > &inputs)
 Get the output shapes of the primitive.
 
virtual ~Primitive ()=default
 
 Primitive (const Primitive &other)=delete
 
 Primitive (Primitive &&other)=delete
 
Primitive & operator= (const Primitive &other)=delete
 
Primitive & operator= (Primitive &&other)=delete
 
Constructor & Destructor Documentation

◆ Cholesky()

mlx::core::Cholesky::Cholesky (Stream stream, bool upper)

inline, explicit
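Users normally reach this primitive through the linalg::cholesky op rather than constructing Cholesky directly. A small usage sketch follows; the exact op signature (the upper flag and the optional stream argument) is assumed here rather than taken from this page, so check mlx/linalg.h in the MLX sources.

    // Usage sketch: the op builds the graph node; evaluation then dispatches
    // to Cholesky::eval_cpu / eval_gpu documented below.
    #include "mlx/mlx.h"

    int main() {
      using namespace mlx::core;
      // A 2x2 symmetric positive-definite matrix.
      array a = reshape(array({4.0f, 2.0f, 2.0f, 3.0f}), {2, 2});
      // Lower-triangular factor L with a == L @ L.T (upper assumed to default
      // to false).
      array l = linalg::cholesky(a);
      eval({l});
      return 0;
    }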

Member Function Documentation

◆ eval_cpu()

void mlx::core::Cholesky::eval_cpu (const std::vector< array > & inputs, array & out)

override, virtual

Implements mlx::core::UnaryPrimitive.
◆ eval_gpu()

void mlx::core::Cholesky::eval_gpu (const std::vector< array > & inputs, array & out)

override, virtual

Implements mlx::core::UnaryPrimitive.
◆ print()

void mlx::core::Cholesky::print (std::ostream & os)

inline, override, virtual

Print the primitive.

Implements mlx::core::Primitive.
◆ vmap()

virtual std::pair< std::vector< array >, std::vector< int > > mlx::core::Cholesky::vmap (const std::vector< array > & inputs, const std::vector< int > & axes)

override, virtual

The primitive must know how to vectorize itself across the given axes.

The output is a pair containing the output arrays representing the vectorized computation and the axes which correspond to the vectorized dimensions of each output.

Reimplemented from mlx::core::Primitive.

The documentation for this class was generated from the following file: primitives.h
diff --git a/docs/build/html/classmlx_1_1core_1_1_cholesky.png b/docs/build/html/classmlx_1_1core_1_1_cholesky.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0f57d4b8bb439f6c553edaf191c18964559e36d
GIT binary patch
literal 909
(binary PNG data for the inheritance diagram omitted)

diff --git a/docs/build/html/classmlx_1_1core_1_1_primitive.html b/docs/build/html/classmlx_1_1core_1_1_primitive.html
index 12c2e073c..aa701ef61 100644
--- a/docs/build/html/classmlx_1_1core_1_1_primitive.html
+++ b/docs/build/html/classmlx_1_1core_1_1_primitive.html
@@ -364,7 +364,7 @@ Public Member Functions

Equivalence check defaults to false unless overridden by the primitive.

-

Reimplemented in mlx::core::fast::ScaledDotProductAttention, mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, and mlx::core::Transpose.

+

Reimplemented in mlx::core::fast::ScaledDotProductAttention, mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::BlockSparseQMM, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, and mlx::core::Transpose.

@@ -402,7 +402,7 @@ Public Member Functions

The Jacobian-vector product.

-

Reimplemented in mlx::core::fast::Custom, mlx::core::Abs, mlx::core::Add, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::AsType, mlx::core::AsStrided, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, and mlx::core::Transpose.

+

Reimplemented in mlx::core::fast::Custom, mlx::core::Abs, mlx::core::Add, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::AsType, mlx::core::AsStrided, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::BlockSparseQMM, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, and mlx::core::Transpose.

@@ -511,7 +511,7 @@ Public Member Functions

Print the primitive.

-

Implemented in mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, mlx::core::Transpose, mlx::core::QRF, mlx::core::SVD, and mlx::core::Inverse.

+

Implemented in mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::BlockSparseQMM, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, mlx::core::Transpose, mlx::core::QRF, mlx::core::SVD, mlx::core::Inverse, and mlx::core::Cholesky.

@@ -581,7 +581,7 @@ Public Member Functions

The vector-Jacobian product.

-

Reimplemented in mlx::core::CustomVJP, mlx::core::Depends, mlx::core::fast::Custom, mlx::core::fast::RMSNorm, mlx::core::fast::LayerNorm, mlx::core::fast::RoPE, mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, and mlx::core::Transpose.

+

Reimplemented in mlx::core::CustomVJP, mlx::core::Depends, mlx::core::fast::Custom, mlx::core::fast::RMSNorm, mlx::core::fast::LayerNorm, mlx::core::fast::RoPE, mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::BlockSparseQMM, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, and mlx::core::Transpose.

@@ -615,7 +615,7 @@ Public Member Functions

The primitive must know how to vectorize itself across the given axes.

The output is a pair containing the output arrays representing the vectorized computation and the axes which corresponds to the vectorized dimensions of each output.

-

Reimplemented in mlx::core::fast::Custom, mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, mlx::core::Transpose, mlx::core::SVD, and mlx::core::Inverse.

+

Reimplemented in mlx::core::fast::Custom, mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::BlockSparseQMM, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, mlx::core::Transpose, mlx::core::SVD, mlx::core::Inverse, and mlx::core::Cholesky.

diff --git a/docs/build/html/classmlx_1_1core_1_1_unary_primitive.html b/docs/build/html/classmlx_1_1core_1_1_unary_primitive.html
index 5644a664e..0e244a441 100644
--- a/docs/build/html/classmlx_1_1core_1_1_unary_primitive.html
+++ b/docs/build/html/classmlx_1_1core_1_1_unary_primitive.html
@@ -109,71 +109,73 @@ Inheritance diagram for mlx::core::UnaryPrimitive:
(inheritance-diagram image map re-emitted; the derived-class list now also includes mlx::core::BlockSparseQMM and mlx::core::Cholesky)
@@ -370,7 +372,7 @@ Public Member Functions
-

Implemented in mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, mlx::core::Transpose, and mlx::core::Inverse.

+

Implemented in mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::BlockSparseQMM, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, mlx::core::Transpose, mlx::core::Cholesky, and mlx::core::Inverse.

@@ -435,7 +437,7 @@ Public Member Functions
-

Implemented in mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, mlx::core::Transpose, and mlx::core::Inverse.

+

Implemented in mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan, mlx::core::ArcTan2, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::AsStrided, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::Select, mlx::core::Remainder, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log, mlx::core::Log1p, mlx::core::LogicalNot, mlx::core::LogicalAnd, mlx::core::LogicalOr, mlx::core::LogAddExp, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::QuantizedMatmul, mlx::core::BlockSparseQMM, mlx::core::RandomBits, mlx::core::Reshape, mlx::core::Reduce, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Square, mlx::core::Sqrt, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Uniform, mlx::core::Transpose, mlx::core::Cholesky, and mlx::core::Inverse.

diff --git a/docs/build/html/classmlx_1_1core_1_1_unary_primitive.png b/docs/build/html/classmlx_1_1core_1_1_unary_primitive.png
index 191d394b31de10715621038ababedb088a3b7120..162024de6b07d41c7496e57c09ced4339db4d3b3 100644
GIT binary patch
literal 32408
(binary PNG data for the regenerated inheritance diagram omitted)
z5kkK9^hF`|2Lwkifes1LVOU8u7Sh&5pk@+PeFr*5(yg2ub$ralFq`J+69r!Zaibm z$jpJR6QB>pJg;ro+N$L%wQr|W-zTh zgQ;O9;%HIeo<03Pp;J*0dO(Z95BvbWL&xFXG~?kNpv5l+^ornp&6D9HmY0mR^9^h| z628`m9c!Ml@IPRtRdh4xMhmxgqfyNhMam{7ihv>Ujb)96z#pW)W(fhxh_P=UkM@^NWLpT*F`$B?bM za^|<%c#!U#wj?SY*)zZ4?E>9@;pY3FXvKSlzIC~(!_(I&(Yx1P92Sf6C^A$oR*@a` z;T$QDd5}R%A^?664oLT364gxA9c@tRT>+iDE&AW~O{%-P=}f6PR2?U$93R!L;aBDN4|6tu(HDiFPCUMIl2{ k>_J8Sf4uH<)NVzc_?!2evfIFw*weOb^4(ape$V&+2c?alh5!Hn diff --git a/docs/build/html/compiled_8h_source.html b/docs/build/html/compiled_8h_source.html index 6e37a563e..f5ec537b0 100644 --- a/docs/build/html/compiled_8h_source.html +++ b/docs/build/html/compiled_8h_source.html @@ -168,7 +168,7 @@ $(function() { codefold.init(0); });
Definition primitives.h:525
Definition primitives.h:680
Definition primitives.h:48
-
Definition primitives.h:1919
+
Definition primitives.h:1947
Definition array.h:20
size_t ndim() const
The number of dimensions of the array.
Definition array.h:94
T item()
Get the value from a scalar array.
Definition array.h:489
diff --git a/docs/build/html/cpp/ops.html b/docs/build/html/cpp/ops.html index a43770828..9e4d3eb47 100644 --- a/docs/build/html/cpp/ops.html +++ b/docs/build/html/cpp/ops.html @@ -8,7 +8,7 @@ - Operations — MLX 0.13.0 documentation + Operations — MLX 0.13.1 documentation @@ -36,14 +36,14 @@ - + - + @@ -131,8 +131,8 @@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -246,6 +246,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    @@ -1041,9 +1050,11 @@ document.write(`
  • conv_general()
  • conv1d()
  • conv2d()
  • +
  • conv3d()
  • quantized_matmul()
  • quantize()
  • dequantize()
  • +
  • block_sparse_qmm()
  • tensordot()
  • tensordot()
  • outer()
  • @@ -2520,6 +2531,12 @@ document.write(`

    2D convolution with a filter

    +
    +
    +array conv3d(const array &input, const array &weight, const std::tuple<int, int, int> &stride = {1, 1, 1}, const std::tuple<int, int, int> &padding = {0, 0, 0}, const std::tuple<int, int, int> &dilation = {1, 1, 1}, int groups = 1, StreamOrDevice s = {})#
    +

    3D convolution with a filter
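A minimal usage sketch of the new conv3d op, assuming the channels-last layout used by the other MLX convolutions — input as (N, D, H, W, C_in) and weight as (C_out, kD, kH, kW, C_in); the shapes below are illustrative rather than prescribed:

#include "mlx/mlx.h"

using namespace mlx::core;

int main() {
  // Assumed layouts: input (N, D, H, W, C_in), weight (C_out, kD, kH, kW, C_in).
  auto input = random::normal({1, 16, 16, 16, 4});
  auto weight = random::normal({8, 3, 3, 3, 4});
  // stride, padding, and dilation are per-axis (D, H, W) tuples.
  auto out = conv3d(input, weight, /*stride=*/{1, 1, 1}, /*padding=*/{1, 1, 1});
  out.eval();  // with padding 1 and a 3x3x3 kernel the spatial size is preserved: {1, 16, 16, 16, 8}
  return 0;
}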

    +
    +
    array quantized_matmul(const array &x, const array &w, const array &scales, const array &biases, bool transpose = true, int group_size = 64, int bits = 4, StreamOrDevice s = {})#
    @@ -2538,6 +2555,12 @@ document.write(`

    Dequantize a matrix produced by quantize()
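Since dequantize() inverts quantize() up to quantization error, a small round-trip check is a convenient sanity test. This sketch assumes quantize() returns the packed weights, scales, and biases as a 3-tuple, mirroring the Python API:

#include <iostream>
#include "mlx/mlx.h"

using namespace mlx::core;

int main() {
  auto w = random::normal({128, 256});
  // Pack 4-bit elements with one scale and bias per group of 64 along the last axis.
  auto [w_q, scales, biases] = quantize(w, /*group_size=*/64, /*bits=*/4);
  auto w_hat = dequantize(w_q, scales, biases, /*group_size=*/64, /*bits=*/4);
  // The reconstruction is close to w but not exact.
  std::cout << max(abs(w - w_hat)) << std::endl;
  return 0;
}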

    +
    +
    +array block_sparse_qmm(const array &x, const array &w, const array &scales, const array &biases, std::optional<array> lhs_indices = std::nullopt, std::optional<array> rhs_indices = std::nullopt, bool transpose = true, int group_size = 64, int bits = 4, StreamOrDevice s = {})#
    +

    Compute matrix products with matrix-level gather.
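A hedged sketch of block_sparse_qmm: four candidate weight matrices are quantized, and rhs_indices selects which one multiplies each batch of x. The shapes and the index semantics spelled out in the comments are assumptions based on the signature above, not normative documentation:

#include <optional>
#include <tuple>
#include "mlx/mlx.h"

using namespace mlx::core;

int main() {
  auto x = random::normal({2, 32, 64});    // two batches of activations (..., M, K)
  auto w = random::normal({4 * 128, 64});  // four stacked (N, K) weight matrices

  // Quantize along the last axis, then restore the leading "matrix" dimension
  // on the packed weights, scales, and biases.
  auto parts = quantize(w, /*group_size=*/64, /*bits=*/4);
  auto w_q = reshape(std::get<0>(parts), {4, 128, -1});
  auto scales = reshape(std::get<1>(parts), {4, 128, -1});
  auto biases = reshape(std::get<2>(parts), {4, 128, -1});

  // Pick matrix 0 for the first batch of x and matrix 1 for the second.
  auto rhs_indices = astype(arange(2), uint32);

  auto out = block_sparse_qmm(x, w_q, scales, biases,
                              /*lhs_indices=*/std::nullopt, rhs_indices,
                              /*transpose=*/true, /*group_size=*/64, /*bits=*/4);
  out.eval();  // expected shape: {2, 32, 128}
  return 0;
}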

    +
    +
    array tensordot(const array &a, const array &b, const int axis = 2, StreamOrDevice s = {})#
    @@ -2723,7 +2746,7 @@ document.write(` title="next page">

    next

    -

    Developer Documentation

    +

    Custom Extensions in MLX

    @@ -2996,9 +3019,11 @@ document.write(`
  • conv_general()
  • conv1d()
  • conv2d()
  • +
  • conv3d()
  • quantized_matmul()
  • quantize()
  • dequantize()
  • +
  • block_sparse_qmm()
  • tensordot()
  • tensordot()
  • outer()
  • diff --git a/docs/build/html/dev/extensions.html b/docs/build/html/dev/extensions.html index adbf3b2c6..f801e7199 100644 --- a/docs/build/html/dev/extensions.html +++ b/docs/build/html/dev/extensions.html @@ -8,7 +8,7 @@ - Developer Documentation — MLX 0.13.0 documentation + Custom Extensions in MLX — MLX 0.13.1 documentation @@ -36,7 +36,7 @@ - + @@ -131,8 +131,8 @@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -246,6 +246,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    @@ -777,7 +786,7 @@ document.write(`
    -

    Developer Documentation

    +

    Custom Extensions in MLX

    @@ -822,8 +831,8 @@ document.write(`
    -
    -

    Developer Documentation#

    +
    +

    Custom Extensions in MLX#

    You can extend MLX with custom operations on the CPU or GPU. This guide explains how to do that with a simple example.

    @@ -1280,7 +1289,7 @@ below.

    auto kernel = d.get_kernel(kname.str(), "mlx_ext"); // Prepare to encode kernel - auto compute_encoder = d.get_command_encoder(s.index); + auto& compute_encoder = d.get_command_encoder(s.index); compute_encoder->setComputePipelineState(kernel); // Kernel parameters are registered with buffer indices corresponding to @@ -1289,11 +1298,11 @@ below.

    size_t nelem = out.size(); // Encode input arrays to kernel - set_array_buffer(compute_encoder, x, 0); - set_array_buffer(compute_encoder, y, 1); + compute_encoder.set_input_array(x, 0); + compute_encoder.set_input_array(y, 1); // Encode output arrays to kernel - set_array_buffer(compute_encoder, out, 2); + compute_encoder.set_output_array(out, 2); // Encode alpha and beta compute_encoder->setBytes(&alpha_, sizeof(float), 3); @@ -1317,7 +1326,7 @@ below.

    // Launch the grid with the given number of threads divided among // the given threadgroups - compute_encoder->dispatchThreads(grid_dims, group_dims); + compute_encoder.dispatchThreads(grid_dims, group_dims); }
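The two hunks above move the example from the old free-function helpers to the command encoder's member interface. Read together, the updated encoding sequence looks roughly like the following fragment, where d, s, kernel, x, y, out, alpha_, grid_dims, and group_dims are the variables already defined in the surrounding extension example:

// Fragment only: relies on the surrounding example for d, s, kernel, x, y,
// out, alpha_, grid_dims, and group_dims.
auto& compute_encoder = d.get_command_encoder(s.index);  // now taken by reference
compute_encoder->setComputePipelineState(kernel);

// Inputs and outputs are registered through member methods instead of the
// old set_array_buffer(compute_encoder, ...) helper.
compute_encoder.set_input_array(x, 0);
compute_encoder.set_input_array(y, 1);
compute_encoder.set_output_array(out, 2);

compute_encoder->setBytes(&alpha_, sizeof(float), 3);

// Dispatch is also a member call rather than going through operator->.
compute_encoder.dispatchThreads(grid_dims, group_dims);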
    @@ -1589,7 +1598,7 @@ import the Python package and play with it as you would any other MLX operation. print(f"c shape: {c.shape}") print(f"c dtype: {c.dtype}") -print(f"c correctness: {mx.all(c == 6.0).item()}") +print(f"c correct: {mx.all(c == 6.0).item()}")

    Output:

    diff --git a/docs/build/html/dev/metal_debugger.html b/docs/build/html/dev/metal_debugger.html index 57c07026d..a23b9d821 100644 --- a/docs/build/html/dev/metal_debugger.html +++ b/docs/build/html/dev/metal_debugger.html @@ -8,7 +8,7 @@ - Metal Debugger — MLX 0.13.0 documentation + Metal Debugger — MLX 0.13.1 documentation @@ -36,14 +36,14 @@ - + - + @@ -130,8 +130,8 @@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -245,6 +245,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    @@ -870,7 +879,7 @@ Xcode project using CMake.

    previous

    -

    Developer Documentation

    +

    Custom Extensions in MLX

    diff --git a/docs/build/html/doxygen_crawl.html b/docs/build/html/doxygen_crawl.html index eeeffb925..753520686 100644 --- a/docs/build/html/doxygen_crawl.html +++ b/docs/build/html/doxygen_crawl.html @@ -401,10 +401,14 @@ + + + + diff --git a/docs/build/html/examples/linear_regression.html b/docs/build/html/examples/linear_regression.html index 7ed3901d7..7ce87a327 100644 --- a/docs/build/html/examples/linear_regression.html +++ b/docs/build/html/examples/linear_regression.html @@ -8,7 +8,7 @@ - Linear Regression — MLX 0.13.0 documentation + Linear Regression — MLX 0.13.1 documentation @@ -36,7 +36,7 @@ - + @@ -131,8 +131,8 @@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -246,6 +246,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    diff --git a/docs/build/html/examples/llama-inference.html b/docs/build/html/examples/llama-inference.html index 8234fe5af..ea01e1615 100644 --- a/docs/build/html/examples/llama-inference.html +++ b/docs/build/html/examples/llama-inference.html @@ -8,7 +8,7 @@ - LLM inference — MLX 0.13.0 documentation + LLM inference — MLX 0.13.1 documentation @@ -36,7 +36,7 @@ - + @@ -131,8 +131,8 @@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -246,6 +246,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    diff --git a/docs/build/html/examples/mlp.html b/docs/build/html/examples/mlp.html index b2d096363..43d2faf59 100644 --- a/docs/build/html/examples/mlp.html +++ b/docs/build/html/examples/mlp.html @@ -8,7 +8,7 @@ - Multi-Layer Perceptron — MLX 0.13.0 documentation + Multi-Layer Perceptron — MLX 0.13.1 documentation @@ -36,7 +36,7 @@ - + @@ -131,8 +131,8 @@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -246,6 +246,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    diff --git a/docs/build/html/functions_a.html b/docs/build/html/functions_a.html index 3da3d67a0..9ab5d4d08 100644 --- a/docs/build/html/functions_a.html +++ b/docs/build/html/functions_a.html @@ -90,7 +90,9 @@ $(function() {
  • allocator : mlx::core::allocator::CommonAllocator, mlx::core::metal::MetalAllocator
  • alpha : mlx::steel::GEMMAddMMParams, mlx::steel::TransformAxpby< OutT, InT >
  • And : mlx::core::BitwiseBinary, mlx::core::Reduce
  • -
  • apply() : mlx::steel::TransformAdd< OutT, InT >, mlx::steel::TransformAxpby< OutT, InT >, mlx::steel::TransformNone< OutT, InT >
  • +
  • apply() : mlx::steel::TransformAdd< OutT, InT >, mlx::steel::TransformAxpby< OutT, InT >, mlx::steel::TransformNone< OutT, InT >
  • +
  • apply_epilogue() : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • +
  • apply_epilogue_safe() : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • Arange() : mlx::core::Arange
  • ArcCos() : mlx::core::ArcCos
  • ArcCosh() : mlx::core::ArcCosh
  • diff --git a/docs/build/html/functions_b.html b/docs/build/html/functions_b.html index e5279524c..8d03d5e6f 100644 --- a/docs/build/html/functions_b.html +++ b/docs/build/html/functions_b.html @@ -95,6 +95,7 @@ $(function() {
  • BlockMaskedMM() : mlx::core::BlockMaskedMM
  • BlockMMA() : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • BlockSparseMM() : mlx::core::BlockSparseMM
  • +
  • BlockSparseQMM() : mlx::core::BlockSparseQMM
  • Broadcast() : mlx::core::Broadcast
  • BROWS : mlx::steel::Conv2DInputBlockLoaderGeneral< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DInputBlockLoaderLargeFilter< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DInputBlockLoaderSmallChannels< T, BM, BN, BK, tgp_size, n_channels, tgp_padding >, mlx::steel::Conv2DInputBlockLoaderSmallFilter< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DWeightBlockLoader< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DWeightBlockLoaderGeneral< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DWeightBlockLoaderSmallChannels< T, BM, BN, BK, tgp_size, n_channels, tgp_padding >
  • Bs_offset : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • diff --git a/docs/build/html/functions_c.html b/docs/build/html/functions_c.html index 727c87084..f2a3a607d 100644 --- a/docs/build/html/functions_c.html +++ b/docs/build/html/functions_c.html @@ -79,6 +79,7 @@ $(function() {
  • Category : mlx::core::Dtype
  • Ceil() : mlx::core::Ceil
  • cfftp() : pocketfft::detail::cfftp< T0 >
  • +
  • Cholesky() : mlx::core::Cholesky
  • clear_cache() : mlx::core::metal::MetalAllocator
  • cmplx() : pocketfft::detail::cmplx< T >
  • cndarr() : pocketfft::detail::cndarr< T >
  • diff --git a/docs/build/html/functions_e.html b/docs/build/html/functions_e.html index 0373de1bd..6c5380511 100644 --- a/docs/build/html/functions_e.html +++ b/docs/build/html/functions_e.html @@ -85,10 +85,10 @@ $(function() {
  • Erf() : mlx::core::Erf
  • ErfInv() : mlx::core::ErfInv
  • eval() : mlx::core::array
  • -
  • eval_cpu() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::LayerNorm, mlx::core::fast::LayerNormVJP, mlx::core::fast::RMSNorm, mlx::core::fast::RMSNormVJP, mlx::core::fast::RoPE, mlx::core::fast::ScaledDotProductAttention, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Inverse, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QRF, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::SVD, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::UnaryPrimitive, mlx::core::Uniform
  • -
  • eval_gpu() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::LayerNorm, mlx::core::fast::LayerNormVJP, mlx::core::fast::RMSNorm, mlx::core::fast::RMSNormVJP, mlx::core::fast::RoPE, mlx::core::fast::ScaledDotProductAttention, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Inverse, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QRF, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::SVD, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::UnaryPrimitive, mlx::core::Uniform
  • +
  • eval_cpu() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::BlockSparseQMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Cholesky, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::LayerNorm, mlx::core::fast::LayerNormVJP, mlx::core::fast::RMSNorm, mlx::core::fast::RMSNormVJP, mlx::core::fast::RoPE, mlx::core::fast::ScaledDotProductAttention, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Inverse, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QRF, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::SVD, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::UnaryPrimitive, mlx::core::Uniform
  • +
  • eval_gpu() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::BlockSparseQMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Cholesky, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::LayerNorm, mlx::core::fast::LayerNormVJP, mlx::core::fast::RMSNorm, mlx::core::fast::RMSNormVJP, mlx::core::fast::RoPE, mlx::core::fast::ScaledDotProductAttention, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Inverse, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QRF, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::SVD, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::UnaryPrimitive, mlx::core::Uniform
  • event() : mlx::core::array
  • -
  • Event() : mlx::core::Event
  • +
  • Event() : mlx::core::Event
  • excess : mlx::steel::ChannelHelper< n_channels_ >, mlx::steel::ChannelHelper< 1 >, mlx::steel::ChannelHelper< 2 >, mlx::steel::ChannelHelper< 3 >, mlx::steel::ChannelHelper< 4 >
  • exec() : pocketfft::detail::cfftp< T0 >, pocketfft::detail::fftblue< T0 >, pocketfft::detail::pocketfft_c< T0 >, pocketfft::detail::pocketfft_r< T0 >, pocketfft::detail::rfftp< T0 >, pocketfft::detail::T_dcst23< T0 >, pocketfft::detail::T_dcst4< T0 >, pocketfft::detail::T_dct1< T0 >, pocketfft::detail::T_dst1< T0 >
  • exec_r() : pocketfft::detail::fftblue< T0 >
  • diff --git a/docs/build/html/functions_func_a.html b/docs/build/html/functions_func_a.html index 2afeb9d52..b3fcd5beb 100644 --- a/docs/build/html/functions_func_a.html +++ b/docs/build/html/functions_func_a.html @@ -82,7 +82,9 @@ $(function() {
  • alloc() : MPS::Matrix, MPS::MatrixMultiplication, MPS::MatrixVectorMultiplication, MPS::Vector
  • allocate() : pocketfft::detail::threading::aligned_allocator< T >
  • Allocator() : mlx::core::allocator::Allocator
  • -
  • apply() : mlx::steel::TransformAdd< OutT, InT >, mlx::steel::TransformAxpby< OutT, InT >, mlx::steel::TransformNone< OutT, InT >
  • +
  • apply() : mlx::steel::TransformAdd< OutT, InT >, mlx::steel::TransformAxpby< OutT, InT >, mlx::steel::TransformNone< OutT, InT >
  • +
  • apply_epilogue() : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • +
  • apply_epilogue_safe() : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • Arange() : mlx::core::Arange
  • ArcCos() : mlx::core::ArcCos
  • ArcCosh() : mlx::core::ArcCosh
  • diff --git a/docs/build/html/functions_func_b.html b/docs/build/html/functions_func_b.html index 3c83a2696..d7756da8d 100644 --- a/docs/build/html/functions_func_b.html +++ b/docs/build/html/functions_func_b.html @@ -81,6 +81,7 @@ $(function() {
  • BlockMaskedMM() : mlx::core::BlockMaskedMM
  • BlockMMA() : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • BlockSparseMM() : mlx::core::BlockSparseMM
  • +
  • BlockSparseQMM() : mlx::core::BlockSparseQMM
  • Broadcast() : mlx::core::Broadcast
  • Buffer() : mlx::core::allocator::Buffer
  • buffer() : mlx::core::array
  • diff --git a/docs/build/html/functions_func_c.html b/docs/build/html/functions_func_c.html index 5b6bb295c..bb3e9a6c9 100644 --- a/docs/build/html/functions_func_c.html +++ b/docs/build/html/functions_func_c.html @@ -76,6 +76,7 @@ $(function() {

    - c -

    diff --git a/docs/build/html/functions_i.html b/docs/build/html/functions_i.html index 70eb7d0fa..7d601d7ba 100644 --- a/docs/build/html/functions_i.html +++ b/docs/build/html/functions_i.html @@ -95,7 +95,7 @@ $(function() {
  • iS : MLXConvParams< NDIM >
  • is_available() : mlx::core::array
  • is_donatable() : mlx::core::array
  • -
  • is_equivalent() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::fast::ScaledDotProductAttention, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::Uniform
  • +
  • is_equivalent() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::BlockSparseQMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::fast::ScaledDotProductAttention, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::Uniform
  • is_open() : mlx::core::io::FileReader, mlx::core::io::FileWriter, mlx::core::io::Reader, mlx::core::io::Writer
  • is_ready() : pocketfft::detail::threading::latch
  • is_tracer() : mlx::core::array
  • diff --git a/docs/build/html/functions_j.html b/docs/build/html/functions_j.html index a0c1ec1af..8657cd14e 100644 --- a/docs/build/html/functions_j.html +++ b/docs/build/html/functions_j.html @@ -77,7 +77,7 @@ $(function() {
  • jump_a : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • jump_b : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • jump_params : mlx::steel::Conv2DInputBlockLoaderGeneral< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DWeightBlockLoaderGeneral< T, BM, BN, BK, tgp_size, tgp_padding >
  • -
  • jvp() : mlx::core::Abs, mlx::core::Add, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::AsStrided, mlx::core::AsType, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::Custom, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QuantizedMatmul, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose
  • +
  • jvp() : mlx::core::Abs, mlx::core::Add, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BlockSparseQMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::Custom, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QuantizedMatmul, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose
  • diff --git a/docs/build/html/functions_p.html b/docs/build/html/functions_p.html index 0475a32f5..d581b8645 100644 --- a/docs/build/html/functions_p.html +++ b/docs/build/html/functions_p.html @@ -85,7 +85,7 @@ $(function() {
  • Primitive() : mlx::core::Primitive
  • primitive_id() : mlx::core::array
  • primitive_ptr() : mlx::core::array
  • -
  • print() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Inverse, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::PrintFormatter, mlx::core::QRF, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::SVD, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::Uniform
  • +
  • print() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::Arange, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::BlockSparseQMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Cholesky, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Inverse, mlx::core::Less, mlx::core::LessEqual, mlx::core::Load, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::PrintFormatter, mlx::core::QRF, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::SVD, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::Uniform
  • Prod : mlx::core::Reduce, mlx::core::Scan, mlx::core::Scatter
  • prod() : pocketfft::detail::util
  • ptr() : mlx::core::allocator::Buffer
  • diff --git a/docs/build/html/functions_t.html b/docs/build/html/functions_t.html index 90c7cbe9a..f0e8fd5b0 100644 --- a/docs/build/html/functions_t.html +++ b/docs/build/html/functions_t.html @@ -99,8 +99,8 @@ $(function() {
  • tile_stride_b : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • tiles_m : mlx::steel::GEMMParams, mlx::steel::GEMMSpiltKParams, mlx::steel::ImplicitGemmConv2DParams
  • tiles_n : mlx::steel::GEMMParams, mlx::steel::GEMMSpiltKParams, mlx::steel::ImplicitGemmConv2DParams
  • -
  • tm : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • TM : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • +
  • tm : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • TM_stride : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • tn : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • TN : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • diff --git a/docs/build/html/functions_v.html b/docs/build/html/functions_v.html index 786549f12..801da911d 100644 --- a/docs/build/html/functions_v.html +++ b/docs/build/html/functions_v.html @@ -83,8 +83,8 @@ $(function() {
  • value_type : mlx::core::array::ArrayIterator, pocketfft::detail::threading::aligned_allocator< T >
  • vec_size : mlx::steel::BlockLoader< T, BROWS, BCOLS, dst_ld, reduction_dim, tgp_size, alignment, n_reads, TCOLS, TROWS >, mlx::steel::ChannelHelper< n_channels_ >, mlx::steel::ChannelHelper< 1 >, mlx::steel::ChannelHelper< 2 >, mlx::steel::ChannelHelper< 3 >, mlx::steel::ChannelHelper< 4 >, mlx::steel::Conv2DInputBlockLoaderGeneral< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DInputBlockLoaderLargeFilter< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DInputBlockLoaderSmallChannels< T, BM, BN, BK, tgp_size, n_channels, tgp_padding >, mlx::steel::Conv2DInputBlockLoaderSmallFilter< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DWeightBlockLoader< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DWeightBlockLoaderGeneral< T, BM, BN, BK, tgp_size, tgp_padding >, mlx::steel::Conv2DWeightBlockLoaderSmallChannels< T, BM, BN, BK, tgp_size, n_channels, tgp_padding >
  • vectorDescriptor() : MPS::VectorDescriptor
  • -
  • vjp() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::Custom, mlx::core::fast::LayerNorm, mlx::core::fast::RMSNorm, mlx::core::fast::RoPE, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QuantizedMatmul, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose
  • -
  • vmap() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::Custom, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Inverse, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::SVD, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::Uniform
  • +
  • vjp() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::AsStrided, mlx::core::AsType, mlx::core::BlockMaskedMM, mlx::core::BlockSparseMM, mlx::core::BlockSparseQMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Convolution, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::CustomVJP, mlx::core::Depends, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::Custom, mlx::core::fast::LayerNorm, mlx::core::fast::RMSNorm, mlx::core::fast::RoPE, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QuantizedMatmul, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Scatter, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::Subtract, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose
  • +
  • vmap() : mlx::core::Abs, mlx::core::Add, mlx::core::AddMM, mlx::core::ArcCos, mlx::core::ArcCosh, mlx::core::ArcSin, mlx::core::ArcSinh, mlx::core::ArcTan2, mlx::core::ArcTan, mlx::core::ArcTanh, mlx::core::ArgPartition, mlx::core::ArgReduce, mlx::core::ArgSort, mlx::core::AsType, mlx::core::BitwiseBinary, mlx::core::BlockSparseQMM, mlx::core::Broadcast, mlx::core::Ceil, mlx::core::Cholesky, mlx::core::Compiled, mlx::core::Concatenate, mlx::core::Conjugate, mlx::core::Copy, mlx::core::Cos, mlx::core::Cosh, mlx::core::Divide, mlx::core::DivMod, mlx::core::Equal, mlx::core::Erf, mlx::core::ErfInv, mlx::core::Exp, mlx::core::Expm1, mlx::core::fast::Custom, mlx::core::FFT, mlx::core::Floor, mlx::core::Full, mlx::core::Gather, mlx::core::Greater, mlx::core::GreaterEqual, mlx::core::Inverse, mlx::core::Less, mlx::core::LessEqual, mlx::core::Log1p, mlx::core::Log, mlx::core::LogAddExp, mlx::core::LogicalAnd, mlx::core::LogicalNot, mlx::core::LogicalOr, mlx::core::Matmul, mlx::core::Maximum, mlx::core::Minimum, mlx::core::Multiply, mlx::core::Negative, mlx::core::NotEqual, mlx::core::NumberOfElements, mlx::core::Pad, mlx::core::Partition, mlx::core::Power, mlx::core::Primitive, mlx::core::QuantizedMatmul, mlx::core::RandomBits, mlx::core::Reduce, mlx::core::Remainder, mlx::core::Reshape, mlx::core::Round, mlx::core::Scan, mlx::core::Select, mlx::core::Sigmoid, mlx::core::Sign, mlx::core::Sin, mlx::core::Sinh, mlx::core::Slice, mlx::core::SliceUpdate, mlx::core::Softmax, mlx::core::Sort, mlx::core::Split, mlx::core::Sqrt, mlx::core::Square, mlx::core::StopGradient, mlx::core::Subtract, mlx::core::SVD, mlx::core::Tan, mlx::core::Tanh, mlx::core::Transpose, mlx::core::Uniform
  • diff --git a/docs/build/html/functions_vars_t.html b/docs/build/html/functions_vars_t.html index 3b6040778..8cd202b1b 100644 --- a/docs/build/html/functions_vars_t.html +++ b/docs/build/html/functions_vars_t.html @@ -88,8 +88,8 @@ $(function() {
  • tile_stride_b : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • tiles_m : mlx::steel::GEMMParams, mlx::steel::GEMMSpiltKParams, mlx::steel::ImplicitGemmConv2DParams
  • tiles_n : mlx::steel::GEMMParams, mlx::steel::GEMMSpiltKParams, mlx::steel::ImplicitGemmConv2DParams
  • -
  • tm : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • TM : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • +
  • tm : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • TM_stride : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • tn : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • TN : mlx::steel::BlockMMA< T, U, BM, BN, BK, WM, WN, transpose_a, transpose_b, lda_tgp, ldb_tgp, AccumType, Epilogue >
  • diff --git a/docs/build/html/genindex.html b/docs/build/html/genindex.html index f7fe3ca9e..3b58c2384 100644 --- a/docs/build/html/genindex.html +++ b/docs/build/html/genindex.html @@ -7,7 +7,7 @@ - Index — MLX 0.13.0 documentation + Index — MLX 0.13.1 documentation @@ -35,7 +35,7 @@ - + @@ -128,8 +128,8 @@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -243,6 +243,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    @@ -815,6 +824,8 @@ document.write(`
  • add() (in module mlx.core)
  • addmm (C++ function) +
  • +
  • addmm() (in module mlx.core)
  • ALiBi (class in mlx.nn)
  • @@ -909,6 +920,8 @@ document.write(`
  • array_equal() (in module mlx.core)
  • as_strided (C++ function) +
  • +
  • as_strided() (in module mlx.core)
  • astype (C++ function)
  • @@ -965,6 +978,8 @@ document.write(`
  • block_sparse_mm (C++ function)
  • block_sparse_mm() (in module mlx.core) +
  • +
  • block_sparse_qmm (C++ function)
  • broadcast_arrays (C++ function)
  • @@ -985,6 +1000,8 @@ document.write(`
  • ceil() (in module mlx.core)
  • children() (Module method) +
  • +
  • cholesky() (in module mlx.core.linalg)
  • clear_cache() (in module mlx.core.metal)
  • @@ -1020,10 +1037,14 @@ document.write(`
  • conv2d() (in module mlx.core)
  • -
  • conv_general (C++ function), [1] +
  • conv3d (C++ function) +
  • +
  • Conv3d (class in mlx.nn)
  • - - + +
  • svd() (in module mlx.core.linalg) +
  • swapaxes (C++ function)
  • swapaxes() (array method) diff --git a/docs/build/html/group__ops.html b/docs/build/html/group__ops.html index 8bae2068f..ad56fc697 100644 --- a/docs/build/html/group__ops.html +++ b/docs/build/html/group__ops.html @@ -769,6 +769,9 @@ Functions array mlx::core::conv2d (const array &input, const array &weight, const std::pair< int, int > &stride={1, 1}, const std::pair< int, int > &padding={0, 0}, const std::pair< int, int > &dilation={1, 1}, int groups=1, StreamOrDevice s={})  2D convolution with a filter
      +array mlx::core::conv3d (const array &input, const array &weight, const std::tuple< int, int, int > &stride={1, 1, 1}, const std::tuple< int, int, int > &padding={0, 0, 0}, const std::tuple< int, int, int > &dilation={1, 1, 1}, int groups=1, StreamOrDevice s={}) + 3D convolution with a filter
    array mlx::core::quantized_matmul (const array &x, const array &w, const array &scales, const array &biases, bool transpose=true, int group_size=64, int bits=4, StreamOrDevice s={})  Quantized matmul multiplies x with a quantized matrix w.
      @@ -778,6 +781,9 @@ Functions array mlx::core::dequantize (const array &w, const array &scales, const array &biases, int group_size=64, int bits=4, StreamOrDevice s={})  Dequantize a matrix produced by quantize()
      +array mlx::core::block_sparse_qmm (const array &x, const array &w, const array &scales, const array &biases, std::optional< array > lhs_indices=std::nullopt, std::optional< array > rhs_indices=std::nullopt, bool transpose=true, int group_size=64, int bits=4, StreamOrDevice s={}) + Compute matrix products with matrix-level gather.
    array mlx::core::tensordot (const array &a, const array &b, const int axis=2, StreamOrDevice s={})  Returns a contraction of a and b over multiple dimensions.
      @@ -2378,6 +2384,69 @@ Functions

    Compute matrix product with matrix-level gather.

    + + + +

    ◆ block_sparse_qmm()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    array mlx::core::block_sparse_qmm (const array & x,
    const array & w,
    const array & scales,
    const array & biases,
    std::optional< array > lhs_indices = std::nullopt,
    std::optional< array > rhs_indices = std::nullopt,
    bool transpose = true,
    int group_size = 64,
    int bits = 4,
    StreamOrDevice s = {} )
    +
    + +

    Compute matrix products with matrix-level gather.
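The scales and biases arguments follow the layout produced by quantize(). As a worked example of the assumed packing (bits-wide elements packed into 32-bit words, one scale and one bias per group_size elements):

#include <cstdio>

int main() {
  // Assumed packing for a single weight row of K elements.
  int K = 4096, bits = 4, group_size = 64;
  std::printf("packed 32-bit words per row: %d\n", K * bits / 32);   // 512
  std::printf("scales (and biases) per row: %d\n", K / group_size);  // 64
  return 0;
}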

    +
    @@ -2651,6 +2720,54 @@ Functions

    2D convolution with a filter

    + + + +

    ◆ conv3d()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    array mlx::core::conv3d (const array & input,
    const array & weight,
    const std::tuple< int, int, int > & stride = {1, 1, 1},
    const std::tuple< int, int, int > & padding = {0, 0, 0},
    const std::tuple< int, int, int > & dilation = {1, 1, 1},
    int groups = 1,
    StreamOrDevice s = {} )
    +
    + +

    3D convolution with a filter
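The stride, padding, and dilation tuples act per spatial axis (D, H, W), so the output size along each axis follows the standard convolution arithmetic; a small sketch (the helper name is illustrative, not part of the MLX API):

#include <cstdio>

// Standard convolution output-size formula, applied independently to D, H, and W.
int conv_out_size(int in, int kernel, int stride, int padding, int dilation) {
  return (in + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1;
}

int main() {
  // A 16x16x16 volume with a 3x3x3 kernel and the defaults stride=1, padding=0, dilation=1.
  std::printf("%d\n", conv_out_size(16, 3, 1, 0, 1));  // prints 14
  return 0;
}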

    +
    diff --git a/docs/build/html/hierarchy.html b/docs/build/html/hierarchy.html index ece2f6176..1b2cdec32 100644 --- a/docs/build/html/hierarchy.html +++ b/docs/build/html/hierarchy.html @@ -295,71 +295,73 @@ $(function() {  Cmlx::core::BitwiseBinary  Cmlx::core::BlockMaskedMM  Cmlx::core::BlockSparseMM - Cmlx::core::Broadcast - Cmlx::core::Ceil - Cmlx::core::Concatenate - Cmlx::core::Conjugate - Cmlx::core::Convolution - Cmlx::core::Copy - Cmlx::core::Cos - Cmlx::core::Cosh - Cmlx::core::Divide - Cmlx::core::Equal - Cmlx::core::Erf - Cmlx::core::ErfInv - Cmlx::core::Exp - Cmlx::core::Expm1 - Cmlx::core::FFT - Cmlx::core::Floor - Cmlx::core::Full - Cmlx::core::Gather - Cmlx::core::Greater - Cmlx::core::GreaterEqual - Cmlx::core::Inverse - Cmlx::core::Less - Cmlx::core::LessEqual - Cmlx::core::Load - Cmlx::core::Log - Cmlx::core::Log1p - Cmlx::core::LogAddExp - Cmlx::core::LogicalAnd - Cmlx::core::LogicalNot - Cmlx::core::LogicalOr - Cmlx::core::Matmul - Cmlx::core::Maximum - Cmlx::core::Minimum - Cmlx::core::Multiply - Cmlx::core::Negative - Cmlx::core::NotEqual - Cmlx::core::NumberOfElements - Cmlx::core::Pad - Cmlx::core::Partition - Cmlx::core::Power - Cmlx::core::QuantizedMatmul - Cmlx::core::RandomBits - Cmlx::core::Reduce - Cmlx::core::Remainder - Cmlx::core::Reshape - Cmlx::core::Round - Cmlx::core::Scan - Cmlx::core::Scatter - Cmlx::core::Select - Cmlx::core::Sigmoid - Cmlx::core::Sign - Cmlx::core::Sin - Cmlx::core::Sinh - Cmlx::core::Slice - Cmlx::core::SliceUpdate - Cmlx::core::Softmax - Cmlx::core::Sort - Cmlx::core::Sqrt - Cmlx::core::Square - Cmlx::core::StopGradient - Cmlx::core::Subtract - Cmlx::core::Tan - Cmlx::core::Tanh - Cmlx::core::Transpose - Cmlx::core::Uniform + Cmlx::core::BlockSparseQMM + Cmlx::core::Broadcast + Cmlx::core::Ceil + Cmlx::core::Cholesky + Cmlx::core::Concatenate + Cmlx::core::Conjugate + Cmlx::core::Convolution + Cmlx::core::Copy + Cmlx::core::Cos + Cmlx::core::Cosh + Cmlx::core::Divide + Cmlx::core::Equal + Cmlx::core::Erf + Cmlx::core::ErfInv + Cmlx::core::Exp + Cmlx::core::Expm1 + Cmlx::core::FFT + Cmlx::core::Floor + Cmlx::core::Full + Cmlx::core::Gather + Cmlx::core::Greater + Cmlx::core::GreaterEqual + Cmlx::core::Inverse + Cmlx::core::Less + Cmlx::core::LessEqual + Cmlx::core::Load + Cmlx::core::Log + Cmlx::core::Log1p + Cmlx::core::LogAddExp + Cmlx::core::LogicalAnd + Cmlx::core::LogicalNot + Cmlx::core::LogicalOr + Cmlx::core::Matmul + Cmlx::core::Maximum + Cmlx::core::Minimum + Cmlx::core::Multiply + Cmlx::core::Negative + Cmlx::core::NotEqual + Cmlx::core::NumberOfElements + Cmlx::core::Pad + Cmlx::core::Partition + Cmlx::core::Power + Cmlx::core::QuantizedMatmul + Cmlx::core::RandomBits + Cmlx::core::Reduce + Cmlx::core::Remainder + Cmlx::core::Reshape + Cmlx::core::Round + Cmlx::core::Scan + Cmlx::core::Scatter + Cmlx::core::Select + Cmlx::core::Sigmoid + Cmlx::core::Sign + Cmlx::core::Sin + Cmlx::core::Sinh + Cmlx::core::Slice + Cmlx::core::SliceUpdate + Cmlx::core::Softmax + Cmlx::core::Sort + Cmlx::core::Sqrt + Cmlx::core::Square + Cmlx::core::StopGradient + Cmlx::core::Subtract + Cmlx::core::Tan + Cmlx::core::Tanh + Cmlx::core::Transpose + Cmlx::core::Uniform  Cmlx::core::fast::Custom  Cmlx::core::fast::LayerNorm  Cmlx::core::fast::LayerNormVJP diff --git a/docs/build/html/index.html b/docs/build/html/index.html index e97ca1e75..7e8403c53 100644 --- a/docs/build/html/index.html +++ b/docs/build/html/index.html @@ -8,7 +8,7 @@ - MLX — MLX 0.13.0 documentation + MLX — MLX 0.13.1 documentation @@ -36,7 +36,7 @@ - + @@ -130,8 +130,8 
@@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -245,6 +245,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    @@ -867,7 +876,7 @@ are the CPU and GPU.

    diff --git a/docs/build/html/install.html b/docs/build/html/install.html index b278367c2..458329dda 100644 --- a/docs/build/html/install.html +++ b/docs/build/html/install.html @@ -8,7 +8,7 @@ - Build and Install — MLX 0.13.0 documentation + Build and Install — MLX 0.13.1 documentation @@ -36,7 +36,7 @@ - + @@ -131,8 +131,8 @@ - MLX 0.13.0 documentation - Home - + MLX 0.13.1 documentation - Home + @@ -246,6 +246,7 @@
  • Operations
  • Linear Algebra
  • Metal

    Further Reading

    @@ -794,7 +803,10 @@ document.write(`
  • Build from source