add mode parameter for quantization

Awni Hannun
2025-08-15 17:36:55 -07:00
committed by Awni Hannun
parent 584d48458e
commit e04e17e3b6
9 changed files with 127 additions and 56 deletions


@@ -39,6 +39,6 @@ class Embedding(Module):
         """
         return x @ self.weight.T
 
-    def to_quantized(self, group_size: int = 64, bits: int = 4):
+    def to_quantized(self, group_size: int = 64, bits: int = 4, mode: str = "affine"):
         """Return a :obj:`QuantizedEmbedding` layer that approximates this embedding layer."""
-        return QuantizedEmbedding.from_embedding(self, group_size, bits)
+        return QuantizedEmbedding.from_embedding(self, group_size, bits, mode)


@@ -70,9 +70,9 @@ class Linear(Module):
             x = x @ self["weight"].T
         return x
 
-    def to_quantized(self, group_size: int = 64, bits: int = 4):
+    def to_quantized(self, group_size: int = 64, bits: int = 4, mode: str = "affine"):
         """Return a :obj:`QuantizedLinear` layer that approximates this layer."""
-        return QuantizedLinear.from_linear(self, group_size, bits)
+        return QuantizedLinear.from_linear(self, group_size, bits, mode)
 
 
 class Bilinear(Module):
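A minimal usage sketch of the updated `to_quantized` methods; the layer sizes below are illustrative, not taken from the diff, and `mode="affine"` is the default so passing it explicitly is optional:

import mlx.nn as nn

# Convert individual layers with the new `mode` keyword
qlinear = nn.Linear(512, 512).to_quantized(group_size=64, bits=4, mode="affine")
qembed = nn.Embedding(1000, 512).to_quantized(group_size=64, bits=4, mode="affine")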


@@ -12,6 +12,8 @@ def quantize(
     model: Module,
     group_size: int = 64,
     bits: int = 4,
+    *,
+    mode: str = "affine",
     class_predicate: Optional[Callable[[str, Module], Union[bool, dict]]] = None,
 ):
     """Quantize the sub-modules of a module according to a predicate.
@@ -26,6 +28,8 @@ def quantize(
             :func:`mlx.core.quantize`). Default: ``64``.
         bits (int): The number of bits per parameter (see
             :func:`mlx.core.quantize`). Default: ``4``.
+        mode (str): The quantization method to use (see
+            :func:`mlx.core.quantize`). Default: ``"affine"``.
         class_predicate (Optional[Callable]): A callable which receives the
             :obj:`Module` path and :obj:`Module` itself and returns ``True`` or a
             dict of params for `to_quantized` if it should be quantized and
@@ -39,7 +43,7 @@ def quantize(
         if bool_or_params := class_predicate(path, m):
             if hasattr(m, "to_quantized"):
                 if isinstance(bool_or_params, bool):
-                    return m.to_quantized(group_size=group_size, bits=bits)
+                    return m.to_quantized(group_size=group_size, bits=bits, mode=mode)
                 elif isinstance(bool_or_params, dict):
                     return m.to_quantized(**bool_or_params)
                 else:
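Because `class_predicate` may return a dict of parameters for `to_quantized`, `mode` can also be chosen per sub-module. A hedged sketch; the model and predicate here are made up for illustration:

import mlx.nn as nn

model = nn.Sequential(nn.Linear(256, 256), nn.ReLU(), nn.Linear(256, 10))
# Quantize the whole model with one mode...
nn.quantize(model, group_size=64, bits=4, mode="affine")

# ...or return a dict from the predicate: its entries are forwarded to
# `to_quantized`, so `mode` (and group_size/bits) can differ per module.
def predicate(path, module):
    if isinstance(module, nn.Linear):
        return {"group_size": 32, "bits": 8, "mode": "affine"}
    return False

model2 = nn.Sequential(nn.Linear(256, 256), nn.ReLU(), nn.Linear(256, 10))
nn.quantize(model2, class_predicate=predicate)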
@@ -72,6 +76,8 @@ class QuantizedEmbedding(Module):
             weight. See :func:`~mlx.core.quantize`. Default: ``64``.
         bits (int, optional): The bit width to use for the quantized weight.
             See :func:`~mlx.core.quantize`. Default: ``4``.
+        mode (str): The quantization method to use (see
+            :func:`mlx.core.quantize`). Default: ``"affine"``.
     """
 
     def __init__(
@@ -80,17 +86,21 @@ class QuantizedEmbedding(Module):
         dims: int,
         group_size: int = 64,
         bits: int = 4,
+        mode: str = "affine",
     ):
         super().__init__()
 
         # Quantization config
         self.group_size = group_size
         self.bits = bits
+        self.mode = mode
 
         # Initialize the quantized weight
         scale = math.sqrt(1 / dims)
         weight = mx.random.normal(shape=(num_embeddings, dims), scale=scale)
-        self.weight, self.scales, self.biases = mx.quantize(weight, group_size, bits)
+        self.weight, self.scales, self.biases = mx.quantize(
+            weight, group_size, bits, mode=mode
+        )
 
         self.num_embeddings = num_embeddings
         self.dims = dims
@@ -104,6 +114,7 @@ class QuantizedEmbedding(Module):
             biases=self["biases"][x],
             group_size=self.group_size,
             bits=self.bits,
+            mode=self.mode,
         )
 
     def as_linear(self, x):
@@ -121,23 +132,31 @@ class QuantizedEmbedding(Module):
             transpose=True,
             group_size=self.group_size,
             bits=self.bits,
+            mode=self.mode,
         )
 
     def _extra_repr(self):
         return (
             f"{self.num_embeddings}, {self.dims}, "
-            f"group_size={self.group_size}, bits={self.bits}"
+            f"group_size={self.group_size}, bits={self.bits}, mode={self.mode}"
         )
 
     @classmethod
     def from_embedding(
-        cls, embedding_layer: Module, group_size: int = 64, bits: int = 4
+        cls,
+        embedding_layer: Module,
+        group_size: int = 64,
+        bits: int = 4,
+        mode: str = "affine",
     ):
         """Create a :obj:`QuantizedEmbedding` layer from an :obj:`Embedding` layer."""
         embedding_dims, dims = embedding_layer.weight.shape
         ql = cls(embedding_dims, dims, group_size, bits)
         ql.weight, ql.scales, ql.biases = mx.quantize(
-            embedding_layer.weight, group_size, bits
+            embedding_layer.weight,
+            group_size,
+            bits,
+            mode=mode,
         )
         return ql
@@ -161,6 +180,8 @@ class QuantizedLinear(Module):
             weight. See :func:`~mlx.core.quantize`. Default: ``64``.
         bits (int, optional): The bit width to use for the quantized weight.
             See :func:`~mlx.core.quantize`. Default: ``4``.
+        mode (str): The quantization method to use (see
+            :func:`mlx.core.quantize`). Default: ``"affine"``.
     """
 
     def __init__(
@@ -170,12 +191,14 @@ class QuantizedLinear(Module):
         bias: bool = True,
         group_size: int = 64,
         bits: int = 4,
+        mode: str = "affine",
     ):
         super().__init__()
 
         # Quantization config
         self.group_size = group_size
         self.bits = bits
+        self.mode = mode
 
         # Initialize the quantized weight
         scale = math.sqrt(1 / input_dims)
@@ -184,7 +207,9 @@ class QuantizedLinear(Module):
             high=scale,
             shape=(output_dims, input_dims),
         )
-        self.weight, self.scales, self.biases = mx.quantize(weight, group_size, bits)
+        self.weight, self.scales, self.biases = mx.quantize(
+            weight, group_size, bits, mode=mode
+        )
 
         # And bias if needed
         if bias:
@@ -198,7 +223,7 @@ class QuantizedLinear(Module):
             in_dims *= 32 // self.bits
         return (
             f"input_dims={in_dims}, output_dims={out_dims}, bias={'bias' in self}, "
-            f"group_size={self.group_size}, bits={self.bits}"
+            f"group_size={self.group_size}, bits={self.bits}, mode={self.mode}"
         )
 
     def __call__(self, x):
@@ -210,18 +235,28 @@ class QuantizedLinear(Module):
             transpose=True,
             group_size=self.group_size,
             bits=self.bits,
+            mode=self.mode,
         )
         if "bias" in self:
             x = x + self["bias"]
         return x
 
     @classmethod
-    def from_linear(cls, linear_layer: Module, group_size: int = 64, bits: int = 4):
+    def from_linear(
+        cls,
+        linear_layer: Module,
+        group_size: int = 64,
+        bits: int = 4,
+        mode: str = "affine",
+    ):
         """Create a :obj:`QuantizedLinear` layer from a :obj:`Linear` layer."""
         output_dims, input_dims = linear_layer.weight.shape
         ql = cls(input_dims, output_dims, False, group_size, bits)
         ql.weight, ql.scales, ql.biases = mx.quantize(
-            linear_layer.weight, group_size, bits
+            linear_layer.weight,
+            group_size,
+            bits,
+            mode=mode,
         )
         if "bias" in linear_layer:
             ql.bias = linear_layer.bias
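A small sketch of the classmethod path, converting an existing float layer; the dimensions are illustrative:

import mlx.core as mx
import mlx.nn as nn

linear = nn.Linear(256, 128)
# Re-quantize an existing layer, forwarding the new mode argument
qlayer = nn.QuantizedLinear.from_linear(linear, group_size=64, bits=4, mode="affine")
y = qlayer(mx.random.normal((1, 256)))  # same call interface as the float layer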


@@ -4157,10 +4157,11 @@ void init_ops(nb::module_& m) {
       "transpose"_a = true,
       "group_size"_a = 64,
       "bits"_a = 4,
+      "mode"_a = "affine",
       nb::kw_only(),
       "stream"_a = nb::none(),
       nb::sig(
-          "def quantized_matmul(x: array, w: array, /, scales: array, biases: array, transpose: bool = True, group_size: int = 64, bits: int = 4, *, stream: Union[None, Stream, Device] = None) -> array"),
+          "def quantized_matmul(x: array, w: array, /, scales: array, biases: array, transpose: bool = True, group_size: int = 64, bits: int = 4, mode: str = 'affine', *, stream: Union[None, Stream, Device] = None) -> array"),
       R"pbdoc(
         Perform the matrix multiplication with the quantized matrix ``w``. The
         quantization uses one floating point scale and bias per ``group_size`` of
@@ -4179,6 +4180,7 @@ void init_ops(nb::module_& m) {
             shares a scale and bias. Default: ``64``.
           bits (int, optional): The number of bits occupied by each element in
             ``w``. Default: ``4``.
+          mode (str, optional): The quantization mode. Default: ``"affine"``.
 
         Returns:
           array: The result of the multiplication of ``x`` with ``w``.
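A round-trip sketch of the binding above, with shapes chosen so ``group_size`` divides the last dimension:

import mlx.core as mx

x = mx.random.normal((8, 256))
w = mx.random.normal((128, 256))
w_q, scales, biases = mx.quantize(w, group_size=64, bits=4, mode="affine")
# With transpose=True this approximates x @ w.T up to quantization error
y = mx.quantized_matmul(x, w_q, scales, biases,
                        transpose=True, group_size=64, bits=4, mode="affine")
print(y.shape)  # (8, 128)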
@@ -4189,10 +4191,11 @@ void init_ops(nb::module_& m) {
       nb::arg(),
       "group_size"_a = 64,
       "bits"_a = 4,
+      "mode"_a = "affine",
       nb::kw_only(),
       "stream"_a = nb::none(),
       nb::sig(
-          "def quantize(w: array, /, group_size: int = 64, bits : int = 4, *, stream: Union[None, Stream, Device] = None) -> tuple[array, array, array]"),
+          "def quantize(w: array, /, group_size: int = 64, bits: int = 4, mode: str = 'affine', *, stream: Union[None, Stream, Device] = None) -> tuple[array, array, array]"),
       R"pbdoc(
         Quantize the matrix ``w`` using ``bits`` bits per element.
@@ -4203,30 +4206,10 @@ void init_ops(nb::module_& m) {
         .. warning::
 
-          ``quantize`` currently only supports 2D inputs with dimensions which are multiples of 32
+          ``quantize`` currently only supports 2D inputs with the second
+          dimension divisible by ``group_size``
 
-        Formally, for a group of :math:`g` consecutive elements :math:`w_1` to
-        :math:`w_g` in a row of ``w`` we compute the quantized representation
-        of each element :math:`\hat{w_i}` as follows
-
-        .. math::
-
-          \begin{aligned}
-            \alpha &= \max_i w_i \\
-            \beta &= \min_i w_i \\
-            s &= \frac{\alpha - \beta}{2^b - 1} \\
-            \hat{w_i} &= \textrm{round}\left( \frac{w_i - \beta}{s}\right).
-          \end{aligned}
-
-        After the above computation, :math:`\hat{w_i}` fits in :math:`b` bits
-        and is packed in an unsigned 32-bit integer from the lower to upper
-        bits. For instance, for 4-bit quantization we fit 8 elements in an
-        unsigned 32 bit integer where the 1st element occupies the 4 least
-        significant bits, the 2nd bits 4-7 etc.
-
-        In order to be able to dequantize the elements of ``w`` we also need to
-        save :math:`s` and :math:`\beta` which are the returned ``scales`` and
-        ``biases`` respectively.
+        The supported quantization modes are described in more detail below.
 
         Args:
           w (array): Matrix to be quantized
@@ -4234,6 +4217,7 @@ void init_ops(nb::module_& m) {
             scale and bias. Default: ``64``.
           bits (int, optional): The number of bits occupied by each element of
             ``w`` in the returned quantized matrix. Default: ``4``.
+          mode (str, optional): The quantization mode. Default: ``"affine"``.
 
         Returns:
           tuple: A tuple containing
@@ -4241,6 +4225,31 @@ void init_ops(nb::module_& m) {
           * w_q (array): The quantized version of ``w``
           * scales (array): The scale to multiply each element with, namely :math:`s`
           * biases (array): The biases to add to each element, namely :math:`\beta`
+
+        Notes:
+          The currently supported quantization mode is ``"affine"``.
+
+          Formally, for a group of :math:`g` consecutive elements :math:`w_1` to
+          :math:`w_g` in a row of ``w`` we compute the quantized representation
+          of each element :math:`\hat{w_i}` as follows
+
+          .. math::
+
+            \begin{aligned}
+              \alpha &= \max_i w_i \\
+              \beta &= \min_i w_i \\
+              s &= \frac{\alpha - \beta}{2^b - 1} \\
+              \hat{w_i} &= \textrm{round}\left( \frac{w_i - \beta}{s}\right).
+            \end{aligned}
+
+          After the above computation, :math:`\hat{w_i}` fits in :math:`b` bits
+          and is packed in an unsigned 32-bit integer from the lower to upper
+          bits. For instance, for 4-bit quantization we fit 8 elements in an
+          unsigned 32-bit integer where the 1st element occupies the 4 least
+          significant bits, the 2nd bits 4-7, etc.
+
+          In order to be able to dequantize the elements of ``w`` we also need to
+          save :math:`s` and :math:`\beta` which are the returned ``scales`` and
+          ``biases`` respectively.
       )pbdoc");
   m.def(
       "dequantize",
@@ -4250,21 +4259,15 @@ void init_ops(nb::module_& m) {
       "biases"_a,
       "group_size"_a = 64,
       "bits"_a = 4,
+      "mode"_a = "affine",
       nb::kw_only(),
       "stream"_a = nb::none(),
       nb::sig(
-          "def dequantize(w: array, /, scales: array, biases: array, group_size: int = 64, bits: int = 4, *, stream: Union[None, Stream, Device] = None) -> array"),
+          "def dequantize(w: array, /, scales: array, biases: array, group_size: int = 64, bits: int = 4, mode: str = 'affine', *, stream: Union[None, Stream, Device] = None) -> array"),
       R"pbdoc(
-        Dequantize the matrix ``w`` using the provided ``scales`` and
-        ``biases`` and the ``group_size`` and ``bits`` configuration.
+        Dequantize the matrix ``w`` using quantization parameters.
 
-        Formally, given the notation in :func:`quantize`, we compute
-        :math:`w_i` from :math:`\hat{w_i}` and corresponding :math:`s` and
-        :math:`\beta` as follows
-
-        .. math::
-
-          w_i = s \hat{w_i} + \beta
+        The supported quantization modes are described in more detail below.
 
         Args:
           w (array): Matrix to be quantized
@@ -4274,9 +4277,20 @@ void init_ops(nb::module_& m) {
             scale and bias. Default: ``64``.
           bits (int, optional): The number of bits occupied by each element in
             ``w``. Default: ``4``.
+          mode (str, optional): The quantization mode. Default: ``"affine"``.
 
         Returns:
           array: The dequantized version of ``w``
+
+        Notes:
+          The currently supported quantization mode is ``"affine"``.
+
+          Formally, given the notation in :func:`quantize`, we compute
+          :math:`w_i` from :math:`\hat{w_i}` and corresponding :math:`s` and
+          :math:`\beta` as follows
+
+          .. math::
+
+            w_i = s \hat{w_i} + \beta
       )pbdoc");
   m.def(
       "gather_qmm",
@@ -4290,11 +4304,12 @@ void init_ops(nb::module_& m) {
       "transpose"_a = true,
       "group_size"_a = 64,
       "bits"_a = 4,
+      "mode"_a = "affine",
       nb::kw_only(),
       "sorted_indices"_a = false,
       "stream"_a = nb::none(),
       nb::sig(
-          "def gather_qmm(x: array, w: array, /, scales: array, biases: array, lhs_indices: Optional[array] = None, rhs_indices: Optional[array] = None, transpose: bool = True, group_size: int = 64, bits: int = 4, *, sorted_indices: bool = False, stream: Union[None, Stream, Device] = None) -> array"),
+          "def gather_qmm(x: array, w: array, /, scales: array, biases: array, lhs_indices: Optional[array] = None, rhs_indices: Optional[array] = None, transpose: bool = True, group_size: int = 64, bits: int = 4, mode: str = 'affine', *, sorted_indices: bool = False, stream: Union[None, Stream, Device] = None) -> array"),
       R"pbdoc(
         Perform quantized matrix multiplication with matrix-level gather.
@@ -4320,6 +4335,7 @@ void init_ops(nb::module_& m) {
             shares a scale and bias. Default: ``64``.
           bits (int, optional): The number of bits occupied by each element in
             ``w``. Default: ``4``.
+          mode (str, optional): The quantization mode. Default: ``"affine"``.
           sorted_indices (bool, optional): May allow a faster implementation
             if the passed indices are sorted. Default: ``False``.
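A sketch of per-token expert selection through ``gather_qmm`` with the new mode argument. The expert count, shapes, and the stack-then-gather layout are assumptions for illustration (``mx.quantize`` is applied per 2D expert matrix, per the warning above), not taken from the diff:

import mlx.core as mx

E, N, K = 4, 32, 64                          # experts, output dims, input dims
experts = [mx.quantize(mx.random.normal((N, K)),
                       group_size=64, bits=4, mode="affine") for _ in range(E)]
w_q = mx.stack([e[0] for e in experts])      # packed 4-bit weights, (E, N, K // 8)
scales = mx.stack([e[1] for e in experts])
biases = mx.stack([e[2] for e in experts])

x = mx.random.normal((2, 1, K))              # two tokens
rhs_indices = mx.array([0, 3])               # expert chosen for each token
y = mx.gather_qmm(x, w_q, scales, biases,
                  rhs_indices=rhs_indices, transpose=True,
                  group_size=64, bits=4, mode="affine", sorted_indices=True)
print(y.shape)  # expected (2, 1, N)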