Support for quantized matmul with w and w^T (#349)

* Add the metal qvm implementation
* Add qmm_n
* Add the gradient w.r.t. the input for quantized_matmul
Author: Angelos Katharopoulos
Date: 2024-01-03 14:22:36 -08:00
Committed by: GitHub
Parent: d7ac050f4b
Commit: e7f5059fe4
12 changed files with 718 additions and 193 deletions

@@ -81,9 +81,10 @@ class QuantizedLinear(Module):
     def __call__(self, x):
         x = mx.quantized_matmul(
             x,
-            self.weight.T,
+            self.weight,
             scales=self.scales,
             biases=self.biases,
+            transpose=True,
             group_size=self.group_size,
             bits=self.bits,
         )
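
For reference, the layer's new call is equivalent to the following standalone
sketch (the API names come from this diff; the shapes are illustrative
assumptions):

    import mlx.core as mx

    x = mx.random.normal(shape=(2, 512))         # input batch
    w = mx.random.normal(shape=(256, 512))       # (output_dims, input_dims)
    w_q, scales, biases = mx.quantize(w, 64, 4)  # pack into unsigned integers
    # transpose=True computes x @ w.T without materializing the transpose
    y = mx.quantized_matmul(x, w_q, scales=scales, biases=biases,
                            transpose=True, group_size=64, bits=4)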

@@ -3072,12 +3072,13 @@ void init_ops(py::module_& m) {
       py::pos_only(),
       "scales"_a,
       "biases"_a,
+      "transpose"_a = true,
       "group_size"_a = 64,
       "bits"_a = 4,
       py::kw_only(),
       "stream"_a = none,
       R"pbdoc(
-        quantized_matmul(x: array, w: array, scales: array, biases: array, /, group_size: int = 64, bits: int = 4, *, stream: Union[None, Stream, Device] = None) -> array
+        quantized_matmul(x: array, w: array, /, scales: array, biases: array, transpose: bool = True, group_size: int = 64, bits: int = 4, *, stream: Union[None, Stream, Device] = None) -> array
 
         Perform the matrix multiplication with the quantized matrix ``w``. The
         quantization uses one floating point scale and bias per ``group_size`` of
@@ -3089,10 +3090,13 @@ void init_ops(py::module_& m) {
           w (array): Quantized matrix packed in unsigned integers
           scales (array): The scales to use per ``group_size`` elements of ``w``
           biases (array): The biases to use per ``group_size`` elements of ``w``
+          transpose (bool, optional): Defines whether to multiply with the
+            transposed ``w`` or not, namely whether we are performing
+            ``x @ w.T`` or ``x @ w``. (default: ``True``)
           group_size (int, optional): The size of the group in ``w`` that
-            shares a scale and bias. (default: 64)
+            shares a scale and bias. (default: ``64``)
           bits (int, optional): The number of bits occupied by each element in
-            ``w``. (default: 4)
+            ``w``. (default: ``4``)
 
         Returns:
           result (array): The result of the multiplication of ``x`` with ``w``.
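
A hedged usage sketch of the two transpose modes, mirroring the tests in this
commit (all function names appear in this diff; the shapes and tolerance are
taken from the tests below):

    import mlx.core as mx

    x = mx.random.normal(shape=(8, 512))
    wt = mx.random.normal(shape=(256, 512))  # multiplied as x @ wt.T
    wn = mx.random.normal(shape=(512, 256))  # multiplied as x @ wn
    for w, transpose in [(wt, True), (wn, False)]:
        w_q, scales, biases = mx.quantize(w, 64, 4)
        w_hat = mx.dequantize(w_q, scales, biases, 64, 4)
        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose, 64, 4)
        y_ref = x @ w_hat.T if transpose else x @ w_hat
        # the quantized result tracks the dequantized reference
        assert (y_q - y_ref).abs().max() < 1e-3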
@@ -3146,9 +3150,9 @@ void init_ops(py::module_& m) {
         Args:
           w (array): Matrix to be quantized
           group_size (int, optional): The size of the group in ``w`` that shares a
-            scale and bias. (default: 64)
+            scale and bias. (default: ``64``)
           bits (int, optional): The number of bits occupied by each element of
-            ``w`` in the returned quantized matrix. (default: 4)
+            ``w`` in the returned quantized matrix. (default: ``4``)
 
         Returns:
           (tuple): A tuple containing
@@ -3187,9 +3191,9 @@ void init_ops(py::module_& m) {
           scales (array): The scales to use per ``group_size`` elements of ``w``
           biases (array): The biases to use per ``group_size`` elements of ``w``
           group_size (int, optional): The size of the group in ``w`` that shares a
-            scale and bias. (default: 64)
+            scale and bias. (default: ``64``)
           bits (int, optional): The number of bits occupied by each element in
-            ``w``. (default: 4)
+            ``w``. (default: ``4``)
 
         Returns:
           result (array): The dequantized version of ``w``
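
A small round-trip sketch of quantize/dequantize as documented above: one
scale and bias per ``group_size`` elements, with ``dequantize`` recovering an
approximation of the original matrix (the shapes here are illustrative):

    import mlx.core as mx

    w = mx.random.normal(shape=(64, 128))
    w_q, scales, biases = mx.quantize(w, 64, 4)        # packed storage + params
    w_hat = mx.dequantize(w_q, scales, biases, 64, 4)  # approximate w
    print(w_q.dtype, w_q.shape)     # unsigned-integer packed representation
    print((w - w_hat).abs().max())  # small quantization error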

@@ -1,6 +1,7 @@
 # Copyright © 2023 Apple Inc.
 
 import unittest
+from itertools import product
 
 import mlx.core as mx
 import mlx_tests
@@ -19,62 +20,116 @@ class TestQuantized(mlx_tests.MLXTestCase):
     def test_qmm(self):
         key = mx.random.key(0)
         k1, k2 = mx.random.split(key)
-        for group_size in [128, 64]:
-            for bits in [2, 4, 8]:
-                for M in [8, 32, 33, 64]:
-                    for N in [512, 1024]:
-                        for K in [512, 1024]:
-                            with self.subTest(
-                                shape=(M, N, K), group_size=group_size, bits=bits
-                            ):
-                                x = mx.random.normal(shape=(M, K), key=k1)
-                                w = mx.random.normal(shape=(N, K), key=k2)
-                                w_q, scales, biases = mx.quantize(w, group_size, bits)
-                                w_hat = mx.dequantize(
-                                    w_q, scales, biases, group_size, bits
-                                )
-                                y_q = mx.quantized_matmul(
-                                    x, w_q.T, scales, biases, group_size, bits
-                                )
-                                y_hat = x @ w_hat.T
-                                self.assertEqual(y_q.shape, y_hat.shape)
-                                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+        tests = product(
+            [128, 64],  # group_size
+            [2, 4, 8],  # bits
+            [8, 32, 33, 64],  # M
+            [512, 1024],  # N
+            [512, 1024],  # K
+            [True, False],  # transposed
+        )
+        for group_size, bits, M, N, K, transposed in tests:
+            with self.subTest(
+                shape=(M, N, K),
+                group_size=group_size,
+                bits=bits,
+                transposed=transposed,
+            ):
+                x = mx.random.normal(shape=(M, K), key=k1)
+                w = mx.random.normal(shape=(N, K) if transposed else (K, N), key=k2)
+                w_q, scales, biases = mx.quantize(w, group_size, bits)
+                w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+                y_q = mx.quantized_matmul(
+                    x, w_q, scales, biases, transposed, group_size, bits
+                )
+                y_hat = (x @ w_hat.T) if transposed else (x @ w_hat)
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
 
     def test_qmm_shapes(self):
         key = mx.random.key(0)
         k1, k2 = mx.random.split(key)
         group_size = 64
         bits = 4
-        w = mx.random.normal(shape=(32, 128), key=k2)
+        w = mx.random.normal(shape=(32, 256), key=k2)
         w_q, scales, biases = mx.quantize(w, group_size, bits)
         w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
-        for s in [(3, 128), (2, 1, 7, 128)]:
-            x = mx.random.normal(shape=(3, 128), key=k1)
-            y_q = mx.quantized_matmul(x, w_q.T, scales, biases, group_size, bits)
+        for s in [(3, 256), (2, 1, 7, 256)]:
+            x = mx.random.normal(shape=s, key=k1)
+            y_q = mx.quantized_matmul(x, w_q, scales, biases, True, group_size, bits)
             y_hat = x @ w_hat.T
             self.assertEqual(y_q.shape, y_hat.shape)
             self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        w = mx.random.normal(shape=(256, 256), key=k2)
+        w_q, scales, biases = mx.quantize(w, group_size, bits)
+        w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+        for s in [(3, 256), (2, 1, 7, 256)]:
+            x = mx.random.normal(shape=s, key=k1)
+            y_q = mx.quantized_matmul(x, w_q, scales, biases, False, group_size, bits)
+            y_hat = x @ w_hat
+            self.assertEqual(y_q.shape, y_hat.shape)
+            self.assertLess((y_q - y_hat).abs().max(), 1e-3)
 
     def test_qmv(self):
         key = mx.random.key(0)
         k1, k2 = mx.random.split(key)
-        for group_size in [128, 64]:
-            for bits in [2, 4, 8]:
-                for M in [512, 1024]:
-                    for N in [512, 1024]:
-                        with self.subTest(
-                            shape=(M, N), group_size=group_size, bits=bits
-                        ):
-                            x = mx.random.normal(shape=(1, N), key=k1)
-                            w = mx.random.normal(shape=(M, N), key=k2)
-                            w_q, scales, biases = mx.quantize(w, group_size, bits)
-                            w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
-                            y_q = mx.quantized_matmul(
-                                x, w_q.T, scales, biases, group_size, bits
-                            )
-                            y_hat = x @ w_hat.T
-                            self.assertEqual(y_q.shape, y_hat.shape)
-                            self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+        tests = product(
+            [128, 64],  # group_size
+            [2, 4, 8],  # bits
+            [512, 1024],  # M
+            [512, 1024],  # N
+        )
+        for group_size, bits, M, N in tests:
+            with self.subTest(shape=(M, N), group_size=group_size, bits=bits):
+                x = mx.random.normal(shape=(1, N), key=k1)
+                w = mx.random.normal(shape=(M, N), key=k2)
+                w_q, scales, biases = mx.quantize(w, group_size, bits)
+                w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+                y_q = mx.quantized_matmul(
+                    x, w_q, scales, biases, True, group_size, bits
+                )
+                y_hat = x @ w_hat.T
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+    def test_qvm(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+        tests = product(
+            [128, 64],  # group_size
+            [2, 4, 8],  # bits
+            [512, 1024],  # M
+            [512, 1024],  # N
+        )
+        for group_size, bits, M, N in tests:
+            with self.subTest(shape=(M, N), group_size=group_size, bits=bits):
+                x = mx.random.normal(shape=(1, N), key=k1)
+                w = mx.random.normal(shape=(N, M), key=k2)
+                w_q, scales, biases = mx.quantize(w, group_size, bits)
+                w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+                y_q = mx.quantized_matmul(
+                    x, w_q, scales, biases, False, group_size, bits
+                )
+                y_hat = x @ w_hat
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
 
     def test_throw(self):
         x = mx.random.normal(shape=(10, 512))
         w = mx.random.normal(shape=(32, 512))
         w_q, scales, biases = mx.quantize(w)
         with self.assertRaises(ValueError):
-            mx.quantized_matmul(x, w_q.T, scales, biases)
-        with self.assertRaises(ValueError):
-            mx.quantized_matmul(x, w_q.T, scales.T, biases)
+            mx.quantized_matmul(x, w_q, scales, biases, False)
+        with self.assertRaises(ValueError):
+            mx.quantized_matmul(x, w_q, scales.T, biases.T)
+        y = mx.quantized_matmul(x, w_q, scales, biases, True)
+        mx.eval(y)
 
 
 if __name__ == "__main__":