Make sure 0 is represented in the quantization (#1016)

This commit is contained in:
Angelos Katharopoulos
2024-04-19 19:47:26 -07:00
committed by GitHub
parent ed83908931
commit 84d61d27aa
3 changed files with 13 additions and 3 deletions

View File

@@ -16,7 +16,7 @@ class TestQuantized(mlx_tests.MLXTestCase):
w_hat = mx.dequantize(w_q, scales, biases, gs, b)
errors = (w - w_hat).abs().reshape(*scales.shape, -1)
eps = 1e-6
-        self.assertTrue((errors <= (scales[..., None] / 2 + eps)).all())
+        self.assertTrue((errors <= (scales[..., None] + eps)).all())
def test_qmm(self):
key = mx.random.key(0)