Change the quantization check to explicitly test `quantization is not None` instead of relying on truthiness

This commit is contained in:
Anchen 2023-12-29 23:12:06 +11:00 committed by GitHub
parent a7da4ad7a3
commit b38ab8b911
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -244,7 +244,7 @@ def load_model(model_path: str):
model = DeepseekCoder(model_args)
weights = mx.load(str(model_path / "weights.npz"))
if quantization:
if quantization is not None:
nn.QuantizedLinear.quantize_module(model, **quantization)
model.update(tree_unflatten(list(weights.items())))