chore(lora): support mixtral in lora example (#343)

commit 1415595409 (parent 527cea4027)
Author: Anchen
Date:   2024-01-20 06:07:45 -08:00
Committed by: GitHub

6 changed files with 279 additions and 4 deletions

@@ -328,7 +328,12 @@ def load(path_or_hf_repo: str):
     model = Model(model_args)
     if quantization is not None:
-        nn.QuantizedLinear.quantize_module(model, **quantization)
+        nn.QuantizedLinear.quantize_module(
+            model,
+            **quantization,
+            linear_class_predicate=lambda m: isinstance(m, nn.Linear)
+            and m.weight.shape[0] != 8,
+        )
     model.load_weights(list(weights.items()))
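
For context: the `linear_class_predicate` skips any `nn.Linear` whose output dimension is 8, which in Mixtral matches the per-layer MoE gate (the router that scores the model's 8 experts), so that layer is left unquantized while everything else is converted. Below is a minimal sketch of the same call, assuming the `mlx` package is installed and using a toy module in place of a real Mixtral block; the `group_size` and `bits` values are illustrative defaults, not taken from this commit.

import mlx.nn as nn

class Block(nn.Module):
    """Toy stand-in for a Mixtral block: one regular projection plus
    an 8-way gate, mirroring the router over 8 experts."""

    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(64, 64)  # weight.shape[0] == 64 -> quantized
        self.gate = nn.Linear(64, 8)   # weight.shape[0] == 8  -> skipped

model = Block()
nn.QuantizedLinear.quantize_module(
    model,
    group_size=64,
    bits=4,
    linear_class_predicate=lambda m: isinstance(m, nn.Linear)
    and m.weight.shape[0] != 8,
)
print(type(model.proj).__name__)  # QuantizedLinear
print(type(model.gate).__name__)  # Linear (router left in full precision)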