Mirror of https://github.com/ml-explore/mlx-examples.git (synced 2025-06-24 09:21:18 +08:00)

* feat: Add lora and qlora training to mlx-lm

  Co-authored-by: Awni Hannun <awni@apple.com>
23 lines · 675 B · Python
import mlx.core as mx

from mlx.utils import tree_unflatten

from .lora import LoRALinear


def apply_lora_layers(model, adapter_file: str):
    # Load the saved adapter weights and collect the names of the layers
    # they target (each layer contributes a ".lora_a" and a ".lora_b" entry).
    adapters = list(mx.load(adapter_file).items())
    linear_replacements = {}
    lora_layers = set(
        [name.replace(".lora_a", "").replace(".lora_b", "") for name, _ in adapters]
    )

    # Replace each targeted linear layer with a LoRALinear wrapper around it.
    for name, module in model.named_modules():
        if name in lora_layers:
            replacement_module = LoRALinear.from_linear(module)
            linear_replacements[name] = replacement_module

    model.update_modules(tree_unflatten(list(linear_replacements.items())))

    # Load the lora_a / lora_b weights into the newly inserted LoRALinear modules.
    model.update(tree_unflatten(adapters))
    return model
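For context, a minimal usage sketch of apply_lora_layers follows. It is an illustration, not part of this file: load_model, the model name, and the adapter path "adapters.npz" are placeholder assumptions; only apply_lora_layers comes from the code above.

# Minimal usage sketch (assumptions: `load_model` is a hypothetical helper that
# returns an mlx.nn.Module with standard Linear layers, and "adapters.npz" is a
# hypothetical path to LoRA weights saved during training).
model = load_model("mistral-7b-v0.1")             # hypothetical model loader
model = apply_lora_layers(model, "adapters.npz")  # swap Linears for LoRALinear and load lora_a/lora_b
model.eval()                                      # switch to eval mode before generation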