mirror of
https://github.com/ml-explore/mlx-examples.git
synced 2025-09-01 04:14:38 +08:00
feat: move lora into mlx-lm (#337)
* feat: Add lora and qlora training to mlx-lm --------- Co-authored-by: Awni Hannun <awni@apple.com>
This commit is contained in:
22
llms/mlx_lm/tuner/utils.py
Normal file
22
llms/mlx_lm/tuner/utils.py
Normal file
@@ -0,0 +1,22 @@
|
||||
import mlx.core as mx
|
||||
from mlx.utils import tree_unflatten
|
||||
|
||||
from .lora import LoRALinear
|
||||
|
||||
|
||||
def apply_lora_layers(model, adapter_file: str):
    """Load LoRA adapter weights and splice ``LoRALinear`` layers into *model*.

    Args:
        model: A model exposing ``named_modules``, ``update_modules`` and
            ``update`` (mlx ``nn.Module``-style API — assumed; confirm against
            caller).
        adapter_file: Path to an adapter file loadable by ``mx.load``. Keys
            are parameter paths ending in ``.lora_a`` / ``.lora_b``.

    Returns:
        The same model instance, with every module whose path matches an
        adapter key replaced by ``LoRALinear.from_linear(...)`` and the
        adapter weights applied on top.
    """
    adapters = list(mx.load(adapter_file).items())
    linear_replacements = {}
    # Strip the LoRA parameter suffixes to recover the module paths that
    # need a LoRALinear wrapper. Set comprehension: one pass, no throwaway
    # intermediate list (was set([...])).
    lora_layers = {
        name.replace(".lora_a", "").replace(".lora_b", "") for name, _ in adapters
    }

    for name, module in model.named_modules():
        if name in lora_layers:
            linear_replacements[name] = LoRALinear.from_linear(module)

    # Swap the plain linear modules for their LoRA wrappers, then load the
    # adapter weights into the freshly created lora_a / lora_b parameters.
    model.update_modules(tree_unflatten(list(linear_replacements.items())))
    model.update(tree_unflatten(adapters))
    return model
Reference in New Issue
Block a user