From 8bf397e4509cf76c2d30085a8c1f16e5ebc1b84f Mon Sep 17 00:00:00 2001
From: Chime Ogbuji
Date: Thu, 11 Jul 2024 17:34:34 -0400
Subject: [PATCH] Pass use_dora parameter to linear_to_lora_layers (#885)

---
 llms/mlx_lm/lora.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llms/mlx_lm/lora.py b/llms/mlx_lm/lora.py
index 9ccea53b..580e3d3c 100644
--- a/llms/mlx_lm/lora.py
+++ b/llms/mlx_lm/lora.py
@@ -166,7 +166,7 @@ def train_model(
     model.freeze()
 
     # Convert linear layers to lora layers and unfreeze in the process
-    linear_to_lora_layers(model, args.lora_layers, args.lora_parameters)
+    linear_to_lora_layers(model, args.lora_layers, args.lora_parameters, args.use_dora)
 
     # Resume training the given adapters.
     if args.resume_adapter_file is not None:
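
Note (not part of the patch): a minimal sketch of what forwarding use_dora enables,
assuming the mlx_lm tuner API of this period. The module path, function signature,
lora_parameters keys, and model name below are assumptions for illustration, not
taken from the patch itself.

    # Hypothetical usage: convert the target linear layers to DoRA rather than
    # LoRA layers by passing use_dora=True, which this patch forwards from args.
    from mlx_lm import load
    from mlx_lm.tuner.utils import linear_to_lora_layers

    # Placeholder model; any mlx_lm-compatible checkpoint works here.
    model, tokenizer = load("mlx-community/Mistral-7B-Instruct-v0.2-4bit")
    model.freeze()

    # Assumed to mirror the lora_parameters section of the training config.
    lora_parameters = {"rank": 8, "dropout": 0.0, "scale": 20.0}

    # With the fix, args.use_dora reaches this call; True swaps the target
    # linear layers for DoRA layers and unfreezes them for training.
    linear_to_lora_layers(model, 16, lora_parameters, use_dora=True)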