From 582f979dfd7709f5c10164fd30bfacbf894027a8 Mon Sep 17 00:00:00 2001
From: Goekdeniz-Guelmez
Date: Sun, 19 Jan 2025 00:41:27 +0100
Subject: [PATCH] fixing reference model loading and freezing

---
 llms/mlx_lm/lora.py              | 4 ++--
 llms/mlx_lm/tuner/dpo_trainer.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/llms/mlx_lm/lora.py b/llms/mlx_lm/lora.py
index 9ddaf82d..ccfd7e01 100644
--- a/llms/mlx_lm/lora.py
+++ b/llms/mlx_lm/lora.py
@@ -249,11 +249,11 @@ def train_model(
         if args.reference_model_path:
             reference_model, _ = load(args.reference_model_path)
         else:
-            reference_model = model
+            reference_model, _ = load(args.model)
 
         train_dpo(
             model=model,
-            reference_model=reference_model,
+            reference_model=reference_model.freeze(),
             tokenizer=tokenizer,
             optimizer=opt,
             train_dataset=train_set,
diff --git a/llms/mlx_lm/tuner/dpo_trainer.py b/llms/mlx_lm/tuner/dpo_trainer.py
index f552c02a..ab410373 100644
--- a/llms/mlx_lm/tuner/dpo_trainer.py
+++ b/llms/mlx_lm/tuner/dpo_trainer.py
@@ -148,7 +148,7 @@ def dpo_loss(
 
         logits = model(inputs)
         logits = logits.astype(mx.float32)
-        
+
         return -nn.losses.cross_entropy(logits, targets) * mask[:, :-1]
 
     num_chosen_tokens = chosen_masks.sum(-1)
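
A minimal sketch of the behavior this patch relies on: the DPO reference model is loaded as an independent copy of the base model and frozen, so only the policy model receives gradient updates. This assumes mlx_lm's load() returns a (model, tokenizer) pair and that mlx.nn.Module.freeze() marks parameters as non-trainable; the model path below is a placeholder, not taken from the patch.

    from mlx_lm import load

    # Placeholder path; in the patch this comes from args.model / args.reference_model_path.
    policy_model, tokenizer = load("my-base-model")   # trainable policy
    reference_model, _ = load("my-base-model")        # independent copy, not an alias of policy_model
    reference_model.freeze()                          # reference logits stay fixed during DPO updates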