diff --git a/llms/mlx_lm/lora.py b/llms/mlx_lm/lora.py
index 9ddaf82d..ccfd7e01 100644
--- a/llms/mlx_lm/lora.py
+++ b/llms/mlx_lm/lora.py
@@ -249,11 +249,11 @@ def train_model(
     if args.reference_model_path:
         reference_model, _ = load(args.reference_model_path)
     else:
-        reference_model = model
+        reference_model, _ = load(args.model)
 
     train_dpo(
         model=model,
-        reference_model=reference_model,
+        reference_model=reference_model.freeze(),
         tokenizer=tokenizer,
         optimizer=opt,
         train_dataset=train_set,
diff --git a/llms/mlx_lm/tuner/dpo_trainer.py b/llms/mlx_lm/tuner/dpo_trainer.py
index f552c02a..ab410373 100644
--- a/llms/mlx_lm/tuner/dpo_trainer.py
+++ b/llms/mlx_lm/tuner/dpo_trainer.py
@@ -148,7 +148,7 @@ def dpo_loss(
         logits = model(inputs)
         logits = logits.astype(mx.float32)
-
+        return -nn.losses.cross_entropy(logits, targets) * mask[:, :-1]
 
     num_chosen_tokens = chosen_masks.sum(-1)
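
For context, the first hunk loads the reference model as a separate, frozen copy instead of aliasing the policy model, so LoRA updates to the policy cannot shift the reference; the second hunk returns masked per-token log-likelihoods (negative cross-entropy) from the inner prediction helper. Below is a minimal sketch of how these pieces combine in a standard DPO objective. It assumes MLX's `mx`/`nn` APIs; the names `per_token_logps`, `dpo_loss_sketch`, and the `beta` default are illustrative, not the trainer's actual signatures.

```python
import mlx.core as mx
import mlx.nn as nn


def per_token_logps(model, x, mask):
    # Mirrors the hunk above: -cross_entropy gives log p(target | context),
    # zeroed on padding via the mask.
    inputs, targets = x[:, :-1], x[:, 1:]
    logits = model(inputs).astype(mx.float32)
    return -nn.losses.cross_entropy(logits, targets) * mask[:, :-1]


def dpo_loss_sketch(model, reference_model, chosen, rejected,
                    chosen_masks, rejected_masks, beta=0.1):
    # Policy log-probs: gradients flow through these.
    pi_chosen = per_token_logps(model, chosen, chosen_masks).sum(-1)
    pi_rejected = per_token_logps(model, rejected, rejected_masks).sum(-1)

    # Reference log-probs from the frozen copy; stop_gradient makes the
    # no-update intent explicit even with a frozen module.
    ref_chosen = mx.stop_gradient(
        per_token_logps(reference_model, chosen, chosen_masks).sum(-1))
    ref_rejected = mx.stop_gradient(
        per_token_logps(reference_model, rejected, rejected_masks).sum(-1))

    # DPO: -log(sigmoid(z)), computed stably as logaddexp(0, -z).
    z = beta * ((pi_chosen - pi_rejected) - (ref_chosen - ref_rejected))
    return mx.logaddexp(0.0, -z).mean()
```

Freezing (or reloading) the reference matters because DPO's implicit reward is the log-ratio between policy and reference; if the reference silently tracks the policy, that ratio collapses toward zero and the loss loses its anchor.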