removing tokenizer and updates

This commit is contained in:
Goekdeniz-Guelmez 2025-01-26 15:25:27 +01:00
parent 4d0e52f7c8
commit 557649d8da

View File

@ -128,7 +128,6 @@ def dpo_loss(
'chosen_logits_mean': mx.mean(policy_chosen_score)
}
return mx.mean(losses), reward, num_tokens, metrics
@ -180,7 +179,6 @@ def evaluate_dpo(
model,
reference_model,
dataset,
tokenizer,
batch_size,
num_batches,
beta: float,
@ -328,7 +326,6 @@ def train_dpo(
model=model,
reference_model=reference_model,
dataset=val_dataset,
tokenizer=tokenizer,
batch_size=args.batch_size,
num_batches=args.val_batches,
max_seq_length=args.max_seq_length,