diff --git a/llms/mlx_lm/examples/lora_config.yaml b/llms/mlx_lm/examples/lora_config.yaml index 530272c7..36bc1dff 100644 --- a/llms/mlx_lm/examples/lora_config.yaml +++ b/llms/mlx_lm/examples/lora_config.yaml @@ -7,6 +7,15 @@ train: true # The fine-tuning method: "lora", "dora", or "full". fine_tune_type: lora +# The optimizer to use, with its optional configuration +optimizer: adamw +# optimizer_config: +# adamw: +# betas: [0.9, 0.98] +# eps: 1.0e-6 +# weight_decay: 0.05 +# bias_correction: true + # Directory with {train, valid, test}.jsonl files data: "/path/to/training/data"