From a608ae99bc668f3aad35b0893626d4a539bbe3c9 Mon Sep 17 00:00:00 2001
From: Goekdeniz-Guelmez
Date: Wed, 12 Mar 2025 14:46:19 +0100
Subject: [PATCH] update LORA.md and lora_config.yaml

---
 llms/mlx_lm/LORA.md                   | 4 ++++
 llms/mlx_lm/examples/lora_config.yaml | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/llms/mlx_lm/LORA.md b/llms/mlx_lm/LORA.md
index e863abc4..e6f8d3a5 100644
--- a/llms/mlx_lm/LORA.md
+++ b/llms/mlx_lm/LORA.md
@@ -387,6 +387,10 @@ tokens-per-second, using the MLX Example
 [`wikisql`](https://github.com/ml-explore/mlx-examples/tree/main/lora/data)
 data set.
 
+## Logging
+
+You can log training metrics to Weights & Biases by adding the `--report-to-wandb` flag. This requires the `wandb` package, which is not installed by default; install it with `pip install wandb`. When enabled, all training and validation metrics are logged to your wandb account.
+
 [^lora]: Refer to the [arXiv paper](https://arxiv.org/abs/2106.09685) for more details on LoRA.
 
 [^qlora]: Refer to the paper [QLoRA: Efficient Finetuning of Quantized LLMs](https://arxiv.org/abs/2305.14314)
diff --git a/llms/mlx_lm/examples/lora_config.yaml b/llms/mlx_lm/examples/lora_config.yaml
index 36bc1dff..fe58875a 100644
--- a/llms/mlx_lm/examples/lora_config.yaml
+++ b/llms/mlx_lm/examples/lora_config.yaml
@@ -37,6 +37,9 @@ val_batches: 25
 
 # Adam learning rate.
 learning_rate: 1e-5
 
+# Report training and validation metrics to Weights & Biases (wandb).
+report_to_wandb: true
+
 # Number of training steps between loss reporting.
 steps_per_report: 10
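
A minimal usage sketch, not part of the patch itself: LORA.md documents the `mlx_lm.lora` entry point and its `--config` option, so the new flag and config key added above could be exercised as follows. The model identifier and data path are placeholders, not values from the patch.

```shell
# Install the optional dependency first; the patch notes it is not
# installed by default.
pip install wandb

# Enable W&B logging via the new CLI flag (model and data paths are
# illustrative placeholders).
mlx_lm.lora --model mistralai/Mistral-7B-v0.1 --train --data ./data \
    --report-to-wandb

# Or drive it from the example config, which now sets report_to_wandb: true.
mlx_lm.lora --config llms/mlx_lm/examples/lora_config.yaml
```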