diff --git a/llms/mlx_lm/LORA.md b/llms/mlx_lm/LORA.md
index e863abc4..e6f8d3a5 100644
--- a/llms/mlx_lm/LORA.md
+++ b/llms/mlx_lm/LORA.md
@@ -387,6 +387,10 @@ tokens-per-second, using the MLX Example
 [`wikisql`](https://github.com/ml-explore/mlx-examples/tree/main/lora/data)
 data set.
 
+## Logging
+
+You can log training metrics to Weights & Biases by adding the `--report-to-wandb` flag. This requires the `wandb` package, which is not installed by default; install it with `pip install wandb`. When enabled, all training and validation metrics are logged to your wandb account.
+
 [^lora]: Refer to the [arXiv paper](https://arxiv.org/abs/2106.09685) for more
   details on LoRA.
 [^qlora]: Refer to the paper [QLoRA: Efficient Finetuning of Quantized LLMs](https://arxiv.org/abs/2305.14314)
diff --git a/llms/mlx_lm/examples/lora_config.yaml b/llms/mlx_lm/examples/lora_config.yaml
index 36bc1dff..fe58875a 100644
--- a/llms/mlx_lm/examples/lora_config.yaml
+++ b/llms/mlx_lm/examples/lora_config.yaml
@@ -37,6 +37,9 @@ val_batches: 25
 
 # Adam learning rate.
 learning_rate: 1e-5
 
+# Report training and validation metrics to Weights & Biases (wandb).
+report_to_wandb: true
+
 # Number of training steps between loss reporting.
 steps_per_report: 10
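For illustration, a minimal usage sketch of the new flag, assuming the `mlx_lm.lora` fine-tuning invocation documented earlier in LORA.md (the model and data paths below are placeholders):

```shell
# Install the optional dependency first.
pip install wandb

# Fine-tune with LoRA and report metrics to Weights & Biases.
mlx_lm.lora \
    --model <path_to_model> \
    --train \
    --data <path_to_data> \
    --report-to-wandb
```

Alternatively, setting `report_to_wandb: true` in the example YAML config, as in the second hunk above, enables the same behavior when training from a config file.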