From 0b20d08d1b3d65f5a2bc48a2d74e1c2e3da9e74a Mon Sep 17 00:00:00 2001
From: Prince Canuma
Date: Sat, 24 Aug 2024 08:47:11 +0200
Subject: [PATCH] add phimoe to tuner

---
 llms/mlx_lm/tuner/utils.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llms/mlx_lm/tuner/utils.py b/llms/mlx_lm/tuner/utils.py
index c6af9730..92afd97c 100644
--- a/llms/mlx_lm/tuner/utils.py
+++ b/llms/mlx_lm/tuner/utils.py
@@ -96,6 +96,7 @@ def linear_to_lora_layers(
         "stablelm",
         "qwen2",
         "qwen2_moe",
+        "phimoe",
         "gemma",
         "gemma2",
         "starcoder2",
@@ -104,7 +105,7 @@ def linear_to_lora_layers(
         "deepseek",
     ]:
         keys = set(["self_attn.q_proj", "self_attn.v_proj"])
-        if model.model_type == "mixtral":
+        if model.model_type in ["mixtral", "phimoe"]:
             keys.add("block_sparse_moe.gate")
         if model.model_type == "qwen2_moe":
             keys.add("mlp.gate")
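
To illustrate the effect of this patch, here is a minimal sketch of the key-selection logic in `linear_to_lora_layers` after the change. The helper `lora_keys_for` is hypothetical, distilled from the patched code for illustration; in the real function `model.model_type` comes from a loaded mlx_lm model and the keys are used to pick which linear layers get LoRA adapters.

```python
# A minimal sketch, assuming the structure of linear_to_lora_layers in
# llms/mlx_lm/tuner/utils.py. lora_keys_for is a hypothetical helper, not
# part of mlx_lm's API.


def lora_keys_for(model_type: str) -> set[str]:
    """Return the module paths that receive LoRA adapters for a model type."""
    # All supported decoder-only models adapt the attention projections.
    keys = {"self_attn.q_proj", "self_attn.v_proj"}
    # MoE models also adapt their router ("gate"); after this patch, phimoe
    # is treated like mixtral since it shares the block_sparse_moe layout.
    if model_type in ["mixtral", "phimoe"]:
        keys.add("block_sparse_moe.gate")
    if model_type == "qwen2_moe":
        keys.add("mlp.gate")
    return keys


print(lora_keys_for("phimoe"))
# {'self_attn.q_proj', 'self_attn.v_proj', 'block_sparse_moe.gate'}
```

In short, the patch registers "phimoe" as a supported model type for LoRA fine-tuning and routes it through the same MoE gate handling as mixtral.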