From ba6c7d3aba506c5f03152fe42992489171e4f444 Mon Sep 17 00:00:00 2001
From: Sindhu Satish
Date: Wed, 29 Jan 2025 07:30:11 -0800
Subject: [PATCH] Qwen2 support

---
 llms/mlx_lm/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llms/mlx_lm/utils.py b/llms/mlx_lm/utils.py
index 96be6d29..68d2204e 100644
--- a/llms/mlx_lm/utils.py
+++ b/llms/mlx_lm/utils.py
@@ -43,7 +43,7 @@ MODEL_REMAPPING = {
     "mistral": "llama",  # mistral is compatible with llama
     "phi-msft": "phixtral",
     "falcon_mamba": "mamba",
-    "qwen2": "qwen2",
+    "qwen2": "qwen2"
 }
 
 MAX_FILE_SIZE_GB = 5
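
For context, MODEL_REMAPPING is presumably the table mlx_lm consults when it
resolves a checkpoint's model_type to one of its model modules. The sketch
below illustrates how such a lookup is typically performed; the helper name
resolve_model_module and the exact import path are illustrative assumptions,
not necessarily the library's actual code.

    import importlib

    # Remapping table as it appears in the patched file (assumed context).
    MODEL_REMAPPING = {
        "mistral": "llama",  # mistral is compatible with llama
        "phi-msft": "phixtral",
        "falcon_mamba": "mamba",
        "qwen2": "qwen2",
    }

    def resolve_model_module(model_type: str):
        # Fall back to the original name when no remapping entry exists
        # (a plain dict.get with the type itself as the default).
        remapped = MODEL_REMAPPING.get(model_type, model_type)
        # Assumed module layout: one module per architecture under mlx_lm.models.
        return importlib.import_module(f"mlx_lm.models.{remapped}")

Under a fallback like this, a self-referential entry such as "qwen2": "qwen2"
resolves to the same module as having no entry at all; its main effect is to
make the supported architecture explicit in the table.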