From b0520e770880e639d9a25e852e6c20027b51680d Mon Sep 17 00:00:00 2001
From: Sindhu Satish
Date: Wed, 29 Jan 2025 06:21:36 -0800
Subject: [PATCH] Bug fix - Qwen2 support

---
 llms/mlx_lm/utils.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llms/mlx_lm/utils.py b/llms/mlx_lm/utils.py
index 8e48ab25..96be6d29 100644
--- a/llms/mlx_lm/utils.py
+++ b/llms/mlx_lm/utils.py
@@ -43,6 +43,7 @@ MODEL_REMAPPING = {
     "mistral": "llama",  # mistral is compatible with llama
     "phi-msft": "phixtral",
     "falcon_mamba": "mamba",
+    "qwen2": "qwen2",
 }
 
 MAX_FILE_SIZE_GB = 5
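
For context, MODEL_REMAPPING is the table mlx_lm consults when it resolves a checkpoint's model_type to an implementation module. Below is a minimal sketch of that lookup; the helper name resolve_model_module and the config shape are illustrative assumptions, not the library's exact API.

    # Sketch of how a remapping table like MODEL_REMAPPING is typically used
    # when loading a model. Helper name and config shape are assumptions.
    import importlib

    MODEL_REMAPPING = {
        "mistral": "llama",  # mistral is compatible with llama
        "phi-msft": "phixtral",
        "falcon_mamba": "mamba",
        "qwen2": "qwen2",  # entry added by this patch (maps to itself)
    }

    def resolve_model_module(config: dict):
        """Map the checkpoint's model_type to a module name and import it."""
        model_type = config["model_type"]
        # Fall back to the original name when no remapping entry exists.
        model_type = MODEL_REMAPPING.get(model_type, model_type)
        return importlib.import_module(f"mlx_lm.models.{model_type}")

    # Example: a Qwen2 checkpoint's config resolves to the qwen2 module.
    # module = resolve_model_module({"model_type": "qwen2"})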