Fix whisper conversion for safetensors models (#935)

* fix whisper conversion for safetensors-only models; fix error in mlx-lm for existing save paths

* fix tests
This commit is contained in:
Awni Hannun
2024-08-14 10:22:04 -07:00
committed by GitHub
parent 33905447f9
commit 95840f32e2
3 changed files with 34 additions and 14 deletions

View File

@@ -660,6 +660,16 @@ def convert(
revision: Optional[str] = None,
dequantize: bool = False,
):
# Check the save path is empty
if isinstance(mlx_path, str):
mlx_path = Path(mlx_path)
if mlx_path.exists():
raise ValueError(
f"Cannot save to the path {mlx_path} as it already exists."
" Please delete the file/directory or specify a new path to save to."
)
print("[INFO] Loading")
model_path = get_model_path(hf_path, revision=revision)
model, config, tokenizer = fetch_from_hub(model_path, lazy=True)
@@ -681,9 +691,6 @@ def convert(
model = dequantize_model(model)
weights = dict(tree_flatten(model.parameters()))
if isinstance(mlx_path, str):
mlx_path = Path(mlx_path)
del model
save_weights(mlx_path, weights, donate_weights=True)

View File

@@ -82,6 +82,7 @@ class TestUtils(unittest.TestCase):
self.assertTrue(isinstance(model.layers[-1].mlp.up_proj, nn.QuantizedLinear))
# Check model weights have right type
mlx_path = os.path.join(self.test_dir, "mlx_model_bf16")
utils.convert(HF_MODEL_PATH, mlx_path=mlx_path, dtype="bfloat16")
model, _ = utils.load(mlx_path)