diff --git a/whisper/README.md b/whisper/README.md index 9f1d777a..50fc0764 100644 --- a/whisper/README.md +++ b/whisper/README.md @@ -19,14 +19,15 @@ Install [`ffmpeg`](https://ffmpeg.org/): brew install ffmpeg ``` -Next, download the Whisper PyTorch checkpoint and convert the weights to MLX format: +Next, download the Whisper PyTorch checkpoint and convert the weights to the MLX format. For example, to convert the `tiny` model use: ``` -# Take the "tiny" model as an example. Note that you can also convert a local PyTorch checkpoint in OpenAI's format. python convert.py --torch-name-or-path tiny --mlx-path mlx_models/tiny ``` -To generate a 4-bit quantized model, use ``-q`` for a full list of options: +Note that you can also convert a local PyTorch checkpoint which is in the original OpenAI format. + +To generate a 4-bit quantized model, use `-q`. For a full list of options: ``` python convert.py --help diff --git a/whisper/convert.py b/whisper/convert.py index 3ec9323c..48cbebc5 100644 --- a/whisper/convert.py +++ b/whisper/convert.py @@ -113,7 +113,7 @@ def load_torch_model( Parameters ---------- name_or_path : str - one of the official model names listed by `whisper.available_models()` or a local Pytorch checkpoint in OpenAI's format + one of the official model names listed by `whisper.available_models()` or a local PyTorch checkpoint which is in the original OpenAI format download_root: str path to download the model files; by default, it uses "~/.cache/whisper"