diff --git a/llama/llama.py b/llama/llama.py
index 9b8157b7..73eb39c5 100644
--- a/llama/llama.py
+++ b/llama/llama.py
@@ -315,7 +315,7 @@ def load_model(model_path):
     config["hidden_dim"] = weights["layers.0.feed_forward.w1.weight"].shape[0]
     if config.get("vocab_size", -1) < 0:
         config["vocab_size"] = weights["output.weight"].shape[-1]
-    unused = ["multiple_of", "ffn_dim_multiplier", 'rope_theta']
+    unused = ["multiple_of", "ffn_dim_multiplier", "rope_theta"]
     for k in unused:
         if k in config:
             config.pop(k)
diff --git a/mixtral/README.md b/mixtral/README.md
index 3b0c50d0..9194979e 100644
--- a/mixtral/README.md
+++ b/mixtral/README.md
@@ -62,6 +62,12 @@
 For more options including how to prompt the model, run:
 ```
 python mixtral.py --help
 ```
-[^mixtral]: Refer to Mistral's [blog post](https://mistral.ai/news/mixtral-of-experts/) for more details.
+For the Instruction model, make sure to follow the prompt format:
+
+```
+[INST] Instruction prompt [/INST]
+```
+
+[^mixtral]: Refer to Mistral's [blog post](https://mistral.ai/news/mixtral-of-experts/) and the [Hugging Face blog post](https://huggingface.co/blog/mixtral) for more details.
 [^instruc]: Refer to the [Hugging Face repo](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) for more details
diff --git a/phi2/convert.py b/phi2/convert.py
index 4c625a6e..5aa07dce 100644
--- a/phi2/convert.py
+++ b/phi2/convert.py
@@ -1,6 +1,7 @@
 from transformers import AutoModelForCausalLM
 import numpy as np
 
+
 def replace_key(key: str) -> str:
     if "wte.weight" in key:
         key = "wte.weight"
diff --git a/whisper/test.py b/whisper/test.py
index 79f233ba..3e7630a9 100644
--- a/whisper/test.py
+++ b/whisper/test.py
@@ -65,7 +65,6 @@ class TestWhisper(unittest.TestCase):
         logits = mlx_model(mels, tokens)
         self.assertEqual(logits.dtype, mx.float16)
 
-
     def test_decode_lang(self):
         options = decoding.DecodingOptions(task="lang_id", fp16=False)
         result = decoding.decode(self.model, self.mels, options)
diff --git a/whisper/whisper/decoding.py b/whisper/whisper/decoding.py
index 7c7c4a93..d5025444 100644
--- a/whisper/whisper/decoding.py
+++ b/whisper/whisper/decoding.py
@@ -112,7 +112,7 @@
     max_initial_timestamp: Optional[float] = 1.0
 
     # implementation details
-    fp16: bool = True # use fp16 for most of the calculation
+    fp16: bool = True  # use fp16 for most of the calculation
 
 
 @dataclass(frozen=True)
diff --git a/whisper/whisper/load_models.py b/whisper/whisper/load_models.py
index 58cef9ac..ffdccf44 100644
--- a/whisper/whisper/load_models.py
+++ b/whisper/whisper/load_models.py
@@ -44,7 +44,7 @@ _ALIGNMENT_HEADS = {
     "large-v1": b"ABzY8r9j$a0{>%R7#4sLmoOs{s)o3~84-RPdcFk!JR<kg!1S@z9<uP%RI9{M@%0S<UhMSzJ2{T",
     "large-v2": b"ABzY8zd+h!0{>%R7=D0pU<_bnWW*tkYAhobTNnu$jnkEkXqp)j;w1Tzk)UH3X%SZd&fFZ2fC2yj",
     "large-v3": b"ABzY8gWO1E0{>%R7(9S+Kn!D~%ngiGaR?*L!iJG9p-nab0JQ=-{D1-g00",
-    "large": b"ABzY8gWO1E0{>%R7(9S+Kn!D~%ngiGaR?*L!iJG9p-nab0JQ=-{D1-g00"
+    "large": b"ABzY8gWO1E0{>%R7(9S+Kn!D~%ngiGaR?*L!iJG9p-nab0JQ=-{D1-g00",
 }
 
 
@@ -166,7 +166,8 @@ def convert(model, rules=None):
 
 
 def torch_to_mlx(
-    torch_model: torch_whisper.Whisper, dtype: mx.Dtype = mx.float16,
+    torch_model: torch_whisper.Whisper,
+    dtype: mx.Dtype = mx.float16,
 ) -> whisper.Whisper:
     def convert_rblock(model, rules):
         children = dict(model.named_children())
@@ -194,6 +195,6 @@
 def load_model(
     name: str,
     download_root: str = None,
-    dtype : mx.Dtype = mx.float32,
+    dtype: mx.Dtype = mx.float32,
 ) -> whisper.Whisper:
     return torch_to_mlx(load_torch_model(name, download_root), dtype)
diff --git a/whisper/whisper/transcribe.py b/whisper/whisper/transcribe.py
index 3172bdb3..06f3c9ea 100644
--- a/whisper/whisper/transcribe.py
+++ b/whisper/whisper/transcribe.py
@@ -43,7 +43,7 @@ class ModelHolder:
     model_name = None
 
     @classmethod
-    def get_model(cls, model: str, dtype : mx.Dtype):
+    def get_model(cls, model: str, dtype: mx.Dtype):
         if cls.model is None or model != cls.model_name:
             cls.model = load_model(model, dtype=dtype)
             cls.model_name = model
diff --git a/whisper/whisper/whisper.py b/whisper/whisper/whisper.py
index bca69946..8ee6d7d9 100644
--- a/whisper/whisper/whisper.py
+++ b/whisper/whisper/whisper.py
@@ -37,6 +37,7 @@ def sinusoids(length, channels, max_timescale=10000):
     scaled_time = mx.arange(length)[:, None] * inv_timescales[None, :]
     return mx.concatenate([mx.sin(scaled_time), mx.cos(scaled_time)], axis=1)
 
+
 class LayerNorm(nn.LayerNorm):
     def __call__(self, x: mx.array) -> mx.array:
         return super().__call__(x.astype(mx.float32)).astype(x.dtype)
@@ -123,7 +124,13 @@ class ResidualAttentionBlock(nn.Module):
 
 class AudioEncoder(nn.Module):
     def __init__(
-        self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int, dtype: mx.Dtype = mx.float16,
+        self,
+        n_mels: int,
+        n_ctx: int,
+        n_state: int,
+        n_head: int,
+        n_layer: int,
+        dtype: mx.Dtype = mx.float16,
     ):
         super().__init__()
         self.conv1 = nn.Conv1d(n_mels, n_state, kernel_size=3, padding=1)
@@ -148,7 +155,13 @@
 
 class TextDecoder(nn.Module):
     def __init__(
-        self, n_vocab: int, n_ctx: int, n_state: int, n_head: int, n_layer: int, dtype: mx.Dtype = mx.float16,
+        self,
+        n_vocab: int,
+        n_ctx: int,
+        n_state: int,
+        n_head: int,
+        n_layer: int,
+        dtype: mx.Dtype = mx.float16,
     ):
         super().__init__()
 
@@ -160,7 +173,9 @@
             for _ in range(n_layer)
         ]
         self.ln = LayerNorm(n_state)
-        self._mask = nn.MultiHeadAttention.create_additive_causal_mask(n_ctx).astype(dtype)
+        self._mask = nn.MultiHeadAttention.create_additive_causal_mask(n_ctx).astype(
+            dtype
+        )
 
     def __call__(self, x, xa, kv_cache=None):
         """