Mirror of https://github.com/ml-explore/mlx-examples.git, synced 2025-06-24 01:17:28 +08:00

* more async eval
* quantize embedding / update quantize api
* more updates for quantize
* update for quantize embeddings
* update sd quant API
* update sdxl quants
* error for datasets < batch_size
* async
* fix config loading
* fix quant
* fix tests
* fix req
* remove lm head if tie weights is true
* fix test
170 lines
5.3 KiB
Python

from dataclasses import dataclass
from typing import Optional, Tuple

import mlx.core as mx
import mlx.nn as nn

from .base import BaseModelArgs


@dataclass
class ModelArgs(BaseModelArgs):
    model_type: str
    hidden_size: int
    num_hidden_layers: int
    intermediate_size: int
    num_attention_heads: int
    num_key_value_heads: int
    norm_epsilon: float = 1e-5
    vocab_size: int = 49152
    rope_theta: float = 100000
    tie_word_embeddings: bool = True


class Attention(nn.Module):
    def __init__(self, args: ModelArgs):
        super().__init__()
        self.args = args

        # Grouped-query attention: the number of key/value heads may be
        # smaller than the number of query heads.
        dim = args.hidden_size
        self.n_heads = n_heads = args.num_attention_heads
        self.n_kv_heads = n_kv_heads = args.num_key_value_heads

        head_dim = args.hidden_size // args.num_attention_heads
        self.scale = head_dim**-0.5

        self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=True)
        self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True)
        self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=True)
        self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=True)
        self.rope = nn.RoPE(head_dim, traditional=False, base=args.rope_theta)

    def __call__(
        self,
        x: mx.array,
        mask: Optional[mx.array] = None,
        cache: Optional[Tuple[mx.array, mx.array]] = None,
    ) -> mx.array:
        B, L, D = x.shape

        queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x)

        # Prepare the queries, keys and values for the attention computation
        queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3)
        keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)
        values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)

        if cache is not None:
            # Apply RoPE with an offset equal to the cached sequence length,
            # then append the new keys/values to the cache.
            key_cache, value_cache = cache
            queries = self.rope(queries, offset=key_cache.shape[2])
            keys = self.rope(keys, offset=key_cache.shape[2])
            keys = mx.concatenate([key_cache, keys], axis=2)
            values = mx.concatenate([value_cache, values], axis=2)
        else:
            queries = self.rope(queries)
            keys = self.rope(keys)

        output = mx.fast.scaled_dot_product_attention(
            queries, keys, values, scale=self.scale, mask=mask
        )

        output = output.transpose(0, 2, 1, 3).reshape(B, L, -1)
        return self.o_proj(output), (keys, values)


class MLP(nn.Module):
    def __init__(self, dim, hidden_dim):
        super().__init__()
        self.c_fc = nn.Linear(dim, hidden_dim, bias=True)
        self.c_proj = nn.Linear(hidden_dim, dim, bias=True)

    def __call__(self, x):
        return self.c_proj(nn.gelu(self.c_fc(x)))


class TransformerBlock(nn.Module):
    def __init__(self, args: ModelArgs):
        super().__init__()
        self.hidden_size = args.hidden_size
        self.n_heads = args.num_attention_heads

        self.self_attn = Attention(args)
        self.mlp = MLP(args.hidden_size, args.intermediate_size)
        self.input_layernorm = nn.LayerNorm(args.hidden_size, eps=args.norm_epsilon)
        self.post_attention_layernorm = nn.LayerNorm(
            args.hidden_size, eps=args.norm_epsilon
        )
        self.args = args

    def __call__(
        self,
        x: mx.array,
        mask: Optional[mx.array] = None,
        cache: Optional[Tuple[mx.array, mx.array]] = None,
    ) -> mx.array:
        r, cache = self.self_attn(self.input_layernorm(x), mask, cache)
        h = x + r
        r = self.mlp(self.post_attention_layernorm(h))
        out = h + r
        return out, cache


class Starcoder2Model(nn.Module):
    def __init__(self, args: ModelArgs):
        super().__init__()
        self.args = args
        self.vocab_size = args.vocab_size
        self.num_hidden_layers = args.num_hidden_layers
        assert self.vocab_size > 0
        self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size)
        self.layers = [
            TransformerBlock(args=args) for _ in range(args.num_hidden_layers)
        ]
        self.norm = nn.LayerNorm(args.hidden_size, eps=args.norm_epsilon)

    def __call__(
        self,
        inputs: mx.array,
        cache=None,
    ):
        h = self.embed_tokens(inputs)

        mask = None
        if h.shape[1] > 1:
            # A causal mask is only needed when processing more than one token
            # (prompt processing); single-token decoding skips it.
            mask = nn.MultiHeadAttention.create_additive_causal_mask(h.shape[1])
            mask = mask.astype(h.dtype)

        if cache is None:
            cache = [None] * len(self.layers)

        for e, layer in enumerate(self.layers):
            h, cache[e] = layer(h, mask, cache[e])

        return self.norm(h), cache


class Model(nn.Module):
    def __init__(self, args: ModelArgs):
        super().__init__()
        self.args = args
        self.model_type = args.model_type
        self.model = Starcoder2Model(args)
        if not args.tie_word_embeddings:
            self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False)

    def __call__(
        self,
        inputs: mx.array,
        cache=None,
    ):
        out, cache = self.model(inputs, cache)
        if self.args.tie_word_embeddings:
            # With tied weights the embedding matrix doubles as the output
            # projection, so no separate lm_head is created.
            out = self.model.embed_tokens.as_linear(out)
        else:
            out = self.lm_head(out)
        return out, cache

    @property
    def layers(self):
        return self.model.layers
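
Not part of the upstream file: a minimal usage sketch of how these classes fit together. The import path and the toy hyperparameters below are placeholder assumptions for illustration; adjust them to your package layout, and load real checkpoint weights for meaningful output. It runs a short prompt through the model, then performs one decode step with the returned per-layer (keys, values) cache.

import mlx.core as mx

from mlx_lm.models.starcoder2 import Model, ModelArgs  # assumed module path

# Toy hyperparameters so the example runs quickly; real StarCoder2
# checkpoints are far larger.
args = ModelArgs(
    model_type="starcoder2",
    hidden_size=64,
    num_hidden_layers=2,
    intermediate_size=256,
    num_attention_heads=8,
    num_key_value_heads=4,
)
model = Model(args)

# Prompt pass: with cache=None a causal mask is built and each layer
# returns its (keys, values) tuple.
prompt = mx.array([[1, 2, 3, 4]])
logits, cache = model(prompt)
print(logits.shape)  # (1, 4, 49152)

# Single decode step: feed the next token together with the cache; RoPE is
# applied with an offset equal to the cached sequence length.
next_token = mx.argmax(logits[:, -1, :], axis=-1)[None]
logits, cache = model(next_token, cache)
print(logits.shape)  # (1, 1, 49152)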