add cache + generation, clean up some stuff

Awni Hannun 2023-12-13 22:26:33 -08:00
parent a466cc5191
commit 88d7b67e6e
4 changed files with 70 additions and 113 deletions

phi2/.gitignore vendored Normal file

@@ -0,0 +1 @@
+weights.npz

phi2/convert.py

@@ -60,7 +60,7 @@ def convert():
         del state_dict[key_stub + ".bias"]

     weights = {replace_key(k): v.numpy() for k, v in state_dict.items()}
-    numpy.savez("weights/phi-2.npz", **weights)
+    numpy.savez("weights.npz", **weights)


 if __name__ == "__main__":

phi2/phi2.py

@@ -7,7 +7,6 @@ import mlx.core as mx
 import mlx.nn as nn
 import math

-
 @dataclass
 class ModelArgs:
     max_sequence_length: int = 2048
@@ -18,23 +17,6 @@ class ModelArgs:
     rotary_dim: int = 32


-class NewGELUActivation(nn.Module):
-    """
-    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
-    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
-    """
-
-    def __call__(self, input: mx.array) -> mx.array:
-        return (
-            0.5
-            * input
-            * (
-                1.0
-                + mx.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * (input**3)))
-            )
-        )
-
-
 class RoPEAttention(nn.Module):
     def __init__(self, dims: int, num_heads: int, bias: bool = True):
         super().__init__()
@@ -77,6 +59,7 @@ class RoPEAttention(nn.Module):
         scores = (queries * scale) @ keys.transpose(0, 1, 3, 2)
         if mask is not None:
             scores = scores + mask
+
         scores = mx.softmax(scores, axis=-1)
         values_hat = (scores @ values).transpose(0, 2, 1, 3).reshape(B, L, -1)
@@ -92,19 +75,13 @@ class ParallelBlock(nn.Module):
         self.ln = nn.LayerNorm(dims)
         self.fc1 = nn.Linear(dims, mlp_dims)
         self.fc2 = nn.Linear(mlp_dims, dims)
-        self.act = NewGELUActivation()
+        self.act = nn.GELU(approx="precise")

-    def __call__(self, x, x_mask):
-        residual = x
-        hidden_states = self.ln(x)
-        attn_outputs, _ = self.self_attention(
-            hidden_states, hidden_states, hidden_states, x_mask
-        )
-        ff_hidden_states = self.fc2(self.act(self.fc1(hidden_states)))
-
-        hidden_states = attn_outputs + ff_hidden_states + residual
-
-        return hidden_states
+    def __call__(self, x, mask, cache):
+        h = self.ln(x)
+        attn_h, cache = self.self_attention(h, h, h, mask, cache)
+        ff_h = self.fc2(self.act(self.fc1(h)))
+        return attn_h + ff_h + x, cache


 class TransformerDecoder(nn.Module):
@@ -114,10 +91,22 @@ class TransformerDecoder(nn.Module):
         super().__init__()
         self.h = [ParallelBlock(dims, num_heads, mlp_dims) for i in range(num_layers)]

-    def __call__(self, x, x_mask):
-        for layer in self.h:
-            x = layer(x, x_mask)
-        return x
+    def __call__(self, x, mask, cache):
+        if cache is None:
+            cache = [None] * len(self.h)
+
+        for e, layer in enumerate(self.h):
+            x, cache[e] = layer(x, mask, cache[e])
+        return x, cache
+
+
+class OutputHead(nn.Module):
+    def __init__(self, config: ModelArgs) -> None:
+        self.ln = nn.LayerNorm(config.model_dim)
+        self.linear = nn.Linear(config.model_dim, config.num_vocab)
+
+    def __call__(self, inputs):
+        return self.linear(self.ln(inputs))


 class Phi2(nn.Module):
@@ -128,105 +117,69 @@ class Phi2(nn.Module):
             dims=config.model_dim,
             num_heads=config.num_heads,
         )
-        self.lm_head = LanguageModelingHead(config)
+        self.lm_head = OutputHead(config)

     def __call__(
         self,
-        input_ids: mx.array,
-        attention_mask: mx.array = None,
+        inputs: mx.array,
+        mask: mx.array = None,
+        cache: mx.array = None,
     ) -> tuple[mx.array, mx.array]:
-        x = self.wte(input_ids)
-        if attention_mask is not None:
-            # convert 0's to -infs, 1's to 0's, and make it broadcastable
-            attention_mask = mx.log(attention_mask)
-            attention_mask = mx.expand_dims(attention_mask, (1, 2))
-        else:
-            attention_mask = nn.MultiHeadAttention.create_additive_causal_mask(
-                x.shape[1]
-            )
-        y = self.transformer(x, attention_mask)
-        return self.lm_head(y)
+        x = self.wte(inputs)
+
+        mask = None
+        if x.shape[1] > 1:
+            mask = nn.MultiHeadAttention.create_additive_causal_mask(x.shape[1])
+            mask = mask.astype(x.dtype)
+
+        y, cache = self.transformer(x, mask, cache)
+        return self.lm_head(y), cache

-    def generate(self, input_ids, temp=1.0):
-        cache = input_ids.tolist()

-        # Make an additive causal mask. We will need that to process the prompt.
-        mask = nn.MultiHeadAttention.create_additive_causal_mask(input_ids.shape[1])
-        mask = mask.astype(self.wte.weight.dtype)
+def generate(prompt: mx.array, model: Phi2, temp: Optional[float] = 0.0):
+    def sample(logits):
+        if temp == 0:
+            return mx.argmax(logits, axis=-1)
+        else:
+            return mx.random.categorical(logits * (1 / temp))

-        # First we process the prompt x the same way as in __call__ but
-        # save the caches in cache
-        x = self.wte(input_ids)
-        # for l in self.layers:
-        #     x, c = l(x, mask=mask)
-        #     cache.append(c)  # <--- we store the per layer cache in a
-        #                      #      simple python list
-        x = self.transformer(x, mask)
-        y = self.lm_head(x[:, -1])  # <--- we only care about the last logits
-        #                           #      that generate the next token
-        y = mx.random.categorical(y * (1 / temp))
+    logits, cache = model(prompt)
+    y = sample(logits[:, -1, :])
+    yield y

-        # y now has size [1]
-        # Since MLX is lazily evaluated nothing is computed yet.
-        # Calling y.item() would force the computation to happen at
-        # this point but we can also choose not to do that and let the
-        # user choose when to start the computation.
-        yield y
-
-        cache += [y.item()]
-
-        # Now we parsed the prompt and generated the first token we
-        # need to feed it back into the model and loop to generate the
-        # rest.
-        while True:
-            # Unsqueezing the last dimension to add a sequence length
-            # dimension of 1
-            x = self.wte(mx.array(cache))
-            x = self.transformer(x, mask)
-            y = self.lm_head(x[:, -1])
-            y = mx.random.categorical(y * (1 / temp))
-            cache += [y[0].item()]
-
-            yield y
-
-
-class LanguageModelingHead(nn.Module):
-    def __init__(self, config: ModelArgs) -> None:
-        self.ln = nn.LayerNorm(config.model_dim)
-        self.linear = nn.Linear(config.model_dim, config.num_vocab)
-
-    def __call__(self, inputs):
-        return self.linear(self.ln(inputs))
+    while True:
+        logits, cache = model(y[:, None], cache=cache)
+        y = sample(logits.squeeze(1))
+        yield y


 if __name__ == "__main__":
     model = Phi2(ModelArgs())

     weights = mx.load("weights/phi-2.npz")
     weights = tree_unflatten(list(weights.items()))
-    weights = tree_map(lambda p: mx.array(p), weights)
+    weights = tree_map(lambda p: mx.array(p, mx.float32), weights)

     model.update(weights)

     tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
-    tokens = tokenizer(
-        '''def print_prime(n):
-   """
-   Print all primes between 1 and n
-   """''',
+    prompt = tokenizer("Write a detailed analogy between mathematics and a lighthouse.",
         return_tensors="np",
         return_attention_mask=False,
-    )
-    tokens = {key: mx.array(v) for key, v in tokens.items()}
-
-    print(
-        '''def print_prime(n):
-   """
-   Print all primes between 1 and n
-   """'''
-    )
-    for output in model.generate(**tokens):
-        print(tokenizer.decode(output.item()))
+    )["input_ids"]
+
+    prompt = mx.array(prompt)
+
+    tokens_per_eval = 1
+    max_tokens = 100
+
+    tokens = []
+    for token, _ in zip(generate(prompt, model), range(max_tokens)):
+        tokens.append(token)
+
+        if (len(tokens) % tokens_per_eval) == 0:
+            mx.eval(tokens)
+            s = tokenizer.decode([t.item() for t in tokens])
+            print(s, end="", flush=True)
+            tokens = []
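A note on the cache plumbing: the hunks above show cache being threaded through ParallelBlock and TransformerDecoder, but the actual key/value bookkeeping lives in RoPEAttention, whose body is mostly outside this diff. Below is a minimal sketch of the scheme those call sites imply, assuming keys/values of shape (B, num_heads, L, head_dim) grown along the sequence axis; the function name attend and the (keys, values) tuple layout of the cache are illustrative assumptions, not taken from the diff.

import mlx.core as mx

def attend(queries, keys, values, cache=None):
    # Prompt pass: cache is None, all L prompt tokens are processed at once,
    # and the keys/values themselves become the cache. Decode pass: L == 1,
    # and the new token's keys/values are appended to the cached ones, so
    # each generated token costs one token of new compute rather than a
    # re-run of the whole sequence.
    if cache is not None:
        key_cache, value_cache = cache  # assumed cache layout: (keys, values)
        keys = mx.concatenate([key_cache, keys], axis=2)
        values = mx.concatenate([value_cache, values], axis=2)
    scale = keys.shape[-1] ** -0.5
    scores = mx.softmax((queries * scale) @ keys.transpose(0, 1, 3, 2), axis=-1)
    # Hand the updated cache back with the output, matching the call site in
    # ParallelBlock: attn_h, cache = self.self_attention(h, h, h, mask, cache)
    return scores @ values, (keys, values)

This is also why the new Phi2.__call__ only builds a causal mask when x.shape[1] > 1: a single-token decode step attends to everything already in the cache and needs no mask.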
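On the activation swap in ParallelBlock: the deleted NewGELUActivation is the tanh approximation of GELU written out by hand, and the commit replaces it with the built-in nn.GELU(approx="precise"). A quick standalone check (not part of the commit) that the built-in tracks the hand-rolled formula; how closely they agree depends on which approximation constants the installed mlx version uses, so the difference is printed rather than asserted.

import math

import mlx.core as mx
import mlx.nn as nn

x = mx.arange(-4, 4, 0.25)

# The tanh-based GELU that NewGELUActivation spelled out explicitly.
manual = 0.5 * x * (1.0 + mx.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * (x**3))))

# The built-in used by ParallelBlock after this commit.
builtin = nn.GELU(approx="precise")(x)

print(mx.abs(manual - builtin).max())  # expected to be at or near zero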

phi2/requirements.txt Normal file

@@ -0,0 +1,3 @@
+einops
+mlx
+transformers