not working, probably incorrect cache handling

Goekdeniz-Guelmez 2024-10-22 22:04:25 +02:00
parent 55485b98e8
commit e43a2ab229
2 changed files with 97 additions and 84 deletions


@@ -341,21 +341,20 @@ class MambaCache(_BaseCache):
class Mamba2Cache(_BaseCache):
"""Cache for Mamba model inference containing conv cache and SSM state."""
conv_cache: Optional[mx.array] = None
ssm_state: Optional[mx.array] = None
conv_states: Optional[mx.array] = None
ssm_states: Optional[mx.array] = None
def __getitem__(self, idx: int) -> Optional[mx.array]:
if idx == 0:
return self.conv_cache
return self.conv_states
elif idx == 1:
return self.ssm_state
return self.ssm_states
raise IndexError("Cache index must be 0 or 1")
def __setitem__(self, idx: int, value: Optional[mx.array]):
if idx == 0:
self.conv_cache = value
self.conv_states = value
elif idx == 1:
self.ssm_state = value
self.ssm_states = value
else:
raise IndexError("Cache index must be 0 or 1")
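For reference, a minimal usage sketch of the renamed cache slots, assuming Mamba2Cache can be constructed with no arguments (per the field defaults above) and using made-up shapes:

# Assuming Mamba2Cache is importable from the cache module shown above, e.g.:
# from mlx_lm.models.cache import Mamba2Cache  # path assumed; adjust to your checkout
import mlx.core as mx

cache = Mamba2Cache()                  # field defaults are None
cache[0] = mx.zeros((1, 3, 8))         # conv_states: last (kernel_size - 1) tokens
cache[1] = mx.zeros((1, 4, 16, 32))    # ssm_states: (batch, heads, head_dim, state)
assert cache[0] is cache.conv_states
assert cache[1] is cache.ssm_states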


@@ -5,7 +5,7 @@ import mlx.core as mx
import mlx.nn as nn
from .base import BaseModelArgs
from .cache import MambaCache
from .cache import Mamba2Cache
@dataclass
class ModelArgs(BaseModelArgs):
@@ -62,6 +62,7 @@ def silu(x):
return x * mx.sigmoid(x)
def ssd(x, A, B, C, chunk_size):
# Replace einsum operations with explicit reshape and matrix multiply
batch, seqlen, nheads, dim = x.shape
B = mx.expand_dims(B, axis=2)
C = mx.expand_dims(C, axis=2)
@@ -73,9 +74,18 @@ def ssd(x, A, B, C, chunk_size):
chunk = slice(i, min(i + chunk_size, seqlen))
dA = mx.exp(mx.expand_dims(A[chunk], axis=0))
dBx = mx.einsum('blhp,bln->bhpn', x[:, chunk], B[:, chunk])
# Replace einsum with explicit operations
x_chunk = x[:, chunk] # [batch, chunk_size, nheads, dim]
x_chunk = mx.transpose(x_chunk, [0, 2, 3, 1]) # [batch, nheads, dim, chunk_size]
B_chunk = B[:, chunk] # [batch, chunk_size, state_size]
dBx = mx.matmul(x_chunk, B_chunk) # [batch, nheads, dim, state_size]
state = state * mx.expand_dims(dA, axis=-1) + dBx
y = mx.einsum('bhpn,bln->blhp', state, C[:, chunk])
# Replace einsum with explicit operations
C_chunk = C[:, chunk] # [batch, chunk_size, state_size]
y = mx.matmul(state, mx.transpose(C_chunk, [0, 2, 1])) # [batch, nheads, dim, chunk_size]
y = mx.transpose(y, [0, 3, 1, 2]) # [batch, chunk_size, nheads, dim]
outputs.append(y)
return mx.concatenate(outputs, axis=1), state
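The comment above motivates replacing einsum with transpose plus matmul; here is a standalone check of that identity with arbitrary sizes (assuming mx.einsum is available in your MLX build), independent of the exact broadcasting used in the chunked loop:

import mlx.core as mx

# b=batch, l=chunk length, h=heads, p=head_dim, n=state size (made-up values)
b, l, h, p, n = 2, 4, 3, 5, 7
x = mx.random.normal((b, l, h, p))
B = mx.random.normal((b, l, n))

ref = mx.einsum('blhp,bln->bhpn', x, B)

x_t = mx.transpose(x, [0, 2, 3, 1])   # (b, h, p, l)
B_e = mx.expand_dims(B, axis=1)       # (b, 1, l, n) so it broadcasts over heads
out = mx.matmul(x_t, B_e)             # (b, h, p, n)

assert mx.allclose(ref, out, atol=1e-5)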
@@ -93,7 +103,7 @@ class DepthWiseConv1d(nn.Module):
assert in_channels == out_channels, "In and out channels must be the same for depthwise convolution"
assert self.groups == in_channels, "Groups must be equal to in_channels for depthwise convolution"
# Initialize with shape (channels, 1, kernel_size) to match pretrained weights
# Weight shape: (channels, 1, kernel_size) to match pretrained weights
self.weight = mx.random.normal((in_channels, 1, kernel_size))
self.bias = mx.zeros((out_channels,)) if bias else None
@@ -101,56 +111,78 @@ class DepthWiseConv1d(nn.Module):
B, L, C = x.shape
K = self.kernel_size
# Validate input dimensions
assert C == self.in_channels, f"Input channels {C} don't match expected {self.in_channels}"
# Handle padding and caching
if cache is not None:
conv_cache = cache[cache_idx]
if conv_cache is not None:
x = mx.concatenate([conv_cache, x], axis=1)
L = x.shape[1] # Update L after concatenation
conv_states = cache[cache_idx]
if conv_states is not None:
# Validate cache shape
assert conv_states.shape[0] == B, "Cache batch size mismatch"
assert conv_states.shape[2] == C, "Cache channel count mismatch"
x = mx.concatenate([conv_states, x], axis=1)
L = x.shape[1]
else:
# Add left padding of size (kernel_size - 1)
pad_left = K - 1
x = mx.pad(x, [(0, 0), (pad_left, 0), (0, 0)])
L = x.shape[1] # Update L after padding
L = x.shape[1]
# Implement depthwise convolution manually for each channel
# Collect per-channel outputs
outputs = []
# Process each channel independently
for c in range(C):
# Extract single channel and reshape for 1D convolution
# Extract and prepare channel data
x_c = x[:, :, c] # Shape: [B, L]
x_c = mx.expand_dims(x_c, axis=1) # Shape: [B, 1, L]
# Extract and ensure filter is 3D
w_c = self.weight[c] # Shape: [1, kernel_size] or [1, 1, kernel_size]
# Prepare filter weights
w_c = self.weight[c] # Get channel weights
# Ensure filter is 3D: [depth(1), in_channels(1), kernel_size]
if w_c.ndim == 2:
w_c = mx.expand_dims(w_c, axis=0) # Shape: [1, 1, kernel_size]
w_c = mx.expand_dims(w_c, axis=0)
elif w_c.ndim == 1:
w_c = mx.expand_dims(mx.expand_dims(w_c, axis=0), axis=0)
# For inference mode (single token), adjust the input
# Handle inference mode (single token)
if L < K:
# Pad input to match kernel size
pad_size = K - L
x_c = mx.pad(x_c, [(0, 0), (0, 0), (pad_size, 0)])
# Apply 1D convolution for this channel
# Apply 1D convolution
try:
y_c = mx.conv_general(
x_c,
w_c,
stride=1,
padding=0 # We've already handled padding
padding=0 # Padding already handled
)
if self.bias is not None:
y_c = y_c + self.bias[c]
outputs.append(mx.squeeze(y_c, axis=1)) # Shape: [B, 1]
# Remove singleton dimension and add to outputs
outputs.append(mx.squeeze(y_c, axis=1))
# Stack all channel outputs
except Exception as e:
raise RuntimeError(f"Convolution failed for channel {c}. Shapes: input={x_c.shape}, weight={w_c.shape}") from e
# Stack channel outputs along last dimension
y = mx.stack(outputs, axis=-1) # Shape: [B, L', C]
# Update cache if needed
if cache is not None:
# Update cache with the most recent K-1 tokens
cache[cache_idx] = x[:, -(K-1):, :] if L >= K else x
# Store last (kernel_size - 1) tokens or entire input if shorter
new_cache = x[:, -(K-1):, :] if L >= K else x
cache[cache_idx] = new_cache
if new_cache.shape != cache[cache_idx].shape:
cache[cache_idx] = new_cache
print(f"Cache updated at index {cache_idx}")
else:
print(f"Skipping cache update at index {cache_idx}, shapes are identical.")
return y
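The caching convention used here is to carry the last kernel_size - 1 tokens forward; a small sketch with made-up sizes shows why that window is enough for the next single-token step:

import mlx.core as mx

K = 4                                    # kernel_size (hypothetical)
prompt = mx.random.normal((1, 10, 8))    # (batch, seq_len, channels)

conv_states = prompt[:, -(K - 1):, :]    # keep the last K-1 tokens -> (1, 3, 8)

next_token = mx.random.normal((1, 1, 8))
window = mx.concatenate([conv_states, next_token], axis=1)  # (1, 4, 8)
# `window` spans exactly one kernel width, so the depthwise conv applied to the
# new token sees the same context it would have seen inside the full sequence.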
@@ -184,9 +216,10 @@ class Mamba2Block(nn.Module):
layer_scale = math.sqrt(1.0 / args.num_hidden_layers)
self.out_proj.weight = self.out_proj.weight * layer_scale
def __call__(self, u: mx.array, cache = None):
if cache is not None and self.args.use_cache:
return self.step(u, cache)
def __call__(self, x: mx.array, cache=None):
# if cache is not None and self.args.use_cache:
if cache is not None:
return self.step(x, cache)
# Calculate sizes
d_model = self.args.intermediate_size
@@ -197,7 +230,7 @@ class Mamba2Block(nn.Module):
A = -mx.exp(self.A_log)
# Project input
zxbcdt = self.in_proj(u)
zxbcdt = self.in_proj(x)
# Correct splits for z, xBC, dt
splits = [
@@ -262,13 +295,7 @@ class Mamba2Block(nn.Module):
return y
def step(self, u: mx.array, cache: MambaCache):
"""
Process single or multiple tokens while maintaining state.
Args:
u: Input tensor of shape (batch_size, seq_len, hidden_size)
cache: MambaCache object containing conv cache and ssm state
"""
def step(self, u: mx.array, cache):
batch_size = u.shape[0]
seq_len = u.shape[1]
outputs = []
@@ -295,17 +322,11 @@ class Mamba2Block(nn.Module):
n_heads = self.args.num_heads
d_head = self.args.head_dim
# Correct splits for z, xBC, dt
splits = [
d_model, # z size
d_model + 2 * d_state, # xBC size (delta, B, C)
n_heads # dt size
]
# Split the projected input
z = zxbcdt[:, :, :splits[0]]
xBC = zxbcdt[:, :, splits[0]:splits[0] + splits[1]]
dt = zxbcdt[:, :, -splits[2]:] # Take last n_heads elements
# Split projected input
# conv_dim = d_model + 2 * d_state (this should match self.conv1d.in_channels)
z = zxbcdt[:, :, :d_model]
xBC = zxbcdt[:, :, d_model:d_model + 2*d_state + d_model] # Include the full conv dimension
dt = zxbcdt[:, :, -(n_heads):]
# Process dt
dt = mx.reshape(dt, (batch_size, n_heads))
@@ -316,25 +337,23 @@ class Mamba2Block(nn.Module):
)
dt = mx.maximum(dt, self.args.time_step_floor)
# Process convolution
# Process convolution with correct dimensions
xBC = self.conv1d(xBC, cache=cache, cache_idx=0)
xBC = silu(xBC)
# Split convolved xBC into x, B, C
# Split convolved xBC into x, B, C with correct dimensions
x = xBC[:, :, :d_model]
B = xBC[:, :, d_model:d_model + d_state]
C = xBC[:, :, -d_state:]
# Reshape x into (batch, heads, dim)
# Reshape tensors for SSM computation
x = mx.reshape(x, (batch_size, 1, n_heads, d_head))
x = mx.squeeze(x, axis=1) # (batch, heads, dim)
# Reshape B into (batch, heads, dim, state)
B = mx.reshape(B, (batch_size, 1, d_state))
B = mx.broadcast_to(B, (batch_size, n_heads, d_state))
B = mx.expand_dims(B, axis=2) # (batch, heads, 1, state)
# Reshape C for later use
C = mx.reshape(C, (batch_size, 1, d_state))
C = mx.broadcast_to(C, (batch_size, n_heads, d_state))
C = mx.expand_dims(C, axis=3) # (batch, heads, state, 1)
@@ -344,14 +363,11 @@ class Mamba2Block(nn.Module):
dA = mx.exp(dt * mx.expand_dims(A, 0))
dA = mx.expand_dims(mx.expand_dims(dA, -1), -1) # (batch, heads, 1, 1)
# Prepare x for Bx computation
# Update state with proper shapes
x = mx.expand_dims(x, axis=3) # (batch, heads, dim, 1)
# Compute dBx with proper broadcasting
dBx = mx.matmul(x, B) # (batch, heads, dim, state)
# Update state
ssm_state = cache[1] # (batch, heads, dim, state)
ssm_state = cache[1]
ssm_state = ssm_state * dA + dBx
cache[1] = ssm_state
@@ -359,18 +375,16 @@ class Mamba2Block(nn.Module):
y = mx.matmul(ssm_state, C) # (batch, heads, dim, 1)
y = mx.squeeze(y, axis=-1) # (batch, heads, dim)
# Add skip connection with D
# Add skip connection
y = y + x[:, :, :, 0] * mx.expand_dims(self.D, -1)
# Reshape to original dimensions
# Reshape and process output
y = mx.reshape(y, (batch_size, 1, n_heads * d_head))
# Apply norm and output projection
y = self.norm(y + z)
y = self.out_proj(y)
if self.args.residual_in_fp32:
y.astype(mx.float32)
y = y.astype(mx.float32)
outputs.append(y)
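The single-token path above reduces to a rank-1 update of the per-head SSM state; a shape walk-through with assumed sizes (not the model's actual configuration):

import mlx.core as mx

batch, heads, head_dim, state = 1, 4, 16, 32    # hypothetical sizes

ssm_state = mx.zeros((batch, heads, head_dim, state))
dA = mx.random.uniform(shape=(batch, heads, 1, 1))   # per-head decay factor
x = mx.random.normal((batch, heads, head_dim, 1))    # current token, per head
B = mx.random.normal((batch, heads, 1, state))
C = mx.random.normal((batch, heads, state, 1))

dBx = mx.matmul(x, B)               # outer product -> (batch, heads, head_dim, state)
ssm_state = ssm_state * dA + dBx    # decay old state, add new contribution
y = mx.matmul(ssm_state, C)         # (batch, heads, head_dim, 1)
y = mx.squeeze(y, axis=-1)          # (batch, heads, head_dim)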
@@ -428,8 +442,8 @@ class Model(nn.Module):
print('output')
return logits
def make_cache(self):
return [MambaCache() for _ in range(len(self.layers))]
def make_cache(self, batch_size=1):
return [Mamba2Cache(batch_size, self.args.num_heads, self.args.head_dim, self.args.state_size) for _ in range(len(self.layers))]
def sanitize(self, weights):
sanitized = {}