keep base weights in fp16

Awni Hannun 2023-12-15 10:42:18 -08:00
parent 84f02ef58b
commit 8c8f9d6440


@@ -47,7 +47,7 @@ class LoRALinear(nn.Module):
         self.lora_b = mx.zeros(shape=(lora_rank, output_dims))

     def __call__(self, x):
-        y = self.linear(x.astype(self.linear.weight.dtype)).astype(x.dtype)
+        y = self.linear(x.astype(self.linear.weight.dtype))
         z = (x @ self.lora_a) @ self.lora_b
         return y + 2.0 * z
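
For context, a minimal self-contained sketch of a LoRA linear layer built around the forward pass in this diff. It illustrates the dtype handling only and is not the repository's exact class: the constructor arguments and the adapter initialization (scale, mx.random.uniform) are assumptions, while lora_a, lora_b, the 2.0 scaling, and the cast to the base weight dtype come from the hunk above.

import math

import mlx.core as mx
import mlx.nn as nn


class LoRALinear(nn.Module):
    def __init__(self, input_dims: int, output_dims: int, lora_rank: int = 8):
        super().__init__()
        # Frozen base projection; its weights can be loaded in fp16.
        self.linear = nn.Linear(input_dims, output_dims, bias=False)
        # Trainable low-rank adapters (initialization scheme assumed here);
        # mx.random.uniform and mx.zeros default to float32.
        scale = 1 / math.sqrt(input_dims)
        self.lora_a = mx.random.uniform(
            low=-scale, high=scale, shape=(input_dims, lora_rank)
        )
        self.lora_b = mx.zeros(shape=(lora_rank, output_dims))

    def __call__(self, x):
        # Cast the input to the base weight dtype so the frozen matmul can
        # run in fp16 without converting the base weights.
        y = self.linear(x.astype(self.linear.weight.dtype))
        # The low-rank update runs in the adapters' own (float32) precision.
        z = (x @ self.lora_a) @ self.lora_b
        return y + 2.0 * z

In this sketch the output is left in whatever dtype the sum y + 2.0 * z promotes to, rather than being cast back to x.dtype; the adapter path stays in float32 while only the frozen base matmul uses the fp16 weights.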