Mirror of https://github.com/ml-explore/mlx-examples.git (synced 2025-06-26 02:33:23 +08:00)
cast around lora adapters (#613)
parent 5a52899405
commit bbfcc103d7
@@ -97,9 +97,6 @@ class LoRALinear(nn.Module):
         self.lora_b = mx.zeros(shape=(r, output_dims))
 
     def __call__(self, x):
-        dtype = self.linear.weight.dtype
-        if isinstance(self.linear, nn.QuantizedLinear):
-            dtype = self.linear.scales.dtype
-        y = self.linear(x.astype(dtype))
+        y = self.linear(x)
         z = (self.dropout(x) @ self.lora_a) @ self.lora_b
-        return y + self.scale * z
+        return y + (self.scale * z).astype(x.dtype)
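The change moves where the cast happens: instead of casting the input x to the base layer's weight (or quantized scales) dtype before the projection, the base layer now runs on x as-is, and only the scaled LoRA update is cast back to x.dtype. Below is a minimal, self-contained sketch of the resulting behavior; only __call__ mirrors the diff above, while the constructor (rank, scale, lora_a initialization, Dropout settings) is a simplified assumption and not taken verbatim from the mlx-examples LoRALinear.

import math

import mlx.core as mx
import mlx.nn as nn


class LoRALinearSketch(nn.Module):
    def __init__(self, input_dims: int, output_dims: int, r: int = 8, scale: float = 20.0):
        super().__init__()
        # Stand-in for the frozen base layer of the model being fine-tuned.
        self.linear = nn.Linear(input_dims, output_dims, bias=False)
        self.dropout = nn.Dropout(p=0.0)
        self.scale = scale
        # Low-rank factors; lora_b starts at zero so the adapter initially
        # contributes nothing and the output equals the base projection.
        init_scale = 1.0 / math.sqrt(input_dims)
        self.lora_a = mx.random.uniform(
            low=-init_scale, high=init_scale, shape=(input_dims, r)
        )
        self.lora_b = mx.zeros(shape=(r, output_dims))

    def __call__(self, x):
        # Base projection runs on x as-is (no cast of x to the weight dtype).
        y = self.linear(x)
        # LoRA update computed in the adapter weights' (typically float32) precision.
        z = (self.dropout(x) @ self.lora_a) @ self.lora_b
        # Cast only the scaled adapter term back to the input dtype.
        return y + (self.scale * z).astype(x.dtype)


# Illustrative usage (hypothetical names): with float16 activations and a
# float16 base layer, the output stays float16 because the float32 adapter
# term is cast back to x.dtype before the addition.
layer = LoRALinearSketch(16, 16)
layer.linear.weight = layer.linear.weight.astype(mx.float16)  # pretend a float16 base model
x = mx.random.normal(shape=(2, 16)).astype(mx.float16)
print(layer(x).dtype)  # float16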