Allow quant layer to be unfrozen (#2142)

Authored by Awni Hannun on 2025-04-30 09:08:29 -07:00; committed via GitHub.
parent f1606486d2
commit aa5d84f102
2 changed files with 8 additions and 7 deletions

View File

@@ -193,12 +193,6 @@ class QuantizedLinear(Module):
# Freeze this model's parameters
self.freeze()
def unfreeze(self, *args, **kwargs):
    """Unfreeze any sub-layers this module contains while keeping this
    module's own (quantized) parameters frozen.

    Delegates to the parent class's ``unfreeze`` — presumably unfreezing
    this module and its children recursively (TODO confirm against the
    base ``Module`` implementation) — and then immediately re-freezes
    this module's own parameters with ``recurse=False`` so that only the
    quantized weights stay frozen and child layers remain trainable.

    Args:
        *args: Forwarded unchanged to the parent ``unfreeze``.
        **kwargs: Forwarded unchanged to the parent ``unfreeze``.
    """
    super().unfreeze(*args, **kwargs)
    # recurse=False limits the re-freeze to this module's own parameters,
    # leaving any contained layers in the state super().unfreeze() set.
    self.freeze(recurse=False)
def _extra_repr(self):
out_dims, in_dims = self.weight.shape
in_dims *= 32 // self.bits