mirror of
https://github.com/ml-explore/mlx-examples.git
synced 2025-09-01 12:49:50 +08:00
Added lora support for Phi-2 (#302)
* Added lora support for Phi-2
* Added Phi-2 support in fuse and convert
* format + readme

Co-authored-by: Awni Hannun <awni@apple.com>
This commit is contained in:
@@ -7,14 +7,16 @@ import mlx.core as mx
 import mlx.nn as nn
 import utils
 from mlx.utils import tree_flatten
 from models import Model, ModelArgs


 def quantize(weights, config, args):
     quantized_config = copy.deepcopy(config)

     # Get model classes
     model_class, model_args_class = utils._get_classes(config=config)

     # Load the model:
-    model = Model(ModelArgs.from_dict(config))
+    model = model_class(model_args_class.from_dict(config))
     model.load_weights(list(weights.items()))

     # Quantize the model:
Reference in New Issue
Block a user