Mirror of https://github.com/ml-explore/mlx-examples.git (synced 2025-06-24 17:31:18 +08:00)
chore(mlx-lm): add load model with adapter and fix bug in sample (#360)

* chore: add support for loading a model with an adapter and fix a bug in sample
* chore: ignore temp when calculating prob in sample

parent f5b80c95fb
commit ab91ac1075
@@ -1,10 +1,11 @@
 import mlx.core as mx
+import mlx.nn as nn
 from mlx.utils import tree_unflatten
 
 from .lora import LoRALinear
 
 
-def apply_lora_layers(model, adapter_file: str):
+def apply_lora_layers(model: nn.Module, adapter_file: str) -> nn.Module:
     adapters = list(mx.load(adapter_file).items())
     linear_replacements = {}
     lora_layers = set(
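For context, a minimal sketch of how the flat adapter checkpoint loaded above can be mapped back onto module paths. The ".lora_a"/".lora_b" key-suffix convention and the "adapters.npz" filename are illustrative assumptions; only mx.load, tree_unflatten, and the truncated lora_layers = set( line are confirmed by the hunk.

    # Hedged sketch: recover the set of LoRA-wrapped layer paths from a flat
    # adapter checkpoint. The ".lora_a"/".lora_b" suffix convention and the
    # "adapters.npz" filename are assumptions, not part of this diff.
    import mlx.core as mx
    from mlx.utils import tree_unflatten

    adapters = list(mx.load("adapters.npz").items())  # [(flat_key, array), ...]

    # Strip the per-parameter suffixes so each wrapped layer appears once.
    lora_layers = set(
        name.replace(".lora_a", "").replace(".lora_b", "") for name, _ in adapters
    )

    # After the matching nn.Linear layers are swapped for LoRALinear modules,
    # the adapter weights can be merged back into the parameter tree with
    # model.update(tree_unflatten(adapters)).
    print(sorted(lora_layers))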
@@ -13,6 +13,7 @@ from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizer
 
 # Local imports
 from .models import llama, mixtral, phi2, plamo, qwen
+from .tuner.utils import apply_lora_layers
 
 # Constants
 MODEL_MAPPING = {
@@ -98,11 +99,14 @@ def generate_step(
     """
 
     def sample(logits: mx.array) -> Tuple[mx.array, float]:
+        softmax_logits = mx.softmax(logits)
+
         if temp == 0:
             token = mx.argmax(logits, axis=-1)
         else:
             token = mx.random.categorical(logits * (1 / temp))
-            prob = mx.softmax(logits / temp)[0, token]
+
+        prob = softmax_logits[0, token]
         return token, prob
 
     y = prompt
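The effect of the sampling fix is easiest to see in isolation: the token is still drawn from temperature-scaled logits, but the reported probability now comes from the plain softmax computed once up front. A minimal standalone sketch of the patched sample follows; temp is passed explicitly here (in the repo it is captured from the enclosing generate_step), and the example logits are made up.

    from typing import Tuple

    import mlx.core as mx

    def sample(logits: mx.array, temp: float) -> Tuple[mx.array, mx.array]:
        # Probability is read from the untempered distribution (the fix above);
        # temperature only shapes which token gets drawn.
        softmax_logits = mx.softmax(logits)

        if temp == 0:
            token = mx.argmax(logits, axis=-1)  # greedy decoding
        else:
            token = mx.random.categorical(logits * (1 / temp))

        prob = softmax_logits[0, token]
        return token, prob

    logits = mx.array([[0.5, 1.0, 3.0]])  # made-up batch of one
    token, prob = sample(logits, temp=0.7)
    print(token.item(), prob.item())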
@@ -237,7 +241,7 @@ def load_model(model_path: Path) -> nn.Module:
 
 
 def load(
-    path_or_hf_repo: str, tokenizer_config={}
+    path_or_hf_repo: str, tokenizer_config={}, adapter_file: str = None
 ) -> Tuple[nn.Module, PreTrainedTokenizer]:
     """
     Load the model and tokenizer from a given path or a huggingface repository.
@@ -246,8 +250,10 @@ def load(
         model_path (Path): The path or the huggingface repository to load the model from.
         tokenizer_config (dict, optional): Configuration parameters specifically for the tokenizer.
             Defaults to an empty dictionary.
+        adapter_file (str, optional): Path to the adapter file. If provided, applies LoRA layers to the model.
+            Defaults to None.
     Returns:
-        nn.Module: The loaded model.
+        Tuple[nn.Module, PreTrainedTokenizer]: A tuple containing the loaded model and tokenizer.
 
     Raises:
         FileNotFoundError: If config file or safetensors are not found.
@@ -256,6 +262,9 @@ def load(
     model_path = get_model_path(path_or_hf_repo)
 
     model = load_model(model_path)
+    if adapter_file is not None:
+        model = apply_lora_layers(model, adapter_file)
+
     tokenizer = AutoTokenizer.from_pretrained(model_path, **tokenizer_config)
     return model, tokenizer
 
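A hypothetical end-to-end use of the extended signature; the repo id and adapter filename are placeholders, and the import path is assumed from the patched module.

    from mlx_lm.utils import load  # module path assumed from the patched file

    # With adapter_file set, load() applies the LoRA layers after load_model();
    # omitting it (the default None) preserves the previous behavior.
    model, tokenizer = load(
        "mlx-community/Mistral-7B-v0.1",  # placeholder Hugging Face repo id
        adapter_file="adapters.npz",      # placeholder path to LoRA adapter weights
    )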