Mirror of https://github.com/ml-explore/mlx-examples.git (synced 2025-12-15 09:48:54 +08:00)
@@ -12,7 +12,7 @@ from huggingface_hub import snapshot_download
 from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizer
 
 # Local imports
-from .models import llama, mixtral, phi2, plamo, qwen, stablelm_epoch, qwen2
+from .models import llama, mixtral, olmo, phi2, plamo, qwen, qwen2, stablelm_epoch
 from .tuner.utils import apply_lora_layers
 
 # Constants
@@ -24,10 +24,15 @@ MODEL_MAPPING = {
     "stablelm_epoch": stablelm_epoch,
     "qwen": qwen,
     "plamo": plamo,
-    "qwen2": qwen2
+    "olmo": olmo,
+    "qwen2": qwen2,
 }
 LORA_SUPPORTED_MODELS = [
-    llama.Model, mixtral.Model, phi2.Model, stablelm_epoch.Model, qwen2.Model
+    llama.Model,
+    mixtral.Model,
+    phi2.Model,
+    stablelm_epoch.Model,
+    qwen2.Model,
 ]
 MAX_FILE_SIZE_GB = 5
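
For context, the entry registered in this diff is what lets the generic loader resolve an architecture from a downloaded checkpoint: the "model_type" field in config.json selects a module from MODEL_MAPPING, and that module's Model class is instantiated. The sketch below is illustrative only, assuming MODEL_MAPPING as defined above and that each architecture module exposes Model and ModelArgs; the helper name load_model_sketch is hypothetical, not the repository's exact loader.

# Minimal sketch of the MODEL_MAPPING lookup (assumed names; the real
# loader in mlx-examples may differ in details).
import json
from pathlib import Path


def load_model_sketch(model_path: str):
    """Pick and build an architecture from config.json's "model_type"."""
    with open(Path(model_path) / "config.json") as f:
        config = json.load(f)

    model_type = config["model_type"]  # e.g. "olmo" after this change
    if model_type not in MODEL_MAPPING:
        raise ValueError(f"Model type {model_type} not supported.")

    arch = MODEL_MAPPING[model_type]  # architecture module, e.g. .models.olmo
    # Assumption: each module exposes ModelArgs.from_dict and Model.
    model = arch.Model(arch.ModelArgs.from_dict(config))
    return model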