Olmo in MLX LM (#415)

* run olmo

* format
Awni Hannun authored 2024-02-05 21:13:49 -08:00, committed by GitHub
parent 7fbca214b1
commit aa7447efa2
4 changed files with 171 additions and 6 deletions


@@ -12,7 +12,7 @@ from huggingface_hub import snapshot_download
 from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizer
 # Local imports
-from .models import llama, mixtral, phi2, plamo, qwen, stablelm_epoch, qwen2
+from .models import llama, mixtral, olmo, phi2, plamo, qwen, qwen2, stablelm_epoch
 from .tuner.utils import apply_lora_layers
 # Constants
@@ -24,10 +24,15 @@ MODEL_MAPPING = {
"stablelm_epoch": stablelm_epoch,
"qwen": qwen,
"plamo": plamo,
"qwen2": qwen2
"olmo": olmo,
"qwen2": qwen2,
}
LORA_SUPPORTED_MODELS = [
llama.Model, mixtral.Model, phi2.Model, stablelm_epoch.Model, qwen2.Model
llama.Model,
mixtral.Model,
phi2.Model,
stablelm_epoch.Model,
qwen2.Model,
]
MAX_FILE_SIZE_GB = 5
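
The functional change above is the "olmo" entry in MODEL_MAPPING; the LORA_SUPPORTED_MODELS edit only reflows the list and does not add olmo.Model to it, so OLMo is registered for inference but not for LoRA tuning. As a rough illustration of how that mapping is consumed, here is a self-contained Python sketch of the model_type dispatch. The stub classes and the build_model helper are assumptions for illustration only, not the actual mlx_lm loading code.

# Self-contained sketch of the model_type -> module dispatch that MODEL_MAPPING implements.
# The stub "modules" stand in for mlx_lm.models.llama / .olmo; only the lookup logic
# mirrors the diff above, everything else is illustrative.
import json
from dataclasses import dataclass, fields
from pathlib import Path
from types import SimpleNamespace


@dataclass
class StubArgs:
    hidden_size: int = 0
    num_hidden_layers: int = 0

    @classmethod
    def from_dict(cls, params: dict):
        # Keep only the keys the dataclass declares, dropping unknown config.json fields.
        names = {f.name for f in fields(cls)}
        return cls(**{k: v for k, v in params.items() if k in names})


class StubModel:
    def __init__(self, args: StubArgs):
        self.args = args


# Same shape as the MODEL_MAPPING in the diff: config.json "model_type" -> a module
# exposing Model and ModelArgs.
llama = SimpleNamespace(Model=StubModel, ModelArgs=StubArgs)
olmo = SimpleNamespace(Model=StubModel, ModelArgs=StubArgs)
MODEL_MAPPING = {"llama": llama, "olmo": olmo}


def build_model(model_path: str):
    # Resolve the architecture for a local checkpoint directory via MODEL_MAPPING.
    config = json.loads((Path(model_path) / "config.json").read_text())
    model_type = config["model_type"]  # "olmo" for the newly supported checkpoints
    if model_type not in MODEL_MAPPING:
        raise ValueError(f"Model type {model_type} not supported.")
    module = MODEL_MAPPING[model_type]
    return module.Model(module.ModelArgs.from_dict(config))

With the registration in place, a checkpoint whose config.json declares "model_type": "olmo" resolves to the new olmo module through the same lookup as every other supported architecture.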