Mixed quant recipes (#1300)

* Mixed 3/6 and 2/6 recipes based on Alex Barron's
* format / nits

Co-authored-by: Awni Hannun <awni.hannun@gmail.com>
Parent: c37e26a1a3
Commit: b7f742ef56
@@ -1,8 +1,27 @@
 # Copyright © 2023-2024 Apple Inc.
 
 import argparse
+from enum import Enum
 
-from .utils import convert
+from .utils import convert, mixed_2_6, mixed_3_6
+
+
+class MixedQuants(Enum):
+    mixed_3_6 = "mixed_3_6"
+    mixed_2_6 = "mixed_2_6"
+
+    @classmethod
+    def recipe_names(cls):
+        return [member.name for member in cls]
+
+
+def quant_args(arg):
+    try:
+        return MixedQuants[arg].value
+    except KeyError:
+        raise argparse.ArgumentTypeError(
+            f"Invalid q-recipe {arg!r}. Choose from: {MixedQuants.recipe_names()}"
+        )
 
 
 def configure_parser() -> argparse.ArgumentParser:
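The enum gives the CLI a closed set of recipe names, and quant_args is the argparse type hook that validates them. A minimal standalone sketch of how that hook behaves (it re-declares the pieces from the hunk above so it runs outside the package; the small parser below is illustrative, not the package's real configure_parser):

import argparse
from enum import Enum


class MixedQuants(Enum):
    mixed_3_6 = "mixed_3_6"
    mixed_2_6 = "mixed_2_6"

    @classmethod
    def recipe_names(cls):
        return [member.name for member in cls]


def quant_args(arg):
    # Map a recipe name to its value, or fail with a friendly argparse error.
    try:
        return MixedQuants[arg].value
    except KeyError:
        raise argparse.ArgumentTypeError(
            f"Invalid q-recipe {arg!r}. Choose from: {MixedQuants.recipe_names()}"
        )


parser = argparse.ArgumentParser()
parser.add_argument("--quant-predicate", type=quant_args, required=False)

print(parser.parse_args(["--quant-predicate", "mixed_3_6"]).quant_predicate)
# -> mixed_3_6
# Passing an unknown name, e.g. ["--quant-predicate", "q4_k_m"], exits with:
# error: argument --quant-predicate: Invalid q-recipe 'q4_k_m'. Choose from: ['mixed_3_6', 'mixed_2_6']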
@@ -29,6 +48,12 @@ def configure_parser() -> argparse.ArgumentParser:
     parser.add_argument(
         "--q-bits", help="Bits per weight for quantization.", type=int, default=4
     )
+    parser.add_argument(
+        "--quant-predicate",
+        help=f"Mixed-bit quantization recipe. Choices: {MixedQuants.recipe_names()}",
+        type=quant_args,
+        required=False,
+    )
     parser.add_argument(
         "--dtype",
         help="Type to save the non-quantized parameters.",
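This hunk only adds the flag; the code that forwards the parsed recipe name into the conversion call is not shown in the excerpt. From Python, the prebuilt predicates can presumably be handed to convert() directly. A rough sketch under that assumption, with the package importable as mlx_lm (the quant_predicate keyword and the example Hugging Face repo are assumptions, not part of this diff):

from mlx_lm.utils import convert, mixed_3_6

# Assumed usage: pass the prebuilt 3/6 recipe to convert().
# The quant_predicate keyword argument is an assumption; this diff only shows
# the predicates being defined and exported from utils.
convert(
    hf_path="mistralai/Mistral-7B-Instruct-v0.3",  # hypothetical example repo
    mlx_path="mlx_model_3_6",
    quantize=True,
    quant_predicate=mixed_3_6,  # 3-bit base, 6-bit v_proj/down_proj on selected layers
)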
@@ -1015,6 +1015,46 @@ def save_config(
         json.dump(config, fid, indent=4)
 
 
+def mixed_quant_predicate_builder(
+    low_bits: int = 4, high_bits: int = 6, group_size: int = 64
+) -> Callable[[str, nn.Module, dict], Union[bool, dict]]:
+    def mixed_quant_predicate(
+        path: str,
+        module: nn.Module,
+        config: dict,
+    ) -> Union[bool, dict]:
+        """Implements mixed quantization predicates with similar choices to, for example, llama.cpp's Q4_K_M.
+        Ref: https://github.com/ggerganov/llama.cpp/blob/917786f43d0f29b7c77a0c56767c0fa4df68b1c5/src/llama.cpp#L5265
+        By Alex Barron: https://gist.github.com/barronalex/84addb8078be21969f1690c1454855f3
+        """
+
+        if not hasattr(module, "to_quantized"):
+            return False
+
+        index = int(path.split(".")[2]) if len(path.split(".")) > 2 else 0
+
+        num_layers = config["num_hidden_layers"]
+        use_more_bits = (
+            index < num_layers // 8
+            or index >= 7 * num_layers // 8
+            or (index - num_layers // 8) % 3 == 2
+        )
+        if "v_proj" in path and use_more_bits:
+            return {"group_size": group_size, "bits": high_bits}
+        if "down_proj" in path and use_more_bits:
+            return {"group_size": group_size, "bits": high_bits}
+        if "lm_head" in path:
+            return {"group_size": group_size, "bits": high_bits}
+
+        return {"group_size": group_size, "bits": low_bits}
+
+    return mixed_quant_predicate
+
+
+mixed_3_6 = mixed_quant_predicate_builder(low_bits=3)
+mixed_2_6 = mixed_quant_predicate_builder(low_bits=2)
+
+
 def convert(
     hf_path: str,
     mlx_path: str = "mlx_model",
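The selection rule follows the llama.cpp Q4_K_M-style heuristic referenced in the docstring: layers in the first and last eighth of the stack, plus every third layer in between, keep their v_proj and down_proj at high_bits; lm_head always gets high_bits; everything else falls back to low_bits. A standalone sketch that reproduces just that arithmetic for a hypothetical 32-layer model (plain Python, no MLX required; 32 is an illustrative value, the real depth comes from config["num_hidden_layers"]):

# Reproduce the use_more_bits rule from mixed_quant_predicate, for illustration only.
NUM_LAYERS = 32  # hypothetical model depth


def use_more_bits(index: int, num_layers: int = NUM_LAYERS) -> bool:
    # First eighth, last eighth, and every third layer in between get high bits.
    return (
        index < num_layers // 8
        or index >= 7 * num_layers // 8
        or (index - num_layers // 8) % 3 == 2
    )


high_bit_layers = [i for i in range(NUM_LAYERS) if use_more_bits(i)]
print(high_bit_layers)
# [0, 1, 2, 3, 6, 9, 12, 15, 18, 21, 24, 27, 28, 29, 30, 31]
# With mixed_3_6, v_proj and down_proj in these layers are quantized to 6 bits
# and all other quantizable weights to 3 bits (group size 64); lm_head is always 6 bits.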