Mirror of https://github.com/ml-explore/mlx-examples.git
format / nits
commit 3862581d57 (parent 216265bbb5)
@@ -1,18 +1,20 @@
 # Copyright © 2023-2024 Apple Inc.
 
 import argparse
-
-from .utils import convert, mixed_3_6, mixed_2_6
 from enum import Enum
 
+from .utils import convert, mixed_2_6, mixed_3_6
+
+
 class MixedQuants(Enum):
-    mixed_3_6 = mixed_3_6
-    mixed_2_6 = mixed_2_6
+    mixed_3_6 = "mixed_3_6"
+    mixed_2_6 = "mixed_2_6"
 
     @classmethod
     def recipe_names(cls):
         return [member.name for member in cls]
 
+
 def quant_args(arg):
     try:
         return MixedQuants[arg].value
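For context, `quant_args` is shaped like an argparse `type=` converter: it maps a recipe name to the enum's string value, so an invalid name can be turned into a clean argument error. A minimal, self-contained sketch of that wiring follows; the `--quant-predicate` flag, the `except KeyError` branch, and the error text are illustrative assumptions, not taken from this diff:

# Sketch only: mirrors the enum from the hunk above and shows one plausible
# way to expose it on a CLI. The flag name and error message are assumptions.
import argparse
from enum import Enum


class MixedQuants(Enum):
    mixed_3_6 = "mixed_3_6"
    mixed_2_6 = "mixed_2_6"

    @classmethod
    def recipe_names(cls):
        return [member.name for member in cls]


def quant_args(arg):
    # argparse calls this with the raw string; unknown names become a clean
    # usage error instead of a KeyError traceback (assumed error handling).
    try:
        return MixedQuants[arg].value
    except KeyError:
        raise argparse.ArgumentTypeError(
            f"Invalid quant recipe {arg!r}; choose from {MixedQuants.recipe_names()}"
        )


parser = argparse.ArgumentParser()
parser.add_argument(
    "--quant-predicate",
    type=quant_args,
    help=f"Mixed quantization recipe, one of {MixedQuants.recipe_names()}",
)
print(parser.parse_args(["--quant-predicate", "mixed_3_6"]).quant_predicate)
# -> mixed_3_6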
@@ -1016,11 +1016,13 @@ def save_config(
 
 
 def mixed_quant_predicate_builder(
-    low_bits: int = 4,
-    high_bits: int = 4,
-    group_size: int = 64
+    low_bits: int = 4, high_bits: int = 4, group_size: int = 64
 ) -> Callable[[str, nn.Module, dict], Union[bool, dict]]:
-    def mixed_quant_predicate(path: str, module: nn.Module, config: dict, low_bits: int = 4, high_bits: int = 4, group_size: int = 64) -> Union[bool, dict]:
+    def mixed_quant_predicate(
+        path: str,
+        module: nn.Module,
+        config: dict,
+    ) -> Union[bool, dict]:
         """Implements mixed quantization predicates with similar choices to, for example, llama.cpp's Q4_K_M.
         Ref: https://github.com/ggerganov/llama.cpp/blob/917786f43d0f29b7c77a0c56767c0fa4df68b1c5/src/llama.cpp#L5265
         By Alex Barron: https://gist.github.com/barronalex/84addb8078be21969f1690c1454855f3
@@ -1033,9 +1035,9 @@ def mixed_quant_predicate_builder(
 
         num_layers = config["num_hidden_layers"]
         use_more_bits = (
-            index < num_layers // 8 or
-            index >= 7 * num_layers // 8 or
-            (index - num_layers // 8) % 3 == 2
+            index < num_layers // 8
+            or index >= 7 * num_layers // 8
+            or (index - num_layers // 8) % 3 == 2
         )
         if "v_proj" in path and use_more_bits:
             return {"group_size": group_size, "bits": high_bits}
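As a worked example, the `use_more_bits` rule keeps the first and last eighth of the layers, plus every third layer in between, at `high_bits` for the matching projections such as `v_proj`. A small stand-alone sketch of the same expression, assuming a hypothetical 32-layer model (the layer count is only for illustration):

# Sketch: evaluate the use_more_bits rule from the hunk above for a
# hypothetical model with 32 hidden layers.
num_layers = 32

high_bit_layers = [
    index
    for index in range(num_layers)
    if (
        index < num_layers // 8
        or index >= 7 * num_layers // 8
        or (index - num_layers // 8) % 3 == 2
    )
]
print(high_bit_layers)
# First and last eighth of the layers, plus every third layer in between:
# [0, 1, 2, 3, 6, 9, 12, 15, 18, 21, 24, 27, 28, 29, 30, 31]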
@@ -1045,11 +1047,14 @@ def mixed_quant_predicate_builder(
             return {"group_size": group_size, "bits": high_bits}
 
         return {"group_size": group_size, "bits": low_bits}
-    return functools.partial(mixed_quant_predicate, low_bits=low_bits, high_bits=high_bits, group_size=group_size)
+
+    return mixed_quant_predicate
+
 
 mixed_3_6 = mixed_quant_predicate_builder(low_bits=3)
 mixed_2_6 = mixed_quant_predicate_builder(low_bits=2)
 
+
 def convert(
     hf_path: str,
     mlx_path: str = "mlx_model",
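The last hunk swaps `functools.partial` for a plain closure: `low_bits`, `high_bits`, and `group_size` are already visible inside `mixed_quant_predicate`, so re-binding them through `partial` was redundant. A minimal sketch of the two equivalent patterns, using a toy predicate and made-up layer paths rather than the real ones:

# Sketch: why returning the inner function directly is enough.
# The toy predicate and paths below stand in for the real mixed_quant_predicate.
import functools


def builder_with_partial(low_bits=4, high_bits=6):
    def predicate(path, low_bits=4, high_bits=6):
        return high_bits if "v_proj" in path else low_bits

    # Old style: re-bind the keyword arguments explicitly.
    return functools.partial(predicate, low_bits=low_bits, high_bits=high_bits)


def builder_with_closure(low_bits=4, high_bits=6):
    def predicate(path):
        # New style: the closure already captures low_bits / high_bits.
        return high_bits if "v_proj" in path else low_bits

    return predicate


p1 = builder_with_partial(low_bits=3)
p2 = builder_with_closure(low_bits=3)
assert p1("layers.0.self_attn.v_proj") == p2("layers.0.self_attn.v_proj") == 6
assert p1("layers.0.mlp.gate_proj") == p2("layers.0.mlp.gate_proj") == 3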