mirror of
https://github.com/ml-explore/mlx-examples.git
synced 2025-06-24 17:31:18 +08:00

* starting * refactor sampler/processor and a few improvements * fix stream * fix stream generate * fix eos handling in stream generate
209 lines
7.1 KiB
Python
209 lines
7.1 KiB
Python
# Copyright © 2023-2024 Apple Inc.
|
|
|
|
from functools import partial
|
|
from typing import Callable, Dict, Optional
|
|
|
|
import mlx.core as mx
|
|
|
|
|
|
def make_sampler(
    temp: float = 0.0,
    top_p: float = 0.0,
    min_p: float = 0.0,
    min_tokens_to_keep: int = 1,
) -> Callable[[mx.array], mx.array]:
    """
    Make a sampler function for use with ``generate_step``.

    Exactly one strategy is selected, checked in this order: greedy
    (``temp == 0``), top-p, min-p, then plain categorical sampling.

    Args:
        temp (float): The temperature for sampling, if 0 the argmax is used.
          Default: ``0``.
        top_p (float, optional): Nucleus sampling, higher means model considers
          more less likely words.
        min_p (float, optional): The minimum value (scaled by the top token's
          probability) that a token probability must have to be considered.
        min_tokens_to_keep (int, optional): Minimum number of tokens that cannot
          be filtered by min_p sampling.

    Returns:
        Callable[[mx.array], mx.array]:
            A sampler which takes log-probabilities and returns tokens.
    """
    if temp == 0:
        # Greedy decoding: temperature zero short-circuits all other options.
        return lambda x: mx.argmax(x, axis=-1)
    elif 0 < top_p < 1.0:
        return lambda x: top_p_sampling(x, top_p, temp)
    elif min_p != 0.0:
        return lambda x: min_p_sampling(x, min_p, min_tokens_to_keep, temp)
    else:
        return lambda x: categorical_sampling(x, temp)
|
|
|
|
|
|
def make_logits_processors(
    logit_bias: Optional[Dict[int, float]] = None,
    repetition_penalty: Optional[float] = None,
    repetition_context_size: Optional[int] = 20,
):
    """
    Make logits processors for use with ``generate_step``.

    Args:
        repetition_penalty (float, optional): The penalty factor for repeating
          tokens.
        repetition_context_size (int, optional): The number of tokens to
          consider for repetition penalty. Default: ``20``.
        logit_bias (dictionary, optional): Additive logit bias.

    Returns:
        List[Callable[[mx.array, mx.array], mx.array]]:
            A list of logits processors. Each processor in the list is a
            callable which takes an array of tokens and an array of logits
            and returns the updated logits.
    """
    processors = []

    if logit_bias:
        # Convert the bias mapping to arrays once, outside the processor,
        # so the per-step closure just does a single indexed add.
        bias_indices = mx.array(list(logit_bias.keys()))
        bias_values = mx.array(list(logit_bias.values()))

        def apply_logit_bias(_, logits):
            logits[:, bias_indices] += bias_values
            return logits

        processors.append(apply_logit_bias)

    if repetition_penalty and repetition_penalty != 0.0:
        processors.append(
            make_repetition_penalty(repetition_penalty, repetition_context_size)
        )

    return processors
|
|
|
|
|
|
@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
def min_p_sampling(
    logits: mx.array,
    min_p: float,
    min_tokens_to_keep: int = 1,
    temperature=1.0,
) -> mx.array:
    """
    Apply min-p sampling to the logits.

    Min-p keeps all tokens that are above a minimum probability, scaled by the
    probability of the most likely token. As a result, the filter is more
    aggressive given a very high-probability token.

    Args:
        logits: The logits from the model's output.
        min_p (float): Minimum token probability. Typical values are in the
          0.01-0.2 range, comparably selective as setting `top_p` in the
          0.99-0.8 range.
        min_tokens_to_keep (int, optional): Minimum number of tokens that cannot
          be filtered. Default: ``1``.
        temperature (float, optional): Temperature applied to the logits
          before computing probabilities. Default: ``1.0``.

    Returns:
        mx.array: The sampled token index, in the original vocabulary order.
    """
    if not (0 <= min_p <= 1.0):
        raise ValueError(
            f"`min_p` has to be a float in the [0, 1] interval, but is {min_p}"
        )
    if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
        raise ValueError(
            f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}"
        )
    # reference implementation: https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L531-L605

    # Softmax probabilities
    probs = mx.softmax(logits * (1 / temperature), axis=-1)

    # Indices sorted in decreasing order.  Sorting by raw logits gives the
    # same order as sorting by probs, since softmax is monotonic.
    # NOTE(review): squeeze(0) assumes the leading (batch) axis has size 1 —
    # confirm callers only pass single-row logits.
    sorted_indices = mx.argsort(-logits).squeeze(0)
    sorted_probs = probs[..., sorted_indices]

    # Top probability
    top_probs = probs[..., sorted_indices[0]]

    # Calculate the min_p threshold
    scaled_min_p = min_p * top_probs

    # Mask tokens that have a probability less than the scaled min_p;
    # the first min_tokens_to_keep (highest-probability) entries are always
    # exempt from filtering.
    tokens_to_remove = sorted_probs < scaled_min_p
    tokens_to_remove[..., :min_tokens_to_keep] = False

    # Create pool of tokens with probability less than scaled min_p
    # (zeroed entries get probability 0 and are never sampled).
    selected_probs = mx.where(tokens_to_remove, 0, sorted_probs)

    # Sample in sorted space, then map back to the original vocabulary index.
    sorted_token = mx.random.categorical(mx.log(selected_probs))
    return sorted_indices[sorted_token]
|
|
|
|
|
|
@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
def top_p_sampling(logits: mx.array, top_p: float, temperature: float) -> mx.array:
    """
    Apply top-p (nucleus) sampling to logits.

    Args:
        logits: The logits from the model's output.
        top_p: The cumulative probability threshold for top-p filtering.
        temperature: Temperature parameter for softmax distribution reshaping.
    Returns:
        token selected based on the top-p criterion.
    """
    # referenced implementation from https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L449-L460
    probs = mx.softmax(logits * (1 / temperature), axis=-1)

    # sort probs in ascending order
    # NOTE(review): squeeze(0) assumes the leading (batch) axis has size 1 —
    # confirm callers only pass single-row logits.
    sorted_indices = mx.argsort(probs, axis=-1)
    sorted_probs = probs[..., sorted_indices.squeeze(0)]

    cumulative_probs = mx.cumsum(sorted_probs, axis=-1)

    # select tokens with cumulative probs below threshold
    # Probs are sorted ascending, so entries whose cumulative mass exceeds
    # (1 - top_p) are exactly the nucleus; everything else is zeroed and can
    # never be sampled.
    top_probs = mx.where(
        cumulative_probs > 1 - top_p,
        sorted_probs,
        0,
    )

    # Sample in sorted space, then map back to the original vocabulary index.
    sorted_token = mx.random.categorical(mx.log(top_probs))
    token = sorted_indices.squeeze(0)[sorted_token]

    return token
|
|
|
|
|
|
@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
def categorical_sampling(logits, temp):
    """Sample one token from the temperature-scaled categorical distribution."""
    inv_temp = 1 / temp
    return mx.random.categorical(logits * inv_temp)
|
|
|
|
|
|
def make_repetition_penalty(penalty: float, context_size: int = 20):
    """
    Make repetition penalty processor.

    Paper: https://arxiv.org/abs/1909.05858

    Args:
        penalty (float): The repetition penalty factor to be applied.
        context_size (int): The number of previous tokens to use.
          Default: ``20``.

    Returns:
        Callable[[mx.array, List[int]], mx.array]:
            The repetition penalty processor.

    Raises:
        ValueError: If ``penalty`` is not a non-negative number.
    """
    # Check the type before comparing: the original order raised TypeError
    # (not ValueError) for non-numeric input, and the float-only isinstance
    # check rejected valid integer penalties such as ``2``.
    if not isinstance(penalty, (int, float)) or penalty < 0:
        raise ValueError(f"penalty must be a non-negative float, got {penalty}")

    def repetition_penalty_processor(tokens, logits):
        if len(tokens) > 0:
            # Only the most recent ``context_size`` tokens are penalized.
            tokens = tokens[-context_size:]
            selected_logits = logits[:, tokens]
            # CTRL-style penalty: divide positive logits, multiply negative
            # ones, so the penalized probability always decreases.
            selected_logits = mx.where(
                selected_logits < 0,
                selected_logits * penalty,
                selected_logits / penalty,
            )
            logits[:, tokens] = selected_logits
        return logits

    return repetition_penalty_processor
|