Mirror of https://github.com/ml-explore/mlx-examples.git
Feat: update pre-commit rev (#432)
commit f1ef378a58
parent f45a1ab83c
.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 23.12.1
+    rev: 24.1.1
     hooks:
       - id: black
   - repo: https://github.com/pycqa/isort
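black 24.1.1 ships the 2024 stable style, so the two Python hunks below are pure reformatting produced by the newer release, which now parenthesizes split right-hand sides of assignments and multi-line conditional expressions. After bumping the rev, running pre-commit run --all-files applies the new style across the repo.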
@@ -80,9 +80,9 @@ def iterate_batches(dataset, tokenizer, batch_size, max_seq_length, train=False)
             for j in range(batch_size):
                 truncated_length = min(lengths[j], max_seq_length)
                 batch_arr[j, :truncated_length] = batch[j][:truncated_length]
-                lengths[
-                    j
-                ] = truncated_length  # Update lengths to match truncated lengths
+                lengths[j] = (
+                    truncated_length  # Update lengths to match truncated lengths
+                )
             batch = mx.array(batch_arr)

             yield batch[:, :-1], batch[:, 1:], mx.array(lengths)
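For reference, a minimal, self-contained sketch of the truncate-and-pad step that this hunk reformats. It uses numpy in place of mlx arrays, and the helper name pad_and_truncate is made up for illustration; the real function also shifts the batch to yield input/target pairs.

import numpy as np

def pad_and_truncate(batch, max_seq_length):
    """Pad token-id lists into a fixed-width array, truncating to max_seq_length.

    Returns the padded array and the per-sequence lengths after truncation,
    the quantity the hunk above updates in place.
    """
    lengths = [len(x) for x in batch]
    batch_size = len(batch)
    # Width of the array: the longest sequence, capped at max_seq_length.
    width = min(max(lengths), max_seq_length)
    batch_arr = np.zeros((batch_size, width), np.int32)
    for j in range(batch_size):
        truncated_length = min(lengths[j], max_seq_length)
        batch_arr[j, :truncated_length] = batch[j][:truncated_length]
        lengths[j] = truncated_length  # lengths must reflect the truncation
    return batch_arr, lengths

# Example: two sequences, the second longer than max_seq_length=4.
arr, lens = pad_and_truncate([[1, 2], [3, 4, 5, 6, 7]], max_seq_length=4)
# arr -> [[1, 2, 0, 0], [3, 4, 5, 6]], lens -> [2, 4]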
@@ -186,9 +186,11 @@ def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
             out_channels=config["out_channels"],
             block_out_channels=config["block_out_channels"],
             layers_per_block=[config["layers_per_block"]] * n_blocks,
-            num_attention_heads=[config["attention_head_dim"]] * n_blocks
-            if isinstance(config["attention_head_dim"], int)
-            else config["attention_head_dim"],
+            num_attention_heads=(
+                [config["attention_head_dim"]] * n_blocks
+                if isinstance(config["attention_head_dim"], int)
+                else config["attention_head_dim"]
+            ),
             cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
             norm_num_groups=config["norm_num_groups"],
         )
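For context, the re-parenthesized expression normalizes attention_head_dim, which Stable Diffusion configs give either as a single int (one value for every block) or as an explicit per-block list. A standalone sketch of the same conditional, with illustrative config values:

# Normalize a config value that may be a single int (applied to every block)
# or an explicit per-block list. The config values below are illustrative
# stand-ins, not taken from a real checkpoint.
config = {"attention_head_dim": 8, "block_out_channels": [320, 640, 1280, 1280]}
n_blocks = len(config["block_out_channels"])

num_attention_heads = (
    [config["attention_head_dim"]] * n_blocks
    if isinstance(config["attention_head_dim"], int)
    else config["attention_head_dim"]
)
print(num_attention_heads)  # [8, 8, 8, 8]

# With a per-block list in the config, the value passes through unchanged:
config["attention_head_dim"] = [5, 10, 20, 20]
num_attention_heads = (
    [config["attention_head_dim"]] * n_blocks
    if isinstance(config["attention_head_dim"], int)
    else config["attention_head_dim"]
)
print(num_attention_heads)  # [5, 10, 20, 20]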