added CIFAR10 + ResNet example

Sarthak Yadav 2023-12-12 19:01:06 +01:00
parent 13f1142eaa
commit f37e777243
5 changed files with 310 additions and 0 deletions

cifar/README.md (new file, +31 lines)

@@ -0,0 +1,31 @@
# CIFAR and ResNets
* This example shows how to train ResNets on the CIFAR-10 dataset, following the original [paper](https://arxiv.org/abs/1512.03385).
* It also illustrates how to use `mlx-data` to download and load the dataset (see the snippet below).
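
A minimal sketch of loading one batch, assuming the `get_cifar10` helper and the `"image"`/`"label"` keys defined in `dataset.py` (the batch size and shapes are illustrative):
```
from dataset import get_cifar10

# Build the augmented training stream and the un-augmented test stream.
train_iter, test_iter, tr_steps, test_steps = get_cifar10(batch_size=128)

# Each element of the stream is a dict with "image" and "label" entries.
for batch in train_iter:
    print(batch["image"].shape, batch["label"].shape)  # e.g. (128, 32, 32, 3) and (128,)
    break
```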
## Prerequisites
* Install the dependencies:
```
pip install -r requirements.txt
```
## Running the example
Run the example with:
```
python main.py
```
By default the example runs on the GPU. To run on the CPU, use:
```
python main.py --cpu_only
```
For all available options, run:
```
python main.py --help
```

cifar/dataset.py (new file, +39 lines)

@@ -0,0 +1,39 @@
import mlx.core as mx
from mlx.data.datasets import load_cifar10


def get_cifar10(batch_size, root=None):
    # Training set buffer (downloaded on first use).
    tr = load_cifar10(root=root)
    num_tr_samples = tr.size()

    # Per-channel normalization statistics.
    mean = mx.array([0.485, 0.456, 0.406]).reshape((1, 1, 3))
    std = mx.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))

    # Training pipeline: shuffle, random horizontal flip, pad height and width
    # by 4 pixels on each side, take a random 32x32 crop, then scale to [0, 1]
    # and normalize.
    tr_iter = (
        tr.shuffle()
        .to_stream()
        .image_random_h_flip("image", prob=0.5)
        .pad("image", 0, 4, 4, 0.0)
        .pad("image", 1, 4, 4, 0.0)
        .image_random_crop("image", 32, 32)
        .key_transform("image", lambda x: (x.astype("float32") / 255.0))
        .key_transform("image", lambda x: (x - mean) / std)
        .batch(batch_size)
    )

    # Test pipeline: no augmentation, only scaling and normalization.
    test = load_cifar10(root=root, train=False)
    num_test_samples = test.size()
    test_iter = (
        test.to_stream()
        .key_transform("image", lambda x: (x.astype("float32") / 255.0))
        .key_transform("image", lambda x: (x - mean) / std)
        .batch(batch_size)
    )

    num_tr_steps_per_epoch = num_tr_samples // batch_size
    num_test_steps_per_epoch = num_test_samples // batch_size
    return tr_iter, test_iter, num_tr_steps_per_epoch, num_test_steps_per_epoch

cifar/main.py (new file, +108 lines)

@@ -0,0 +1,108 @@
import argparse

import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
import numpy as np

import resnet
from dataset import get_cifar10

parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
    "--arch",
    type=str,
    default="resnet20",
    help="model architecture [resnet20, resnet32, resnet44, resnet56, resnet110, resnet1202]",
)
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--epochs", type=int, default=100, help="number of epochs")
parser.add_argument("--lr", type=float, default=1e-3, help="learning rate")
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument("--cpu_only", action="store_true", help="use cpu only")


def loss_fn(model, inp, tgt):
    return mx.mean(nn.losses.cross_entropy(model(inp), tgt))


def eval_fn(model, inp, tgt):
    # Top-1 accuracy on a batch.
    return mx.mean(mx.argmax(model(inp), axis=1) == tgt)


def train_epoch(model, train_iter, optimizer, epoch):
    def train_step(model, inp, tgt):
        output = model(inp)
        loss = mx.mean(nn.losses.cross_entropy(output, tgt))
        acc = mx.mean(mx.argmax(output, axis=1) == tgt)
        return loss, acc

    # Differentiate train_step with respect to the model's trainable parameters.
    train_step_fn = nn.value_and_grad(model, train_step)

    losses = []
    accs = []
    for batch_counter, batch in enumerate(train_iter):
        x = mx.array(batch["image"])
        y = mx.array(batch["label"])
        (loss, acc), grads = train_step_fn(model, x, y)
        optimizer.update(model, grads)
        # Force evaluation of the lazily computed parameter and optimizer updates.
        mx.eval(model.parameters(), optimizer.state)
        loss_value = loss.item()
        acc_value = acc.item()
        losses.append(loss_value)
        accs.append(acc_value)
        if batch_counter % 10 == 0:
            print(
                f"Epoch {epoch:02d}[{batch_counter:03d}]: tr_loss {loss_value:.3f}, tr_acc {acc_value:.3f}"
            )

    mean_tr_loss = np.mean(np.array(losses))
    mean_tr_acc = np.mean(np.array(accs))
    return mean_tr_loss, mean_tr_acc


def test_epoch(model, test_iter, epoch):
    accs = []
    for batch_counter, batch in enumerate(test_iter):
        x = mx.array(batch["image"])
        y = mx.array(batch["label"])
        acc = eval_fn(model, x, y)
        acc_value = acc.item()
        accs.append(acc_value)
    mean_acc = np.mean(np.array(accs))
    return mean_acc


def main(args):
    np.random.seed(args.seed)
    mx.random.seed(args.seed)

    model = resnet.__dict__[args.arch]()
    print("num_params: {:0.04f} M".format(model.num_params() / 1e6))
    mx.eval(model.parameters())

    optimizer = optim.Adam(learning_rate=args.lr)

    for epoch in range(args.epochs):
        # Re-create the data streams every epoch, or alternatively call
        # .repeat() on the streams and iterate for a fixed number of steps.
        train_data, test_data, tr_batches, _ = get_cifar10(args.batch_size)

        epoch_tr_loss, epoch_tr_acc = train_epoch(model, train_data, optimizer, epoch)
        print(
            f"Epoch {epoch}: avg. tr_loss {epoch_tr_loss:.3f}, avg. tr_acc {epoch_tr_acc:.3f}"
        )

        epoch_test_acc = test_epoch(model, test_data, epoch)
        print(f"Epoch {epoch}: Test_acc {epoch_test_acc:.3f}")


if __name__ == "__main__":
    args = parser.parse_args()
    if args.cpu_only:
        mx.set_default_device(mx.cpu)
    main(args)

cifar/requirements.txt (new file, +3 lines)

@@ -0,0 +1,3 @@
mlx
mlx-data
numpy

cifar/resnet.py (new file, +129 lines)

@@ -0,0 +1,129 @@
"""
Implementation of ResNets for CIFAR-10 as per the original paper [https://arxiv.org/abs/1512.03385].
Configurations include ResNet-20, ResNet-32, ResNet-44, ResNet-56, ResNet-110, ResNet-1202.
There's no BatchNorm is mlx==0.0.4, using LayerNorm instead.
Authors:
Sarthak Yadav, 2023
"""
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten

__all__ = [
    "ResNet",
    "resnet20",
    "resnet32",
    "resnet44",
    "resnet56",
    "resnet110",
    "resnet1202",
]


class ShortcutA(nn.Module):
    """Option-A shortcut from the paper: subsample spatially and zero-pad channels."""

    def __init__(self, dims):
        super().__init__()
        self.dims = dims

    def __call__(self, x):
        return mx.pad(
            x[:, ::2, ::2, :],
            pad_width=[(0, 0), (0, 0), (0, 0), (self.dims // 4, self.dims // 4)],
        )


class Block(nn.Module):
    """Basic residual block: two 3x3 convs with a parameter-free shortcut."""

    expansion = 1

    def __init__(self, in_dims, dims, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.LayerNorm(dims)

        self.conv2 = nn.Conv2d(
            dims, dims, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.LayerNorm(dims)

        if stride != 1 or in_dims != dims:
            self.shortcut = ShortcutA(dims)
        else:
            self.shortcut = None

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.shortcut is None:
            out += x
        else:
            out += self.shortcut(x)
        out = nn.relu(out)
        return out


class ResNet(nn.Module):
    """
    ResNet for CIFAR-10: a 3x3 stem, three residual stages with 16, 32, and 64
    channels, global average pooling, and a linear classifier.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.LayerNorm(16)

        self.in_dims = 16
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)

        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, block, dims, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_dims, dims, stride))
            self.in_dims = dims * block.expansion
        return nn.Sequential(*layers)

    def num_params(self):
        nparams = sum(x.size for k, x in tree_flatten(self.parameters()))
        return nparams

    def __call__(self, x):
        x = nn.relu(self.bn1(self.conv1(x)))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        # Global average pooling over the spatial dimensions (NHWC layout).
        x = mx.mean(x, axis=[1, 2]).reshape(x.shape[0], -1)
        x = self.linear(x)
        return x


def resnet20(**kwargs):
    return ResNet(Block, [3, 3, 3], **kwargs)


def resnet32(**kwargs):
    return ResNet(Block, [5, 5, 5], **kwargs)


def resnet44(**kwargs):
    return ResNet(Block, [7, 7, 7], **kwargs)


def resnet56(**kwargs):
    return ResNet(Block, [9, 9, 9], **kwargs)


def resnet110(**kwargs):
    return ResNet(Block, [18, 18, 18], **kwargs)


def resnet1202(**kwargs):
    return ResNet(Block, [200, 200, 200], **kwargs)
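

# Hypothetical smoke-test sketch for the constructors above (not used by
# main.py). MLX convolutions expect channels-last (NHWC) inputs, so a CIFAR
# batch has shape (N, 32, 32, 3) and the model returns logits of shape
# (N, num_classes).
if __name__ == "__main__":
    model = resnet20()
    x = mx.random.normal(shape=(4, 32, 32, 3))  # dummy NHWC input batch
    logits = model(x)
    mx.eval(logits)
    print(model.num_params(), logits.shape)  # expected logits shape: (4, 10)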