mirror of
https://github.com/ml-explore/mlx.git
synced 2025-12-16 01:49:05 +08:00
Compare commits
572 Commits
v0.22.0
...
sign-warns
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
24828b1b2f | ||
|
|
9f649b5658 | ||
|
|
18aa921388 | ||
|
|
8d13a0bc6b | ||
|
|
ac75c87fd7 | ||
|
|
7107802e09 | ||
|
|
c5913131cf | ||
|
|
19ab7911f6 | ||
|
|
4a1b1796b7 | ||
|
|
b48d298205 | ||
|
|
8277e71ea9 | ||
|
|
b0d985416a | ||
|
|
8d10f3ec75 | ||
|
|
6343622c67 | ||
|
|
979abf462b | ||
|
|
981d2fdaf0 | ||
|
|
5a306d3495 | ||
|
|
5baa361779 | ||
|
|
1bac0db7e3 | ||
|
|
a1212b4e44 | ||
|
|
45a8b226af | ||
|
|
76ef1e98f3 | ||
|
|
63d91557e0 | ||
|
|
310e501e6a | ||
|
|
cacc3ab7fd | ||
|
|
53525cba23 | ||
|
|
3d67b717a0 | ||
|
|
953b2f5be2 | ||
|
|
26f7155537 | ||
|
|
66fcb9fe94 | ||
|
|
d1e06117e8 | ||
|
|
539d8322d1 | ||
|
|
c4767d110f | ||
|
|
895217f25b | ||
|
|
0cfeeb60ca | ||
|
|
8f8af61a37 | ||
|
|
233384161e | ||
|
|
5bcf3a6794 | ||
|
|
7707196297 | ||
|
|
7e3471c987 | ||
|
|
9f0ba3ddf1 | ||
|
|
4bce5f9b2d | ||
|
|
e9eab527eb | ||
|
|
36ca62dba8 | ||
|
|
9cbb1b0148 | ||
|
|
9bfc476d72 | ||
|
|
25e2356316 | ||
|
|
226a1d24e0 | ||
|
|
630350ad3e | ||
|
|
380aeb58ae | ||
|
|
f37389d100 | ||
|
|
e89e8b4272 | ||
|
|
85a8824a8c | ||
|
|
f5d4397e5c | ||
|
|
343e33b6d5 | ||
|
|
0073096dd1 | ||
|
|
e3d004fed9 | ||
|
|
a393435d28 | ||
|
|
a7a94b29d7 | ||
|
|
22a5da76c8 | ||
|
|
287c63a093 | ||
|
|
1c9ae1eaa1 | ||
|
|
c2c3e0b0a2 | ||
|
|
b0cc71ae71 | ||
|
|
e88f2d4a8e | ||
|
|
9cee557423 | ||
|
|
bbf1423953 | ||
|
|
eb24267b56 | ||
|
|
dc371ae7a5 | ||
|
|
e76a8dd5c5 | ||
|
|
b466dea982 | ||
|
|
7a6adda1e6 | ||
|
|
1a9f820af6 | ||
|
|
d4f4ff3c5e | ||
|
|
7c7e48dbd1 | ||
|
|
fbbf3b9b3e | ||
|
|
bf01ad9367 | ||
|
|
ae438d05fa | ||
|
|
711a645807 | ||
|
|
aa9d44b3d4 | ||
|
|
ec2ab42888 | ||
|
|
787c0d90cd | ||
|
|
e8b604a6a3 | ||
|
|
50cc09887f | ||
|
|
3f730e77aa | ||
|
|
caecbe876a | ||
|
|
8afb6d62f2 | ||
|
|
6ccfa603cd | ||
|
|
36cad99a11 | ||
|
|
ee18e1cbf0 | ||
|
|
af120c2bc0 | ||
|
|
6a3acf2301 | ||
|
|
d6977f2a57 | ||
|
|
db5443e831 | ||
|
|
52b8384d10 | ||
|
|
44cc5da4bc | ||
|
|
dde3682b69 | ||
|
|
17310d91a6 | ||
|
|
b194d65a6a | ||
|
|
a44b27f5f8 | ||
|
|
e5a33f2223 | ||
|
|
c1e3340b23 | ||
|
|
8f163a367d | ||
|
|
89a3df9014 | ||
|
|
c5d2937aa5 | ||
|
|
b61a65e313 | ||
|
|
04cbb4191c | ||
|
|
c5460762e7 | ||
|
|
8ce49cd39e | ||
|
|
9c68b50853 | ||
|
|
111f1e71af | ||
|
|
827003d568 | ||
|
|
d363a76aa4 | ||
|
|
70560b6bd5 | ||
|
|
7ef8a6f2d5 | ||
|
|
31c6f6e33f | ||
|
|
584d48458e | ||
|
|
5cf984ca87 | ||
|
|
a9bac3d9e5 | ||
|
|
5458d43247 | ||
|
|
a4dba65220 | ||
|
|
3dcb286baf | ||
|
|
4822c3dbe9 | ||
|
|
2ca75bb529 | ||
|
|
db14e29a0b | ||
|
|
d2f540f4e0 | ||
|
|
333ffea273 | ||
|
|
f55b6f1f2f | ||
|
|
30561229c7 | ||
|
|
068a4612e9 | ||
|
|
5722c147de | ||
|
|
f6819a1f26 | ||
|
|
f93f87c802 | ||
|
|
9392fc3f88 | ||
|
|
e843c4d8d5 | ||
|
|
0c5fc63a36 | ||
|
|
e397177f6e | ||
|
|
f4c8888cbe | ||
|
|
25c1e03205 | ||
|
|
512281781c | ||
|
|
ac85ddfdb7 | ||
|
|
65d0d40232 | ||
|
|
cea9369610 | ||
|
|
e7c6e1db82 | ||
|
|
c5fcd5b61b | ||
|
|
1df9887998 | ||
|
|
73f22d6226 | ||
|
|
c422050ca7 | ||
|
|
1ba18ff7d9 | ||
|
|
37b440faa8 | ||
|
|
888b13ed63 | ||
|
|
4abb218d21 | ||
|
|
6441c21a94 | ||
|
|
dfb5022eab | ||
|
|
ac207ce7aa | ||
|
|
fce53b61d6 | ||
|
|
8ae4a76308 | ||
|
|
7fde1b6a1e | ||
|
|
aa7b47481a | ||
|
|
56be773610 | ||
|
|
a9bdd67baa | ||
|
|
f2adb5638d | ||
|
|
728d4db582 | ||
|
|
db5c7efcf6 | ||
|
|
7bb96e4249 | ||
|
|
fa89f0b150 | ||
|
|
ca973d1e83 | ||
|
|
828c5f1137 | ||
|
|
7d86a5c108 | ||
|
|
0b807893a7 | ||
|
|
6ad0889c8a | ||
|
|
737dd6d1ac | ||
|
|
aaf78f4c6b | ||
|
|
8831064493 | ||
|
|
be9bc96da4 | ||
|
|
86258f292f | ||
|
|
b26d88591c | ||
|
|
86c6a15571 | ||
|
|
8b25ce62d5 | ||
|
|
da5912e4f2 | ||
|
|
daafee676f | ||
|
|
d32519c8ee | ||
|
|
b405591249 | ||
|
|
3bf81ed1bd | ||
|
|
2204182bba | ||
|
|
3628e5d497 | ||
|
|
a0ae49d397 | ||
|
|
254476718b | ||
|
|
3adba92ebe | ||
|
|
ef631d63af | ||
|
|
970dbe8e25 | ||
|
|
641be9463b | ||
|
|
ab0e608862 | ||
|
|
1588659062 | ||
|
|
b9e88fb976 | ||
|
|
4ad53414dd | ||
|
|
d1165b215e | ||
|
|
dcb8319f3d | ||
|
|
5597fa089c | ||
|
|
9acec364c2 | ||
|
|
7d9d6ef456 | ||
|
|
6f5874a2f2 | ||
|
|
70dc336785 | ||
|
|
4e504039f5 | ||
|
|
d1f4d291e8 | ||
|
|
e1840853ce | ||
|
|
0f5ce173da | ||
|
|
588854195f | ||
|
|
28d068bce6 | ||
|
|
d107d8d495 | ||
|
|
1e496ddb82 | ||
|
|
74eccbf3fa | ||
|
|
08638223ca | ||
|
|
56cc858af9 | ||
|
|
f55c4ed1d6 | ||
|
|
93d70419e7 | ||
|
|
63f663d9c6 | ||
|
|
84b4d96efa | ||
|
|
aec67f2fa6 | ||
|
|
deee214a95 | ||
|
|
45adec102c | ||
|
|
31fc530c76 | ||
|
|
fbb3f65a1a | ||
|
|
6b1b8ea91b | ||
|
|
b2273733ea | ||
|
|
f409b229a4 | ||
|
|
30571e2326 | ||
|
|
d7734edd9f | ||
|
|
2ba69bc8fa | ||
|
|
cb349a291c | ||
|
|
f0a0b077a0 | ||
|
|
49114f28ab | ||
|
|
e7d2ebadd2 | ||
|
|
e569803d7c | ||
|
|
d34f887abc | ||
|
|
5201df5030 | ||
|
|
2d3c26c565 | ||
|
|
6325f60d52 | ||
|
|
42cc9cfbc7 | ||
|
|
8347575ba1 | ||
|
|
b6eec20260 | ||
|
|
0eb035b4b1 | ||
|
|
afb9817599 | ||
|
|
8fb3e7a26c | ||
|
|
8c7bc30ce4 | ||
|
|
85873cb162 | ||
|
|
e14ee12491 | ||
|
|
8b9a3f3cea | ||
|
|
fb4e8b896b | ||
|
|
2ca533b279 | ||
|
|
4a9b29a875 | ||
|
|
a4fcc893cd | ||
|
|
9d10239af7 | ||
|
|
19facd4b20 | ||
|
|
f5299f72cd | ||
|
|
0e0d9ac522 | ||
|
|
8917022deb | ||
|
|
ec0d5db67b | ||
|
|
e76e9b87f0 | ||
|
|
cfb6a244ea | ||
|
|
58f3860306 | ||
|
|
dd4f53db63 | ||
|
|
3d5e17e507 | ||
|
|
33bf1a244b | ||
|
|
772f471ff2 | ||
|
|
2c11d10f8d | ||
|
|
656ed7f780 | ||
|
|
81bb9a2a9e | ||
|
|
5adf185f86 | ||
|
|
c9a9180584 | ||
|
|
76831ed83d | ||
|
|
b3d7b85376 | ||
|
|
cad5c0241c | ||
|
|
b8022c578a | ||
|
|
bc53f8293f | ||
|
|
c552ff2451 | ||
|
|
4fda5fbdf9 | ||
|
|
580776559b | ||
|
|
a14aaa7c9d | ||
|
|
a6d780154f | ||
|
|
6871e2eeb7 | ||
|
|
8402a2acf4 | ||
|
|
fddb6933e1 | ||
|
|
c8b4787e4e | ||
|
|
2188199ff8 | ||
|
|
aa07429bad | ||
|
|
918761a25a | ||
|
|
a4fc671d3e | ||
|
|
f5f65ef48c | ||
|
|
c2dd81a8aa | ||
|
|
d7e680ffe4 | ||
|
|
c371baf53a | ||
|
|
ccf78f566c | ||
|
|
c9fa68664a | ||
|
|
c35f4d089a | ||
|
|
8590c0941e | ||
|
|
095163b8d1 | ||
|
|
99c33d011d | ||
|
|
62fecf3e13 | ||
|
|
7c4eb5d03e | ||
|
|
bae9a6b404 | ||
|
|
004c1d8ef2 | ||
|
|
7ebb2e0193 | ||
|
|
9ce77798b1 | ||
|
|
f8bad60609 | ||
|
|
5866b3857b | ||
|
|
1ca616844b | ||
|
|
2e8cf0b450 | ||
|
|
24f89173d1 | ||
|
|
c6a20b427a | ||
|
|
a5ac9244c4 | ||
|
|
c763fe1be0 | ||
|
|
52dc8c8cd5 | ||
|
|
aede70e81d | ||
|
|
85a8beb5e4 | ||
|
|
0bb89e9e5f | ||
|
|
5685ceb3c7 | ||
|
|
0408ba0a76 | ||
|
|
cbad6c3093 | ||
|
|
1b021f6984 | ||
|
|
95b7551d65 | ||
|
|
db5a7c6192 | ||
|
|
6ef2f67e7f | ||
|
|
f76ee1ffd2 | ||
|
|
54a71f270a | ||
|
|
55b4062dd8 | ||
|
|
79071bfba4 | ||
|
|
7774b87cbd | ||
|
|
35c87741cf | ||
|
|
4cbe605214 | ||
|
|
ab8883dd55 | ||
|
|
eebe73001a | ||
|
|
0359bf02c9 | ||
|
|
237f9e58a8 | ||
|
|
8576e6fe36 | ||
|
|
0654543dcc | ||
|
|
48ef3e74e2 | ||
|
|
7d4b378952 | ||
|
|
7ff5c41e06 | ||
|
|
602f43e3d1 | ||
|
|
a2cadb8218 | ||
|
|
c1eb9d05d9 | ||
|
|
cf6c939e86 | ||
|
|
130df35e1b | ||
|
|
0751263dec | ||
|
|
eca2f3eb97 | ||
|
|
3aa9cf3f9e | ||
|
|
8f3d208dce | ||
|
|
caaa3f1f8c | ||
|
|
659a51919f | ||
|
|
6661387066 | ||
|
|
a7fae8a176 | ||
|
|
0cae0bdac8 | ||
|
|
5a1a5d5ed1 | ||
|
|
1683975acf | ||
|
|
af705590ac | ||
|
|
825124af8f | ||
|
|
9c5e7da507 | ||
|
|
481349495b | ||
|
|
9daa6b003f | ||
|
|
a3a632d567 | ||
|
|
e496c5a4b4 | ||
|
|
ea890d8710 | ||
|
|
aa5d84f102 | ||
|
|
f1606486d2 | ||
|
|
87720a8908 | ||
|
|
bb6565ef14 | ||
|
|
7bb063bcb3 | ||
|
|
b36dd472bb | ||
|
|
167b759a38 | ||
|
|
99b9868859 | ||
|
|
6b2d5448f2 | ||
|
|
eaf709b83e | ||
|
|
f0e70afff0 | ||
|
|
86984cad68 | ||
|
|
fbc89e3ced | ||
|
|
38c1e720c2 | ||
|
|
600e87e03c | ||
|
|
3836445241 | ||
|
|
1d2c9d6a07 | ||
|
|
e8ac6bd2f5 | ||
|
|
fdadc4f22c | ||
|
|
79b527f45f | ||
|
|
dc4eada7f0 | ||
|
|
70ebc3b598 | ||
|
|
b13f2aed16 | ||
|
|
5f04c0f818 | ||
|
|
55935ccae7 | ||
|
|
b529515eb1 | ||
|
|
3cde719eb7 | ||
|
|
5de6d94a90 | ||
|
|
99eefd2ec0 | ||
|
|
e9e268336b | ||
|
|
7275ac7523 | ||
|
|
c4189a38e4 | ||
|
|
68d1b3256b | ||
|
|
9c6953bda7 | ||
|
|
ef7ece9851 | ||
|
|
ddaa4b7dcb | ||
|
|
dfae2c6989 | ||
|
|
515f104926 | ||
|
|
9ecefd56db | ||
|
|
e5d35aa187 | ||
|
|
00794c42bc | ||
|
|
08a1bf3f10 | ||
|
|
60c4154346 | ||
|
|
f2c85308c1 | ||
|
|
1a28b69ee2 | ||
|
|
ba09f01ce8 | ||
|
|
6cf48872b7 | ||
|
|
7b3b8fa000 | ||
|
|
ec5e2aae61 | ||
|
|
86389bf970 | ||
|
|
3290bfa690 | ||
|
|
8777fd104f | ||
|
|
c41f7565ed | ||
|
|
9ba81e3da4 | ||
|
|
c23888acd7 | ||
|
|
f98ce25ab9 | ||
|
|
de5f38fd48 | ||
|
|
ec2854b13a | ||
|
|
90823d2938 | ||
|
|
5f5770e3a2 | ||
|
|
28f39e9038 | ||
|
|
b2d2b37888 | ||
|
|
fe597e141c | ||
|
|
72ca1539e0 | ||
|
|
13b26775f1 | ||
|
|
05d7118561 | ||
|
|
98b901ad66 | ||
|
|
5580b47291 | ||
|
|
bc62932984 | ||
|
|
a6b5d6e759 | ||
|
|
a8931306e1 | ||
|
|
fecdb8717e | ||
|
|
916fd273ea | ||
|
|
0da8506552 | ||
|
|
eda7a7b43e | ||
|
|
022eabb734 | ||
|
|
aba899cef8 | ||
|
|
6a40e1c176 | ||
|
|
9307b2ab8b | ||
|
|
522d8d3917 | ||
|
|
a84cc0123f | ||
|
|
f018e248cd | ||
|
|
cfd7237a80 | ||
|
|
4eef8102c9 | ||
|
|
69e4dd506b | ||
|
|
25814a9458 | ||
|
|
2a980a76ce | ||
|
|
d343782c8b | ||
|
|
4e1994e9d7 | ||
|
|
65a38c452b | ||
|
|
7b7e2352cd | ||
|
|
1177d28395 | ||
|
|
005e7efa64 | ||
|
|
b42d13ec84 | ||
|
|
9adcd1a650 | ||
|
|
3c164fca8c | ||
|
|
95e335db7b | ||
|
|
f90206ad74 | ||
|
|
3779150750 | ||
|
|
0a9777aa5c | ||
|
|
45ad06aac8 | ||
|
|
c6ea2ba329 | ||
|
|
2770a10240 | ||
|
|
d2a94f9e6a | ||
|
|
32da94507a | ||
|
|
736a340478 | ||
|
|
117e1355a2 | ||
|
|
3c3e558c60 | ||
|
|
cffceda6ee | ||
|
|
048805ad2c | ||
|
|
d14c9fe7ea | ||
|
|
5db90ce822 | ||
|
|
d699cc1330 | ||
|
|
c4230747a1 | ||
|
|
5245f12a46 | ||
|
|
a198b2787e | ||
|
|
04edad8c59 | ||
|
|
392b3060b0 | ||
|
|
85b34d59bc | ||
|
|
f599c11bc8 | ||
|
|
0792ff02ff | ||
|
|
fd0d63ba5b | ||
|
|
3835a428c5 | ||
|
|
9680f72cca | ||
|
|
a0737273d3 | ||
|
|
e613d0eaf0 | ||
|
|
6bcd6bcf70 | ||
|
|
ba12e4999a | ||
|
|
4e7cd31d12 | ||
|
|
5e6c130d93 | ||
|
|
5d68082881 | ||
|
|
607181644f | ||
|
|
89d327075f | ||
|
|
6bf00ef631 | ||
|
|
7d042f17fe | ||
|
|
28b8079e30 | ||
|
|
7face5d9fd | ||
|
|
a44dc4bdb0 | ||
|
|
2d0f384b6f | ||
|
|
8ff84b5c43 | ||
|
|
10b271d963 | ||
|
|
0ebc8a3d25 | ||
|
|
bbda0fdbdb | ||
|
|
c86422bdd4 | ||
|
|
c707b2b0a6 | ||
|
|
78ba24c37d | ||
|
|
1a2cb72030 | ||
|
|
344a29506e | ||
|
|
71de73a668 | ||
|
|
4c1dfa58b7 | ||
|
|
5274c3c43f | ||
|
|
1762793989 | ||
|
|
6cec78d8f2 | ||
|
|
2dc307f2e6 | ||
|
|
7aea5b1895 | ||
|
|
9733e16496 | ||
|
|
7f2d1024f3 | ||
|
|
428f589364 | ||
|
|
5cd97f7ffe | ||
|
|
e425dc00c0 | ||
|
|
d274ae77f2 | ||
|
|
55c5ac7820 | ||
|
|
0145911bea | ||
|
|
0a5215693e | ||
|
|
2a45056ba8 | ||
|
|
142b77751d | ||
|
|
a5ededf1c3 | ||
|
|
7df3f792a2 | ||
|
|
9eb7d7362f | ||
|
|
1c0c118f7c | ||
|
|
1a1b2108ec | ||
|
|
b6c6552d20 | ||
|
|
83a0340fa7 | ||
|
|
a62fc1b39f | ||
|
|
af1b725fda | ||
|
|
9174606d4c | ||
|
|
ca305afdbe | ||
|
|
fe5987b81d | ||
|
|
a229c8cef0 | ||
|
|
f6c0499b8d | ||
|
|
1156c84e86 | ||
|
|
ec7c7def40 | ||
|
|
2d8e667400 | ||
|
|
80c863b972 | ||
|
|
f5cc1eea72 | ||
|
|
b7c9f1d38f | ||
|
|
c6fc07f1f4 | ||
|
|
ded914f442 | ||
|
|
4758c8baa1 | ||
|
|
7064fed1b1 | ||
|
|
1017ac4a9e | ||
|
|
ccb61d7aae | ||
|
|
2235dee906 | ||
|
|
28091aa1ff | ||
|
|
121d9a0702 | ||
|
|
0cea88bcc5 | ||
|
|
72146fc4cd | ||
|
|
e6a7ab9675 | ||
|
|
1f4c127fb9 | ||
|
|
90532b1f37 | ||
|
|
a8666a757a | ||
|
|
a4667da1eb | ||
|
|
0c259961ac | ||
|
|
f288db8d34 | ||
|
|
33421c1dd3 | ||
|
|
5cc5201914 | ||
|
|
252e423e81 | ||
|
|
a4a2764a52 | ||
|
|
ab8e832c18 |
@@ -7,15 +7,9 @@ parameters:
|
|||||||
nightly_build:
|
nightly_build:
|
||||||
type: boolean
|
type: boolean
|
||||||
default: false
|
default: false
|
||||||
weekly_build:
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
test_release:
|
test_release:
|
||||||
type: boolean
|
type: boolean
|
||||||
default: false
|
default: false
|
||||||
linux_release:
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build_documentation:
|
build_documentation:
|
||||||
@@ -24,21 +18,22 @@ jobs:
|
|||||||
type: boolean
|
type: boolean
|
||||||
default: false
|
default: false
|
||||||
macos:
|
macos:
|
||||||
xcode: "15.2.0"
|
xcode: "26.0.0"
|
||||||
resource_class: macos.m1.medium.gen1
|
resource_class: m4pro.medium
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Install
|
name: Install
|
||||||
command: |
|
command: |
|
||||||
brew install python@3.9
|
xcodebuild -downloadComponent MetalToolchain
|
||||||
|
brew install python@3.10
|
||||||
brew install doxygen
|
brew install doxygen
|
||||||
python3.9 -m venv env
|
python3.10 -m venv env
|
||||||
source env/bin/activate
|
source env/bin/activate
|
||||||
pip install --upgrade pip
|
pip install --upgrade pip
|
||||||
pip install --upgrade cmake
|
pip install --upgrade cmake
|
||||||
pip install -r docs/requirements.txt
|
pip install -r docs/requirements.txt
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=`sysctl -n hw.ncpu` pip install . -v
|
pip install . -v
|
||||||
- when:
|
- when:
|
||||||
condition:
|
condition:
|
||||||
not: << parameters.upload-docs >>
|
not: << parameters.upload-docs >>
|
||||||
@@ -70,9 +65,9 @@ jobs:
|
|||||||
git push -f origin gh-pages
|
git push -f origin gh-pages
|
||||||
|
|
||||||
linux_build_and_test:
|
linux_build_and_test:
|
||||||
docker:
|
machine:
|
||||||
- image: cimg/python:3.9
|
image: ubuntu-2204:current
|
||||||
|
resource_class: large
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
@@ -84,33 +79,36 @@ jobs:
|
|||||||
- run:
|
- run:
|
||||||
name: Install dependencies
|
name: Install dependencies
|
||||||
command: |
|
command: |
|
||||||
pip install --upgrade cmake
|
export DEBIAN_FRONTEND=noninteractive
|
||||||
pip install nanobind==2.4.0
|
export NEEDRESTART_MODE=a
|
||||||
pip install numpy
|
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install libblas-dev liblapack-dev liblapacke-dev
|
sudo apt-get install -y libblas-dev liblapack-dev liblapacke-dev
|
||||||
|
sudo apt-get install openmpi-bin openmpi-common libopenmpi-dev
|
||||||
|
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
- run:
|
- run:
|
||||||
name: Install Python package
|
name: Install Python package
|
||||||
command: |
|
command: |
|
||||||
CMAKE_ARGS="-DMLX_BUILD_METAL=OFF" \
|
uv venv
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=`nproc` \
|
uv pip install cmake
|
||||||
python3 setup.py build_ext --inplace
|
DEBUG=1 CMAKE_ARGS="-DCMAKE_COMPILE_WARNING_AS_ERROR=ON" \
|
||||||
CMAKE_ARGS="-DMLX_BUILD_METAL=OFF" \
|
uv pip install -e ".[dev]" -v
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=`nproc` \
|
|
||||||
python3 setup.py develop
|
|
||||||
- run:
|
- run:
|
||||||
name: Generate package stubs
|
name: Generate package stubs
|
||||||
command: |
|
command: |
|
||||||
echo "stubs"
|
uv pip install typing_extensions
|
||||||
pip install typing_extensions
|
uv run --no-project setup.py generate_stubs
|
||||||
python setup.py generate_stubs
|
|
||||||
- run:
|
- run:
|
||||||
name: Run Python tests
|
name: Run Python tests
|
||||||
command: |
|
command: |
|
||||||
python3 -m unittest discover python/tests -v
|
source .venv/bin/activate
|
||||||
|
python -m unittest discover python/tests -v
|
||||||
|
mpirun --bind-to none -host localhost:8 -np 8 python python/tests/mpi_test_distributed.py
|
||||||
|
mlx.launch --verbose -n 8 python/tests/ring_test_distributed.py -v 2> >(tee -a stderr.log >&2)
|
||||||
|
if $(grep "\[WARN\]" stderr.log); then echo "Distributed ring test failed"; exit 1; fi
|
||||||
- run:
|
- run:
|
||||||
name: Build CPP only
|
name: Build CPP only
|
||||||
command: |
|
command: |
|
||||||
|
source .venv/bin/activate
|
||||||
mkdir -p build && cd build
|
mkdir -p build && cd build
|
||||||
cmake .. -DMLX_BUILD_METAL=OFF -DCMAKE_BUILD_TYPE=DEBUG
|
cmake .. -DMLX_BUILD_METAL=OFF -DCMAKE_BUILD_TYPE=DEBUG
|
||||||
make -j `nproc`
|
make -j `nproc`
|
||||||
@@ -122,57 +120,64 @@ jobs:
|
|||||||
parameters:
|
parameters:
|
||||||
xcode_version:
|
xcode_version:
|
||||||
type: string
|
type: string
|
||||||
default: "15.2.0"
|
default: "26.0.0"
|
||||||
|
macosx_deployment_target:
|
||||||
|
type: string
|
||||||
|
default: ""
|
||||||
macos:
|
macos:
|
||||||
xcode: << parameters.xcode_version >>
|
xcode: << parameters.xcode_version >>
|
||||||
resource_class: macos.m1.medium.gen1
|
environment:
|
||||||
|
MACOSX_DEPLOYMENT_TARGET: << parameters.macosx_deployment_target >>
|
||||||
|
resource_class: m4pro.medium
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Install dependencies
|
name: Install dependencies
|
||||||
command: |
|
command: |
|
||||||
brew install python@3.9
|
xcodebuild -downloadComponent MetalToolchain
|
||||||
brew install openmpi
|
HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1 \
|
||||||
python3.9 -m venv env
|
brew install openmpi uv
|
||||||
source env/bin/activate
|
|
||||||
pip install --upgrade pip
|
|
||||||
pip install --upgrade cmake
|
|
||||||
pip install nanobind==2.4.0
|
|
||||||
pip install numpy
|
|
||||||
pip install torch
|
|
||||||
pip install tensorflow
|
|
||||||
pip install unittest-xml-reporting
|
|
||||||
- run:
|
- run:
|
||||||
name: Install Python package
|
name: Install Python package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
uv venv --python 3.10
|
||||||
DEBUG=1 CMAKE_BUILD_PARALLEL_LEVEL=`sysctl -n hw.ncpu` pip install -e . -v
|
uv pip install \
|
||||||
|
nanobind==2.4.0 \
|
||||||
|
cmake \
|
||||||
|
numpy \
|
||||||
|
torch \
|
||||||
|
tensorflow \
|
||||||
|
unittest-xml-reporting
|
||||||
|
DEBUG=1 CMAKE_ARGS="-DCMAKE_COMPILE_WARNING_AS_ERROR=ON" \
|
||||||
|
uv pip install -e . -v
|
||||||
- run:
|
- run:
|
||||||
name: Generate package stubs
|
name: Generate package stubs
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
uv pip install typing_extensions
|
||||||
pip install typing_extensions
|
uv run --no-project setup.py generate_stubs
|
||||||
python setup.py generate_stubs
|
|
||||||
- run:
|
- run:
|
||||||
name: Run Python tests
|
name: Run Python tests
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
source .venv/bin/activate
|
||||||
LOW_MEMORY=1 DEVICE=cpu python -m xmlrunner discover -v python/tests -o test-results/cpu
|
LOW_MEMORY=1 DEVICE=cpu python -m xmlrunner discover -v python/tests -o test-results/cpu
|
||||||
LOW_MEMORY=1 DEVICE=gpu METAL_DEVICE_WRAPPER_TYPE=1 METAL_DEBUG_ERROR_MODE=0 python -m xmlrunner discover -v python/tests -o test-results/gpu
|
LOW_MEMORY=1 DEVICE=gpu METAL_DEVICE_WRAPPER_TYPE=1 METAL_DEBUG_ERROR_MODE=0 python -m xmlrunner discover -v python/tests -o test-results/gpu
|
||||||
mpirun --bind-to none -host localhost:8 -np 8 -x DYLD_LIBRARY_PATH=/opt/homebrew/lib/ python python/tests/mpi_test_distributed.py
|
mpirun --bind-to none -host localhost:8 -np 8 -x DYLD_LIBRARY_PATH=/opt/homebrew/lib/ python python/tests/mpi_test_distributed.py
|
||||||
|
mlx.launch --verbose -n 8 python/tests/ring_test_distributed.py -v 2> >(tee -a stderr.log >&2)
|
||||||
|
if $(grep "\[WARN\]" stderr.log); then echo "Distributed ring test failed"; exit 1; fi
|
||||||
- run:
|
- run:
|
||||||
name: Build example extension
|
name: Build example extension
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
source .venv/bin/activate
|
||||||
cd examples/extensions
|
cd examples/extensions
|
||||||
pip install -r requirements.txt
|
uv pip install -r requirements.txt
|
||||||
python setup.py build_ext -j8
|
uv run --no-project setup.py build_ext --inplace
|
||||||
|
uv run --no-project python test.py
|
||||||
- store_test_results:
|
- store_test_results:
|
||||||
path: test-results
|
path: test-results
|
||||||
- run:
|
- run:
|
||||||
name: Build CPP only
|
name: Build CPP only
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
source .venv/bin/activate
|
||||||
mkdir -p build && cd build && cmake .. && make -j `sysctl -n hw.ncpu`
|
mkdir -p build && cd build && cmake .. && make -j `sysctl -n hw.ncpu`
|
||||||
- run:
|
- run:
|
||||||
name: Run CPP tests
|
name: Run CPP tests
|
||||||
@@ -181,7 +186,7 @@ jobs:
|
|||||||
- run:
|
- run:
|
||||||
name: Build small binary
|
name: Build small binary
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
source .venv/bin/activate
|
||||||
cd build/
|
cd build/
|
||||||
cmake .. -DCMAKE_BUILD_TYPE=MinSizeRel \
|
cmake .. -DCMAKE_BUILD_TYPE=MinSizeRel \
|
||||||
-DBUILD_SHARED_LIBS=ON \
|
-DBUILD_SHARED_LIBS=ON \
|
||||||
@@ -193,38 +198,110 @@ jobs:
|
|||||||
- run:
|
- run:
|
||||||
name: Run Python tests with JIT
|
name: Run Python tests with JIT
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=`sysctl -n hw.ncpu` \
|
|
||||||
CMAKE_ARGS="-DMLX_METAL_JIT=ON" \
|
CMAKE_ARGS="-DMLX_METAL_JIT=ON" \
|
||||||
pip install -e . -v
|
uv pip install -e . -v
|
||||||
LOW_MEMORY=1 DEVICE=gpu METAL_DEVICE_WRAPPER_TYPE=1 \
|
LOW_MEMORY=1 DEVICE=gpu METAL_DEVICE_WRAPPER_TYPE=1 \
|
||||||
METAL_DEBUG_ERROR_MODE=0 \
|
METAL_DEBUG_ERROR_MODE=0 \
|
||||||
python -m xmlrunner discover -v python/tests -o test-results/gpu_jit
|
uv run --no-project python -m xmlrunner discover \
|
||||||
|
-v python/tests \
|
||||||
|
-o test-results/gpu_jit
|
||||||
|
|
||||||
|
cuda_build_and_test:
|
||||||
|
parameters:
|
||||||
|
image_date:
|
||||||
|
type: string
|
||||||
|
default: "2023.11.1"
|
||||||
|
machine:
|
||||||
|
image: "linux-cuda-12:<< parameters.image_date >>"
|
||||||
|
resource_class: gpu.nvidia.small.gen2
|
||||||
|
steps:
|
||||||
|
- checkout
|
||||||
|
- restore_cache:
|
||||||
|
keys:
|
||||||
|
- cuda-<< parameters.image_date >>-{{ arch }}-
|
||||||
|
- run:
|
||||||
|
name: Install dependencies
|
||||||
|
command: |
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install libcudnn9-dev-cuda-12
|
||||||
|
sudo apt-get install libblas-dev liblapack-dev liblapacke-dev
|
||||||
|
sudo apt-get install libnccl2 libnccl-dev
|
||||||
|
curl -sL https://github.com/ccache/ccache/releases/download/v4.11.3/ccache-4.11.3-linux-x86_64.tar.xz | tar xJf -
|
||||||
|
sudo mv ccache-4.11.3-linux-x86_64/ccache /usr/bin/ccache
|
||||||
|
rm -rf ccache-4.11.3-linux-x86_64
|
||||||
|
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
|
- run:
|
||||||
|
name: Set CCache size
|
||||||
|
command: ccache --max-size 1G
|
||||||
|
- run:
|
||||||
|
name: Install Python package
|
||||||
|
command: |
|
||||||
|
uv venv
|
||||||
|
uv pip install cmake
|
||||||
|
DEBUG=1 CMAKE_ARGS="-DMLX_BUILD_CUDA=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DCMAKE_CUDA_COMPILER=`which nvcc`" \
|
||||||
|
uv pip install -e ".[dev]" -v
|
||||||
|
- run:
|
||||||
|
name: Run Python tests
|
||||||
|
command: |
|
||||||
|
source .venv/bin/activate
|
||||||
|
LOW_MEMORY=1 DEVICE=cpu python -m unittest discover python/tests -v
|
||||||
|
LOW_MEMORY=1 DEVICE=gpu python -m tests discover python/tests -v
|
||||||
|
- run:
|
||||||
|
name: Build CPP only
|
||||||
|
command: |
|
||||||
|
source .venv/bin/activate
|
||||||
|
cmake . -B build \
|
||||||
|
-DMLX_BUILD_CUDA=ON \
|
||||||
|
-DCMAKE_CUDA_COMPILER=`which nvcc` \
|
||||||
|
-DCMAKE_BUILD_TYPE=DEBUG
|
||||||
|
cmake --build build -j `nproc`
|
||||||
|
- run:
|
||||||
|
name: Run CPP tests
|
||||||
|
command: ./build/tests/tests -sfe="*fft_tests.cpp,*linalg_tests.cpp"
|
||||||
|
- run:
|
||||||
|
name: CCache report
|
||||||
|
command: |
|
||||||
|
ccache --show-stats
|
||||||
|
ccache --zero-stats
|
||||||
|
ccache --cleanup
|
||||||
|
- save_cache:
|
||||||
|
key: cuda-<< parameters.image_date >>-{{ arch }}-{{ epoch }}
|
||||||
|
paths:
|
||||||
|
- /home/circleci/.cache/ccache
|
||||||
|
|
||||||
build_release:
|
build_release:
|
||||||
parameters:
|
parameters:
|
||||||
python_version:
|
python_version:
|
||||||
type: string
|
type: string
|
||||||
default: "3.9"
|
default: "3.10"
|
||||||
xcode_version:
|
xcode_version:
|
||||||
type: string
|
type: string
|
||||||
default: "15.2.0"
|
default: "26.0.0"
|
||||||
build_env:
|
build_env:
|
||||||
type: string
|
type: string
|
||||||
default: ""
|
default: ""
|
||||||
|
macosx_deployment_target:
|
||||||
|
type: string
|
||||||
|
default: ""
|
||||||
macos:
|
macos:
|
||||||
xcode: << parameters.xcode_version >>
|
xcode: << parameters.xcode_version >>
|
||||||
resource_class: macos.m1.medium.gen1
|
resource_class: m4pro.medium
|
||||||
|
environment:
|
||||||
|
MACOSX_DEPLOYMENT_TARGET: << parameters.macosx_deployment_target >>
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Install dependencies
|
name: Install dependencies
|
||||||
command: |
|
command: |
|
||||||
brew install python@<< parameters.python_version >>
|
xcodebuild -downloadComponent MetalToolchain
|
||||||
brew install openmpi
|
mkdir -p ~/miniconda3
|
||||||
python<< parameters.python_version >> -m venv env
|
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o ~/miniconda3/miniconda.sh
|
||||||
source env/bin/activate
|
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
|
||||||
pip install --upgrade pip
|
rm ~/miniconda3/miniconda.sh
|
||||||
|
source ~/miniconda3/bin/activate
|
||||||
|
conda init --all
|
||||||
|
conda create -n env python=<< parameters.python_version >> -y
|
||||||
|
conda activate env
|
||||||
pip install --upgrade cmake
|
pip install --upgrade cmake
|
||||||
pip install nanobind==2.4.0
|
pip install nanobind==2.4.0
|
||||||
pip install --upgrade setuptools
|
pip install --upgrade setuptools
|
||||||
@@ -234,30 +311,38 @@ jobs:
|
|||||||
- run:
|
- run:
|
||||||
name: Install Python package
|
name: Install Python package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
DEV_RELEASE=1 \
|
env -u MACOSX_DEPLOYMENT_TARGET DEV_RELEASE=1 \
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=`sysctl -n hw.ncpu` \
|
|
||||||
pip install . -v
|
pip install . -v
|
||||||
- run:
|
- run:
|
||||||
name: Generate package stubs
|
name: Generate package stubs
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
pip install typing_extensions
|
pip install typing_extensions
|
||||||
python setup.py generate_stubs
|
python setup.py generate_stubs
|
||||||
- run:
|
- run:
|
||||||
name: Build Python package
|
name: Build Python package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
<< parameters.build_env >> \
|
python setup.py clean --all
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=`sysctl -n hw.ncpu` \
|
<< parameters.build_env >> MLX_BUILD_STAGE=1 python -m build -w
|
||||||
python -m build -w
|
- when:
|
||||||
|
condition:
|
||||||
|
equal: ["3.10", << parameters.python_version >>]
|
||||||
|
steps:
|
||||||
|
- run:
|
||||||
|
name: Build common package
|
||||||
|
command: |
|
||||||
|
conda activate env
|
||||||
|
python setup.py clean --all
|
||||||
|
<< parameters.build_env >> MLX_BUILD_STAGE=2 python -m build -w
|
||||||
- when:
|
- when:
|
||||||
condition: << parameters.build_env >>
|
condition: << parameters.build_env >>
|
||||||
steps:
|
steps:
|
||||||
- run:
|
- run:
|
||||||
name: Upload package
|
name: Upload package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
twine upload dist/*
|
twine upload dist/*
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
path: dist/
|
path: dist/
|
||||||
@@ -266,53 +351,101 @@ jobs:
|
|||||||
parameters:
|
parameters:
|
||||||
python_version:
|
python_version:
|
||||||
type: string
|
type: string
|
||||||
default: "3.9"
|
default: "3.10"
|
||||||
extra_env:
|
build_env:
|
||||||
type: string
|
type: string
|
||||||
default: "DEV_RELEASE=1"
|
default: ""
|
||||||
docker:
|
machine:
|
||||||
- image: ubuntu:20.04
|
image: ubuntu-2204:current
|
||||||
|
resource_class: large
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Build wheel
|
name: Build wheel
|
||||||
command: |
|
command: |
|
||||||
PYTHON=python<< parameters.python_version >>
|
PYTHON=python<< parameters.python_version >>
|
||||||
apt-get update
|
export DEBIAN_FRONTEND=noninteractive
|
||||||
apt-get upgrade -y
|
export NEEDRESTART_MODE=a
|
||||||
DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
|
sudo apt-get update
|
||||||
apt-get install -y apt-utils
|
TZ=Etc/UTC sudo apt-get -y install tzdata
|
||||||
apt-get install -y software-properties-common
|
sudo add-apt-repository -y ppa:deadsnakes/ppa
|
||||||
add-apt-repository -y ppa:deadsnakes/ppa
|
sudo apt-get install -y $PYTHON $PYTHON-dev $PYTHON-full
|
||||||
apt-get install -y $PYTHON $PYTHON-dev $PYTHON-full
|
sudo apt-get install -y libblas-dev liblapack-dev liblapacke-dev
|
||||||
apt-get install -y libblas-dev liblapack-dev liblapacke-dev
|
|
||||||
apt-get install -y build-essential git
|
|
||||||
$PYTHON -m venv env
|
$PYTHON -m venv env
|
||||||
source env/bin/activate
|
source env/bin/activate
|
||||||
pip install --upgrade pip
|
pip install --upgrade pip
|
||||||
pip install --upgrade cmake
|
pip install --upgrade cmake
|
||||||
pip install nanobind==2.4.0
|
|
||||||
pip install --upgrade setuptools
|
|
||||||
pip install numpy
|
|
||||||
pip install auditwheel
|
pip install auditwheel
|
||||||
pip install patchelf
|
pip install patchelf
|
||||||
pip install build
|
pip install build
|
||||||
pip install twine
|
pip install twine
|
||||||
<< parameters.extra_env >> \
|
<< parameters.build_env >> pip install ".[dev]" -v
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=`nproc` \
|
|
||||||
pip install . -v
|
|
||||||
pip install typing_extensions
|
pip install typing_extensions
|
||||||
python setup.py generate_stubs
|
python setup.py generate_stubs
|
||||||
<< parameters.extra_env >> \
|
python setup.py clean --all
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=`nproc` \
|
MLX_BUILD_STAGE=1 << parameters.build_env >> python -m build -w
|
||||||
python -m build --wheel
|
bash python/scripts/repair_linux.sh
|
||||||
auditwheel show dist/*
|
- when:
|
||||||
auditwheel repair dist/* --plat manylinux_2_31_x86_64
|
condition:
|
||||||
|
equal: ["3.10", << parameters.python_version >>]
|
||||||
|
steps:
|
||||||
|
- run:
|
||||||
|
name: Build common package
|
||||||
|
command: |
|
||||||
|
source env/bin/activate
|
||||||
|
python setup.py clean --all
|
||||||
|
<< parameters.build_env >> MLX_BUILD_STAGE=2 \
|
||||||
|
python -m build -w
|
||||||
|
auditwheel repair dist/mlx_cpu*.whl --plat manylinux_2_35_x86_64
|
||||||
|
- when:
|
||||||
|
condition: << parameters.build_env >>
|
||||||
|
steps:
|
||||||
|
- run:
|
||||||
|
name: Upload packages
|
||||||
|
command: |
|
||||||
|
source env/bin/activate
|
||||||
|
twine upload wheelhouse/*.whl
|
||||||
|
- store_artifacts:
|
||||||
|
path: wheelhouse/
|
||||||
|
|
||||||
|
build_cuda_release:
|
||||||
|
parameters:
|
||||||
|
build_env:
|
||||||
|
type: string
|
||||||
|
default: ""
|
||||||
|
machine:
|
||||||
|
image: ubuntu-2204:current
|
||||||
|
resource_class: xlarge
|
||||||
|
steps:
|
||||||
|
- checkout
|
||||||
|
- run:
|
||||||
|
name: Build wheel
|
||||||
|
command: |
|
||||||
|
export DEBIAN_FRONTEND=noninteractive
|
||||||
|
export NEEDRESTART_MODE=a
|
||||||
|
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64/cuda-keyring_1.1-1_all.deb
|
||||||
|
sudo dpkg -i cuda-keyring_1.1-1_all.deb
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install cuda-toolkit-12-9 libcudnn9-dev-cuda-12
|
||||||
|
sudo apt-get install libblas-dev liblapack-dev liblapacke-dev
|
||||||
|
sudo apt-get install zip
|
||||||
|
pip install auditwheel
|
||||||
|
pip install patchelf
|
||||||
|
pip install build
|
||||||
|
pip install twine
|
||||||
|
export PATH=/usr/local/cuda/bin${PATH:+:${PATH}}
|
||||||
|
export LD_LIBRARY_PATH=/usr/local/cuda/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
|
||||||
|
<< parameters.build_env >> MLX_BUILD_STAGE=2 \
|
||||||
|
CMAKE_ARGS="-DMLX_BUILD_CUDA=ON -DCMAKE_CUDA_COMPILER=`which nvcc`" \
|
||||||
|
python -m build -w
|
||||||
|
bash python/scripts/repair_cuda.sh
|
||||||
|
- when:
|
||||||
|
condition: << parameters.build_env >>
|
||||||
|
steps:
|
||||||
- run:
|
- run:
|
||||||
name: Upload package
|
name: Upload package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
twine upload wheelhouse/*.whl
|
||||||
twine upload wheelhouse/*
|
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
path: wheelhouse/
|
path: wheelhouse/
|
||||||
|
|
||||||
@@ -324,21 +457,23 @@ workflows:
|
|||||||
pattern: "^(?!pull/)[-\\w]+$"
|
pattern: "^(?!pull/)[-\\w]+$"
|
||||||
value: << pipeline.git.branch >>
|
value: << pipeline.git.branch >>
|
||||||
- not: << pipeline.parameters.nightly_build >>
|
- not: << pipeline.parameters.nightly_build >>
|
||||||
- not: << pipeline.parameters.weekly_build >>
|
|
||||||
- not: << pipeline.parameters.test_release >>
|
- not: << pipeline.parameters.test_release >>
|
||||||
jobs:
|
jobs:
|
||||||
- mac_build_and_test:
|
- mac_build_and_test:
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
xcode_version: ["15.0.0", "15.2.0", "16.0.0"]
|
macosx_deployment_target: ["13.5", "15.0"]
|
||||||
- linux_build_and_test
|
- linux_build_and_test
|
||||||
|
- cuda_build_and_test:
|
||||||
|
matrix:
|
||||||
|
parameters:
|
||||||
|
image_date: ["2023.11.1", "2025.05.1"]
|
||||||
- build_documentation
|
- build_documentation
|
||||||
|
|
||||||
build_pypi_release:
|
build_pypi_release:
|
||||||
when:
|
when:
|
||||||
and:
|
and:
|
||||||
- not: << pipeline.parameters.nightly_build >>
|
- not: << pipeline.parameters.nightly_build >>
|
||||||
- not: << pipeline.parameters.weekly_build >>
|
|
||||||
- not: << pipeline.parameters.test_release >>
|
- not: << pipeline.parameters.test_release >>
|
||||||
jobs:
|
jobs:
|
||||||
- build_release:
|
- build_release:
|
||||||
@@ -349,9 +484,10 @@ workflows:
|
|||||||
ignore: /.*/
|
ignore: /.*/
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
python_version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
|
||||||
xcode_version: ["15.0.0", "15.2.0"]
|
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
||||||
build_env: ["PYPI_RELEASE=1"]
|
build_env: ["PYPI_RELEASE=1"]
|
||||||
|
xcode_version: ["26.0.0"]
|
||||||
- build_documentation:
|
- build_documentation:
|
||||||
filters:
|
filters:
|
||||||
tags:
|
tags:
|
||||||
@@ -359,6 +495,25 @@ workflows:
|
|||||||
branches:
|
branches:
|
||||||
ignore: /.*/
|
ignore: /.*/
|
||||||
upload-docs: true
|
upload-docs: true
|
||||||
|
- build_linux_release:
|
||||||
|
filters:
|
||||||
|
tags:
|
||||||
|
only: /^v.*/
|
||||||
|
branches:
|
||||||
|
ignore: /.*/
|
||||||
|
matrix:
|
||||||
|
parameters:
|
||||||
|
python_version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
|
||||||
|
build_env: ["PYPI_RELEASE=1"]
|
||||||
|
- build_cuda_release:
|
||||||
|
filters:
|
||||||
|
tags:
|
||||||
|
only: /^v.*/
|
||||||
|
branches:
|
||||||
|
ignore: /.*/
|
||||||
|
matrix:
|
||||||
|
parameters:
|
||||||
|
build_env: ["PYPI_RELEASE=1"]
|
||||||
|
|
||||||
prb:
|
prb:
|
||||||
when:
|
when:
|
||||||
@@ -374,9 +529,14 @@ workflows:
|
|||||||
requires: [ hold ]
|
requires: [ hold ]
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
xcode_version: ["15.0.0", "15.2.0", "16.0.0"]
|
macosx_deployment_target: ["13.5", "15.0"]
|
||||||
- linux_build_and_test:
|
- linux_build_and_test:
|
||||||
requires: [ hold ]
|
requires: [ hold ]
|
||||||
|
- cuda_build_and_test:
|
||||||
|
requires: [ hold ]
|
||||||
|
matrix:
|
||||||
|
parameters:
|
||||||
|
image_date: ["2023.11.1", "2025.05.1"]
|
||||||
nightly_build:
|
nightly_build:
|
||||||
when:
|
when:
|
||||||
and:
|
and:
|
||||||
@@ -386,28 +546,34 @@ workflows:
|
|||||||
- build_release:
|
- build_release:
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
python_version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
|
||||||
xcode_version: ["15.0.0", "15.2.0"]
|
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
||||||
weekly_build:
|
xcode_version: ["26.0.0"]
|
||||||
|
- build_linux_release:
|
||||||
|
matrix:
|
||||||
|
parameters:
|
||||||
|
python_version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
|
||||||
|
- build_cuda_release
|
||||||
|
|
||||||
|
build_dev_release:
|
||||||
when:
|
when:
|
||||||
and:
|
and:
|
||||||
- equal: [ main, << pipeline.git.branch >> ]
|
- equal: [ main, << pipeline.git.branch >> ]
|
||||||
- << pipeline.parameters.weekly_build >>
|
- << pipeline.parameters.test_release >>
|
||||||
jobs:
|
jobs:
|
||||||
- build_release:
|
- build_release:
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
python_version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
|
||||||
xcode_version: ["15.0.0", "15.2.0", "16.0.0"]
|
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
||||||
build_env: ["DEV_RELEASE=1"]
|
build_env: ["DEV_RELEASE=1"]
|
||||||
linux_test_release:
|
xcode_version: ["26.0.0"]
|
||||||
when:
|
|
||||||
and:
|
|
||||||
- equal: [ main, << pipeline.git.branch >> ]
|
|
||||||
- << pipeline.parameters.linux_release >>
|
|
||||||
jobs:
|
|
||||||
- build_linux_release:
|
- build_linux_release:
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
python_version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
|
||||||
extra_env: ["PYPI_RELEASE=1"]
|
build_env: ["DEV_RELEASE=1"]
|
||||||
|
- build_cuda_release:
|
||||||
|
matrix:
|
||||||
|
parameters:
|
||||||
|
build_env: ["DEV_RELEASE=1"]
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -36,6 +36,7 @@ share/python-wheels/
|
|||||||
.installed.cfg
|
.installed.cfg
|
||||||
*.egg
|
*.egg
|
||||||
MANIFEST
|
MANIFEST
|
||||||
|
uv.lock
|
||||||
|
|
||||||
# vim
|
# vim
|
||||||
*.swp
|
*.swp
|
||||||
|
|||||||
@@ -1,16 +1,16 @@
|
|||||||
repos:
|
repos:
|
||||||
- repo: https://github.com/pre-commit/mirrors-clang-format
|
- repo: https://github.com/pre-commit/mirrors-clang-format
|
||||||
rev: v19.1.4
|
rev: v19.1.7
|
||||||
hooks:
|
hooks:
|
||||||
- id: clang-format
|
- id: clang-format
|
||||||
# Using this mirror lets us use mypyc-compiled black, which is about 2x faster
|
# Using this mirror lets us use mypyc-compiled black, which is about 2x faster
|
||||||
- repo: https://github.com/psf/black-pre-commit-mirror
|
- repo: https://github.com/psf/black-pre-commit-mirror
|
||||||
rev: 24.10.0
|
rev: 25.1.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: black
|
- id: black
|
||||||
|
|
||||||
- repo: https://github.com/pycqa/isort
|
- repo: https://github.com/pycqa/isort
|
||||||
rev: 5.13.2
|
rev: 6.0.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: isort
|
- id: isort
|
||||||
args:
|
args:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ with a short description of your contribution(s) below. For example:
|
|||||||
|
|
||||||
MLX was developed with contributions from the following individuals:
|
MLX was developed with contributions from the following individuals:
|
||||||
|
|
||||||
- Nripesh Niketan: Added `softsign`, `softmax`, `hardswish`, `logsoftmax` activation functions. Added `dropout3d` ops. Added `LogicalAnd` and `LogicalOR` ops. Added `clip_grad_norm` along with `tree_reduce`. Added `cross`.
|
- Nripesh Niketan: Added `softsign`, `softmax`, `hardswish`, `logsoftmax` activation functions. Added `dropout3d` ops. Added `LogicalAnd` and `LogicalOR` ops. Added `clip_grad_norm` along with `tree_reduce`. Added `cross`. Added `orthogonal` initializer.
|
||||||
- Juarez Bochi: Fixed bug in cross attention.
|
- Juarez Bochi: Fixed bug in cross attention.
|
||||||
- Justin Deschenaux: Sine, Cosine, arange, randint, truncated normal, bernoulli, lion optimizer, Dropout2d, linear and logistic regression python example.
|
- Justin Deschenaux: Sine, Cosine, arange, randint, truncated normal, bernoulli, lion optimizer, Dropout2d, linear and logistic regression python example.
|
||||||
- Diogo Da Cruz: Added `tri`, `tril`, `triu`, `tensordot`, `inner`, `outer`, `tile`, `StreamContext`, `stream`, safetensors support, `einsum`, and `einsum_path`.
|
- Diogo Da Cruz: Added `tri`, `tril`, `triu`, `tensordot`, `inner`, `outer`, `tile`, `StreamContext`, `stream`, safetensors support, `einsum`, and `einsum_path`.
|
||||||
@@ -19,11 +19,17 @@ MLX was developed with contributions from the following individuals:
|
|||||||
- Gleb Pobudzey: Added the `where` primitive, and groups in 1D and 2D convolutions.
|
- Gleb Pobudzey: Added the `where` primitive, and groups in 1D and 2D convolutions.
|
||||||
- Paul Paczuski: Improved stability of BCE loss calculation
|
- Paul Paczuski: Improved stability of BCE loss calculation
|
||||||
- Max-Heinrich Laves: Added `conv_transpose1d`, `conv_transpose2d`, and `conv_transpose3d` ops.
|
- Max-Heinrich Laves: Added `conv_transpose1d`, `conv_transpose2d`, and `conv_transpose3d` ops.
|
||||||
|
- Gökdeniz Gülmez: Added the `Muon (MomentUm Orthogonalized by Newton-schulz)` optimizer, and the `ReLU²` activation function.
|
||||||
|
|
||||||
<a href="https://github.com/ml-explore/mlx/graphs/contributors">
|
<a href="https://github.com/ml-explore/mlx/graphs/contributors">
|
||||||
<img class="dark-light" src="https://contrib.rocks/image?repo=ml-explore/mlx&anon=0&columns=20&max=100&r=true" />
|
<img class="dark-light" src="https://contrib.rocks/image?repo=ml-explore/mlx&anon=0&columns=20&max=100&r=true" />
|
||||||
</a>
|
</a>
|
||||||
|
|
||||||
|
# Organizations
|
||||||
|
|
||||||
|
MLX has received contributions from the following companies:
|
||||||
|
- NVIDIA Corporation & Affiliates
|
||||||
|
|
||||||
# Third-Party Software
|
# Third-Party Software
|
||||||
|
|
||||||
MLX leverages several third-party software, listed here together with
|
MLX leverages several third-party software, listed here together with
|
||||||
|
|||||||
117
CMakeLists.txt
117
CMakeLists.txt
@@ -1,13 +1,36 @@
|
|||||||
cmake_minimum_required(VERSION 3.25)
|
cmake_minimum_required(VERSION 3.25)
|
||||||
|
|
||||||
project(mlx LANGUAGES C CXX)
|
if(NOT MLX_VERSION)
|
||||||
|
file(STRINGS "mlx/version.h" _mlx_h_version REGEX "^#define MLX_VERSION_.*$")
|
||||||
|
string(REGEX MATCH "#define MLX_VERSION_MAJOR ([0-9]+)" _ "${_mlx_h_version}")
|
||||||
|
set(_major ${CMAKE_MATCH_1})
|
||||||
|
string(REGEX MATCH "#define MLX_VERSION_MINOR ([0-9]+)" _ "${_mlx_h_version}")
|
||||||
|
set(_minor ${CMAKE_MATCH_1})
|
||||||
|
string(REGEX MATCH "#define MLX_VERSION_PATCH ([0-9]+)" _ "${_mlx_h_version}")
|
||||||
|
set(_patch ${CMAKE_MATCH_1})
|
||||||
|
set(MLX_PROJECT_VERSION "${_major}.${_minor}.${_patch}")
|
||||||
|
set(MLX_VERSION ${MLX_PROJECT_VERSION})
|
||||||
|
else()
|
||||||
|
string(REGEX REPLACE "^([0-9]+\.[0-9]+\.[0-9]+).*" "\\1" MLX_PROJECT_VERSION
|
||||||
|
${MLX_VERSION})
|
||||||
|
endif()
|
||||||
|
|
||||||
|
project(
|
||||||
|
mlx
|
||||||
|
LANGUAGES C CXX
|
||||||
|
VERSION ${MLX_PROJECT_VERSION})
|
||||||
|
|
||||||
|
if(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
|
||||||
|
add_compile_options(-Wall -Wextra)
|
||||||
|
endif()
|
||||||
|
|
||||||
# ----------------------------- Setup -----------------------------
|
# ----------------------------- Setup -----------------------------
|
||||||
set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
|
set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
|
||||||
set(CMAKE_CXX_STANDARD 17)
|
set(CMAKE_CXX_STANDARD 20)
|
||||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||||
set(CMAKE_INSTALL_MESSAGE NEVER)
|
set(CMAKE_INSTALL_MESSAGE NEVER)
|
||||||
|
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
|
||||||
|
|
||||||
# ----------------------------- Configuration -----------------------------
|
# ----------------------------- Configuration -----------------------------
|
||||||
option(MLX_BUILD_TESTS "Build tests for mlx" ON)
|
option(MLX_BUILD_TESTS "Build tests for mlx" ON)
|
||||||
@@ -16,21 +39,18 @@ option(MLX_BUILD_BENCHMARKS "Build benchmarks for mlx" OFF)
|
|||||||
option(MLX_BUILD_PYTHON_BINDINGS "Build python bindings for mlx" OFF)
|
option(MLX_BUILD_PYTHON_BINDINGS "Build python bindings for mlx" OFF)
|
||||||
option(MLX_BUILD_METAL "Build metal backend" ON)
|
option(MLX_BUILD_METAL "Build metal backend" ON)
|
||||||
option(MLX_BUILD_CPU "Build cpu backend" ON)
|
option(MLX_BUILD_CPU "Build cpu backend" ON)
|
||||||
|
option(MLX_BUILD_CUDA "Build cuda backend" OFF)
|
||||||
option(MLX_METAL_DEBUG "Enhance metal debug workflow" OFF)
|
option(MLX_METAL_DEBUG "Enhance metal debug workflow" OFF)
|
||||||
option(MLX_ENABLE_X64_MAC "Enable building for x64 macOS" OFF)
|
option(MLX_ENABLE_X64_MAC "Enable building for x64 macOS" OFF)
|
||||||
option(MLX_BUILD_GGUF "Include support for GGUF format" ON)
|
option(MLX_BUILD_GGUF "Include support for GGUF format" ON)
|
||||||
option(MLX_BUILD_SAFETENSORS "Include support for safetensors format" ON)
|
option(MLX_BUILD_SAFETENSORS "Include support for safetensors format" ON)
|
||||||
option(MLX_BUILD_BLAS_FROM_SOURCE "Build OpenBLAS from source code" OFF)
|
option(MLX_BUILD_BLAS_FROM_SOURCE "Build OpenBLAS from source code" OFF)
|
||||||
option(MLX_METAL_JIT "Use JIT compilation for Metal kernels" OFF)
|
option(MLX_METAL_JIT "Use JIT compilation for Metal kernels" OFF)
|
||||||
|
option(MLX_USE_CCACHE "Use CCache for compilation cache when available" ON)
|
||||||
option(BUILD_SHARED_LIBS "Build mlx as a shared library" OFF)
|
option(BUILD_SHARED_LIBS "Build mlx as a shared library" OFF)
|
||||||
|
option(USE_SYSTEM_FMT "Use system's provided fmt library" OFF)
|
||||||
if(NOT MLX_VERSION)
|
|
||||||
set(MLX_VERSION 0.22.0)
|
|
||||||
endif()
|
|
||||||
add_compile_definitions("MLX_VERSION=${MLX_VERSION}")
|
|
||||||
|
|
||||||
# --------------------- Processor tests -------------------------
|
# --------------------- Processor tests -------------------------
|
||||||
|
|
||||||
message(
|
message(
|
||||||
STATUS
|
STATUS
|
||||||
"Building MLX for ${CMAKE_SYSTEM_PROCESSOR} processor on ${CMAKE_SYSTEM_NAME}"
|
"Building MLX for ${CMAKE_SYSTEM_PROCESSOR} processor on ${CMAKE_SYSTEM_NAME}"
|
||||||
@@ -51,10 +71,17 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
|||||||
message(WARNING "Building for x86_64 arch is not officially supported.")
|
message(WARNING "Building for x86_64 arch is not officially supported.")
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
else()
|
else()
|
||||||
set(MLX_BUILD_METAL OFF)
|
set(MLX_BUILD_METAL OFF)
|
||||||
message(WARNING "MLX is prioritised for Apple silicon systems using macOS.")
|
endif()
|
||||||
|
|
||||||
|
if(MLX_USE_CCACHE)
|
||||||
|
find_program(CCACHE_PROGRAM ccache)
|
||||||
|
if(CCACHE_PROGRAM)
|
||||||
|
set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PROGRAM}")
|
||||||
|
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}")
|
||||||
|
set(CMAKE_CUDA_COMPILER_LAUNCHER "${CCACHE_PROGRAM}")
|
||||||
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# ----------------------------- Lib -----------------------------
|
# ----------------------------- Lib -----------------------------
|
||||||
@@ -65,18 +92,21 @@ cmake_policy(SET CMP0135 NEW)
|
|||||||
|
|
||||||
add_library(mlx)
|
add_library(mlx)
|
||||||
|
|
||||||
if(MLX_BUILD_METAL)
|
if(MLX_BUILD_CUDA)
|
||||||
set(METAL_LIB "-framework Metal")
|
enable_language(CUDA)
|
||||||
set(FOUNDATION_LIB "-framework Foundation")
|
|
||||||
set(QUARTZ_LIB "-framework QuartzCore")
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(MLX_BUILD_METAL AND NOT METAL_LIB)
|
if(MLX_BUILD_METAL)
|
||||||
message(STATUS "Metal not found. Unable to build GPU")
|
find_library(METAL_LIB Metal)
|
||||||
set(MLX_BUILD_METAL OFF)
|
find_library(FOUNDATION_LIB Foundation)
|
||||||
set(MLX_METAL_DEBUG OFF)
|
find_library(QUARTZ_LIB QuartzCore)
|
||||||
elseif(MLX_BUILD_METAL)
|
if(METAL_LIB)
|
||||||
message(STATUS "Building METAL sources")
|
message(STATUS "Metal found ${METAL_LIB}")
|
||||||
|
else()
|
||||||
|
message(
|
||||||
|
FATAL_ERROR
|
||||||
|
"Metal not found. Set MLX_BUILD_METAL=OFF to build without GPU")
|
||||||
|
endif()
|
||||||
|
|
||||||
if(MLX_METAL_DEBUG)
|
if(MLX_METAL_DEBUG)
|
||||||
add_compile_definitions(MLX_METAL_DEBUG)
|
add_compile_definitions(MLX_METAL_DEBUG)
|
||||||
@@ -85,7 +115,8 @@ elseif(MLX_BUILD_METAL)
|
|||||||
# Throw an error if xcrun not found
|
# Throw an error if xcrun not found
|
||||||
execute_process(
|
execute_process(
|
||||||
COMMAND zsh "-c" "/usr/bin/xcrun -sdk macosx --show-sdk-version"
|
COMMAND zsh "-c" "/usr/bin/xcrun -sdk macosx --show-sdk-version"
|
||||||
OUTPUT_VARIABLE MACOS_SDK_VERSION COMMAND_ERROR_IS_FATAL ANY)
|
OUTPUT_VARIABLE MACOS_SDK_VERSION
|
||||||
|
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY)
|
||||||
|
|
||||||
if(${MACOS_SDK_VERSION} LESS 14.0)
|
if(${MACOS_SDK_VERSION} LESS 14.0)
|
||||||
message(
|
message(
|
||||||
@@ -114,6 +145,12 @@ elseif(MLX_BUILD_METAL)
|
|||||||
target_link_libraries(mlx PUBLIC ${METAL_LIB} ${FOUNDATION_LIB} ${QUARTZ_LIB})
|
target_link_libraries(mlx PUBLIC ${METAL_LIB} ${FOUNDATION_LIB} ${QUARTZ_LIB})
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||||
|
# With newer clang/gcc versions following libs are implicitly linked, but when
|
||||||
|
# building on old distributions they need to be explicitly listed.
|
||||||
|
target_link_libraries(mlx PRIVATE dl pthread)
|
||||||
|
endif()
|
||||||
|
|
||||||
if(WIN32)
|
if(WIN32)
|
||||||
if(MSVC)
|
if(MSVC)
|
||||||
# GGUF does not build with MSVC.
|
# GGUF does not build with MSVC.
|
||||||
@@ -141,12 +178,13 @@ if(MLX_BUILD_CPU)
|
|||||||
message(STATUS "Accelerate found ${ACCELERATE_LIBRARY}")
|
message(STATUS "Accelerate found ${ACCELERATE_LIBRARY}")
|
||||||
set(MLX_BUILD_ACCELERATE ON)
|
set(MLX_BUILD_ACCELERATE ON)
|
||||||
else()
|
else()
|
||||||
message(STATUS "Accelerate or arm neon not found, using default backend.")
|
message(STATUS "Accelerate not found, using default backend.")
|
||||||
set(MLX_BUILD_ACCELERATE OFF)
|
set(MLX_BUILD_ACCELERATE OFF)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(MLX_BUILD_ACCELERATE)
|
if(MLX_BUILD_ACCELERATE)
|
||||||
target_link_libraries(mlx PUBLIC ${ACCELERATE_LIBRARY})
|
target_link_libraries(mlx PUBLIC ${ACCELERATE_LIBRARY})
|
||||||
|
add_compile_definitions(MLX_USE_ACCELERATE)
|
||||||
add_compile_definitions(ACCELERATE_NEW_LAPACK)
|
add_compile_definitions(ACCELERATE_NEW_LAPACK)
|
||||||
elseif(MLX_BUILD_BLAS_FROM_SOURCE)
|
elseif(MLX_BUILD_BLAS_FROM_SOURCE)
|
||||||
# Download and build OpenBLAS from source code.
|
# Download and build OpenBLAS from source code.
|
||||||
@@ -199,23 +237,13 @@ else()
|
|||||||
set(MLX_BUILD_ACCELERATE OFF)
|
set(MLX_BUILD_ACCELERATE OFF)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
find_package(MPI)
|
message(STATUS "Downloading json")
|
||||||
if(MPI_FOUND)
|
FetchContent_Declare(
|
||||||
execute_process(
|
json
|
||||||
COMMAND zsh "-c" "mpirun --version"
|
URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz)
|
||||||
OUTPUT_VARIABLE MPI_VERSION
|
FetchContent_MakeAvailable(json)
|
||||||
ERROR_QUIET)
|
target_include_directories(
|
||||||
if(${MPI_VERSION} MATCHES ".*Open MPI.*")
|
mlx PRIVATE $<BUILD_INTERFACE:${json_SOURCE_DIR}/single_include/nlohmann>)
|
||||||
target_include_directories(mlx PRIVATE ${MPI_INCLUDE_PATH})
|
|
||||||
elseif(MPI_VERSION STREQUAL "")
|
|
||||||
set(MPI_FOUND FALSE)
|
|
||||||
message(
|
|
||||||
WARNING "MPI found but mpirun is not available. Building without MPI.")
|
|
||||||
else()
|
|
||||||
set(MPI_FOUND FALSE)
|
|
||||||
message(WARNING "MPI which is not OpenMPI found. Building without MPI.")
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/mlx)
|
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/mlx)
|
||||||
|
|
||||||
@@ -223,12 +251,19 @@ target_include_directories(
|
|||||||
mlx PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}>
|
mlx PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}>
|
||||||
$<INSTALL_INTERFACE:include>)
|
$<INSTALL_INTERFACE:include>)
|
||||||
|
|
||||||
FetchContent_Declare(
|
# Do not add mlx_EXPORTS define for shared library.
|
||||||
|
set_target_properties(mlx PROPERTIES DEFINE_SYMBOL "")
|
||||||
|
|
||||||
|
if(USE_SYSTEM_FMT)
|
||||||
|
find_package(fmt REQUIRED)
|
||||||
|
else()
|
||||||
|
FetchContent_Declare(
|
||||||
fmt
|
fmt
|
||||||
GIT_REPOSITORY https://github.com/fmtlib/fmt.git
|
GIT_REPOSITORY https://github.com/fmtlib/fmt.git
|
||||||
GIT_TAG 10.2.1
|
GIT_TAG 10.2.1
|
||||||
EXCLUDE_FROM_ALL)
|
EXCLUDE_FROM_ALL)
|
||||||
FetchContent_MakeAvailable(fmt)
|
FetchContent_MakeAvailable(fmt)
|
||||||
|
endif()
|
||||||
target_link_libraries(mlx PRIVATE $<BUILD_INTERFACE:fmt::fmt-header-only>)
|
target_link_libraries(mlx PRIVATE $<BUILD_INTERFACE:fmt::fmt-header-only>)
|
||||||
|
|
||||||
if(MLX_BUILD_PYTHON_BINDINGS)
|
if(MLX_BUILD_PYTHON_BINDINGS)
|
||||||
|
|||||||
@@ -17,11 +17,11 @@ possible.
|
|||||||
|
|
||||||
You can also run the formatters manually as follows:
|
You can also run the formatters manually as follows:
|
||||||
|
|
||||||
```
|
```shell
|
||||||
clang-format -i file.cpp
|
clang-format -i file.cpp
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```shell
|
||||||
black file.py
|
black file.py
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,6 @@
|
|||||||
include CMakeLists.txt
|
include CMakeLists.txt
|
||||||
|
include mlx.pc.in
|
||||||
recursive-include mlx/ *
|
recursive-include mlx/ *
|
||||||
|
include cmake/*
|
||||||
include python/src/*
|
include python/src/*
|
||||||
include python/mlx/py.typed # support type hinting as in PEP-561
|
include python/mlx/py.typed # support type hinting as in PEP-561
|
||||||
|
|||||||
31
README.md
31
README.md
@@ -11,28 +11,28 @@ brought to you by Apple machine learning research.
|
|||||||
|
|
||||||
Some key features of MLX include:
|
Some key features of MLX include:
|
||||||
|
|
||||||
- **Familiar APIs**: MLX has a Python API that closely follows NumPy. MLX
|
- **Familiar APIs**: MLX has a Python API that closely follows NumPy. MLX
|
||||||
also has fully featured C++, [C](https://github.com/ml-explore/mlx-c), and
|
also has fully featured C++, [C](https://github.com/ml-explore/mlx-c), and
|
||||||
[Swift](https://github.com/ml-explore/mlx-swift/) APIs, which closely mirror
|
[Swift](https://github.com/ml-explore/mlx-swift/) APIs, which closely mirror
|
||||||
the Python API. MLX has higher-level packages like `mlx.nn` and
|
the Python API. MLX has higher-level packages like `mlx.nn` and
|
||||||
`mlx.optimizers` with APIs that closely follow PyTorch to simplify building
|
`mlx.optimizers` with APIs that closely follow PyTorch to simplify building
|
||||||
more complex models.
|
more complex models.
|
||||||
|
|
||||||
- **Composable function transformations**: MLX supports composable function
|
- **Composable function transformations**: MLX supports composable function
|
||||||
transformations for automatic differentiation, automatic vectorization,
|
transformations for automatic differentiation, automatic vectorization,
|
||||||
and computation graph optimization.
|
and computation graph optimization.
|
||||||
|
|
||||||
- **Lazy computation**: Computations in MLX are lazy. Arrays are only
|
- **Lazy computation**: Computations in MLX are lazy. Arrays are only
|
||||||
materialized when needed.
|
materialized when needed.
|
||||||
|
|
||||||
- **Dynamic graph construction**: Computation graphs in MLX are constructed
|
- **Dynamic graph construction**: Computation graphs in MLX are constructed
|
||||||
dynamically. Changing the shapes of function arguments does not trigger
|
dynamically. Changing the shapes of function arguments does not trigger
|
||||||
slow compilations, and debugging is simple and intuitive.
|
slow compilations, and debugging is simple and intuitive.
|
||||||
|
|
||||||
- **Multi-device**: Operations can run on any of the supported devices
|
- **Multi-device**: Operations can run on any of the supported devices
|
||||||
(currently the CPU and the GPU).
|
(currently the CPU and the GPU).
|
||||||
|
|
||||||
- **Unified memory**: A notable difference from MLX and other frameworks
|
- **Unified memory**: A notable difference from MLX and other frameworks
|
||||||
is the *unified memory model*. Arrays in MLX live in shared memory.
|
is the *unified memory model*. Arrays in MLX live in shared memory.
|
||||||
Operations on MLX arrays can be performed on any of the supported
|
Operations on MLX arrays can be performed on any of the supported
|
||||||
device types without transferring data.
|
device types without transferring data.
|
||||||
@@ -68,18 +68,23 @@ in the documentation.
|
|||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
MLX is available on [PyPI](https://pypi.org/project/mlx/). To install the Python API, run:
|
MLX is available on [PyPI](https://pypi.org/project/mlx/). To install MLX on
|
||||||
|
macOS, run:
|
||||||
|
|
||||||
**With `pip`**:
|
```bash
|
||||||
|
|
||||||
```
|
|
||||||
pip install mlx
|
pip install mlx
|
||||||
```
|
```
|
||||||
|
|
||||||
**With `conda`**:
|
To install the CUDA backend on Linux, run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install mlx[cuda]
|
||||||
```
|
```
|
||||||
conda install -c conda-forge mlx
|
|
||||||
|
To install a CPU-only Linux package, run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install mlx[cpu]
|
||||||
```
|
```
|
||||||
|
|
||||||
Checkout the
|
Checkout the
|
||||||
@@ -105,7 +110,7 @@ Hannun, Jagrit Digani, Angelos Katharopoulos, and Ronan Collobert. If you find
|
|||||||
MLX useful in your research and wish to cite it, please use the following
|
MLX useful in your research and wish to cite it, please use the following
|
||||||
BibTex entry:
|
BibTex entry:
|
||||||
|
|
||||||
```
|
```text
|
||||||
@software{mlx2023,
|
@software{mlx2023,
|
||||||
author = {Awni Hannun and Jagrit Digani and Angelos Katharopoulos and Ronan Collobert},
|
author = {Awni Hannun and Jagrit Digani and Angelos Katharopoulos and Ronan Collobert},
|
||||||
title = {{MLX}: Efficient and flexible machine learning on Apple silicon},
|
title = {{MLX}: Efficient and flexible machine learning on Apple silicon},
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
// Copyright © 2023 Apple Inc.
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
|
|
||||||
|
|||||||
@@ -192,6 +192,22 @@ void time_reductions() {
|
|||||||
|
|
||||||
auto argmin_along_1 = [&a]() { return mx::argmin(a, 1, false); };
|
auto argmin_along_1 = [&a]() { return mx::argmin(a, 1, false); };
|
||||||
TIME(argmin_along_1);
|
TIME(argmin_along_1);
|
||||||
|
|
||||||
|
auto indices = mx::array({1});
|
||||||
|
auto updates = mx::reshape(mx::array({NAN}), {1, 1, 1});
|
||||||
|
std::vector<int> axes{0};
|
||||||
|
auto b = scatter(a, {indices}, updates, axes);
|
||||||
|
mx::eval(b);
|
||||||
|
|
||||||
|
auto max_along_0 = [&b]() { return mx::max(b, 0, false); };
|
||||||
|
TIME(max_along_0);
|
||||||
|
auto max_along_1 = [&b]() { return mx::max(b, 1, false); };
|
||||||
|
TIME(max_along_1);
|
||||||
|
|
||||||
|
auto min_along_0 = [&b]() { return mx::min(b, 0, false); };
|
||||||
|
TIME(min_along_0);
|
||||||
|
auto min_along_1 = [&b]() { return mx::min(b, 1, false); };
|
||||||
|
TIME(min_along_1);
|
||||||
}
|
}
|
||||||
|
|
||||||
void time_gather_scatter() {
|
void time_gather_scatter() {
|
||||||
|
|||||||
@@ -142,9 +142,7 @@ def bench_shape(B, M, N, K, np_dtype, transpose="nn"):
|
|||||||
t_b = (0, 1, 2) if transpose[1] == "n" else (0, 2, 1)
|
t_b = (0, 1, 2) if transpose[1] == "n" else (0, 2, 1)
|
||||||
|
|
||||||
c_mlx = a_mx.transpose(t_a) @ b_mx.transpose(t_b)
|
c_mlx = a_mx.transpose(t_a) @ b_mx.transpose(t_b)
|
||||||
c_npy = a_np.transpose(t_a).astype(np.float32) @ b_np.transpose(t_b).astype(
|
c_npy = a_np.transpose(t_a).astype(np_dtype) @ b_np.transpose(t_b).astype(np_dtype)
|
||||||
np.float32
|
|
||||||
)
|
|
||||||
|
|
||||||
atol = 1e-5 if np_dtype == np.float32 else 1e-4
|
atol = 1e-5 if np_dtype == np.float32 else 1e-4
|
||||||
|
|
||||||
@@ -163,7 +161,7 @@ def get_gflop_count(B, M, N, K):
|
|||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
parser = argparse.ArgumentParser(description="Run gemm benchmarks")
|
parser = argparse.ArgumentParser(description="Run gemm benchmarks")
|
||||||
|
|
||||||
dtypes = ("float32", "float16")
|
dtypes = ("float32", "float16", "complex64")
|
||||||
transposes = ("nn", "nt", "tn")
|
transposes = ("nn", "nt", "tn")
|
||||||
shapes = (
|
shapes = (
|
||||||
(16, 234, 768, 3072),
|
(16, 234, 768, 3072),
|
||||||
@@ -187,7 +185,7 @@ if __name__ == "__main__":
|
|||||||
diff = gflops_mx / gflops_pt - 1.0
|
diff = gflops_mx / gflops_pt - 1.0
|
||||||
|
|
||||||
print(
|
print(
|
||||||
f"{B:3d}, {M:4d}, {N:4d}, {K:4d}, {dtype}, {transpose}, {gflops_pt:05.3f}, {gflops_mx:05.3f}, {100. * diff:+5.2f}%"
|
f"{B:3d}, {M:4d}, {N:4d}, {K:4d}, {dtype}, {transpose}, {gflops_pt:05.3f}, {gflops_mx:05.3f}, {100.0 * diff:+5.2f}%"
|
||||||
)
|
)
|
||||||
if gflops_pt >= 2.0 * gflops_mx:
|
if gflops_pt >= 2.0 * gflops_mx:
|
||||||
print("ATTENTION ^^^^^^^")
|
print("ATTENTION ^^^^^^^")
|
||||||
|
|||||||
@@ -196,7 +196,7 @@ def bench_with_out_len(ax, out_vec_len, in_vector_lens, dtype, transpose):
|
|||||||
|
|
||||||
|
|
||||||
for transpose in (False, True):
|
for transpose in (False, True):
|
||||||
for dtype in ("float32", "float16"):
|
for dtype in ("float32", "float16", "complex64"):
|
||||||
fig, axs = plt.subplots(
|
fig, axs = plt.subplots(
|
||||||
len(in_vec_sizes), 2, figsize=(8.5, 11), layout="constrained"
|
len(in_vec_sizes), 2, figsize=(8.5, 11), layout="constrained"
|
||||||
)
|
)
|
||||||
@@ -215,7 +215,7 @@ for transpose in (False, True):
|
|||||||
fig.suptitle(f"{device_name}: {dtype} {op_name}")
|
fig.suptitle(f"{device_name}: {dtype} {op_name}")
|
||||||
fig.savefig(
|
fig.savefig(
|
||||||
os.path.join(
|
os.path.join(
|
||||||
results_dir, f'{device_name.replace(" ", "_")}_{dtype}_{op_name}.pdf'
|
results_dir, f"{device_name.replace(' ', '_')}_{dtype}_{op_name}.pdf"
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
plt.close(fig)
|
plt.close(fig)
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import os
|
|||||||
import time
|
import time
|
||||||
|
|
||||||
import torch
|
import torch
|
||||||
|
import torch.cuda
|
||||||
import torch.mps
|
import torch.mps
|
||||||
|
|
||||||
|
|
||||||
@@ -44,8 +45,10 @@ def bench(f, *args):
|
|||||||
|
|
||||||
|
|
||||||
def sync_if_needed(x):
|
def sync_if_needed(x):
|
||||||
if x.device != torch.device("cpu"):
|
if x.device == torch.device("mps"):
|
||||||
torch.mps.synchronize()
|
torch.mps.synchronize()
|
||||||
|
elif x.device == torch.device("cuda"):
|
||||||
|
torch.cuda.synchronize()
|
||||||
|
|
||||||
|
|
||||||
@torch.no_grad()
|
@torch.no_grad()
|
||||||
@@ -99,6 +102,14 @@ def reduction(op, axis, x):
|
|||||||
sync_if_needed(x)
|
sync_if_needed(x)
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def sum_and_add(axis, x, y):
|
||||||
|
z = x.sum(axis=axis, keepdims=True)
|
||||||
|
for i in range(50):
|
||||||
|
z = (z + y).sum(axis=axis, keepdims=True)
|
||||||
|
sync_if_needed(x)
|
||||||
|
|
||||||
|
|
||||||
@torch.no_grad()
|
@torch.no_grad()
|
||||||
def softmax(axis, x):
|
def softmax(axis, x):
|
||||||
ys = []
|
ys = []
|
||||||
@@ -340,7 +351,11 @@ if __name__ == "__main__":
|
|||||||
args.axis.pop(0)
|
args.axis.pop(0)
|
||||||
|
|
||||||
torch.set_num_threads(1)
|
torch.set_num_threads(1)
|
||||||
device = "cpu" if args.cpu else "mps"
|
device = "mps"
|
||||||
|
if torch.cuda.is_available():
|
||||||
|
device = "cuda"
|
||||||
|
if args.cpu:
|
||||||
|
device = "cpu"
|
||||||
|
|
||||||
types = args.dtype
|
types = args.dtype
|
||||||
if not types:
|
if not types:
|
||||||
@@ -460,5 +475,8 @@ if __name__ == "__main__":
|
|||||||
elif args.benchmark == "selu":
|
elif args.benchmark == "selu":
|
||||||
print(bench(selu, x))
|
print(bench(selu, x))
|
||||||
|
|
||||||
|
elif args.benchmark == "sum_and_add":
|
||||||
|
print(bench(sum_and_add, axis, *xs))
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise ValueError(f"Unknown benchmark `{args.benchmark}`.")
|
raise ValueError(f"Unknown benchmark `{args.benchmark}`.")
|
||||||
|
|||||||
107
benchmarks/python/conv_unaligned_bench.py
Normal file
107
benchmarks/python/conv_unaligned_bench.py
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
import math
|
||||||
|
import time
|
||||||
|
|
||||||
|
import mlx.core as mx
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
|
||||||
|
N_warmup = 10
|
||||||
|
N_iter_bench = 100
|
||||||
|
N_iter_func = 5
|
||||||
|
|
||||||
|
|
||||||
|
def bench(f, a, b):
|
||||||
|
for i in range(N_warmup):
|
||||||
|
f(a, b)
|
||||||
|
torch.mps.synchronize()
|
||||||
|
|
||||||
|
s = time.perf_counter_ns()
|
||||||
|
for i in range(N_iter_bench):
|
||||||
|
f(a, b)
|
||||||
|
e = time.perf_counter_ns()
|
||||||
|
return (e - s) * 1e-9
|
||||||
|
|
||||||
|
|
||||||
|
def make_mx_conv_2D(strides=(1, 1), padding=(0, 0), groups=1):
|
||||||
|
def mx_conv_2D(a, b):
|
||||||
|
ys = []
|
||||||
|
for i in range(N_iter_func):
|
||||||
|
y = mx.conv2d(a, b, stride=strides, padding=padding, groups=groups)
|
||||||
|
ys.append(y)
|
||||||
|
mx.eval(ys)
|
||||||
|
return ys
|
||||||
|
|
||||||
|
return mx_conv_2D
|
||||||
|
|
||||||
|
|
||||||
|
def make_pt_conv_2D(strides=(1, 1), padding=(0, 0), groups=1):
|
||||||
|
@torch.no_grad()
|
||||||
|
def pt_conv_2D(a, b):
|
||||||
|
ys = []
|
||||||
|
for i in range(N_iter_func):
|
||||||
|
y = torch.conv2d(a, b, stride=strides, padding=padding, groups=groups)
|
||||||
|
ys.append(y)
|
||||||
|
torch.mps.synchronize()
|
||||||
|
return ys
|
||||||
|
|
||||||
|
return pt_conv_2D
|
||||||
|
|
||||||
|
|
||||||
|
def bench_shape(N, H, W, C, kH, kW, O, strides, padding, groups, np_dtype):
|
||||||
|
scale = 1.0 / math.sqrt(kH * kH * C)
|
||||||
|
a_np = np.random.uniform(0, 0.5, (N, H, W, C)).astype(np_dtype)
|
||||||
|
b_np = np.random.uniform(-scale, scale, (O, kH, kW, int(C / groups))).astype(
|
||||||
|
np_dtype
|
||||||
|
)
|
||||||
|
|
||||||
|
a_mx = mx.array(a_np)
|
||||||
|
b_mx = mx.array(b_np)
|
||||||
|
|
||||||
|
a_pt = torch.from_numpy(a_np.transpose((0, 3, 1, 2))).to("mps")
|
||||||
|
b_pt = torch.from_numpy(b_np.transpose((0, 3, 1, 2))).to("mps")
|
||||||
|
|
||||||
|
torch.mps.synchronize()
|
||||||
|
|
||||||
|
f_mx = make_mx_conv_2D(strides, padding, groups)
|
||||||
|
f_pt = make_pt_conv_2D(strides, padding, groups)
|
||||||
|
|
||||||
|
time_torch = bench(f_pt, a_pt, b_pt)
|
||||||
|
time_mlx = bench(f_mx, a_mx, b_mx)
|
||||||
|
|
||||||
|
out_mx = mx.conv2d(a_mx, b_mx, stride=strides, padding=padding, groups=groups)
|
||||||
|
out_pt = torch.conv2d(
|
||||||
|
a_pt.to("cpu"), b_pt.to("cpu"), stride=strides, padding=padding, groups=groups
|
||||||
|
)
|
||||||
|
out_pt = torch.permute(out_pt, (0, 2, 3, 1))
|
||||||
|
out_pt = out_pt.numpy(force=True)
|
||||||
|
|
||||||
|
atol = 2e-5 if np_dtype == np.float32 else 1e-4
|
||||||
|
|
||||||
|
if not np.allclose(out_pt, out_mx, atol=atol):
|
||||||
|
print(
|
||||||
|
f"Failed at {(N, H, W, C)}, {(O, kH, kW, C)} [strides = {strides}, padding = {padding}, groups = {groups}] with max(|a - b|) = {np.max(np.abs(out_pt - out_mx))}"
|
||||||
|
)
|
||||||
|
|
||||||
|
return time_mlx, time_torch
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
dtype = "float32"
|
||||||
|
shapes = (
|
||||||
|
(4, 32, 32, 21, 3, 3, 128),
|
||||||
|
(4, 32, 32, 21, 3, 3, 37),
|
||||||
|
(4, 32, 32, 370, 3, 3, 370),
|
||||||
|
(4, 32, 32, 370, 7, 7, 128),
|
||||||
|
(2, 320, 640, 21, 7, 7, 21),
|
||||||
|
)
|
||||||
|
for N, H, W, C, kh, kw, O in shapes:
|
||||||
|
time_mlx, time_torch = bench_shape(
|
||||||
|
N, H, W, C, kh, kw, O, (1, 1), (0, 0), 1, dtype
|
||||||
|
)
|
||||||
|
diff = time_torch / time_mlx - 1.0
|
||||||
|
|
||||||
|
print(
|
||||||
|
f"({N}, {H:3d}, {W:3d}, {C:3d}), ({O:3d}, {kh:2d}, {kw:2d}, {C:3d}), {dtype}, {100. * diff:+5.2f}%"
|
||||||
|
)
|
||||||
|
if time_mlx >= 2.0 * time_torch:
|
||||||
|
print("ATTENTION ^^^^^^^")
|
||||||
@@ -1,7 +1,6 @@
|
|||||||
# Copyright © 2023-2024 Apple Inc.
|
# Copyright © 2023-2024 Apple Inc.
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
from time import time
|
|
||||||
|
|
||||||
import mlx.core as mx
|
import mlx.core as mx
|
||||||
import torch
|
import torch
|
||||||
|
|||||||
74
benchmarks/python/gather_mm_bench.py
Normal file
74
benchmarks/python/gather_mm_bench.py
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
# Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
import mlx.core as mx
|
||||||
|
from time_utils import time_fn
|
||||||
|
|
||||||
|
N = 1024
|
||||||
|
D = 1024
|
||||||
|
M = 1024
|
||||||
|
E = 32
|
||||||
|
I = 4
|
||||||
|
|
||||||
|
|
||||||
|
def gather_sort(x, indices):
|
||||||
|
N, M = indices.shape
|
||||||
|
indices = indices.flatten()
|
||||||
|
order = mx.argsort(indices)
|
||||||
|
inv_order = mx.argsort(order)
|
||||||
|
return x.flatten(0, -3)[order // M], indices[order], inv_order
|
||||||
|
|
||||||
|
|
||||||
|
def scatter_unsort(x, inv_order, shape=None):
|
||||||
|
x = x[inv_order]
|
||||||
|
if shape is not None:
|
||||||
|
x = mx.unflatten(x, 0, shape)
|
||||||
|
return x
|
||||||
|
|
||||||
|
|
||||||
|
def gather_mm_simulate(x, w, indices):
|
||||||
|
x, idx, inv_order = gather_sort(x, indices)
|
||||||
|
for i in range(2):
|
||||||
|
y = mx.concatenate([x[i] @ w[j].T for i, j in enumerate(idx.tolist())], axis=0)
|
||||||
|
x = y[:, None]
|
||||||
|
x = scatter_unsort(x, inv_order, indices.shape)
|
||||||
|
return x
|
||||||
|
|
||||||
|
|
||||||
|
def time_gather_mm():
|
||||||
|
x = mx.random.normal((N, 1, 1, D)) / 1024**0.5
|
||||||
|
w1 = mx.random.normal((E, M, D)) / 1024**0.5
|
||||||
|
w2 = mx.random.normal((E, D, M)) / 1024**0.5
|
||||||
|
indices = (mx.random.uniform(shape=(N, I)) * E).astype(mx.uint32)
|
||||||
|
sorted_indices = mx.sort(indices.flatten()).reshape(N, I)
|
||||||
|
mx.eval(x, w1, w2, indices, sorted_indices)
|
||||||
|
|
||||||
|
def gather_mm(x, w1, w2, indices, sort):
|
||||||
|
idx = indices
|
||||||
|
inv_order = None
|
||||||
|
if sort:
|
||||||
|
x, idx, inv_order = gather_sort(x, indices)
|
||||||
|
x = mx.gather_mm(x, w1.swapaxes(-1, -2), rhs_indices=idx, sorted_indices=sort)
|
||||||
|
x = mx.gather_mm(x, w2.swapaxes(-1, -2), rhs_indices=idx, sorted_indices=sort)
|
||||||
|
if sort:
|
||||||
|
x = scatter_unsort(x, inv_order, indices.shape)
|
||||||
|
return x
|
||||||
|
|
||||||
|
time_fn(gather_mm, x, w1, w2, indices, False)
|
||||||
|
time_fn(gather_mm, x, w1, w2, sorted_indices, False)
|
||||||
|
time_fn(gather_mm, x, w1, w2, indices, True)
|
||||||
|
|
||||||
|
x = mx.random.normal((N * I, D)) / 1024**0.5
|
||||||
|
w1 = mx.random.normal((M, D)) / 1024**0.5
|
||||||
|
w2 = mx.random.normal((D, M)) / 1024**0.5
|
||||||
|
mx.eval(x, w1, w2)
|
||||||
|
|
||||||
|
def equivalent_matmul(x, w1, w2):
|
||||||
|
x = x @ w1.T
|
||||||
|
x = x @ w2.T
|
||||||
|
return x
|
||||||
|
|
||||||
|
time_fn(equivalent_matmul, x, w1, w2)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
time_gather_mm()
|
||||||
84
benchmarks/python/gather_qmm_bench.py
Normal file
84
benchmarks/python/gather_qmm_bench.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
# Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
import mlx.core as mx
|
||||||
|
from time_utils import time_fn
|
||||||
|
|
||||||
|
N = 1024
|
||||||
|
D = 1024
|
||||||
|
M = 1024
|
||||||
|
E = 32
|
||||||
|
I = 4
|
||||||
|
|
||||||
|
|
||||||
|
def gather_sort(x, indices):
|
||||||
|
N, M = indices.shape
|
||||||
|
indices = indices.flatten()
|
||||||
|
order = mx.argsort(indices)
|
||||||
|
inv_order = mx.argsort(order)
|
||||||
|
return x.flatten(0, -3)[order // M], indices[order], inv_order
|
||||||
|
|
||||||
|
|
||||||
|
def scatter_unsort(x, inv_order, shape=None):
|
||||||
|
x = x[inv_order]
|
||||||
|
if shape is not None:
|
||||||
|
x = mx.unflatten(x, 0, shape)
|
||||||
|
return x
|
||||||
|
|
||||||
|
|
||||||
|
def gather_mm_simulate(x, w, indices):
|
||||||
|
x, idx, inv_order = gather_sort(x, indices)
|
||||||
|
for i in range(2):
|
||||||
|
y = mx.concatenate(
|
||||||
|
[
|
||||||
|
mx.quantized_matmul(x[i], w[0][j], w[1][j], w[2][j], transpose=True)
|
||||||
|
for i, j in enumerate(idx.tolist())
|
||||||
|
],
|
||||||
|
axis=0,
|
||||||
|
)
|
||||||
|
x = y[:, None]
|
||||||
|
x = scatter_unsort(x, inv_order, indices.shape)
|
||||||
|
return x
|
||||||
|
|
||||||
|
|
||||||
|
def time_gather_qmm():
|
||||||
|
x = mx.random.normal((N, 1, 1, D)) / 1024**0.5
|
||||||
|
w1 = mx.random.normal((E, M, D)) / 1024**0.5
|
||||||
|
w2 = mx.random.normal((E, D, M)) / 1024**0.5
|
||||||
|
w1 = mx.quantize(w1)
|
||||||
|
w2 = mx.quantize(w2)
|
||||||
|
indices = (mx.random.uniform(shape=(N, I)) * E).astype(mx.uint32)
|
||||||
|
sorted_indices = mx.sort(indices.flatten()).reshape(N, I)
|
||||||
|
mx.eval(x, w1, w2, indices, sorted_indices)
|
||||||
|
|
||||||
|
def gather_mm(x, w1, w2, indices, sort):
|
||||||
|
idx = indices
|
||||||
|
inv_order = None
|
||||||
|
if sort:
|
||||||
|
x, idx, inv_order = gather_sort(x, indices)
|
||||||
|
x = mx.gather_qmm(x, *w1, transpose=True, rhs_indices=idx, sorted_indices=sort)
|
||||||
|
x = mx.gather_qmm(x, *w2, transpose=True, rhs_indices=idx, sorted_indices=sort)
|
||||||
|
if sort:
|
||||||
|
x = scatter_unsort(x, inv_order, indices.shape)
|
||||||
|
return x
|
||||||
|
|
||||||
|
time_fn(gather_mm, x, w1, w2, indices, False)
|
||||||
|
time_fn(gather_mm, x, w1, w2, sorted_indices, False)
|
||||||
|
time_fn(gather_mm, x, w1, w2, indices, True)
|
||||||
|
|
||||||
|
x = mx.random.normal((N * I, D)) / 1024**0.5
|
||||||
|
w1 = mx.random.normal((M, D)) / 1024**0.5
|
||||||
|
w2 = mx.random.normal((D, M)) / 1024**0.5
|
||||||
|
w1 = mx.quantize(w1)
|
||||||
|
w2 = mx.quantize(w2)
|
||||||
|
mx.eval(x, w1, w2)
|
||||||
|
|
||||||
|
def equivalent_matmul(x, w1, w2):
|
||||||
|
x = mx.quantized_matmul(x, *w1, transpose=True)
|
||||||
|
x = mx.quantized_matmul(x, *w2, transpose=True)
|
||||||
|
return x
|
||||||
|
|
||||||
|
time_fn(equivalent_matmul, x, w1, w2)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
time_gather_qmm()
|
||||||
@@ -1,5 +1,7 @@
|
|||||||
# Copyright © 2023-2024 Apple Inc.
|
# Copyright © 2023-2024 Apple Inc.
|
||||||
|
|
||||||
|
from functools import partial
|
||||||
|
|
||||||
import mlx.core as mx
|
import mlx.core as mx
|
||||||
import mlx.nn as nn
|
import mlx.nn as nn
|
||||||
from time_utils import time_fn
|
from time_utils import time_fn
|
||||||
@@ -10,32 +12,71 @@ def layer_norm(x, w, b, eps):
|
|||||||
x = x.astype(mx.float32)
|
x = x.astype(mx.float32)
|
||||||
mu = mx.mean(x, -1, keepdims=True)
|
mu = mx.mean(x, -1, keepdims=True)
|
||||||
v = mx.var(x, -1, keepdims=True)
|
v = mx.var(x, -1, keepdims=True)
|
||||||
return (x - mu) * mx.rsqrt(v + eps) * w + b
|
y = (x - mu) * mx.rsqrt(v + eps)
|
||||||
|
if w is not None:
|
||||||
|
y = y * w
|
||||||
|
if b is not None:
|
||||||
|
y = y + b
|
||||||
|
return y
|
||||||
|
|
||||||
|
|
||||||
def time_layer_norm():
|
def time_layer_norm(N, dt):
|
||||||
|
L = 1024
|
||||||
f1 = lambda x, w, b, y: (layer_norm(x, w, b, 1e-5) * y).sum()
|
f1 = lambda x, w, b, y: (layer_norm(x, w, b, 1e-5) * y).sum()
|
||||||
f2 = lambda x, w, b, y: (mx.fast.layer_norm(x, w, b, 1e-5) * y).sum()
|
f2 = lambda x, w, b, y: (mx.fast.layer_norm(x, w, b, 1e-5) * y).sum()
|
||||||
g1 = mx.grad(f1, argnums=(0, 1, 2))
|
g1 = mx.grad(f1, argnums=(0, 1, 2))
|
||||||
g2 = mx.grad(f2, argnums=(0, 1, 2))
|
g2 = mx.grad(f2, argnums=(0, 1, 2))
|
||||||
|
|
||||||
x = mx.random.uniform(shape=(8, 1024, 4096)).astype(mx.float16)
|
x = mx.random.uniform(shape=(8, L, N)).astype(dt)
|
||||||
w = mx.random.uniform(shape=(4096,)).astype(mx.float16)
|
w = mx.random.uniform(shape=(N,)).astype(dt)
|
||||||
b = mx.random.uniform(shape=(4096,)).astype(mx.float16)
|
b = mx.random.uniform(shape=(N,)).astype(dt)
|
||||||
y = mx.random.uniform(shape=(8, 1024, 4096)).astype(mx.float16)
|
y = mx.random.uniform(shape=(8, L, N)).astype(dt)
|
||||||
mx.eval(x, w, b, y)
|
mx.eval(x, w, b, y)
|
||||||
|
|
||||||
def layer_norm_loop(g, x, w, b):
|
def layer_norm_loop(f, x, w, b):
|
||||||
|
for _ in range(32):
|
||||||
|
x = f(x, w, b)
|
||||||
|
return x
|
||||||
|
|
||||||
|
time_fn(layer_norm_loop, partial(layer_norm, eps=1e-5), x, w, b)
|
||||||
|
time_fn(layer_norm_loop, partial(mx.fast.layer_norm, eps=1e-5), x, w, b)
|
||||||
|
|
||||||
|
def layer_norm_grad_loop(g, x, w, b):
|
||||||
gx, gw, gb = x, w, b
|
gx, gw, gb = x, w, b
|
||||||
for _ in range(32):
|
for _ in range(32):
|
||||||
gx, gw, gb = g(gx, gw, gb, y)
|
gx, gw, gb = g(gx, gw, gb, y)
|
||||||
return gx, gw, gb
|
return gx, gw, gb
|
||||||
|
|
||||||
time_fn(layer_norm_loop, g1, x, w, b)
|
time_fn(layer_norm_grad_loop, g1, x, w, b)
|
||||||
time_fn(layer_norm_loop, g2, x, w, b)
|
time_fn(layer_norm_grad_loop, g2, x, w, b)
|
||||||
time_fn(layer_norm_loop, mx.compile(g1), x, w, b)
|
time_fn(layer_norm_grad_loop, mx.compile(g1), x, w, b)
|
||||||
time_fn(layer_norm_loop, mx.compile(g2), x, w, b)
|
time_fn(layer_norm_grad_loop, mx.compile(g2), x, w, b)
|
||||||
|
|
||||||
|
f1 = lambda x, y: (layer_norm(x, None, None, 1e-5) * y).sum()
|
||||||
|
f2 = lambda x, y: (mx.fast.layer_norm(x, None, None, 1e-5) * y).sum()
|
||||||
|
g1 = mx.grad(f1, argnums=(0,))
|
||||||
|
g2 = mx.grad(f2, argnums=(0,))
|
||||||
|
|
||||||
|
x = mx.random.uniform(shape=(8, L, N)).astype(dt)
|
||||||
|
w = mx.random.uniform(shape=(N,)).astype(dt)
|
||||||
|
b = mx.random.uniform(shape=(N,)).astype(dt)
|
||||||
|
y = mx.random.uniform(shape=(8, L, N)).astype(dt)
|
||||||
|
mx.eval(x, w, b, y)
|
||||||
|
|
||||||
|
def layer_norm_grad_x_loop(g, x):
|
||||||
|
gx = x
|
||||||
|
for _ in range(32):
|
||||||
|
gx = g(gx, y)
|
||||||
|
return gx
|
||||||
|
|
||||||
|
time_fn(layer_norm_grad_x_loop, g1, x)
|
||||||
|
time_fn(layer_norm_grad_x_loop, g2, x)
|
||||||
|
time_fn(layer_norm_grad_x_loop, mx.compile(g1), x)
|
||||||
|
time_fn(layer_norm_grad_x_loop, mx.compile(g2), x)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
time_layer_norm()
|
for dt in [mx.float32, mx.float16, mx.bfloat16]:
|
||||||
|
for n in [1024, 2048, 4096, 8192, 8192 + 1024]:
|
||||||
|
print(dt, n)
|
||||||
|
time_layer_norm(n, dt)
|
||||||
|
|||||||
@@ -9,7 +9,10 @@ def rms_norm(x, w, eps):
|
|||||||
ot = x.dtype
|
ot = x.dtype
|
||||||
x = x.astype(mx.float32)
|
x = x.astype(mx.float32)
|
||||||
n = mx.rsqrt(x.square().mean(-1, keepdims=True) + eps)
|
n = mx.rsqrt(x.square().mean(-1, keepdims=True) + eps)
|
||||||
return (x * n).astype(ot) * w
|
y = (x * n).astype(ot)
|
||||||
|
if w is not None:
|
||||||
|
y = y * w
|
||||||
|
return y
|
||||||
|
|
||||||
|
|
||||||
def time_rms_norm():
|
def time_rms_norm():
|
||||||
@@ -34,6 +37,27 @@ def time_rms_norm():
|
|||||||
time_fn(rms_norm_loop, mx.compile(g1), x, w)
|
time_fn(rms_norm_loop, mx.compile(g1), x, w)
|
||||||
time_fn(rms_norm_loop, mx.compile(g2), x, w)
|
time_fn(rms_norm_loop, mx.compile(g2), x, w)
|
||||||
|
|
||||||
|
f1 = lambda x, y: (rms_norm(x, None, 1e-5) * y).sum()
|
||||||
|
f2 = lambda x, y: (mx.fast.rms_norm(x, None, 1e-5) * y).sum()
|
||||||
|
g1 = mx.grad(f1, argnums=(0,))
|
||||||
|
g2 = mx.grad(f2, argnums=(0,))
|
||||||
|
|
||||||
|
x = mx.random.uniform(shape=(8, 1024, 4096)).astype(mx.float16)
|
||||||
|
w = mx.random.uniform(shape=(4096,)).astype(mx.float16)
|
||||||
|
y = mx.random.uniform(shape=(8, 1024, 4096)).astype(mx.float16)
|
||||||
|
mx.eval(x, w, y)
|
||||||
|
|
||||||
|
def rms_norm_loop(g, x):
|
||||||
|
gx = x
|
||||||
|
for _ in range(32):
|
||||||
|
gx = g(gx, y)
|
||||||
|
return gx
|
||||||
|
|
||||||
|
time_fn(rms_norm_loop, g1, x)
|
||||||
|
time_fn(rms_norm_loop, g2, x)
|
||||||
|
time_fn(rms_norm_loop, mx.compile(g1), x)
|
||||||
|
time_fn(rms_norm_loop, mx.compile(g2), x)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
time_rms_norm()
|
time_rms_norm()
|
||||||
|
|||||||
@@ -28,11 +28,34 @@ def bench(f, *args):
|
|||||||
return (e - s) * 1e-9
|
return (e - s) * 1e-9
|
||||||
|
|
||||||
|
|
||||||
def mlx_sdpa_fused_inner(q, k, v, scale):
|
def prepare_inputs(B, qL, kL, D, qH, kH, mask, transpose, dtype):
|
||||||
return mx.fast.scaled_dot_product_attention(q, k, v, scale=scale, mask=None)
|
np_dtype = getattr(np, dtype)
|
||||||
|
|
||||||
|
shape_q = (B, qL, qH, D) if transpose else (B, qH, qL, D)
|
||||||
|
shape_kv = (B, kL, kH, D) if transpose else (B, kH, kL, D)
|
||||||
|
|
||||||
|
scale = 1.0 / math.sqrt(D)
|
||||||
|
|
||||||
|
q_np = np.random.normal(0.0, 1.0, shape_q).astype(np_dtype)
|
||||||
|
k_np = np.random.normal(0.0, scale, shape_kv).astype(np_dtype)
|
||||||
|
v_np = np.random.normal(0.0, scale, shape_kv).astype(np_dtype)
|
||||||
|
|
||||||
|
q_mx = mx.array(q_np)
|
||||||
|
k_mx = mx.array(k_np)
|
||||||
|
v_mx = mx.array(v_np)
|
||||||
|
|
||||||
|
if mask is not None:
|
||||||
|
if mask == "additive":
|
||||||
|
mask_np = np.random.normal(0.0, 1.0, (B, qH, qL, kL)).astype(np_dtype)
|
||||||
|
mask = mx.array(mask_np)
|
||||||
|
elif mask == "bool":
|
||||||
|
mask_np = np.random.uniform(0.0, 1.0, (B, qH, qL, kL)) < 0.5
|
||||||
|
mask = mx.array(mask_np)
|
||||||
|
|
||||||
|
return q_mx, k_mx, v_mx, scale, mask
|
||||||
|
|
||||||
|
|
||||||
def mlx_sdpa_unfused_inner(q, k, v, scale, f32softmax=False):
|
def mlx_ref_attn(q, k, v, scale=1.0, mask=None):
|
||||||
q_dtype = q.dtype
|
q_dtype = q.dtype
|
||||||
q = q * mx.array(scale, q_dtype)
|
q = q * mx.array(scale, q_dtype)
|
||||||
n_q_heads = q.shape[-3]
|
n_q_heads = q.shape[-3]
|
||||||
@@ -41,6 +64,7 @@ def mlx_sdpa_unfused_inner(q, k, v, scale, f32softmax=False):
|
|||||||
|
|
||||||
B = q.shape[0]
|
B = q.shape[0]
|
||||||
L = q.shape[2]
|
L = q.shape[2]
|
||||||
|
kL = k.shape[2]
|
||||||
|
|
||||||
if n_repeats > 1:
|
if n_repeats > 1:
|
||||||
q = mx.reshape(q, [B, n_kv_heads, n_repeats, L, -1])
|
q = mx.reshape(q, [B, n_kv_heads, n_repeats, L, -1])
|
||||||
@@ -48,10 +72,27 @@ def mlx_sdpa_unfused_inner(q, k, v, scale, f32softmax=False):
|
|||||||
v = mx.expand_dims(v, 2)
|
v = mx.expand_dims(v, 2)
|
||||||
|
|
||||||
scores = q @ mx.swapaxes(k, -1, -2)
|
scores = q @ mx.swapaxes(k, -1, -2)
|
||||||
if f32softmax:
|
|
||||||
scores = mx.softmax(scores.astype(mx.float32), axis=-1).astype(q_dtype)
|
if mask is not None:
|
||||||
|
|
||||||
|
if mask == "causal":
|
||||||
|
q_offset = max(0, kL - L)
|
||||||
|
q_indices = mx.arange(q_offset, q_offset + L)
|
||||||
|
k_indices = mx.arange(kL)
|
||||||
|
mask = q_indices[:, None] >= k_indices[None]
|
||||||
|
|
||||||
|
if n_repeats > 1 and mask.ndim >= 3:
|
||||||
|
if mask.shape[-3] == 1:
|
||||||
|
mask = mx.expand_dims(mask, -3)
|
||||||
else:
|
else:
|
||||||
scores = mx.softmax(scores, axis=-1)
|
mask = mx.unflatten(mask, -3, (n_kv_heads, n_repeats))
|
||||||
|
|
||||||
|
if mask.dtype == mx.bool_:
|
||||||
|
scores = mx.where(mask, scores, -np.float32(np.inf))
|
||||||
|
else:
|
||||||
|
scores += mask
|
||||||
|
|
||||||
|
scores = mx.softmax(scores, axis=-1, precise=True)
|
||||||
|
|
||||||
out = scores @ v
|
out = scores @ v
|
||||||
if n_repeats > 1:
|
if n_repeats > 1:
|
||||||
@@ -60,74 +101,55 @@ def mlx_sdpa_unfused_inner(q, k, v, scale, f32softmax=False):
|
|||||||
return out
|
return out
|
||||||
|
|
||||||
|
|
||||||
def mlx_spda_unfused(q, k, v, scale, transpose):
|
def mlx_fused_attn(q, k, v, scale, mask):
|
||||||
q_out = q
|
return mx.fast.scaled_dot_product_attention(q, k, v, scale=scale, mask=mask)
|
||||||
|
|
||||||
|
|
||||||
|
def do_attention(f, q, k, v, scale, mask=None, transpose=False):
|
||||||
if transpose:
|
if transpose:
|
||||||
k = mx.transpose(k, (0, 2, 1, 3))
|
q_t = mx.transpose(q, (0, 2, 1, 3))
|
||||||
v = mx.transpose(v, (0, 2, 1, 3))
|
k_t = mx.transpose(k, (0, 2, 1, 3))
|
||||||
|
v_t = mx.transpose(v, (0, 2, 1, 3))
|
||||||
|
o_t = f(q_t, k_t, v_t, scale=scale, mask=mask)
|
||||||
|
return mx.transpose(o_t, (0, 2, 1, 3))
|
||||||
|
else:
|
||||||
|
return f(q, k, v, scale=scale, mask=mask)
|
||||||
|
|
||||||
|
|
||||||
|
def do_attention_bench(f, q, k, v, scale, mask=None, transpose=False):
|
||||||
|
q_out = q
|
||||||
|
|
||||||
for i in range(N_iter_func):
|
for i in range(N_iter_func):
|
||||||
if transpose:
|
q_out = do_attention(f, q_out, k, v, scale, mask=mask, transpose=transpose)
|
||||||
q_out = mx.transpose(q_out, (0, 2, 1, 3))
|
|
||||||
q_out = mlx_sdpa_unfused_inner(q_out, k, v, scale)
|
|
||||||
if transpose:
|
|
||||||
q_out = mx.transpose(q_out, (0, 2, 1, 3))
|
|
||||||
|
|
||||||
mx.eval(q_out)
|
mx.eval(q_out)
|
||||||
return q_out
|
return q_out
|
||||||
|
|
||||||
|
|
||||||
def mlx_spda_fused(q, k, v, scale, transpose):
|
def bench_shape(
|
||||||
q_out = q
|
B, qsl, ksl, head_dim, n_q_heads, n_kv_heads, dtype, transpose=True, mask_in=None
|
||||||
if transpose:
|
):
|
||||||
k = mx.transpose(k, (0, 2, 1, 3))
|
q_mx, k_mx, v_mx, scale, mask = prepare_inputs(
|
||||||
v = mx.transpose(v, (0, 2, 1, 3))
|
B, qsl, ksl, head_dim, n_q_heads, n_kv_heads, mask_in, transpose, dtype
|
||||||
|
|
||||||
for i in range(N_iter_func):
|
|
||||||
if transpose:
|
|
||||||
q_out = mx.transpose(q_out, (0, 2, 1, 3))
|
|
||||||
q_out = mlx_sdpa_fused_inner(q_out, k, v, scale)
|
|
||||||
if transpose:
|
|
||||||
q_out = mx.transpose(q_out, (0, 2, 1, 3))
|
|
||||||
|
|
||||||
mx.eval(q_out)
|
|
||||||
return q_out
|
|
||||||
|
|
||||||
|
|
||||||
def bench_shape(B, qsl, ksl, head_dim, n_q_heads, n_kv_heads, np_dtype, transpose=True):
|
|
||||||
shape_q = (
|
|
||||||
(B, qsl, n_q_heads, head_dim) if transpose else (B, n_q_heads, qsl, head_dim)
|
|
||||||
)
|
|
||||||
shape_kv = (
|
|
||||||
(B, ksl, n_kv_heads, head_dim) if transpose else (B, n_kv_heads, ksl, head_dim)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
q_np = np.random.normal(0.0, 1.0 / math.sqrt(head_dim), shape_q).astype(np_dtype)
|
time_mlx_unfused = bench(
|
||||||
k_np = np.random.normal(0.0, 1.0 / math.sqrt(head_dim), shape_kv).astype(np_dtype)
|
do_attention_bench, mlx_ref_attn, q_mx, k_mx, v_mx, scale, mask, transpose
|
||||||
v_np = np.random.normal(0.0, 1.0 / math.sqrt(head_dim), shape_kv).astype(np_dtype)
|
)
|
||||||
|
time_mlx_fused = bench(
|
||||||
|
do_attention_bench, mlx_fused_attn, q_mx, k_mx, v_mx, scale, mask, transpose
|
||||||
|
)
|
||||||
|
|
||||||
scale = math.sqrt(1.0 / head_dim)
|
o_mlx_fused = do_attention(mlx_ref_attn, q_mx, k_mx, v_mx, scale, mask, transpose)
|
||||||
|
o_mlx_unfused = do_attention(
|
||||||
|
mlx_fused_attn, q_mx, k_mx, v_mx, scale, mask, transpose
|
||||||
|
)
|
||||||
|
|
||||||
q_mx = mx.array(q_np)
|
atol = 1e-5 if dtype == "float32" else 2e-4
|
||||||
k_mx = mx.array(k_np)
|
|
||||||
v_mx = mx.array(v_np)
|
|
||||||
|
|
||||||
time_mlx_unfused = bench(mlx_spda_unfused, q_mx, k_mx, v_mx, scale, transpose)
|
if not mx.allclose(o_mlx_fused, o_mlx_unfused, atol=atol, rtol=atol):
|
||||||
time_mlx_fused = bench(mlx_spda_fused, q_mx, k_mx, v_mx, scale, transpose)
|
|
||||||
|
|
||||||
if transpose:
|
|
||||||
q_mx = mx.transpose(q_mx, (0, 2, 1, 3))
|
|
||||||
k_mx = mx.transpose(k_mx, (0, 2, 1, 3))
|
|
||||||
v_mx = mx.transpose(v_mx, (0, 2, 1, 3))
|
|
||||||
|
|
||||||
o_mlx_fused = mlx_sdpa_fused_inner(q_mx, k_mx, v_mx, scale)
|
|
||||||
o_mlx_unfused = mlx_sdpa_unfused_inner(q_mx, k_mx, v_mx, scale, f32softmax=True)
|
|
||||||
|
|
||||||
atol = 1e-5 if np_dtype == np.float32 else 1e-4
|
|
||||||
|
|
||||||
if not mx.allclose(o_mlx_fused, o_mlx_unfused, atol=atol):
|
|
||||||
print(
|
print(
|
||||||
f"Failed at (B: {B}, qsl: {qsl}, ksl: {ksl}, head_dim: {head_dim}, n_qh: {n_q_heads}, n_kvh: {n_kv_heads}) [tpose = {transpose}] with max(|a - b|) = {mx.max(mx.abs(o_mlx_unfused - o_mlx_fused)):3.2e}"
|
f"Failed at (B: {B}, qsl: {qsl}, ksl: {ksl}, head_dim: {head_dim}, n_qh: {n_q_heads}, n_kvh: {n_kv_heads}, mask: {mask_in}) [tpose = {transpose}] with max(|a - b|) = {mx.max(mx.abs(o_mlx_unfused - o_mlx_fused)):3.2e}"
|
||||||
)
|
)
|
||||||
|
|
||||||
return time_mlx_fused, time_mlx_unfused
|
return time_mlx_fused, time_mlx_unfused
|
||||||
@@ -151,39 +173,51 @@ if __name__ == "__main__":
|
|||||||
( 1, 128, 128, 64, 32, 32),
|
( 1, 128, 128, 64, 32, 32),
|
||||||
( 1, 256, 256, 64, 32, 32),
|
( 1, 256, 256, 64, 32, 32),
|
||||||
( 1, 512, 512, 64, 32, 32),
|
( 1, 512, 512, 64, 32, 32),
|
||||||
( 1, 1024, 1024, 64, 32, 32),
|
( 1, 1024, 1024, 64, 32, 8),
|
||||||
( 1, 2048, 2048, 64, 32, 32),
|
( 1, 2048, 2048, 64, 32, 8),
|
||||||
( 1, 4096, 4096, 64, 32, 32),
|
( 1, 4096, 4096, 64, 32, 8),
|
||||||
)
|
)
|
||||||
|
|
||||||
shapes_80 = (
|
shapes_80 = (
|
||||||
# ( B, qsl, ksl, head_dim, n_qh, n_kvh)
|
# ( B, qsl, ksl, head_dim, n_qh, n_kvh)
|
||||||
( 1, 1024, 1024, 80, 32, 32),
|
( 1, 1024, 1024, 80, 32, 8),
|
||||||
( 1, 2048, 2048, 80, 32, 32),
|
( 1, 2048, 2048, 80, 32, 8),
|
||||||
( 1, 4096, 4096, 80, 32, 32),
|
( 1, 4096, 4096, 80, 32, 8),
|
||||||
)
|
)
|
||||||
|
|
||||||
shapes_128 = (
|
shapes_128 = (
|
||||||
# ( B, qsl, ksl, head_dim, n_qh, n_kvh)
|
# ( B, qsl, ksl, head_dim, n_qh, n_kvh)
|
||||||
( 1, 1024, 1024, 128, 32, 32),
|
( 1, 1024, 1024, 128, 32, 8),
|
||||||
( 1, 2048, 2048, 128, 32, 32),
|
( 1, 2048, 2048, 128, 32, 8),
|
||||||
( 1, 4096, 4096, 128, 32, 32),
|
( 1, 4096, 4096, 128, 32, 8),
|
||||||
)
|
)
|
||||||
# fmt: on
|
# fmt: on
|
||||||
|
|
||||||
shapes = shapes_64 + shapes_80 + shapes_128
|
shapes = shapes_64 + shapes_80 + shapes_128
|
||||||
|
|
||||||
print(" B, qsl, ksl, hdim, n_qh, n_kvh, tpose, dtype, t_unfs, t_fuse, diff%")
|
masks = [None, "bool", "causal"]
|
||||||
|
|
||||||
|
print(
|
||||||
|
" B, qsl, ksl, hdim, n_qh, n_kvh, t, dtype, mask, t_unfs, t_fuse, diff%"
|
||||||
|
)
|
||||||
|
|
||||||
for dtype in dtypes:
|
for dtype in dtypes:
|
||||||
for transpose in transposes:
|
for transpose in transposes:
|
||||||
for B, qsl, ksl, head_dim, n_q_heads, n_kv_heads in shapes:
|
for B, qsl, ksl, head_dim, n_q_heads, n_kv_heads in shapes:
|
||||||
np_dtype = getattr(np, dtype)
|
for mask_in in masks:
|
||||||
time_mlx_fused, time_mlx_unfused = bench_shape(
|
time_mlx_fused, time_mlx_unfused = bench_shape(
|
||||||
B, qsl, ksl, head_dim, n_q_heads, n_kv_heads, np_dtype, transpose
|
B,
|
||||||
|
qsl,
|
||||||
|
ksl,
|
||||||
|
head_dim,
|
||||||
|
n_q_heads,
|
||||||
|
n_kv_heads,
|
||||||
|
dtype,
|
||||||
|
transpose,
|
||||||
|
mask_in,
|
||||||
)
|
)
|
||||||
diff = time_mlx_unfused / time_mlx_fused - 1.0
|
diff = time_mlx_unfused / time_mlx_fused - 1.0
|
||||||
t_str = 1 if transpose else 0
|
t_str = 1 if transpose else 0
|
||||||
print(
|
print(
|
||||||
f"{B:3d}, {qsl:5d}, {ksl:5d}, {head_dim:4d}, {n_q_heads:4d}, {n_kv_heads:5d}, {t_str:5d}, {dtype}, {time_mlx_unfused: 2.3f}, {time_mlx_fused: 2.3f}, {100. * diff:+5.2f}%"
|
f"{B:3d}, {qsl:5d}, {ksl:5d}, {head_dim:4d}, {n_q_heads:4d}, {n_kv_heads:5d}, {t_str:1d}, {dtype}, {str(mask_in):>8}, {time_mlx_unfused: 2.3f}, {time_mlx_fused: 2.3f}, {100. * diff:+5.2f}%"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -8,14 +8,23 @@ L = 16384
|
|||||||
H = 32
|
H = 32
|
||||||
H_k = H // 4
|
H_k = H // 4
|
||||||
D = 128
|
D = 128
|
||||||
|
V = 128
|
||||||
dtype = mx.float16
|
dtype = mx.float16
|
||||||
loops = 10
|
loops = 10
|
||||||
|
|
||||||
|
|
||||||
def attention(q, k, v, mask=None):
|
def upproject(x, w):
|
||||||
|
if w is None:
|
||||||
|
return x
|
||||||
|
else:
|
||||||
|
return x @ w.T
|
||||||
|
|
||||||
|
|
||||||
|
def attention(q, k, v, mask=None, w=None):
|
||||||
def _sdpa(q, k, v):
|
def _sdpa(q, k, v):
|
||||||
B, Hq, L, D = q.shape
|
B, Hq, L, D = q.shape
|
||||||
_, Hk, S, _ = k.shape
|
_, Hk, S, _ = k.shape
|
||||||
|
_, _, _, V = v.shape
|
||||||
q = q.reshape(B, Hk, Hq // Hk, L, D)
|
q = q.reshape(B, Hk, Hq // Hk, L, D)
|
||||||
k = k[:, :, None, :, :]
|
k = k[:, :, None, :, :]
|
||||||
v = v[:, :, None, :, :]
|
v = v[:, :, None, :, :]
|
||||||
@@ -25,16 +34,18 @@ def attention(q, k, v, mask=None):
|
|||||||
s = mx.where(m, s, mx.finfo(s.dtype).min)
|
s = mx.where(m, s, mx.finfo(s.dtype).min)
|
||||||
p = mx.softmax(s.astype(mx.float32), axis=-1).astype(s.dtype)
|
p = mx.softmax(s.astype(mx.float32), axis=-1).astype(s.dtype)
|
||||||
o = p @ v
|
o = p @ v
|
||||||
return o.reshape(B, Hq, L, D)
|
return o.reshape(B, Hq, L, V)
|
||||||
|
|
||||||
for i in range(loops):
|
for i in range(loops):
|
||||||
q = _sdpa(q, k, v)
|
q = _sdpa(q, k, v)
|
||||||
|
q = upproject(q, w)
|
||||||
return q
|
return q
|
||||||
|
|
||||||
|
|
||||||
def sdpa(q, k, v, mask=None):
|
def sdpa(q, k, v, mask=None, w=None):
|
||||||
for i in range(loops):
|
for i in range(loops):
|
||||||
q = mx.fast.scaled_dot_product_attention(q, k, v, scale=1.0, mask=mask)
|
q = mx.fast.scaled_dot_product_attention(q, k, v, scale=1.0, mask=mask)
|
||||||
|
q = upproject(q, w)
|
||||||
return q
|
return q
|
||||||
|
|
||||||
|
|
||||||
@@ -42,34 +53,37 @@ def time_self_attention_primitives():
|
|||||||
mx.random.seed(3)
|
mx.random.seed(3)
|
||||||
q = mx.random.uniform(shape=(1, H, 1, D)).astype(dtype)
|
q = mx.random.uniform(shape=(1, H, 1, D)).astype(dtype)
|
||||||
k = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
k = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
||||||
v = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
v = mx.random.uniform(shape=(1, H_k, L, V)).astype(dtype)
|
||||||
mx.eval(q, k, v)
|
w = mx.random.uniform(shape=(D, V)).astype(dtype) if V != D else None
|
||||||
time_fn(attention, q, k, v)
|
mx.eval(q, k, v, w)
|
||||||
|
time_fn(attention, q, k, v, w=w)
|
||||||
|
|
||||||
|
|
||||||
def time_self_attention_sdpa():
|
def time_self_attention_sdpa():
|
||||||
mx.random.seed(3)
|
mx.random.seed(3)
|
||||||
q = mx.random.uniform(shape=(1, H, 1, D)).astype(dtype)
|
q = mx.random.uniform(shape=(1, H, 1, D)).astype(dtype)
|
||||||
k = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
k = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
||||||
v = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
v = mx.random.uniform(shape=(1, H_k, L, V)).astype(dtype)
|
||||||
mx.eval(q, k, v)
|
w = mx.random.uniform(shape=(D, V)).astype(dtype) if V != D else None
|
||||||
time_fn(sdpa, q, k, v)
|
mx.eval(q, k, v, w)
|
||||||
|
time_fn(sdpa, q, k, v, w=w)
|
||||||
|
|
||||||
|
|
||||||
def time_self_attention_sdpa_with_mask():
|
def time_self_attention_sdpa_with_mask():
|
||||||
mx.random.seed(3)
|
mx.random.seed(3)
|
||||||
q = mx.random.uniform(shape=(1, H, 1, D)).astype(dtype)
|
q = mx.random.uniform(shape=(1, H, 1, D)).astype(dtype)
|
||||||
k = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
k = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
||||||
v = mx.random.uniform(shape=(1, H_k, L, D)).astype(dtype)
|
v = mx.random.uniform(shape=(1, H_k, L, V)).astype(dtype)
|
||||||
|
w = mx.random.uniform(shape=(D, V)).astype(dtype) if V != D else None
|
||||||
mask = mx.full((L,), True)
|
mask = mx.full((L,), True)
|
||||||
mask[L // 2 :] = False
|
mask[L // 2 :] = False
|
||||||
mx.eval(q, k, v, mask)
|
mx.eval(q, k, v, mask, w)
|
||||||
|
|
||||||
def sdpa_mask(*args):
|
def sdpa_mask(*args):
|
||||||
return sdpa(*args, mask=mask)
|
return sdpa(*args, mask=mask, w=w)
|
||||||
|
|
||||||
def attention_mask(*args):
|
def attention_mask(*args):
|
||||||
return attention(*args, mask=mask)
|
return attention(*args, mask=mask, w=w)
|
||||||
|
|
||||||
time_fn(attention_mask, q, k, v)
|
time_fn(attention_mask, q, k, v)
|
||||||
time_fn(sdpa_mask, q, k, v)
|
time_fn(sdpa_mask, q, k, v)
|
||||||
|
|||||||
@@ -51,6 +51,20 @@ def time_maximum():
|
|||||||
time_fn(mx.maximum, a, b)
|
time_fn(mx.maximum, a, b)
|
||||||
|
|
||||||
|
|
||||||
|
def time_max():
|
||||||
|
a = mx.random.uniform(shape=(32, 1024, 1024))
|
||||||
|
a[1, 1] = mx.nan
|
||||||
|
mx.eval(a)
|
||||||
|
time_fn(mx.max, a, 0)
|
||||||
|
|
||||||
|
|
||||||
|
def time_min():
|
||||||
|
a = mx.random.uniform(shape=(32, 1024, 1024))
|
||||||
|
a[1, 1] = mx.nan
|
||||||
|
mx.eval(a)
|
||||||
|
time_fn(mx.min, a, 0)
|
||||||
|
|
||||||
|
|
||||||
def time_negative():
|
def time_negative():
|
||||||
a = mx.random.uniform(shape=(10000, 1000))
|
a = mx.random.uniform(shape=(10000, 1000))
|
||||||
mx.eval(a)
|
mx.eval(a)
|
||||||
@@ -108,6 +122,8 @@ if __name__ == "__main__":
|
|||||||
|
|
||||||
time_add()
|
time_add()
|
||||||
time_matmul()
|
time_matmul()
|
||||||
|
time_min()
|
||||||
|
time_max()
|
||||||
time_maximum()
|
time_maximum()
|
||||||
time_exp()
|
time_exp()
|
||||||
time_negative()
|
time_negative()
|
||||||
|
|||||||
55
benchmarks/python/synchronize_bench.py
Normal file
55
benchmarks/python/synchronize_bench.py
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
import time
|
||||||
|
|
||||||
|
import mlx.core as mx
|
||||||
|
|
||||||
|
rank = mx.distributed.init().rank()
|
||||||
|
|
||||||
|
|
||||||
|
def timeit(fn, a):
|
||||||
|
|
||||||
|
# warmup
|
||||||
|
for _ in range(5):
|
||||||
|
mx.eval(fn(a))
|
||||||
|
|
||||||
|
its = 10
|
||||||
|
tic = time.perf_counter()
|
||||||
|
for _ in range(its):
|
||||||
|
mx.eval(fn(a))
|
||||||
|
toc = time.perf_counter()
|
||||||
|
ms = 1000 * (toc - tic) / its
|
||||||
|
return ms
|
||||||
|
|
||||||
|
|
||||||
|
def all_reduce_benchmark():
|
||||||
|
a = mx.ones((5, 5), mx.int32)
|
||||||
|
|
||||||
|
its_per_eval = 100
|
||||||
|
|
||||||
|
def fn(x):
|
||||||
|
for _ in range(its_per_eval):
|
||||||
|
x = mx.distributed.all_sum(x)
|
||||||
|
x = x - 1
|
||||||
|
return x
|
||||||
|
|
||||||
|
ms = timeit(fn, a) / its_per_eval
|
||||||
|
if rank == 0:
|
||||||
|
print(f"All Reduce: time per iteration {ms:.6f} (ms)")
|
||||||
|
|
||||||
|
|
||||||
|
def all_gather_benchmark():
|
||||||
|
a = mx.ones((5, 5), mx.int32)
|
||||||
|
its_per_eval = 100
|
||||||
|
|
||||||
|
def fn(x):
|
||||||
|
for _ in range(its_per_eval):
|
||||||
|
x = mx.distributed.all_gather(x)[0]
|
||||||
|
return x
|
||||||
|
|
||||||
|
ms = timeit(fn, a) / its_per_eval
|
||||||
|
if rank == 0:
|
||||||
|
print(f"All gather: time per iteration {ms:.6f} (ms)")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
all_reduce_benchmark()
|
||||||
|
all_gather_benchmark()
|
||||||
54
cmake/FindNCCL.cmake
Normal file
54
cmake/FindNCCL.cmake
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
# FindNCCL.cmake This module finds the NVIDIA NCCL library and its include
|
||||||
|
# directories.
|
||||||
|
|
||||||
|
set(NCCL_ROOT_DIR
|
||||||
|
$ENV{NCCL_ROOT_DIR}
|
||||||
|
CACHE PATH "Folder contains NVIDIA NCCL")
|
||||||
|
|
||||||
|
find_path(
|
||||||
|
NCCL_INCLUDE_DIRS
|
||||||
|
NAMES nccl.h
|
||||||
|
HINTS ${NCCL_INCLUDE_DIR} ${NCCL_ROOT_DIR} ${NCCL_ROOT_DIR}/include
|
||||||
|
${CUDA_TOOLKIT_ROOT_DIR}/include)
|
||||||
|
|
||||||
|
if($ENV{USE_STATIC_NCCL})
|
||||||
|
message(
|
||||||
|
STATUS "USE_STATIC_NCCL detected. Linking against static NCCL library")
|
||||||
|
set(NCCL_LIBNAME "libnccl_static.a")
|
||||||
|
else()
|
||||||
|
set(NCCL_LIBNAME "nccl")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
find_library(
|
||||||
|
NCCL_LIBRARIES
|
||||||
|
NAMES ${NCCL_LIBNAME}
|
||||||
|
HINTS ${NCCL_LIB_DIR}
|
||||||
|
${NCCL_ROOT_DIR}
|
||||||
|
${NCCL_ROOT_DIR}/lib
|
||||||
|
${NCCL_ROOT_DIR}/lib/x86_64-linux-gnu
|
||||||
|
${NCCL_ROOT_DIR}/lib64
|
||||||
|
${CUDA_TOOLKIT_ROOT_DIR}/lib
|
||||||
|
${CUDA_TOOLKIT_ROOT_DIR}/lib64)
|
||||||
|
|
||||||
|
include(FindPackageHandleStandardArgs)
|
||||||
|
find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_INCLUDE_DIRS
|
||||||
|
NCCL_LIBRARIES)
|
||||||
|
|
||||||
|
if(NCCL_FOUND)
|
||||||
|
set(NCCL_HEADER_FILE "${NCCL_INCLUDE_DIRS}/nccl.h")
|
||||||
|
message(
|
||||||
|
STATUS "Determining NCCL version from the header file: ${NCCL_HEADER_FILE}")
|
||||||
|
file(
|
||||||
|
STRINGS ${NCCL_HEADER_FILE} NCCL_MAJOR_VERSION_DEFINED
|
||||||
|
REGEX "^[ \t]*#define[ \t]+NCCL_MAJOR[ \t]+[0-9]+.*$"
|
||||||
|
LIMIT_COUNT 1)
|
||||||
|
if(NCCL_MAJOR_VERSION_DEFINED)
|
||||||
|
string(REGEX REPLACE "^[ \t]*#define[ \t]+NCCL_MAJOR[ \t]+" ""
|
||||||
|
NCCL_MAJOR_VERSION ${NCCL_MAJOR_VERSION_DEFINED})
|
||||||
|
message(STATUS "NCCL_MAJOR_VERSION: ${NCCL_MAJOR_VERSION}")
|
||||||
|
endif()
|
||||||
|
message(
|
||||||
|
STATUS
|
||||||
|
"Found NCCL (include: ${NCCL_INCLUDE_DIRS}, library: ${NCCL_LIBRARIES})")
|
||||||
|
mark_as_advanced(NCCL_ROOT_DIR NCCL_INCLUDE_DIRS NCCL_LIBRARIES)
|
||||||
|
endif()
|
||||||
@@ -1,5 +1,7 @@
|
|||||||
include(CMakeParseArguments)
|
include(CMakeParseArguments)
|
||||||
|
|
||||||
|
# clang format off
|
||||||
|
#
|
||||||
# ##############################################################################
|
# ##############################################################################
|
||||||
# Build metal library
|
# Build metal library
|
||||||
#
|
#
|
||||||
@@ -9,11 +11,14 @@ include(CMakeParseArguments)
|
|||||||
# Args: TARGET: Custom target to be added for the metal library TITLE: Name of
|
# Args: TARGET: Custom target to be added for the metal library TITLE: Name of
|
||||||
# the .metallib OUTPUT_DIRECTORY: Where to place ${TITLE}.metallib SOURCES: List
|
# the .metallib OUTPUT_DIRECTORY: Where to place ${TITLE}.metallib SOURCES: List
|
||||||
# of source files INCLUDE_DIRS: List of include dirs DEPS: List of dependency
|
# of source files INCLUDE_DIRS: List of include dirs DEPS: List of dependency
|
||||||
# files (like headers)
|
# files (like headers) DEBUG: Boolean, if true, enables debug compile options
|
||||||
|
# for this specific library. If not provided, uses global MLX_METAL_DEBUG.
|
||||||
#
|
#
|
||||||
|
# clang format on
|
||||||
|
|
||||||
macro(mlx_build_metallib)
|
macro(mlx_build_metallib)
|
||||||
# Parse args
|
# Parse args
|
||||||
set(oneValueArgs TARGET TITLE OUTPUT_DIRECTORY)
|
set(oneValueArgs TARGET TITLE OUTPUT_DIRECTORY DEBUG)
|
||||||
set(multiValueArgs SOURCES INCLUDE_DIRS DEPS)
|
set(multiValueArgs SOURCES INCLUDE_DIRS DEPS)
|
||||||
cmake_parse_arguments(MTLLIB "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
cmake_parse_arguments(MTLLIB "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||||
|
|
||||||
@@ -21,7 +26,11 @@ macro(mlx_build_metallib)
|
|||||||
set(MTLLIB_BUILD_TARGET "${MTLLIB_OUTPUT_DIRECTORY}/${MTLLIB_TITLE}.metallib")
|
set(MTLLIB_BUILD_TARGET "${MTLLIB_OUTPUT_DIRECTORY}/${MTLLIB_TITLE}.metallib")
|
||||||
|
|
||||||
# Collect compile options
|
# Collect compile options
|
||||||
set(MTLLIB_COMPILE_OPTIONS -Wall -Wextra -fno-fast-math)
|
set(MTLLIB_COMPILE_OPTIONS -Wall -Wextra -fno-fast-math -Wno-c++17-extensions)
|
||||||
|
if(MLX_METAL_DEBUG OR MTLLIB_DEBUG)
|
||||||
|
set(MTLLIB_COMPILE_OPTIONS ${MTLLIB_COMPILE_OPTIONS} -gline-tables-only
|
||||||
|
-frecord-sources)
|
||||||
|
endif()
|
||||||
|
|
||||||
# Prepare metallib build command
|
# Prepare metallib build command
|
||||||
add_custom_command(
|
add_custom_command(
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ EXCLUDE_PATTERNS = */private/*
|
|||||||
CREATE_SUBDIRS = NO
|
CREATE_SUBDIRS = NO
|
||||||
FULL_PATH_NAMES = YES
|
FULL_PATH_NAMES = YES
|
||||||
RECURSIVE = YES
|
RECURSIVE = YES
|
||||||
GENERATE_HTML = YES
|
GENERATE_HTML = NO
|
||||||
GENERATE_LATEX = NO
|
GENERATE_LATEX = NO
|
||||||
GENERATE_XML = YES
|
GENERATE_XML = YES
|
||||||
XML_PROGRAMLISTING = YES
|
XML_PROGRAMLISTING = YES
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
sphinx
|
sphinx
|
||||||
breathe
|
breathe
|
||||||
sphinx-book-theme
|
sphinx-book-theme
|
||||||
|
sphinx-copybutton
|
||||||
mlx
|
mlx
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import mlx.core as mx
|
|||||||
# -- Project information -----------------------------------------------------
|
# -- Project information -----------------------------------------------------
|
||||||
|
|
||||||
project = "MLX"
|
project = "MLX"
|
||||||
copyright = "2023, MLX Contributors"
|
copyright = "2023, Apple"
|
||||||
author = "MLX Contributors"
|
author = "MLX Contributors"
|
||||||
version = ".".join(mx.__version__.split(".")[:3])
|
version = ".".join(mx.__version__.split(".")[:3])
|
||||||
release = version
|
release = version
|
||||||
@@ -18,6 +18,7 @@ release = version
|
|||||||
# -- General configuration ---------------------------------------------------
|
# -- General configuration ---------------------------------------------------
|
||||||
|
|
||||||
extensions = [
|
extensions = [
|
||||||
|
"sphinx_copybutton",
|
||||||
"sphinx.ext.autodoc",
|
"sphinx.ext.autodoc",
|
||||||
"sphinx.ext.autosummary",
|
"sphinx.ext.autosummary",
|
||||||
"sphinx.ext.intersphinx",
|
"sphinx.ext.intersphinx",
|
||||||
|
|||||||
@@ -8,11 +8,12 @@ MLX supports writing custom Metal kernels through the Python and C++ APIs.
|
|||||||
Simple Example
|
Simple Example
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
|
.. currentmodule:: mlx.core
|
||||||
|
|
||||||
Let's write a custom kernel that computes ``exp`` elementwise:
|
Let's write a custom kernel that computes ``exp`` elementwise:
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
def exp_elementwise(a: mx.array):
|
|
||||||
source = """
|
source = """
|
||||||
uint elem = thread_position_in_grid.x;
|
uint elem = thread_position_in_grid.x;
|
||||||
T tmp = inp[elem];
|
T tmp = inp[elem];
|
||||||
@@ -25,6 +26,8 @@ Let's write a custom kernel that computes ``exp`` elementwise:
|
|||||||
output_names=["out"],
|
output_names=["out"],
|
||||||
source=source,
|
source=source,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def exp_elementwise(a: mx.array):
|
||||||
outputs = kernel(
|
outputs = kernel(
|
||||||
inputs=[a],
|
inputs=[a],
|
||||||
template=[("T", mx.float32)],
|
template=[("T", mx.float32)],
|
||||||
@@ -39,8 +42,13 @@ Let's write a custom kernel that computes ``exp`` elementwise:
|
|||||||
b = exp_elementwise(a)
|
b = exp_elementwise(a)
|
||||||
assert mx.allclose(b, mx.exp(a))
|
assert mx.allclose(b, mx.exp(a))
|
||||||
|
|
||||||
|
Every time you make a kernel, a new Metal library is created and possibly
|
||||||
|
JIT compiled. To reduce the overhead from that, build the kernel once with
|
||||||
|
:func:`fast.metal_kernel` and then use it many times.
|
||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
We are only required to pass the body of the Metal kernel in ``source``.
|
Only pass the body of the Metal kernel in ``source``. The function
|
||||||
|
signature is generated automatically.
|
||||||
|
|
||||||
The full function signature will be generated using:
|
The full function signature will be generated using:
|
||||||
|
|
||||||
@@ -78,29 +86,34 @@ Putting this all together, the generated function signature for ``myexp`` is as
|
|||||||
|
|
||||||
template [[host_name("custom_kernel_myexp_float")]] [[kernel]] decltype(custom_kernel_myexp_float<float>) custom_kernel_myexp_float<float>;
|
template [[host_name("custom_kernel_myexp_float")]] [[kernel]] decltype(custom_kernel_myexp_float<float>) custom_kernel_myexp_float<float>;
|
||||||
|
|
||||||
Note: ``grid`` and ``threadgroup`` are parameters to the Metal `dispatchThreads <https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/2866532-dispatchthreads>`_ function.
|
Note: ``grid`` and ``threadgroup`` are parameters to the Metal `dispatchThreads
|
||||||
This means we will launch ``mx.prod(grid)`` threads, subdivided into ``threadgroup`` size threadgroups.
|
<https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/2866532-dispatchthreads>`_
|
||||||
For optimal performance, each thread group dimension should be less than or equal to the corresponding grid dimension.
|
function. This means we will launch ``mx.prod(grid)`` threads, subdivided into
|
||||||
|
``threadgroup`` size threadgroups. For optimal performance, each thread group
|
||||||
|
dimension should be less than or equal to the corresponding grid dimension.
|
||||||
|
|
||||||
Passing ``verbose=True`` to ``mx.fast.metal_kernel.__call__`` will print the generated code for debugging purposes.
|
Passing ``verbose=True`` to :func:`ast.metal_kernel.__call__` will print the
|
||||||
|
generated code for debugging purposes.
|
||||||
|
|
||||||
Using Shape/Strides
|
Using Shape/Strides
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
``mx.fast.metal_kernel`` supports an argument ``ensure_row_contiguous`` which is ``True`` by default.
|
:func:`fast.metal_kernel` supports an argument ``ensure_row_contiguous`` which
|
||||||
This will copy the ``mx.array`` inputs if needed before the kernel is launched to ensure that the memory layout is row contiguous.
|
is ``True`` by default. This will copy the array inputs if needed
|
||||||
Generally this makes writing the kernel easier, since we don't have to worry about gaps or the ordering of the dims
|
before the kernel is launched to ensure that the memory layout is row
|
||||||
when indexing.
|
contiguous. Generally this makes writing the kernel easier, since we don't
|
||||||
|
have to worry about gaps or the ordering of the dims when indexing.
|
||||||
|
|
||||||
If we want to avoid this copy, ``metal_kernel`` automatically passes ``a_shape``, ``a_strides`` and ``a_ndim`` for each
|
If we want to avoid this copy, :func:`fast.metal_kernel` automatically passes
|
||||||
input array ``a`` if any are present in ``source``.
|
``a_shape``, ``a_strides`` and ``a_ndim`` for each input array ``a`` if any are
|
||||||
We can then use MLX's built in indexing utils to fetch the right elements for each thread.
|
present in ``source``. We can then use MLX's built in indexing utils to fetch
|
||||||
|
the right elements for each thread.
|
||||||
|
|
||||||
Let's convert ``myexp`` above to support arbitrarily strided arrays without relying on a copy from ``ensure_row_contiguous``:
|
Let's convert ``myexp`` above to support arbitrarily strided arrays without
|
||||||
|
relying on a copy from ``ensure_row_contiguous``:
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
def exp_elementwise(a: mx.array):
|
|
||||||
source = """
|
source = """
|
||||||
uint elem = thread_position_in_grid.x;
|
uint elem = thread_position_in_grid.x;
|
||||||
// Utils from `mlx/backend/metal/kernels/utils.h` are automatically included
|
// Utils from `mlx/backend/metal/kernels/utils.h` are automatically included
|
||||||
@@ -114,8 +127,11 @@ Let's convert ``myexp`` above to support arbitrarily strided arrays without rely
|
|||||||
name="myexp_strided",
|
name="myexp_strided",
|
||||||
input_names=["inp"],
|
input_names=["inp"],
|
||||||
output_names=["out"],
|
output_names=["out"],
|
||||||
source=source
|
source=source,
|
||||||
|
ensure_row_contiguous=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def exp_elementwise(a: mx.array):
|
||||||
outputs = kernel(
|
outputs = kernel(
|
||||||
inputs=[a],
|
inputs=[a],
|
||||||
template=[("T", mx.float32)],
|
template=[("T", mx.float32)],
|
||||||
@@ -123,7 +139,6 @@ Let's convert ``myexp`` above to support arbitrarily strided arrays without rely
|
|||||||
threadgroup=(256, 1, 1),
|
threadgroup=(256, 1, 1),
|
||||||
output_shapes=[a.shape],
|
output_shapes=[a.shape],
|
||||||
output_dtypes=[a.dtype],
|
output_dtypes=[a.dtype],
|
||||||
ensure_row_contiguous=False,
|
|
||||||
)
|
)
|
||||||
return outputs[0]
|
return outputs[0]
|
||||||
|
|
||||||
@@ -183,25 +198,13 @@ We'll start with the following MLX implementation using standard ops:
|
|||||||
|
|
||||||
return output
|
return output
|
||||||
|
|
||||||
Now let's use ``mx.custom_function`` together with ``mx.fast.metal_kernel``
|
Now let's use :func:`custom_function` together with :func:`fast.metal_kernel`
|
||||||
to write a fast GPU kernel for both the forward and backward passes.
|
to write a fast GPU kernel for both the forward and backward passes.
|
||||||
|
|
||||||
First we'll implement the forward pass as a fused kernel:
|
First we'll implement the forward pass as a fused kernel:
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
@mx.custom_function
|
|
||||||
def grid_sample(x, grid):
|
|
||||||
|
|
||||||
assert x.ndim == 4, "`x` must be 4D."
|
|
||||||
assert grid.ndim == 4, "`grid` must be 4D."
|
|
||||||
|
|
||||||
B, _, _, C = x.shape
|
|
||||||
_, gN, gM, D = grid.shape
|
|
||||||
out_shape = (B, gN, gM, C)
|
|
||||||
|
|
||||||
assert D == 2, "Last dim of `grid` must be size 2."
|
|
||||||
|
|
||||||
source = """
|
source = """
|
||||||
uint elem = thread_position_in_grid.x;
|
uint elem = thread_position_in_grid.x;
|
||||||
int H = x_shape[1];
|
int H = x_shape[1];
|
||||||
@@ -251,12 +254,26 @@ First we'll implement the forward pass as a fused kernel:
|
|||||||
|
|
||||||
out[elem] = nw * I_nw + ne * I_ne + sw * I_sw + se * I_se;
|
out[elem] = nw * I_nw + ne * I_ne + sw * I_sw + se * I_se;
|
||||||
"""
|
"""
|
||||||
|
|
||||||
kernel = mx.fast.metal_kernel(
|
kernel = mx.fast.metal_kernel(
|
||||||
name="grid_sample",
|
name="grid_sample",
|
||||||
input_names=["x", "grid"],
|
input_names=["x", "grid"],
|
||||||
output_names=["out"],
|
output_names=["out"],
|
||||||
source=source,
|
source=source,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@mx.custom_function
|
||||||
|
def grid_sample(x, grid):
|
||||||
|
|
||||||
|
assert x.ndim == 4, "`x` must be 4D."
|
||||||
|
assert grid.ndim == 4, "`grid` must be 4D."
|
||||||
|
|
||||||
|
B, _, _, C = x.shape
|
||||||
|
_, gN, gM, D = grid.shape
|
||||||
|
out_shape = (B, gN, gM, C)
|
||||||
|
|
||||||
|
assert D == 2, "Last dim of `grid` must be size 2."
|
||||||
|
|
||||||
outputs = kernel(
|
outputs = kernel(
|
||||||
inputs=[x, grid],
|
inputs=[x, grid],
|
||||||
template=[("T", x.dtype)],
|
template=[("T", x.dtype)],
|
||||||
@@ -281,11 +298,11 @@ On an M1 Max, we see a big performance improvement:
|
|||||||
Grid Sample VJP
|
Grid Sample VJP
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
Since we decorated ``grid_sample`` with ``mx.custom_function``, we can now define
|
Since we decorated ``grid_sample`` with :func:`custom_function`, we can now
|
||||||
its custom vjp transform so MLX can differentiate it.
|
define its custom vjp transform so MLX can differentiate it.
|
||||||
|
|
||||||
The backwards pass requires atomically updating ``x_grad``/``grid_grad`` and so
|
The backwards pass requires atomically updating ``x_grad``/``grid_grad`` and so
|
||||||
requires a few extra ``mx.fast.metal_kernel`` features:
|
requires a few extra :func:`fast.metal_kernel` features:
|
||||||
|
|
||||||
* ``init_value=0``
|
* ``init_value=0``
|
||||||
Initialize all of the kernel's outputs to this value before it runs. This allows us to update only part of the output arrays with the kernel.
|
Initialize all of the kernel's outputs to this value before it runs. This allows us to update only part of the output arrays with the kernel.
|
||||||
@@ -299,14 +316,6 @@ We can then implement the backwards pass as follows:
|
|||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
@grid_sample.vjp
|
|
||||||
def grid_sample_vjp(primals, cotangent, _):
|
|
||||||
x, grid = primals
|
|
||||||
B, _, _, C = x.shape
|
|
||||||
_, gN, gM, D = grid.shape
|
|
||||||
|
|
||||||
assert D == 2, "Last dim of `grid` must be size 2."
|
|
||||||
|
|
||||||
source = """
|
source = """
|
||||||
uint elem = thread_position_in_grid.x;
|
uint elem = thread_position_in_grid.x;
|
||||||
int H = x_shape[1];
|
int H = x_shape[1];
|
||||||
@@ -406,6 +415,15 @@ We can then implement the backwards pass as follows:
|
|||||||
source=source,
|
source=source,
|
||||||
atomic_outputs=True,
|
atomic_outputs=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@grid_sample.vjp
|
||||||
|
def grid_sample_vjp(primals, cotangent, _):
|
||||||
|
x, grid = primals
|
||||||
|
B, _, _, C = x.shape
|
||||||
|
_, gN, gM, D = grid.shape
|
||||||
|
|
||||||
|
assert D == 2, "Last dim of `grid` must be size 2."
|
||||||
|
|
||||||
# pad the output channels to simd group size
|
# pad the output channels to simd group size
|
||||||
# so that our `simd_sum`s don't overlap.
|
# so that our `simd_sum`s don't overlap.
|
||||||
simdgroup_size = 32
|
simdgroup_size = 32
|
||||||
|
|||||||
@@ -22,12 +22,12 @@ You can do that in MLX directly:
|
|||||||
This function performs that operation while leaving the implementation and
|
This function performs that operation while leaving the implementation and
|
||||||
function transformations to MLX.
|
function transformations to MLX.
|
||||||
|
|
||||||
However you may need to customize the underlying implementation, perhaps to
|
However, you may want to customize the underlying implementation, perhaps to
|
||||||
make it faster or for custom differentiation. In this tutorial we will go
|
make it faster. In this tutorial we will go through adding custom extensions.
|
||||||
through adding custom extensions. It will cover:
|
It will cover:
|
||||||
|
|
||||||
* The structure of the MLX library.
|
* The structure of the MLX library.
|
||||||
* Implementing a CPU operation that redirects to Accelerate_ when appropriate.
|
* Implementing a CPU operation.
|
||||||
* Implementing a GPU operation using metal.
|
* Implementing a GPU operation using metal.
|
||||||
* Adding the ``vjp`` and ``jvp`` function transformation.
|
* Adding the ``vjp`` and ``jvp`` function transformation.
|
||||||
* Building a custom extension and binding it to python.
|
* Building a custom extension and binding it to python.
|
||||||
@@ -45,7 +45,7 @@ Operations
|
|||||||
Operations are the front-end functions that operate on arrays. They are defined
|
Operations are the front-end functions that operate on arrays. They are defined
|
||||||
in the C++ API (:ref:`cpp_ops`), and the Python API (:ref:`ops`) binds them.
|
in the C++ API (:ref:`cpp_ops`), and the Python API (:ref:`ops`) binds them.
|
||||||
|
|
||||||
We would like an operation, :meth:`axpby` that takes in two arrays ``x`` and
|
We would like an operation :meth:`axpby` that takes in two arrays, ``x`` and
|
||||||
``y``, and two scalars, ``alpha`` and ``beta``. This is how to define it in
|
``y``, and two scalars, ``alpha`` and ``beta``. This is how to define it in
|
||||||
C++:
|
C++:
|
||||||
|
|
||||||
@@ -55,7 +55,7 @@ C++:
|
|||||||
* Scale and sum two vectors element-wise
|
* Scale and sum two vectors element-wise
|
||||||
* z = alpha * x + beta * y
|
* z = alpha * x + beta * y
|
||||||
*
|
*
|
||||||
* Follow numpy style broadcasting between x and y
|
* Use NumPy-style broadcasting between x and y
|
||||||
* Inputs are upcasted to floats if needed
|
* Inputs are upcasted to floats if needed
|
||||||
**/
|
**/
|
||||||
array axpby(
|
array axpby(
|
||||||
@@ -66,7 +66,7 @@ C++:
|
|||||||
StreamOrDevice s = {} // Stream on which to schedule the operation
|
StreamOrDevice s = {} // Stream on which to schedule the operation
|
||||||
);
|
);
|
||||||
|
|
||||||
The simplest way to this operation is in terms of existing operations:
|
The simplest way to implement this is with existing operations:
|
||||||
|
|
||||||
.. code-block:: C++
|
.. code-block:: C++
|
||||||
|
|
||||||
@@ -93,9 +93,9 @@ Primitives
|
|||||||
^^^^^^^^^^^
|
^^^^^^^^^^^
|
||||||
|
|
||||||
A :class:`Primitive` is part of the computation graph of an :class:`array`. It
|
A :class:`Primitive` is part of the computation graph of an :class:`array`. It
|
||||||
defines how to create outputs arrays given a input arrays. Further, a
|
defines how to create output arrays given input arrays. Further, a
|
||||||
:class:`Primitive` has methods to run on the CPU or GPU and for function
|
:class:`Primitive` has methods to run on the CPU or GPU and for function
|
||||||
transformations such as ``vjp`` and ``jvp``. Lets go back to our example to be
|
transformations such as ``vjp`` and ``jvp``. Let's go back to our example to be
|
||||||
more concrete:
|
more concrete:
|
||||||
|
|
||||||
.. code-block:: C++
|
.. code-block:: C++
|
||||||
@@ -128,7 +128,7 @@ more concrete:
|
|||||||
/** The vector-Jacobian product. */
|
/** The vector-Jacobian product. */
|
||||||
std::vector<array> vjp(
|
std::vector<array> vjp(
|
||||||
const std::vector<array>& primals,
|
const std::vector<array>& primals,
|
||||||
const array& cotan,
|
const std::vector<array>& cotangents,
|
||||||
const std::vector<int>& argnums,
|
const std::vector<int>& argnums,
|
||||||
const std::vector<array>& outputs) override;
|
const std::vector<array>& outputs) override;
|
||||||
|
|
||||||
@@ -138,13 +138,13 @@ more concrete:
|
|||||||
* representing the vectorized computation and the axis which
|
* representing the vectorized computation and the axis which
|
||||||
* corresponds to the output vectorized dimension.
|
* corresponds to the output vectorized dimension.
|
||||||
*/
|
*/
|
||||||
virtual std::pair<std::vector<array>, std::vector<int>> vmap(
|
std::pair<std::vector<array>, std::vector<int>> vmap(
|
||||||
const std::vector<array>& inputs,
|
const std::vector<array>& inputs,
|
||||||
const std::vector<int>& axes) override;
|
const std::vector<int>& axes) override;
|
||||||
|
|
||||||
/** Print the primitive. */
|
/** The name of primitive. */
|
||||||
void print(std::ostream& os) override {
|
const char* name() const override {
|
||||||
os << "Axpby";
|
return "Axpby";
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Equivalence check **/
|
/** Equivalence check **/
|
||||||
@@ -153,9 +153,6 @@ more concrete:
|
|||||||
private:
|
private:
|
||||||
float alpha_;
|
float alpha_;
|
||||||
float beta_;
|
float beta_;
|
||||||
|
|
||||||
/** Fall back implementation for evaluation on CPU */
|
|
||||||
void eval(const std::vector<array>& inputs, array& out);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
The :class:`Axpby` class derives from the base :class:`Primitive` class. The
|
The :class:`Axpby` class derives from the base :class:`Primitive` class. The
|
||||||
@@ -188,7 +185,7 @@ Let's reimplement our operation now in terms of our :class:`Axpby` primitive.
|
|||||||
auto promoted_dtype = promote_types(x.dtype(), y.dtype());
|
auto promoted_dtype = promote_types(x.dtype(), y.dtype());
|
||||||
|
|
||||||
// Upcast to float32 for non-floating point inputs x and y
|
// Upcast to float32 for non-floating point inputs x and y
|
||||||
auto out_dtype = is_floating_point(promoted_dtype)
|
auto out_dtype = issubdtype(promoted_dtype, float32)
|
||||||
? promoted_dtype
|
? promoted_dtype
|
||||||
: promote_types(promoted_dtype, float32);
|
: promote_types(promoted_dtype, float32);
|
||||||
|
|
||||||
@@ -234,11 +231,9 @@ the execution of the computation graph, and calls :meth:`Axpby::eval_cpu` or
|
|||||||
Implementing the CPU Back-end
|
Implementing the CPU Back-end
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Let's start by implementing a naive and generic version of
|
Let's start by implementing :meth:`Axpby::eval_cpu`.
|
||||||
:meth:`Axpby::eval_cpu`. We declared this as a private member function of
|
|
||||||
:class:`Axpby` earlier called :meth:`Axpby::eval`.
|
|
||||||
|
|
||||||
Our naive method will go over each element of the output array, find the
|
The method will go over each element of the output array, find the
|
||||||
corresponding input elements of ``x`` and ``y`` and perform the operation
|
corresponding input elements of ``x`` and ``y`` and perform the operation
|
||||||
point-wise. This is captured in the templated function :meth:`axpby_impl`.
|
point-wise. This is captured in the templated function :meth:`axpby_impl`.
|
||||||
|
|
||||||
@@ -246,36 +241,46 @@ point-wise. This is captured in the templated function :meth:`axpby_impl`.
|
|||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
void axpby_impl(
|
void axpby_impl(
|
||||||
const array& x,
|
const mx::array& x,
|
||||||
const array& y,
|
const mx::array& y,
|
||||||
array& out,
|
mx::array& out,
|
||||||
float alpha_,
|
float alpha_,
|
||||||
float beta_) {
|
float beta_,
|
||||||
// We only allocate memory when we are ready to fill the output
|
mx::Stream stream) {
|
||||||
// malloc_or_wait synchronously allocates available memory
|
out.set_data(mx::allocator::malloc(out.nbytes()));
|
||||||
// There may be a wait executed here if the allocation is requested
|
|
||||||
// under memory-pressured conditions
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
|
|
||||||
// Collect input and output data pointers
|
// Get the CPU command encoder and register input and output arrays
|
||||||
const T* x_ptr = x.data<T>();
|
auto& encoder = mx::cpu::get_command_encoder(stream);
|
||||||
const T* y_ptr = y.data<T>();
|
encoder.set_input_array(x);
|
||||||
T* out_ptr = out.data<T>();
|
encoder.set_input_array(y);
|
||||||
|
encoder.set_output_array(out);
|
||||||
|
|
||||||
|
// Launch the CPU kernel
|
||||||
|
encoder.dispatch([x_ptr = x.data<T>(),
|
||||||
|
y_ptr = y.data<T>(),
|
||||||
|
out_ptr = out.data<T>(),
|
||||||
|
size = out.size(),
|
||||||
|
shape = out.shape(),
|
||||||
|
x_strides = x.strides(),
|
||||||
|
y_strides = y.strides(),
|
||||||
|
alpha_,
|
||||||
|
beta_]() {
|
||||||
|
|
||||||
// Cast alpha and beta to the relevant types
|
// Cast alpha and beta to the relevant types
|
||||||
T alpha = static_cast<T>(alpha_);
|
T alpha = static_cast<T>(alpha_);
|
||||||
T beta = static_cast<T>(beta_);
|
T beta = static_cast<T>(beta_);
|
||||||
|
|
||||||
// Do the element-wise operation for each output
|
// Do the element-wise operation for each output
|
||||||
for (size_t out_idx = 0; out_idx < out.size(); out_idx++) {
|
for (size_t out_idx = 0; out_idx < size; out_idx++) {
|
||||||
// Map linear indices to offsets in x and y
|
// Map linear indices to offsets in x and y
|
||||||
auto x_offset = elem_to_loc(out_idx, x.shape(), x.strides());
|
auto x_offset = mx::elem_to_loc(out_idx, shape, x_strides);
|
||||||
auto y_offset = elem_to_loc(out_idx, y.shape(), y.strides());
|
auto y_offset = mx::elem_to_loc(out_idx, shape, y_strides);
|
||||||
|
|
||||||
// We allocate the output to be contiguous and regularly strided
|
// We allocate the output to be contiguous and regularly strided
|
||||||
// (defaults to row major) and hence it doesn't need additional mapping
|
// (defaults to row major) and hence it doesn't need additional mapping
|
||||||
out_ptr[out_idx] = alpha * x_ptr[x_offset] + beta * y_ptr[y_offset];
|
out_ptr[out_idx] = alpha * x_ptr[x_offset] + beta * y_ptr[y_offset];
|
||||||
}
|
}
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
Our implementation should work for all incoming floating point arrays.
|
Our implementation should work for all incoming floating point arrays.
|
||||||
@@ -284,112 +289,32 @@ Accordingly, we add dispatches for ``float32``, ``float16``, ``bfloat16`` and
|
|||||||
|
|
||||||
.. code-block:: C++
|
.. code-block:: C++
|
||||||
|
|
||||||
/** Fall back implementation for evaluation on CPU */
|
void Axpby::eval_cpu(
|
||||||
void Axpby::eval(
|
const std::vector<mx::array>& inputs,
|
||||||
const std::vector<array>& inputs,
|
std::vector<mx::array>& outputs) {
|
||||||
const std::vector<array>& outputs) {
|
|
||||||
auto& x = inputs[0];
|
auto& x = inputs[0];
|
||||||
auto& y = inputs[1];
|
auto& y = inputs[1];
|
||||||
auto& out = outputs[0];
|
auto& out = outputs[0];
|
||||||
|
|
||||||
// Dispatch to the correct dtype
|
// Dispatch to the correct dtype
|
||||||
if (out.dtype() == float32) {
|
if (out.dtype() == mx::float32) {
|
||||||
return axpby_impl<float>(x, y, out, alpha_, beta_);
|
return axpby_impl<float>(x, y, out, alpha_, beta_, stream());
|
||||||
} else if (out.dtype() == float16) {
|
} else if (out.dtype() == mx::float16) {
|
||||||
return axpby_impl<float16_t>(x, y, out, alpha_, beta_);
|
return axpby_impl<mx::float16_t>(x, y, out, alpha_, beta_, stream());
|
||||||
} else if (out.dtype() == bfloat16) {
|
} else if (out.dtype() == mx::bfloat16) {
|
||||||
return axpby_impl<bfloat16_t>(x, y, out, alpha_, beta_);
|
return axpby_impl<mx::bfloat16_t>(x, y, out, alpha_, beta_, stream());
|
||||||
} else if (out.dtype() == complex64) {
|
} else if (out.dtype() == mx::complex64) {
|
||||||
return axpby_impl<complex64_t>(x, y, out, alpha_, beta_);
|
return axpby_impl<mx::complex64_t>(x, y, out, alpha_, beta_, stream());
|
||||||
} else {
|
} else {
|
||||||
throw std::runtime_error(
|
throw std::runtime_error(
|
||||||
"[Axpby] Only supports floating point types.");
|
"Axpby is only supported for floating point types.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
This is good as a fallback implementation. We can use the ``axpby`` routine
|
|
||||||
provided by the Accelerate_ framework for a faster implementation in certain
|
|
||||||
cases:
|
|
||||||
|
|
||||||
#. Accelerate does not provide implementations of ``axpby`` for half precision
|
|
||||||
floats. We can only use it for ``float32`` types.
|
|
||||||
#. Accelerate assumes the inputs ``x`` and ``y`` are contiguous and all
|
|
||||||
elements have fixed strides between them. We only direct to Accelerate
|
|
||||||
if both ``x`` and ``y`` are row contiguous or column contiguous.
|
|
||||||
#. Accelerate performs the routine ``Y = (alpha * X) + (beta * Y)`` in-place.
|
|
||||||
MLX expects to write the output to a new array. We must copy the elements
|
|
||||||
of ``y`` into the output and use that as an input to ``axpby``.
|
|
||||||
|
|
||||||
Let's write an implementation that uses Accelerate in the right conditions.
|
|
||||||
It allocates data for the output, copies ``y`` into it, and then calls the
|
|
||||||
:func:`catlas_saxpby` from accelerate.
|
|
||||||
|
|
||||||
.. code-block:: C++
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
void axpby_impl_accelerate(
|
|
||||||
const array& x,
|
|
||||||
const array& y,
|
|
||||||
array& out,
|
|
||||||
float alpha_,
|
|
||||||
float beta_) {
|
|
||||||
// Accelerate library provides catlas_saxpby which does
|
|
||||||
// Y = (alpha * X) + (beta * Y) in place
|
|
||||||
// To use it, we first copy the data in y over to the output array
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
|
|
||||||
// We then copy over the elements using the contiguous vector specialization
|
|
||||||
copy_inplace(y, out, CopyType::Vector);
|
|
||||||
|
|
||||||
// Get x and y pointers for catlas_saxpby
|
|
||||||
const T* x_ptr = x.data<T>();
|
|
||||||
T* y_ptr = out.data<T>();
|
|
||||||
|
|
||||||
T alpha = static_cast<T>(alpha_);
|
|
||||||
T beta = static_cast<T>(beta_);
|
|
||||||
|
|
||||||
// Call the inplace accelerate operator
|
|
||||||
catlas_saxpby(
|
|
||||||
/* N = */ out.size(),
|
|
||||||
/* ALPHA = */ alpha,
|
|
||||||
/* X = */ x_ptr,
|
|
||||||
/* INCX = */ 1,
|
|
||||||
/* BETA = */ beta,
|
|
||||||
/* Y = */ y_ptr,
|
|
||||||
/* INCY = */ 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
For inputs that do not fit the criteria for accelerate, we fall back to
|
|
||||||
:meth:`Axpby::eval`. With this in mind, let's finish our
|
|
||||||
:meth:`Axpby::eval_cpu`.
|
|
||||||
|
|
||||||
.. code-block:: C++
|
|
||||||
|
|
||||||
/** Evaluate primitive on CPU using accelerate specializations */
|
|
||||||
void Axpby::eval_cpu(
|
|
||||||
const std::vector<array>& inputs,
|
|
||||||
const std::vector<array>& outputs) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& x = inputs[0];
|
|
||||||
auto& y = inputs[1];
|
|
||||||
auto& out = outputs[0];
|
|
||||||
|
|
||||||
// Accelerate specialization for contiguous single precision float arrays
|
|
||||||
if (out.dtype() == float32 &&
|
|
||||||
((x.flags().row_contiguous && y.flags().row_contiguous) ||
|
|
||||||
(x.flags().col_contiguous && y.flags().col_contiguous))) {
|
|
||||||
axpby_impl_accelerate<float>(x, y, out, alpha_, beta_);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fall back to common back-end if specializations are not available
|
|
||||||
eval(inputs, outputs);
|
|
||||||
}
|
|
||||||
|
|
||||||
Just this much is enough to run the operation :meth:`axpby` on a CPU stream! If
|
Just this much is enough to run the operation :meth:`axpby` on a CPU stream! If
|
||||||
you do not plan on running the operation on the GPU or using transforms on
|
you do not plan on running the operation on the GPU or using transforms on
|
||||||
computation graphs that contain :class:`Axpby`, you can stop implementing the
|
computation graphs that contain :class:`Axpby`, you can stop implementing the
|
||||||
primitive here and enjoy the speed-ups you get from the Accelerate library.
|
primitive here.
|
||||||
|
|
||||||
Implementing the GPU Back-end
|
Implementing the GPU Back-end
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
@@ -466,17 +391,17 @@ below.
|
|||||||
auto& d = metal::device(s.device);
|
auto& d = metal::device(s.device);
|
||||||
|
|
||||||
// Allocate output memory
|
// Allocate output memory
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
|
||||||
// Resolve name of kernel
|
// Resolve name of kernel
|
||||||
std::ostringstream kname;
|
std::stream kname;
|
||||||
kname << "axpby_" << "general_" << type_to_name(out);
|
kname = "axpby_general_" + type_to_name(out);
|
||||||
|
|
||||||
// Make sure the metal library is available
|
// Load the metal library
|
||||||
d.register_library("mlx_ext");
|
auto lib = d.get_library("mlx_ext", current_binary_dir());
|
||||||
|
|
||||||
// Make a kernel from this metal library
|
// Make a kernel from this metal library
|
||||||
auto kernel = d.get_kernel(kname.str(), "mlx_ext");
|
auto kernel = d.get_kernel(kname, lib);
|
||||||
|
|
||||||
// Prepare to encode kernel
|
// Prepare to encode kernel
|
||||||
auto& compute_encoder = d.get_command_encoder(s.index);
|
auto& compute_encoder = d.get_command_encoder(s.index);
|
||||||
@@ -544,7 +469,7 @@ one we just defined:
|
|||||||
const std::vector<array>& tangents,
|
const std::vector<array>& tangents,
|
||||||
const std::vector<int>& argnums) {
|
const std::vector<int>& argnums) {
|
||||||
// Forward mode diff that pushes along the tangents
|
// Forward mode diff that pushes along the tangents
|
||||||
// The jvp transform on the primitive can built with ops
|
// The jvp transform on the primitive can be built with ops
|
||||||
// that are scheduled on the same stream as the primitive
|
// that are scheduled on the same stream as the primitive
|
||||||
|
|
||||||
// If argnums = {0}, we only push along x in which case the
|
// If argnums = {0}, we only push along x in which case the
|
||||||
@@ -556,7 +481,7 @@ one we just defined:
|
|||||||
auto scale_arr = array(scale, tangents[0].dtype());
|
auto scale_arr = array(scale, tangents[0].dtype());
|
||||||
return {multiply(scale_arr, tangents[0], stream())};
|
return {multiply(scale_arr, tangents[0], stream())};
|
||||||
}
|
}
|
||||||
// If, argnums = {0, 1}, we take contributions from both
|
// If argnums = {0, 1}, we take contributions from both
|
||||||
// which gives us jvp = tangent_x * alpha + tangent_y * beta
|
// which gives us jvp = tangent_x * alpha + tangent_y * beta
|
||||||
else {
|
else {
|
||||||
return {axpby(tangents[0], tangents[1], alpha_, beta_, stream())};
|
return {axpby(tangents[0], tangents[1], alpha_, beta_, stream())};
|
||||||
@@ -810,7 +735,7 @@ Let's look at a simple script and its results:
|
|||||||
|
|
||||||
print(f"c shape: {c.shape}")
|
print(f"c shape: {c.shape}")
|
||||||
print(f"c dtype: {c.dtype}")
|
print(f"c dtype: {c.dtype}")
|
||||||
print(f"c correct: {mx.all(c == 6.0).item()}")
|
print(f"c is correct: {mx.all(c == 6.0).item()}")
|
||||||
|
|
||||||
Output:
|
Output:
|
||||||
|
|
||||||
@@ -818,13 +743,13 @@ Output:
|
|||||||
|
|
||||||
c shape: [3, 4]
|
c shape: [3, 4]
|
||||||
c dtype: float32
|
c dtype: float32
|
||||||
c correctness: True
|
c is correct: True
|
||||||
|
|
||||||
Results
|
Results
|
||||||
^^^^^^^
|
^^^^^^^
|
||||||
|
|
||||||
Let's run a quick benchmark and see how our new ``axpby`` operation compares
|
Let's run a quick benchmark and see how our new ``axpby`` operation compares
|
||||||
with the naive :meth:`simple_axpby` we first defined on the CPU.
|
with the naive :meth:`simple_axpby` we first defined.
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
@@ -832,13 +757,11 @@ with the naive :meth:`simple_axpby` we first defined on the CPU.
|
|||||||
from mlx_sample_extensions import axpby
|
from mlx_sample_extensions import axpby
|
||||||
import time
|
import time
|
||||||
|
|
||||||
mx.set_default_device(mx.cpu)
|
|
||||||
|
|
||||||
def simple_axpby(x: mx.array, y: mx.array, alpha: float, beta: float) -> mx.array:
|
def simple_axpby(x: mx.array, y: mx.array, alpha: float, beta: float) -> mx.array:
|
||||||
return alpha * x + beta * y
|
return alpha * x + beta * y
|
||||||
|
|
||||||
M = 256
|
M = 4096
|
||||||
N = 512
|
N = 4096
|
||||||
|
|
||||||
x = mx.random.normal((M, N))
|
x = mx.random.normal((M, N))
|
||||||
y = mx.random.normal((M, N))
|
y = mx.random.normal((M, N))
|
||||||
@@ -849,24 +772,24 @@ with the naive :meth:`simple_axpby` we first defined on the CPU.
|
|||||||
|
|
||||||
def bench(f):
|
def bench(f):
|
||||||
# Warm up
|
# Warm up
|
||||||
for i in range(100):
|
for i in range(5):
|
||||||
z = f(x, y, alpha, beta)
|
z = f(x, y, alpha, beta)
|
||||||
mx.eval(z)
|
mx.eval(z)
|
||||||
|
|
||||||
# Timed run
|
# Timed run
|
||||||
s = time.time()
|
s = time.time()
|
||||||
for i in range(5000):
|
for i in range(100):
|
||||||
z = f(x, y, alpha, beta)
|
z = f(x, y, alpha, beta)
|
||||||
mx.eval(z)
|
mx.eval(z)
|
||||||
e = time.time()
|
e = time.time()
|
||||||
return e - s
|
return 1000 * (e - s) / 100
|
||||||
|
|
||||||
simple_time = bench(simple_axpby)
|
simple_time = bench(simple_axpby)
|
||||||
custom_time = bench(axpby)
|
custom_time = bench(axpby)
|
||||||
|
|
||||||
print(f"Simple axpby: {simple_time:.3f} s | Custom axpby: {custom_time:.3f} s")
|
print(f"Simple axpby: {simple_time:.3f} ms | Custom axpby: {custom_time:.3f} ms")
|
||||||
|
|
||||||
The results are ``Simple axpby: 0.114 s | Custom axpby: 0.109 s``. We see
|
The results are ``Simple axpby: 1.559 ms | Custom axpby: 0.774 ms``. We see
|
||||||
modest improvements right away!
|
modest improvements right away!
|
||||||
|
|
||||||
This operation is now good to be used to build other operations, in
|
This operation is now good to be used to build other operations, in
|
||||||
|
|||||||
@@ -70,6 +70,8 @@ are the CPU and GPU.
|
|||||||
python/fft
|
python/fft
|
||||||
python/linalg
|
python/linalg
|
||||||
python/metal
|
python/metal
|
||||||
|
python/cuda
|
||||||
|
python/memory_management
|
||||||
python/nn
|
python/nn
|
||||||
python/optimizers
|
python/optimizers
|
||||||
python/distributed
|
python/distributed
|
||||||
|
|||||||
@@ -13,22 +13,49 @@ silicon computer is
|
|||||||
|
|
||||||
pip install mlx
|
pip install mlx
|
||||||
|
|
||||||
To install from PyPI you must meet the following requirements:
|
To install from PyPI your system must meet the following requirements:
|
||||||
|
|
||||||
- Using an M series chip (Apple silicon)
|
- Using an M series chip (Apple silicon)
|
||||||
- Using a native Python >= 3.9
|
- Using a native Python >= 3.10
|
||||||
- macOS >= 13.5
|
- macOS >= 13.5
|
||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
MLX is only available on devices running macOS >= 13.5
|
MLX is only available on devices running macOS >= 13.5
|
||||||
It is highly recommended to use macOS 14 (Sonoma)
|
It is highly recommended to use macOS 14 (Sonoma)
|
||||||
|
|
||||||
|
CUDA
|
||||||
|
^^^^
|
||||||
|
|
||||||
MLX is also available on conda-forge. To install MLX with conda do:
|
MLX has a CUDA backend which you can install with:
|
||||||
|
|
||||||
.. code-block:: shell
|
.. code-block:: shell
|
||||||
|
|
||||||
conda install conda-forge::mlx
|
pip install mlx[cuda]
|
||||||
|
|
||||||
|
To install the CUDA package from PyPi your system must meet the following
|
||||||
|
requirements:
|
||||||
|
|
||||||
|
- Nvidia architecture >= SM 7.0 (Volta)
|
||||||
|
- Nvidia driver >= 550.54.14
|
||||||
|
- CUDA toolkit >= 12.0
|
||||||
|
- Linux distribution with glibc >= 2.35
|
||||||
|
- Python >= 3.10
|
||||||
|
|
||||||
|
|
||||||
|
CPU-only (Linux)
|
||||||
|
^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
For a CPU-only version of MLX that runs on Linux use:
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
pip install mlx[cpu]
|
||||||
|
|
||||||
|
To install the CPU-only package from PyPi your system must meet the following
|
||||||
|
requirements:
|
||||||
|
|
||||||
|
- Linux distribution with glibc >= 2.35
|
||||||
|
- Python >= 3.10
|
||||||
|
|
||||||
|
|
||||||
Troubleshooting
|
Troubleshooting
|
||||||
@@ -65,6 +92,8 @@ Build Requirements
|
|||||||
Python API
|
Python API
|
||||||
^^^^^^^^^^
|
^^^^^^^^^^
|
||||||
|
|
||||||
|
.. _python install:
|
||||||
|
|
||||||
To build and install the MLX python library from source, first, clone MLX from
|
To build and install the MLX python library from source, first, clone MLX from
|
||||||
`its GitHub repo <https://github.com/ml-explore/mlx>`_:
|
`its GitHub repo <https://github.com/ml-explore/mlx>`_:
|
||||||
|
|
||||||
@@ -76,20 +105,20 @@ Then simply build and install MLX using pip:
|
|||||||
|
|
||||||
.. code-block:: shell
|
.. code-block:: shell
|
||||||
|
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=8 pip install .
|
pip install .
|
||||||
|
|
||||||
For developing, install the package with development dependencies, and use an
|
For developing, install the package with development dependencies, and use an
|
||||||
editable install:
|
editable install:
|
||||||
|
|
||||||
.. code-block:: shell
|
.. code-block:: shell
|
||||||
|
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=8 pip install -e ".[dev]"
|
pip install -e ".[dev]"
|
||||||
|
|
||||||
Once the development dependencies are installed, you can build faster with:
|
Once the development dependencies are installed, you can build faster with:
|
||||||
|
|
||||||
.. code-block:: shell
|
.. code-block:: shell
|
||||||
|
|
||||||
CMAKE_BUILD_PARALLEL_LEVEL=8 python setup.py build_ext --inplace
|
python setup.py build_ext --inplace
|
||||||
|
|
||||||
Run the tests with:
|
Run the tests with:
|
||||||
|
|
||||||
@@ -107,6 +136,8 @@ IDE:
|
|||||||
C++ API
|
C++ API
|
||||||
^^^^^^^
|
^^^^^^^
|
||||||
|
|
||||||
|
.. _cpp install:
|
||||||
|
|
||||||
Currently, MLX must be built and installed from source.
|
Currently, MLX must be built and installed from source.
|
||||||
|
|
||||||
Similarly to the python library, to build and install the MLX C++ library start
|
Similarly to the python library, to build and install the MLX C++ library start
|
||||||
@@ -185,6 +216,7 @@ should point to the path to the built metal library.
|
|||||||
|
|
||||||
xcrun -sdk macosx --show-sdk-version
|
xcrun -sdk macosx --show-sdk-version
|
||||||
|
|
||||||
|
|
||||||
Binary Size Minimization
|
Binary Size Minimization
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
@@ -213,6 +245,50 @@ be anwywhere from a few hundred millisecond to a few seconds depending on the
|
|||||||
application. Once a kernel is compiled, it will be cached by the system. The
|
application. Once a kernel is compiled, it will be cached by the system. The
|
||||||
Metal kernel cache persists across reboots.
|
Metal kernel cache persists across reboots.
|
||||||
|
|
||||||
|
Linux
|
||||||
|
^^^^^
|
||||||
|
|
||||||
|
To build from source on Linux (CPU only), install the BLAS and LAPACK headers.
|
||||||
|
For example on Ubuntu, run the following:
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
apt-get update -y
|
||||||
|
apt-get install libblas-dev liblapack-dev liblapacke-dev -y
|
||||||
|
|
||||||
|
From here follow the instructions to install either the :ref:`Python <python
|
||||||
|
install>` or :ref:`C++ <cpp install>` APIs.
|
||||||
|
|
||||||
|
CUDA
|
||||||
|
^^^^
|
||||||
|
|
||||||
|
To build from source on Linux with CUDA, install the BLAS and LAPACK headers
|
||||||
|
and the CUDA toolkit. For example on Ubuntu, run the following:
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
|
||||||
|
dpkg -i cuda-keyring_1.1-1_all.deb
|
||||||
|
apt-get update -y
|
||||||
|
apt-get -y install cuda-toolkit-12-9
|
||||||
|
apt-get install libblas-dev liblapack-dev liblapacke-dev libcudnn9-dev-cuda-12 -y
|
||||||
|
|
||||||
|
|
||||||
|
When building either the Python or C++ APIs make sure to pass the cmake flag
|
||||||
|
``MLX_BUILD_CUDA=ON``. For example, to build the Python API run:
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
CMAKE_ARGS="-DMLX_BUILD_CUDA=ON" pip install -e ".[dev]"
|
||||||
|
|
||||||
|
To build the C++ package run:
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
mkdir -p build && cd build
|
||||||
|
cmake .. -DMLX_BUILD_CUDA=ON && make -j
|
||||||
|
|
||||||
|
|
||||||
Troubleshooting
|
Troubleshooting
|
||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
|||||||
@@ -19,6 +19,8 @@ Array
|
|||||||
array.ndim
|
array.ndim
|
||||||
array.shape
|
array.shape
|
||||||
array.size
|
array.size
|
||||||
|
array.real
|
||||||
|
array.imag
|
||||||
array.abs
|
array.abs
|
||||||
array.all
|
array.all
|
||||||
array.any
|
array.any
|
||||||
@@ -38,6 +40,7 @@ Array
|
|||||||
array.log10
|
array.log10
|
||||||
array.log1p
|
array.log1p
|
||||||
array.log2
|
array.log2
|
||||||
|
array.logcumsumexp
|
||||||
array.logsumexp
|
array.logsumexp
|
||||||
array.max
|
array.max
|
||||||
array.mean
|
array.mean
|
||||||
|
|||||||
9
docs/src/python/cuda.rst
Normal file
9
docs/src/python/cuda.rst
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
CUDA
|
||||||
|
=====
|
||||||
|
|
||||||
|
.. currentmodule:: mlx.core.cuda
|
||||||
|
|
||||||
|
.. autosummary::
|
||||||
|
:toctree: _autosummary
|
||||||
|
|
||||||
|
is_available
|
||||||
@@ -51,11 +51,20 @@ The default floating point type is ``float32`` and the default integer type is
|
|||||||
* - ``float32``
|
* - ``float32``
|
||||||
- 4
|
- 4
|
||||||
- 32-bit float
|
- 32-bit float
|
||||||
|
* - ``float64``
|
||||||
|
- 4
|
||||||
|
- 64-bit double
|
||||||
* - ``complex64``
|
* - ``complex64``
|
||||||
- 8
|
- 8
|
||||||
- 64-bit complex float
|
- 64-bit complex float
|
||||||
|
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
Arrays with type ``float64`` only work with CPU operations. Using
|
||||||
|
``float64`` arrays on the GPU will result in an exception.
|
||||||
|
|
||||||
|
|
||||||
Data type are aranged in a hierarchy. See the :obj:`DtypeCategory` object
|
Data type are aranged in a hierarchy. See the :obj:`DtypeCategory` object
|
||||||
documentation for more information. Use :func:`issubdtype` to determine if one
|
documentation for more information. Use :func:`issubdtype` to determine if one
|
||||||
``dtype`` (or category) is a subtype of another category.
|
``dtype`` (or category) is a subtype of another category.
|
||||||
|
|||||||
@@ -13,3 +13,4 @@ Fast
|
|||||||
rope
|
rope
|
||||||
scaled_dot_product_attention
|
scaled_dot_product_attention
|
||||||
metal_kernel
|
metal_kernel
|
||||||
|
cuda_kernel
|
||||||
|
|||||||
@@ -20,3 +20,5 @@ FFT
|
|||||||
irfft2
|
irfft2
|
||||||
rfftn
|
rfftn
|
||||||
irfftn
|
irfftn
|
||||||
|
fftshift
|
||||||
|
ifftshift
|
||||||
|
|||||||
@@ -16,5 +16,12 @@ Linear Algebra
|
|||||||
cross
|
cross
|
||||||
qr
|
qr
|
||||||
svd
|
svd
|
||||||
|
eigvals
|
||||||
|
eig
|
||||||
eigvalsh
|
eigvalsh
|
||||||
eigh
|
eigh
|
||||||
|
lu
|
||||||
|
lu_factor
|
||||||
|
pinv
|
||||||
|
solve
|
||||||
|
solve_triangular
|
||||||
|
|||||||
16
docs/src/python/memory_management.rst
Normal file
16
docs/src/python/memory_management.rst
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
Memory Management
|
||||||
|
=================
|
||||||
|
|
||||||
|
.. currentmodule:: mlx.core
|
||||||
|
|
||||||
|
.. autosummary::
|
||||||
|
:toctree: _autosummary
|
||||||
|
|
||||||
|
get_active_memory
|
||||||
|
get_peak_memory
|
||||||
|
reset_peak_memory
|
||||||
|
get_cache_memory
|
||||||
|
set_memory_limit
|
||||||
|
set_cache_limit
|
||||||
|
set_wired_limit
|
||||||
|
clear_cache
|
||||||
@@ -8,13 +8,5 @@ Metal
|
|||||||
|
|
||||||
is_available
|
is_available
|
||||||
device_info
|
device_info
|
||||||
get_active_memory
|
|
||||||
get_peak_memory
|
|
||||||
reset_peak_memory
|
|
||||||
get_cache_memory
|
|
||||||
set_memory_limit
|
|
||||||
set_cache_limit
|
|
||||||
set_wired_limit
|
|
||||||
clear_cache
|
|
||||||
start_capture
|
start_capture
|
||||||
stop_capture
|
stop_capture
|
||||||
|
|||||||
@@ -174,6 +174,7 @@ In detail:
|
|||||||
|
|
||||||
value_and_grad
|
value_and_grad
|
||||||
quantize
|
quantize
|
||||||
|
average_gradients
|
||||||
|
|
||||||
.. toctree::
|
.. toctree::
|
||||||
|
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ simple functions.
|
|||||||
mish
|
mish
|
||||||
prelu
|
prelu
|
||||||
relu
|
relu
|
||||||
|
relu2
|
||||||
relu6
|
relu6
|
||||||
selu
|
selu
|
||||||
sigmoid
|
sigmoid
|
||||||
|
|||||||
@@ -50,6 +50,7 @@ Layers
|
|||||||
QuantizedLinear
|
QuantizedLinear
|
||||||
RMSNorm
|
RMSNorm
|
||||||
ReLU
|
ReLU
|
||||||
|
ReLU2
|
||||||
ReLU6
|
ReLU6
|
||||||
RNN
|
RNN
|
||||||
RoPE
|
RoPE
|
||||||
|
|||||||
@@ -32,13 +32,16 @@ Operations
|
|||||||
atleast_2d
|
atleast_2d
|
||||||
atleast_3d
|
atleast_3d
|
||||||
bitwise_and
|
bitwise_and
|
||||||
|
bitwise_invert
|
||||||
bitwise_or
|
bitwise_or
|
||||||
bitwise_xor
|
bitwise_xor
|
||||||
block_masked_mm
|
block_masked_mm
|
||||||
|
broadcast_arrays
|
||||||
broadcast_to
|
broadcast_to
|
||||||
ceil
|
ceil
|
||||||
clip
|
clip
|
||||||
concatenate
|
concatenate
|
||||||
|
contiguous
|
||||||
conj
|
conj
|
||||||
conjugate
|
conjugate
|
||||||
convolve
|
convolve
|
||||||
@@ -100,6 +103,7 @@ Operations
|
|||||||
log10
|
log10
|
||||||
log1p
|
log1p
|
||||||
logaddexp
|
logaddexp
|
||||||
|
logcumsumexp
|
||||||
logical_not
|
logical_not
|
||||||
logical_and
|
logical_and
|
||||||
logical_or
|
logical_or
|
||||||
@@ -108,6 +112,7 @@ Operations
|
|||||||
max
|
max
|
||||||
maximum
|
maximum
|
||||||
mean
|
mean
|
||||||
|
median
|
||||||
meshgrid
|
meshgrid
|
||||||
min
|
min
|
||||||
minimum
|
minimum
|
||||||
|
|||||||
@@ -51,14 +51,14 @@ the saved state. Here's a simple example:
|
|||||||
optimizer.update(model, grads)
|
optimizer.update(model, grads)
|
||||||
|
|
||||||
# Save the state
|
# Save the state
|
||||||
state = tree_flatten(optimizer.state)
|
state = tree_flatten(optimizer.state, destination={})
|
||||||
mx.save_safetensors("optimizer.safetensors", dict(state))
|
mx.save_safetensors("optimizer.safetensors", state)
|
||||||
|
|
||||||
# Later on, for example when loading from a checkpoint,
|
# Later on, for example when loading from a checkpoint,
|
||||||
# recreate the optimizer and load the state
|
# recreate the optimizer and load the state
|
||||||
optimizer = optim.Adam(learning_rate=1e-2)
|
optimizer = optim.Adam(learning_rate=1e-2)
|
||||||
|
|
||||||
state = tree_unflatten(list(mx.load("optimizer.safetensors").items()))
|
state = tree_unflatten(mx.load("optimizer.safetensors"))
|
||||||
optimizer.state = state
|
optimizer.state = state
|
||||||
|
|
||||||
Note, not every optimizer configuation parameter is saved in the state. For
|
Note, not every optimizer configuation parameter is saved in the state. For
|
||||||
|
|||||||
@@ -18,3 +18,5 @@ Common Optimizers
|
|||||||
AdamW
|
AdamW
|
||||||
Adamax
|
Adamax
|
||||||
Lion
|
Lion
|
||||||
|
MultiOptimizer
|
||||||
|
Muon
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ Transforms
|
|||||||
:toctree: _autosummary
|
:toctree: _autosummary
|
||||||
|
|
||||||
eval
|
eval
|
||||||
|
async_eval
|
||||||
compile
|
compile
|
||||||
custom_function
|
custom_function
|
||||||
disable_compile
|
disable_compile
|
||||||
|
|||||||
@@ -130,8 +130,8 @@ Now make an array, and benchmark both functions:
|
|||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
x = mx.random.uniform(shape=(32, 1000, 4096))
|
x = mx.random.uniform(shape=(32, 1000, 4096))
|
||||||
timeit(nn.gelu, x)
|
timeit(gelu, x)
|
||||||
timeit(mx.compile(nn.gelu), x)
|
timeit(mx.compile(gelu), x)
|
||||||
|
|
||||||
On an M1 Max the times are 15.5 and 3.1 milliseconds. The compiled ``gelu`` is
|
On an M1 Max the times are 15.5 and 3.1 milliseconds. The compiled ``gelu`` is
|
||||||
five times faster.
|
five times faster.
|
||||||
@@ -225,7 +225,7 @@ In some cases returning updated state can be pretty inconvenient. Hence,
|
|||||||
def fun(x, y):
|
def fun(x, y):
|
||||||
z = x + y
|
z = x + y
|
||||||
state.append(z)
|
state.append(z)
|
||||||
return mx.exp(z), state
|
return mx.exp(z)
|
||||||
|
|
||||||
fun(mx.array(1.0), mx.array(2.0))
|
fun(mx.array(1.0), mx.array(2.0))
|
||||||
# Prints [array(3, dtype=float32)]
|
# Prints [array(3, dtype=float32)]
|
||||||
|
|||||||
@@ -5,21 +5,27 @@ Distributed Communication
|
|||||||
|
|
||||||
.. currentmodule:: mlx.core.distributed
|
.. currentmodule:: mlx.core.distributed
|
||||||
|
|
||||||
MLX utilizes `MPI <https://en.wikipedia.org/wiki/Message_Passing_Interface>`_ to
|
MLX supports distributed communication operations that allow the computational cost
|
||||||
provide distributed communication operations that allow the computational cost
|
of training or inference to be shared across many physical machines. At the
|
||||||
of training or inference to be shared across many physical machines. You can
|
moment we support two different communication backends:
|
||||||
see a list of the supported operations in the :ref:`API docs<distributed>`.
|
|
||||||
|
* `MPI <https://en.wikipedia.org/wiki/Message_Passing_Interface>`_ a
|
||||||
|
full-featured and mature distributed communications library
|
||||||
|
* A **ring** backend of our own that uses native TCP sockets and should be
|
||||||
|
faster for thunderbolt connections.
|
||||||
|
|
||||||
|
The list of all currently supported operations and their documentation can be
|
||||||
|
seen in the :ref:`API docs<distributed>`.
|
||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
A lot of operations may not be supported or not as fast as they should be.
|
Some operations may not be supported or not as fast as they should be.
|
||||||
We are adding more and tuning the ones we have as we are figuring out the
|
We are adding more and tuning the ones we have as we are figuring out the
|
||||||
best way to do distributed computing on Macs using MLX.
|
best way to do distributed computing on Macs using MLX.
|
||||||
|
|
||||||
Getting Started
|
Getting Started
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
MLX already comes with the ability to "talk" to MPI if it is installed on the
|
A distributed program in MLX is as simple as:
|
||||||
machine. The minimal distributed program in MLX is as simple as:
|
|
||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
@@ -30,74 +36,79 @@ machine. The minimal distributed program in MLX is as simple as:
|
|||||||
print(world.rank(), x)
|
print(world.rank(), x)
|
||||||
|
|
||||||
The program above sums the array ``mx.ones(10)`` across all
|
The program above sums the array ``mx.ones(10)`` across all
|
||||||
distributed processes. If simply run with ``python``, however, only one
|
distributed processes. However, when this script is run with ``python`` only
|
||||||
process is launched and no distributed communication takes place.
|
one process is launched and no distributed communication takes place. Namely,
|
||||||
|
all operations in ``mx.distributed`` are noops when the distributed group has a
|
||||||
|
size of one. This property allows us to avoid code that checks if we are in a
|
||||||
|
distributed setting similar to the one below:
|
||||||
|
|
||||||
To launch the program in distributed mode we need to use ``mpirun`` or
|
.. code:: python
|
||||||
``mpiexec`` depending on the MPI installation. The simplest possible way is the
|
|
||||||
following:
|
import mlx.core as mx
|
||||||
|
|
||||||
|
x = ...
|
||||||
|
world = mx.distributed.init()
|
||||||
|
# No need for the check we can simply do x = mx.distributed.all_sum(x)
|
||||||
|
if world.size() > 1:
|
||||||
|
x = mx.distributed.all_sum(x)
|
||||||
|
|
||||||
|
Running Distributed Programs
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
MLX provides ``mlx.launch`` a helper script to launch distributed programs.
|
||||||
|
Continuing with our initial example we can run it on localhost with 4 processes using
|
||||||
|
|
||||||
.. code:: shell
|
.. code:: shell
|
||||||
|
|
||||||
$ mpirun -np 2 python test.py
|
$ mlx.launch -n 4 my_script.py
|
||||||
1 array([2, 2, 2, ..., 2, 2, 2], dtype=float32)
|
3 array([4, 4, 4, ..., 4, 4, 4], dtype=float32)
|
||||||
0 array([2, 2, 2, ..., 2, 2, 2], dtype=float32)
|
2 array([4, 4, 4, ..., 4, 4, 4], dtype=float32)
|
||||||
|
1 array([4, 4, 4, ..., 4, 4, 4], dtype=float32)
|
||||||
|
0 array([4, 4, 4, ..., 4, 4, 4], dtype=float32)
|
||||||
|
|
||||||
The above launches two processes on the same (local) machine and we can see
|
We can also run it on some remote hosts by providing their IPs (provided that
|
||||||
both standard output streams. The processes send the array of 1s to each other
|
the script exists on all hosts and they are reachable by ssh)
|
||||||
and compute the sum which is printed. Launching with ``mpirun -np 4 ...`` would
|
|
||||||
print 4 etc.
|
|
||||||
|
|
||||||
Installing MPI
|
|
||||||
---------------
|
|
||||||
|
|
||||||
MPI can be installed with Homebrew, using the Anaconda package manager or
|
|
||||||
compiled from source. Most of our testing is done using ``openmpi`` installed
|
|
||||||
with the Anaconda package manager as follows:
|
|
||||||
|
|
||||||
.. code:: shell
|
.. code:: shell
|
||||||
|
|
||||||
$ conda install openmpi
|
$ mlx.launch --hosts ip1,ip2,ip3,ip4 my_script.py
|
||||||
|
3 array([4, 4, 4, ..., 4, 4, 4], dtype=float32)
|
||||||
|
2 array([4, 4, 4, ..., 4, 4, 4], dtype=float32)
|
||||||
|
1 array([4, 4, 4, ..., 4, 4, 4], dtype=float32)
|
||||||
|
0 array([4, 4, 4, ..., 4, 4, 4], dtype=float32)
|
||||||
|
|
||||||
Installing with Homebrew may require specifying the location of ``libmpi.dyld``
|
Consult the dedicated :doc:`usage guide<launching_distributed>` for more
|
||||||
so that MLX can find it and load it at runtime. This can simply be achieved by
|
information on using ``mlx.launch``.
|
||||||
passing the ``DYLD_LIBRARY_PATH`` environment variable to ``mpirun``.
|
|
||||||
|
|
||||||
.. code:: shell
|
Selecting Backend
|
||||||
|
^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
$ mpirun -np 2 -x DYLD_LIBRARY_PATH=/opt/homebrew/lib/ python test.py
|
You can select the backend you want to use when calling :func:`init` by passing
|
||||||
|
one of ``{'any', 'ring', 'mpi'}``. When passing ``any``, MLX will try to
|
||||||
Setting up Remote Hosts
|
initialize the ``ring`` backend and if it fails the ``mpi`` backend. If they
|
||||||
-----------------------
|
both fail then a singleton group is created.
|
||||||
|
|
||||||
MPI can automatically connect to remote hosts and set up the communication over
|
|
||||||
the network if the remote hosts can be accessed via ssh. A good checklist to
|
|
||||||
debug connectivity issues is the following:
|
|
||||||
|
|
||||||
* ``ssh hostname`` works from all machines to all machines without asking for
|
|
||||||
password or host confirmation
|
|
||||||
* ``mpirun`` is accessible on all machines. You can call ``mpirun`` using its
|
|
||||||
full path to force all machines to use a specific path.
|
|
||||||
* Ensure that the ``hostname`` used by MPI is the one that you have configured
|
|
||||||
in the ``.ssh/config`` files on all machines.
|
|
||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
For an example hostname ``foo.bar.com`` MPI can use only ``foo`` as
|
After a distributed backend is successfully initialized :func:`init` will
|
||||||
the hostname passed to ssh if the current hostname matches ``*.bar.com``.
|
return **the same backend** if called without arguments or with backend set to
|
||||||
|
``any``.
|
||||||
|
|
||||||
An easy way to pass the host names to MPI is using a host file. A host file
|
The following examples aim to clarify the backend initialization logic in MLX:
|
||||||
looks like the following, where ``host1`` and ``host2`` should be the fully
|
|
||||||
qualified domain names or IPs for these hosts.
|
|
||||||
|
|
||||||
.. code::
|
.. code:: python
|
||||||
|
|
||||||
host1 slots=1
|
# Case 1: Initialize MPI regardless if it was possible to initialize the ring backend
|
||||||
host2 slots=1
|
world = mx.distributed.init(backend="mpi")
|
||||||
|
world2 = mx.distributed.init() # subsequent calls return the MPI backend!
|
||||||
|
|
||||||
When using MLX, it is very likely that you want to use 1 slot per host, ie one
|
# Case 2: Initialize any backend
|
||||||
process per host. The hostfile also needs to contain the current
|
world = mx.distributed.init(backend="any") # equivalent to no arguments
|
||||||
host if you want to run on the local host. Passing the host file to
|
world2 = mx.distributed.init() # same as above
|
||||||
``mpirun`` is simply done using the ``--hostfile`` command line argument.
|
|
||||||
|
# Case 3: Initialize both backends at the same time
|
||||||
|
world_mpi = mx.distributed.init(backend="mpi")
|
||||||
|
world_ring = mx.distributed.init(backend="ring")
|
||||||
|
world_any = mx.distributed.init() # same as MPI because it was initialized first!
|
||||||
|
|
||||||
Training Example
|
Training Example
|
||||||
----------------
|
----------------
|
||||||
@@ -155,13 +166,179 @@ everything else remaining the same.
|
|||||||
optimizer.update(model, grads)
|
optimizer.update(model, grads)
|
||||||
return loss
|
return loss
|
||||||
|
|
||||||
Tuning All Reduce
|
Utilizing ``nn.average_gradients``
|
||||||
-----------------
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
We are working on improving the performance of all reduce on MLX but for now
|
Although the code example above works correctly, it performs one communication
|
||||||
the two main things one can do to extract the most out of distributed training with MLX are:
|
per gradient. It is significantly more efficient to aggregate several gradients
|
||||||
|
together and perform fewer communication steps.
|
||||||
|
|
||||||
1. Perform a few large reductions instead of many small ones to improve
|
This is the purpose of :func:`mlx.nn.average_gradients`. The final code looks
|
||||||
bandwidth and latency
|
almost identical to the example above:
|
||||||
2. Pass ``--mca btl_tcp_links 4`` to ``mpirun`` to configure it to use 4 tcp
|
|
||||||
connections between each host to improve bandwidth
|
.. code:: python
|
||||||
|
|
||||||
|
model = ...
|
||||||
|
optimizer = ...
|
||||||
|
dataset = ...
|
||||||
|
|
||||||
|
def step(model, x, y):
|
||||||
|
loss, grads = loss_grad_fn(model, x, y)
|
||||||
|
grads = mx.nn.average_gradients(grads) # <---- This line was added
|
||||||
|
optimizer.update(model, grads)
|
||||||
|
return loss
|
||||||
|
|
||||||
|
for x, y in dataset:
|
||||||
|
loss = step(model, x, y)
|
||||||
|
mx.eval(loss, model.parameters())
|
||||||
|
|
||||||
|
|
||||||
|
Getting Started with MPI
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
MLX already comes with the ability to "talk" to MPI if it is installed on the
|
||||||
|
machine. Launching distributed MLX programs that use MPI can be done with
|
||||||
|
``mpirun`` as expected. However, in the following examples we will be using
|
||||||
|
``mlx.launch --backend mpi`` which takes care of some nuisances such as setting
|
||||||
|
absolute paths for the ``mpirun`` executable and the ``libmpi.dyld`` shared
|
||||||
|
library.
|
||||||
|
|
||||||
|
The simplest possible usage is the following which, assuming the minimal
|
||||||
|
example in the beginning of this page, should result in:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
$ mlx.launch --backend mpi -n 2 test.py
|
||||||
|
1 array([2, 2, 2, ..., 2, 2, 2], dtype=float32)
|
||||||
|
0 array([2, 2, 2, ..., 2, 2, 2], dtype=float32)
|
||||||
|
|
||||||
|
The above launches two processes on the same (local) machine and we can see
|
||||||
|
both standard output streams. The processes send the array of 1s to each other
|
||||||
|
and compute the sum which is printed. Launching with ``mlx.launch -n 4 ...`` would
|
||||||
|
print 4 etc.
|
||||||
|
|
||||||
|
Installing MPI
|
||||||
|
^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
MPI can be installed with Homebrew, using the Anaconda package manager or
|
||||||
|
compiled from source. Most of our testing is done using ``openmpi`` installed
|
||||||
|
with the Anaconda package manager as follows:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
$ conda install conda-forge::openmpi
|
||||||
|
|
||||||
|
Installing with Homebrew may require specifying the location of ``libmpi.dyld``
|
||||||
|
so that MLX can find it and load it at runtime. This can simply be achieved by
|
||||||
|
passing the ``DYLD_LIBRARY_PATH`` environment variable to ``mpirun`` and it is
|
||||||
|
done automatically by ``mlx.launch``.
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
$ mpirun -np 2 -x DYLD_LIBRARY_PATH=/opt/homebrew/lib/ python test.py
|
||||||
|
$ # or simply
|
||||||
|
$ mlx.launch -n 2 test.py
|
||||||
|
|
||||||
|
Setting up Remote Hosts
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
MPI can automatically connect to remote hosts and set up the communication over
|
||||||
|
the network if the remote hosts can be accessed via ssh. A good checklist to
|
||||||
|
debug connectivity issues is the following:
|
||||||
|
|
||||||
|
* ``ssh hostname`` works from all machines to all machines without asking for
|
||||||
|
password or host confirmation
|
||||||
|
* ``mpirun`` is accessible on all machines.
|
||||||
|
* Ensure that the ``hostname`` used by MPI is the one that you have configured
|
||||||
|
in the ``.ssh/config`` files on all machines.
|
||||||
|
|
||||||
|
Tuning MPI All Reduce
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
For faster all reduce consider using the ring backend either with Thunderbolt
|
||||||
|
connections or over Ethernet.
|
||||||
|
|
||||||
|
Configure MPI to use N tcp connections between each host to improve bandwidth
|
||||||
|
by passing ``--mca btl_tcp_links N``.
|
||||||
|
|
||||||
|
Force MPI to use the most performant network interface by setting ``--mca
|
||||||
|
btl_tcp_if_include <iface>`` where ``<iface>`` should be the interface you want
|
||||||
|
to use.
|
||||||
|
|
||||||
|
Getting Started with Ring
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
The ring backend does not depend on any third party library so it is always
|
||||||
|
available. It uses TCP sockets so the nodes need to be reachable via a network.
|
||||||
|
As the name suggests the nodes are connected in a ring which means that rank 1
|
||||||
|
can only communicate with rank 0 and rank 2, rank 2 only with rank 1 and rank 3
|
||||||
|
and so on and so forth. As a result :func:`send` and :func:`recv` with
|
||||||
|
arbitrary sender and receiver is not supported in the ring backend.
|
||||||
|
|
||||||
|
Defining a Ring
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
The easiest way to define and use a ring is via a JSON hostfile and the
|
||||||
|
``mlx.launch`` :doc:`helper script <launching_distributed>`. For each node one
|
||||||
|
defines a hostname to ssh into to run commands on this node and one or more IPs
|
||||||
|
that this node will listen to for connections.
|
||||||
|
|
||||||
|
For example the hostfile below defines a 4 node ring. ``hostname1`` will be
|
||||||
|
rank 0, ``hostname2`` rank 1 etc.
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
[
|
||||||
|
{"ssh": "hostname1", "ips": ["123.123.123.1"]},
|
||||||
|
{"ssh": "hostname2", "ips": ["123.123.123.2"]},
|
||||||
|
{"ssh": "hostname3", "ips": ["123.123.123.3"]},
|
||||||
|
{"ssh": "hostname4", "ips": ["123.123.123.4"]}
|
||||||
|
]
|
||||||
|
|
||||||
|
Running ``mlx.launch --hostfile ring-4.json my_script.py`` will ssh into each
|
||||||
|
node, run the script which will listen for connections in each of the provided
|
||||||
|
IPs. Specifically, ``hostname1`` will connect to ``123.123.123.2`` and accept a
|
||||||
|
connection from ``123.123.123.4`` and so on and so forth.
|
||||||
|
|
||||||
|
Thunderbolt Ring
|
||||||
|
^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Although the ring backend can have benefits over MPI even for Ethernet, its
|
||||||
|
main purpose is to use Thunderbolt rings for higher bandwidth communication.
|
||||||
|
Setting up such thunderbolt rings can be done manually, but is a relatively
|
||||||
|
tedious process. To simplify this, we provide the utility ``mlx.distributed_config``.
|
||||||
|
|
||||||
|
To use ``mlx.distributed_config`` your computers need to be accessible by ssh via
|
||||||
|
Ethernet or Wi-Fi. Subsequently, connect them via thunderbolt cables and then call the
|
||||||
|
utility as follows:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
mlx.distributed_config --verbose --hosts host1,host2,host3,host4
|
||||||
|
|
||||||
|
By default the script will attempt to discover the thunderbolt ring and provide
|
||||||
|
you with the commands to configure each node as well as the ``hostfile.json``
|
||||||
|
to use with ``mlx.launch``. If password-less ``sudo`` is available on the nodes
|
||||||
|
then ``--auto-setup`` can be used to configure them automatically.
|
||||||
|
|
||||||
|
To validate your connection without configuring anything
|
||||||
|
``mlx.distributed_config`` can also plot the ring using DOT format.
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
mlx.distributed_config --verbose --hosts host1,host2,host3,host4 --dot >ring.dot
|
||||||
|
dot -Tpng ring.dot >ring.png
|
||||||
|
open ring.png
|
||||||
|
|
||||||
|
If you want to go through the process manually, the steps are as follows:
|
||||||
|
|
||||||
|
* Disable the thunderbolt bridge interface
|
||||||
|
* For the cable connecting rank ``i`` to rank ``i + 1`` find the interfaces
|
||||||
|
corresponding to that cable in nodes ``i`` and ``i + 1``.
|
||||||
|
* Set up a unique subnetwork connecting the two nodes for the corresponding
|
||||||
|
interfaces. For instance if the cable corresponds to ``en2`` on node ``i``
|
||||||
|
and ``en2`` also on node ``i + 1`` then we may assign IPs ``192.168.0.1`` and
|
||||||
|
``192.168.0.2`` respectively to the two nodes. For more details you can see
|
||||||
|
the commands prepared by the utility script.
|
||||||
|
|||||||
@@ -151,7 +151,7 @@ parameters, pass them as inputs to the ``call`` wrapper:
|
|||||||
model.update(tree_unflatten(list(params.items())))
|
model.update(tree_unflatten(list(params.items())))
|
||||||
return model(x)
|
return model(x)
|
||||||
|
|
||||||
params = dict(tree_flatten(model.parameters()))
|
params = tree_flatten(model.parameters(), destination={})
|
||||||
mx.export_function("model.mlxfn", call, (mx.zeros(4),), params)
|
mx.export_function("model.mlxfn", call, (mx.zeros(4),), params)
|
||||||
|
|
||||||
|
|
||||||
@@ -164,11 +164,11 @@ to export a function which can be used for inputs with variable shapes:
|
|||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
mx.export_function("fun.mlxfn", mx.abs, mx.array(0.0), shapeless=True)
|
mx.export_function("fun.mlxfn", mx.abs, mx.array([0.0]), shapeless=True)
|
||||||
imported_abs = mx.import_function("fun.mlxfn")
|
imported_abs = mx.import_function("fun.mlxfn")
|
||||||
|
|
||||||
# Ok
|
# Ok
|
||||||
out, = imported_abs(mx.array(-1.0))
|
out, = imported_abs(mx.array([-1.0]))
|
||||||
|
|
||||||
# Also ok
|
# Also ok
|
||||||
out, = imported_abs(mx.array([-1.0, -2.0]))
|
out, = imported_abs(mx.array([-1.0, -2.0]))
|
||||||
|
|||||||
@@ -107,6 +107,28 @@ same array:
|
|||||||
>>> a
|
>>> a
|
||||||
array([1, 2, 0], dtype=int32)
|
array([1, 2, 0], dtype=int32)
|
||||||
|
|
||||||
|
Note that unlike NumPy, slicing an array creates a copy, not a view. So
|
||||||
|
mutating it does not mutate the original array:
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
>>> a = mx.array([1, 2, 3])
|
||||||
|
>>> b = a[:]
|
||||||
|
>>> b[2] = 0
|
||||||
|
>>> b
|
||||||
|
array([1, 2, 0], dtype=int32)
|
||||||
|
>>> a
|
||||||
|
array([1, 2, 3], dtype=int32)
|
||||||
|
|
||||||
|
Also unlike NumPy, updates to the same location are nondeterministic:
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
>>> a = mx.array([1, 2, 3])
|
||||||
|
>>> a[[0, 0]] = mx.array([4, 5])
|
||||||
|
|
||||||
|
The first element of ``a`` could be ``4`` or ``5``.
|
||||||
|
|
||||||
Transformations of functions which use in-place updates are allowed and work as
|
Transformations of functions which use in-place updates are allowed and work as
|
||||||
expected. For example:
|
expected. For example:
|
||||||
|
|
||||||
|
|||||||
105
docs/src/usage/launching_distributed.rst
Normal file
105
docs/src/usage/launching_distributed.rst
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
:orphan:
|
||||||
|
|
||||||
|
.. _usage_launch_distributed:
|
||||||
|
|
||||||
|
Launching Distributed Programs
|
||||||
|
==============================
|
||||||
|
|
||||||
|
.. currentmodule:: mlx.core.distributed
|
||||||
|
|
||||||
|
Installing the MLX python package provides a helper script ``mlx.launch`` that
|
||||||
|
can be used to run python scripts distributed on several nodes. It allows
|
||||||
|
launching using either the MPI backend or the ring backend. See the
|
||||||
|
:doc:`distributed docs <distributed>` for the different backends.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
|
||||||
|
The minimal usage example of ``mlx.launch`` is simply
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
mlx.launch --hosts ip1,ip2 my_script.py
|
||||||
|
|
||||||
|
or for testing on localhost
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
mlx.launch -n 2 my_script.py
|
||||||
|
|
||||||
|
The ``mlx.launch`` command connects to the provided host and launches the input
|
||||||
|
script on each host. It monitors each of the launched processes and terminates
|
||||||
|
the rest if one of them fails unexpectedly or if ``mlx.launch`` is terminated.
|
||||||
|
It also takes care of forwarding the output of each remote process to stdout
|
||||||
|
and stderr respectively.
|
||||||
|
|
||||||
|
Providing Hosts
|
||||||
|
^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Hosts can be provided as command line arguments, like above, but the way that
|
||||||
|
allows one to fully define a list of hosts is via a JSON hostfile. The hostfile has
|
||||||
|
a very simple schema. It is simply a list of objects that define each host via
|
||||||
|
a hostname to ssh to and a list of IPs to utilize for the communication.
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
[
|
||||||
|
{"ssh": "hostname1", "ips": ["123.123.1.1", "123.123.2.1"]},
|
||||||
|
{"ssh": "hostname2", "ips": ["123.123.1.2", "123.123.2.2"]}
|
||||||
|
]
|
||||||
|
|
||||||
|
You can use ``mlx.distributed_config --over ethernet`` to create a hostfile
|
||||||
|
with IPs corresponding to the ``en0`` interface.
|
||||||
|
|
||||||
|
Setting up Remote Hosts
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
In order to be able to launch the script on each host we need to be able to
|
||||||
|
connect via ssh. Moreover the input script and python binary need to be on each
|
||||||
|
host and on the same path. A good checklist to debug errors is the following:
|
||||||
|
|
||||||
|
* ``ssh hostname`` works without asking for password or host confirmation
|
||||||
|
* the python binary is available on all hosts at the same path. You can use
|
||||||
|
``mlx.launch --print-python`` to see what that path is.
|
||||||
|
* the script you want to run is available on all hosts at the same path
|
||||||
|
|
||||||
|
.. _mpi_specifics:
|
||||||
|
|
||||||
|
MPI Specifics
|
||||||
|
-------------
|
||||||
|
|
||||||
|
One can use MPI by passing ``--backend mpi`` to ``mlx.launch``. In that case,
|
||||||
|
``mlx.launch`` is a thin wrapper over ``mpirun``. Moreover,
|
||||||
|
|
||||||
|
* The IPs in the hostfile are ignored
|
||||||
|
* The ssh connectivity requirement is stronger as every node needs to be able
|
||||||
|
to connect to every other node
|
||||||
|
* ``mpirun`` needs to be available on every node at the same path
|
||||||
|
|
||||||
|
Finally, one can pass arguments to ``mpirun`` using ``--mpi-arg``. For instance
|
||||||
|
to choose a specific interface for the byte-transfer-layer of MPI we can call
|
||||||
|
``mlx.launch`` as follows:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
mlx.launch --backend mpi --mpi-arg '--mca btl_tcp_if_include en0' --hostfile hosts.json my_script.py
|
||||||
|
|
||||||
|
|
||||||
|
.. _ring_specifics:
|
||||||
|
|
||||||
|
Ring Specifics
|
||||||
|
--------------
|
||||||
|
|
||||||
|
The ring backend, which is also the default backend, can be explicitly selected
|
||||||
|
with the argument ``--backend ring``. The ring backend has some specific
|
||||||
|
requirements and arguments that are different to MPI:
|
||||||
|
|
||||||
|
* The argument ``--hosts`` only accepts IPs and not hostnames. If we need to
|
||||||
|
  ssh to a hostname that does not correspond to the IP we want to bind to, we
|
||||||
|
have to provide a hostfile.
|
||||||
|
* ``--starting-port`` defines the port to bind to on the remote hosts.
|
||||||
|
Specifically rank 0 for the first IP will use this port and each subsequent
|
||||||
|
IP or rank will add 1 to this port.
|
||||||
|
* ``--connections-per-ip`` allows us to increase the number of connections
|
||||||
|
between neighboring nodes. This corresponds to ``--mca btl_tcp_links 2`` for
|
||||||
|
``mpirun``.
|
||||||
@@ -21,11 +21,13 @@ Let's convert an array to NumPy and back.
|
|||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
|
|
||||||
Since NumPy does not support ``bfloat16`` arrays, you will need to convert to ``float16`` or ``float32`` first:
|
Since NumPy does not support ``bfloat16`` arrays, you will need to convert
|
||||||
``np.array(a.astype(mx.float32))``.
|
to ``float16`` or ``float32`` first: ``np.array(a.astype(mx.float32))``.
|
||||||
Otherwise, you will receive an error like: ``Item size 2 for PEP 3118 buffer format string does not match the dtype V item size 0.``
|
Otherwise, you will receive an error like: ``Item size 2 for PEP 3118
|
||||||
|
buffer format string does not match the dtype V item size 0.``
|
||||||
|
|
||||||
By default, NumPy copies data to a new array. This can be prevented by creating an array view:
|
By default, NumPy copies data to a new array. This can be prevented by creating
|
||||||
|
an array view:
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
@@ -35,10 +37,16 @@ By default, NumPy copies data to a new array. This can be prevented by creating
|
|||||||
a_view[0] = 1
|
a_view[0] = 1
|
||||||
print(a[0].item()) # 1
|
print(a[0].item()) # 1
|
||||||
|
|
||||||
A NumPy array view is a normal NumPy array, except that it does not own its memory.
|
.. note::
|
||||||
This means writing to the view is reflected in the original array.
|
|
||||||
|
|
||||||
While this is quite powerful to prevent copying arrays, it should be noted that external changes to the memory of arrays cannot be reflected in gradients.
|
   NumPy arrays with type ``float64`` will by default be converted to MLX arrays
|
||||||
|
with type ``float32``.
|
||||||
|
|
||||||
|
A NumPy array view is a normal NumPy array, except that it does not own its
|
||||||
|
memory. This means writing to the view is reflected in the original array.
|
||||||
|
|
||||||
|
While this is quite powerful to prevent copying arrays, it should be noted that
|
||||||
|
external changes to the memory of arrays cannot be reflected in gradients.
|
||||||
|
|
||||||
Let's demonstrate this in an example:
|
Let's demonstrate this in an example:
|
||||||
|
|
||||||
@@ -56,11 +64,12 @@ Let's demonstrate this in an example:
|
|||||||
|
|
||||||
|
|
||||||
The function ``f`` indirectly modifies the array ``x`` through a memory view.
|
The function ``f`` indirectly modifies the array ``x`` through a memory view.
|
||||||
However, this modification is not reflected in the gradient, as seen in the last line outputting ``1.0``,
|
However, this modification is not reflected in the gradient, as seen in the
|
||||||
representing the gradient of the sum operation alone.
|
last line outputting ``1.0``, representing the gradient of the sum operation
|
||||||
The squaring of ``x`` occurs externally to MLX, meaning that no gradient is incorporated.
|
alone. The squaring of ``x`` occurs externally to MLX, meaning that no
|
||||||
It's important to note that a similar issue arises during array conversion and copying.
|
gradient is incorporated. It's important to note that a similar issue arises
|
||||||
For instance, a function defined as ``mx.array(np.array(x)**2).sum()`` would also result in an incorrect gradient,
|
during array conversion and copying. For instance, a function defined as
|
||||||
|
``mx.array(np.array(x)**2).sum()`` would also result in an incorrect gradient,
|
||||||
even though no in-place operations on MLX memory are executed.
|
even though no in-place operations on MLX memory are executed.
|
||||||
|
|
||||||
PyTorch
|
PyTorch
|
||||||
@@ -71,7 +80,8 @@ PyTorch
|
|||||||
PyTorch Support for :obj:`memoryview` is experimental and can break for
|
PyTorch Support for :obj:`memoryview` is experimental and can break for
|
||||||
multi-dimensional arrays. Casting to NumPy first is advised for now.
|
multi-dimensional arrays. Casting to NumPy first is advised for now.
|
||||||
|
|
||||||
PyTorch supports the buffer protocol, but it requires an explicit :obj:`memoryview`.
|
PyTorch supports the buffer protocol, but it requires an explicit
|
||||||
|
:obj:`memoryview`.
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
@@ -82,7 +92,8 @@ PyTorch supports the buffer protocol, but it requires an explicit :obj:`memoryvi
|
|||||||
b = torch.tensor(memoryview(a))
|
b = torch.tensor(memoryview(a))
|
||||||
c = mx.array(b.numpy())
|
c = mx.array(b.numpy())
|
||||||
|
|
||||||
Conversion from PyTorch tensors back to arrays must be done via intermediate NumPy arrays with ``numpy()``.
|
Conversion from PyTorch tensors back to arrays must be done via intermediate
|
||||||
|
NumPy arrays with ``numpy()``.
|
||||||
|
|
||||||
JAX
|
JAX
|
||||||
---
|
---
|
||||||
@@ -100,7 +111,8 @@ JAX fully supports the buffer protocol.
|
|||||||
TensorFlow
|
TensorFlow
|
||||||
----------
|
----------
|
||||||
|
|
||||||
TensorFlow supports the buffer protocol, but it requires an explicit :obj:`memoryview`.
|
TensorFlow supports the buffer protocol, but it requires an explicit
|
||||||
|
:obj:`memoryview`.
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
|
|||||||
@@ -14,14 +14,17 @@ void array_basics() {
|
|||||||
// Get the value out of it:
|
// Get the value out of it:
|
||||||
auto s = x.item<float>();
|
auto s = x.item<float>();
|
||||||
assert(s == 1.0);
|
assert(s == 1.0);
|
||||||
|
(void)s;
|
||||||
|
|
||||||
// Scalars have a size of 1:
|
// Scalars have a size of 1:
|
||||||
size_t size = x.size();
|
int64_t size = x.size();
|
||||||
assert(size == 1);
|
assert(size == 1);
|
||||||
|
(void)size;
|
||||||
|
|
||||||
// Scalars have 0 dimensions:
|
// Scalars have 0 dimensions:
|
||||||
int ndim = x.ndim();
|
int ndim = x.ndim();
|
||||||
assert(ndim == 0);
|
assert(ndim == 0);
|
||||||
|
(void)ndim;
|
||||||
|
|
||||||
// The shape should be an empty vector:
|
// The shape should be an empty vector:
|
||||||
auto shape = x.shape();
|
auto shape = x.shape();
|
||||||
@@ -30,6 +33,7 @@ void array_basics() {
|
|||||||
// The datatype should be float32:
|
// The datatype should be float32:
|
||||||
auto dtype = x.dtype();
|
auto dtype = x.dtype();
|
||||||
assert(dtype == mx::float32);
|
assert(dtype == mx::float32);
|
||||||
|
(void)dtype;
|
||||||
|
|
||||||
// Specify the dtype when constructing the array:
|
// Specify the dtype when constructing the array:
|
||||||
x = mx::array(1, mx::int32);
|
x = mx::array(1, mx::int32);
|
||||||
|
|||||||
@@ -10,7 +10,6 @@ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
|||||||
option(BUILD_SHARED_LIBS "Build extensions as a shared library" ON)
|
option(BUILD_SHARED_LIBS "Build extensions as a shared library" ON)
|
||||||
|
|
||||||
# ----------------------------- Dependencies -----------------------------
|
# ----------------------------- Dependencies -----------------------------
|
||||||
find_package(MLX CONFIG REQUIRED)
|
|
||||||
find_package(
|
find_package(
|
||||||
Python 3.8
|
Python 3.8
|
||||||
COMPONENTS Interpreter Development.Module
|
COMPONENTS Interpreter Development.Module
|
||||||
@@ -21,6 +20,12 @@ execute_process(
|
|||||||
OUTPUT_VARIABLE nanobind_ROOT)
|
OUTPUT_VARIABLE nanobind_ROOT)
|
||||||
find_package(nanobind CONFIG REQUIRED)
|
find_package(nanobind CONFIG REQUIRED)
|
||||||
|
|
||||||
|
execute_process(
|
||||||
|
COMMAND "${Python_EXECUTABLE}" -m mlx --cmake-dir
|
||||||
|
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||||
|
OUTPUT_VARIABLE MLX_ROOT)
|
||||||
|
find_package(MLX CONFIG REQUIRED)
|
||||||
|
|
||||||
# ----------------------------- Extensions -----------------------------
|
# ----------------------------- Extensions -----------------------------
|
||||||
|
|
||||||
# Add library
|
# Add library
|
||||||
|
|||||||
@@ -1,19 +1,15 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
// Copyright © 2023-2025 Apple Inc.
|
||||||
|
|
||||||
#include <cassert>
|
#include <dlfcn.h>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/backend/common/utils.h"
|
#include "mlx/backend/common/utils.h"
|
||||||
|
#include "mlx/backend/cpu/encoder.h"
|
||||||
#include "mlx/utils.h"
|
#include "mlx/utils.h"
|
||||||
|
|
||||||
#include "axpby/axpby.h"
|
#include "axpby/axpby.h"
|
||||||
|
|
||||||
#ifdef ACCELERATE_NEW_LAPACK
|
|
||||||
#include <vecLib/cblas_new.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef _METAL_
|
#ifdef _METAL_
|
||||||
#include "mlx/backend/metal/device.h"
|
#include "mlx/backend/metal/device.h"
|
||||||
#include "mlx/backend/metal/utils.h"
|
#include "mlx/backend/metal/utils.h"
|
||||||
@@ -21,6 +17,19 @@
|
|||||||
|
|
||||||
namespace my_ext {
|
namespace my_ext {
|
||||||
|
|
||||||
|
// A helper function to find the location of the current binary on disk.
|
||||||
|
// The Metal library ("mlx_ext.mtllib"), should be in the same directory.
|
||||||
|
std::string current_binary_dir() {
|
||||||
|
static std::string binary_dir = []() {
|
||||||
|
Dl_info info;
|
||||||
|
if (!dladdr(reinterpret_cast<void*>(¤t_binary_dir), &info)) {
|
||||||
|
throw std::runtime_error("Unable to get current binary dir.");
|
||||||
|
}
|
||||||
|
return std::filesystem::path(info.dli_fname).parent_path().string();
|
||||||
|
}();
|
||||||
|
return binary_dir;
|
||||||
|
}
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
// Operation Implementation
|
// Operation Implementation
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
@@ -75,136 +84,65 @@ void axpby_impl(
|
|||||||
const mx::array& y,
|
const mx::array& y,
|
||||||
mx::array& out,
|
mx::array& out,
|
||||||
float alpha_,
|
float alpha_,
|
||||||
float beta_) {
|
float beta_,
|
||||||
// We only allocate memory when we are ready to fill the output
|
mx::Stream stream) {
|
||||||
// malloc_or_wait synchronously allocates available memory
|
out.set_data(mx::allocator::malloc(out.nbytes()));
|
||||||
// There may be a wait executed here if the allocation is requested
|
|
||||||
// under memory-pressured conditions
|
|
||||||
out.set_data(mx::allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
|
|
||||||
// Collect input and output data pointers
|
// Get the CPU command encoder and register input and output arrays
|
||||||
const T* x_ptr = x.data<T>();
|
auto& encoder = mx::cpu::get_command_encoder(stream);
|
||||||
const T* y_ptr = y.data<T>();
|
encoder.set_input_array(x);
|
||||||
T* out_ptr = out.data<T>();
|
encoder.set_input_array(y);
|
||||||
|
encoder.set_output_array(out);
|
||||||
|
|
||||||
|
// Launch the CPU kernel
|
||||||
|
encoder.dispatch([x_ptr = x.data<T>(),
|
||||||
|
y_ptr = y.data<T>(),
|
||||||
|
out_ptr = out.data<T>(),
|
||||||
|
size = out.size(),
|
||||||
|
shape = out.shape(),
|
||||||
|
x_strides = x.strides(),
|
||||||
|
y_strides = y.strides(),
|
||||||
|
alpha_,
|
||||||
|
beta_]() {
|
||||||
// Cast alpha and beta to the relevant types
|
// Cast alpha and beta to the relevant types
|
||||||
T alpha = static_cast<T>(alpha_);
|
T alpha = static_cast<T>(alpha_);
|
||||||
T beta = static_cast<T>(beta_);
|
T beta = static_cast<T>(beta_);
|
||||||
|
|
||||||
// Do the element-wise operation for each output
|
// Do the element-wise operation for each output
|
||||||
for (size_t out_idx = 0; out_idx < out.size(); out_idx++) {
|
for (size_t out_idx = 0; out_idx < size; out_idx++) {
|
||||||
// Map linear indices to offsets in x and y
|
// Map linear indices to offsets in x and y
|
||||||
auto x_offset = mx::elem_to_loc(out_idx, x.shape(), x.strides());
|
auto x_offset = mx::elem_to_loc(out_idx, shape, x_strides);
|
||||||
auto y_offset = mx::elem_to_loc(out_idx, y.shape(), y.strides());
|
auto y_offset = mx::elem_to_loc(out_idx, shape, y_strides);
|
||||||
|
|
||||||
// We allocate the output to be contiguous and regularly strided
|
// We allocate the output to be contiguous and regularly strided
|
||||||
// (defaults to row major) and hence it doesn't need additional mapping
|
// (defaults to row major) and hence it doesn't need additional mapping
|
||||||
out_ptr[out_idx] = alpha * x_ptr[x_offset] + beta * y_ptr[y_offset];
|
out_ptr[out_idx] = alpha * x_ptr[x_offset] + beta * y_ptr[y_offset];
|
||||||
}
|
}
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Fall back implementation for evaluation on CPU */
|
void Axpby::eval_cpu(
|
||||||
void Axpby::eval(
|
|
||||||
const std::vector<mx::array>& inputs,
|
const std::vector<mx::array>& inputs,
|
||||||
std::vector<mx::array>& outputs) {
|
std::vector<mx::array>& outputs) {
|
||||||
// Check the inputs (registered in the op while constructing the out array)
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& x = inputs[0];
|
auto& x = inputs[0];
|
||||||
auto& y = inputs[1];
|
auto& y = inputs[1];
|
||||||
auto& out = outputs[0];
|
auto& out = outputs[0];
|
||||||
|
|
||||||
// Dispatch to the correct dtype
|
// Dispatch to the correct dtype
|
||||||
if (out.dtype() == mx::float32) {
|
if (out.dtype() == mx::float32) {
|
||||||
return axpby_impl<float>(x, y, out, alpha_, beta_);
|
return axpby_impl<float>(x, y, out, alpha_, beta_, stream());
|
||||||
} else if (out.dtype() == mx::float16) {
|
} else if (out.dtype() == mx::float16) {
|
||||||
return axpby_impl<mx::float16_t>(x, y, out, alpha_, beta_);
|
return axpby_impl<mx::float16_t>(x, y, out, alpha_, beta_, stream());
|
||||||
} else if (out.dtype() == mx::bfloat16) {
|
} else if (out.dtype() == mx::bfloat16) {
|
||||||
return axpby_impl<mx::bfloat16_t>(x, y, out, alpha_, beta_);
|
return axpby_impl<mx::bfloat16_t>(x, y, out, alpha_, beta_, stream());
|
||||||
} else if (out.dtype() == mx::complex64) {
|
} else if (out.dtype() == mx::complex64) {
|
||||||
return axpby_impl<mx::complex64_t>(x, y, out, alpha_, beta_);
|
return axpby_impl<mx::complex64_t>(x, y, out, alpha_, beta_, stream());
|
||||||
} else {
|
} else {
|
||||||
throw std::runtime_error(
|
throw std::runtime_error(
|
||||||
"Axpby is only supported for floating point types.");
|
"Axpby is only supported for floating point types.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Primitive Accelerate Backend Implementation
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
#ifdef ACCELERATE_NEW_LAPACK
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
void axpby_impl_accelerate(
|
|
||||||
const mx::array& x,
|
|
||||||
const mx::array& y,
|
|
||||||
mx::array& out,
|
|
||||||
float alpha_,
|
|
||||||
float beta_) {
|
|
||||||
// Accelerate library provides catlas_saxpby which does
|
|
||||||
// Y = (alpha * X) + (beta * Y) in place
|
|
||||||
// To use it, we first copy the data in y over to the output array
|
|
||||||
|
|
||||||
// This specialization requires both x and y be contiguous in the same mode
|
|
||||||
// i.e: corresponding linear indices in both point to corresponding elements
|
|
||||||
// The data in the output array is allocated to match the strides in y
|
|
||||||
// such that x, y, and out are contiguous in the same mode and
|
|
||||||
// no transposition is needed
|
|
||||||
out.set_data(mx::allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
|
|
||||||
// We then copy over the elements using the contiguous vector specialization
|
|
||||||
copy_inplace(y, out, mx::CopyType::Vector);
|
|
||||||
|
|
||||||
// Get x and y pointers for catlas_saxpby
|
|
||||||
const T* x_ptr = x.data<T>();
|
|
||||||
T* y_ptr = out.data<T>();
|
|
||||||
|
|
||||||
T alpha = static_cast<T>(alpha_);
|
|
||||||
T beta = static_cast<T>(beta_);
|
|
||||||
|
|
||||||
// Call the inplace accelerate operator
|
|
||||||
catlas_saxpby(
|
|
||||||
/* N = */ out.size(),
|
|
||||||
/* ALPHA = */ alpha,
|
|
||||||
/* X = */ x_ptr,
|
|
||||||
/* INCX = */ 1,
|
|
||||||
/* BETA = */ beta,
|
|
||||||
/* Y = */ y_ptr,
|
|
||||||
/* INCY = */ 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Evaluate primitive on CPU using accelerate specializations */
|
|
||||||
void Axpby::eval_cpu(
|
|
||||||
const std::vector<mx::array>& inputs,
|
|
||||||
std::vector<mx::array>& outputs) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& x = inputs[0];
|
|
||||||
auto& y = inputs[1];
|
|
||||||
auto& out = outputs[0];
|
|
||||||
|
|
||||||
// Accelerate specialization for contiguous single precision float arrays
|
|
||||||
if (out.dtype() == mx::float32 &&
|
|
||||||
((x.flags().row_contiguous && y.flags().row_contiguous) ||
|
|
||||||
(x.flags().col_contiguous && y.flags().col_contiguous))) {
|
|
||||||
axpby_impl_accelerate<float>(x, y, out, alpha_, beta_);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fall back to common backend if specializations are not available
|
|
||||||
eval(inputs, outputs);
|
|
||||||
}
|
|
||||||
|
|
||||||
#else // Accelerate not available
|
|
||||||
|
|
||||||
/** Evaluate primitive on CPU falling back to common backend */
|
|
||||||
void Axpby::eval_cpu(
|
|
||||||
const std::vector<mx::array>& inputs,
|
|
||||||
std::vector<mx::array>& outputs) {
|
|
||||||
eval(inputs, outputs);
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
// Primitive Metal Backend Implementation
|
// Primitive Metal Backend Implementation
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
@@ -216,7 +154,6 @@ void Axpby::eval_gpu(
|
|||||||
const std::vector<mx::array>& inputs,
|
const std::vector<mx::array>& inputs,
|
||||||
std::vector<mx::array>& outputs) {
|
std::vector<mx::array>& outputs) {
|
||||||
// Prepare inputs
|
// Prepare inputs
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& x = inputs[0];
|
auto& x = inputs[0];
|
||||||
auto& y = inputs[1];
|
auto& y = inputs[1];
|
||||||
auto& out = outputs[0];
|
auto& out = outputs[0];
|
||||||
@@ -235,25 +172,24 @@ void Axpby::eval_gpu(
|
|||||||
// Allocate output memory with strides based on specialization
|
// Allocate output memory with strides based on specialization
|
||||||
if (contiguous_kernel) {
|
if (contiguous_kernel) {
|
||||||
out.set_data(
|
out.set_data(
|
||||||
mx::allocator::malloc_or_wait(x.data_size() * out.itemsize()),
|
mx::allocator::malloc(x.data_size() * out.itemsize()),
|
||||||
x.data_size(),
|
x.data_size(),
|
||||||
x.strides(),
|
x.strides(),
|
||||||
x.flags());
|
x.flags());
|
||||||
} else {
|
} else {
|
||||||
out.set_data(mx::allocator::malloc_or_wait(out.nbytes()));
|
out.set_data(mx::allocator::malloc(out.nbytes()));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Resolve name of kernel (corresponds to axpby.metal)
|
// Resolve name of kernel (corresponds to axpby.metal)
|
||||||
std::ostringstream kname;
|
std::string kname = "axpby_";
|
||||||
kname << "axpby_";
|
kname += (contiguous_kernel ? "contiguous_" : "general_");
|
||||||
kname << (contiguous_kernel ? "contiguous_" : "general_");
|
kname += type_to_name(out);
|
||||||
kname << type_to_name(out);
|
|
||||||
|
|
||||||
// Make sure the metal library is available
|
// Load the metal library
|
||||||
d.register_library("mlx_ext");
|
auto lib = d.get_library("mlx_ext", current_binary_dir());
|
||||||
|
|
||||||
// Make a kernel from this metal library
|
// Make a kernel from this metal library
|
||||||
auto kernel = d.get_kernel(kname.str(), "mlx_ext");
|
auto kernel = d.get_kernel(kname, lib);
|
||||||
|
|
||||||
// Prepare to encode kernel
|
// Prepare to encode kernel
|
||||||
auto& compute_encoder = d.get_command_encoder(s.index);
|
auto& compute_encoder = d.get_command_encoder(s.index);
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
// Copyright © 2023-2025 Apple Inc.
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
@@ -74,9 +74,9 @@ class Axpby : public mx::Primitive {
|
|||||||
const std::vector<mx::array>& inputs,
|
const std::vector<mx::array>& inputs,
|
||||||
const std::vector<int>& axes) override;
|
const std::vector<int>& axes) override;
|
||||||
|
|
||||||
/** Print the primitive. */
|
/** The name of primitive. */
|
||||||
void print(std::ostream& os) override {
|
const char* name() const override {
|
||||||
os << "Axpby";
|
return "Axpby";
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Equivalence check **/
|
/** Equivalence check **/
|
||||||
@@ -85,11 +85,6 @@ class Axpby : public mx::Primitive {
|
|||||||
private:
|
private:
|
||||||
float alpha_;
|
float alpha_;
|
||||||
float beta_;
|
float beta_;
|
||||||
|
|
||||||
/** Fall back implementation for evaluation on CPU */
|
|
||||||
void eval(
|
|
||||||
const std::vector<mx::array>& inputs,
|
|
||||||
std::vector<mx::array>& outputs);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace my_ext
|
} // namespace my_ext
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
// Copyright © 2023-2025 Apple Inc.
|
||||||
|
|
||||||
#include <metal_stdlib>
|
#include <metal_stdlib>
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
setuptools>=42
|
setuptools>=42
|
||||||
cmake>=3.25
|
cmake>=3.25
|
||||||
mlx>=0.21.0
|
mlx>=0.21.0
|
||||||
nanobind==2.2.0
|
nanobind==2.4.0
|
||||||
|
|||||||
@@ -3,8 +3,10 @@ from mlx_sample_extensions import axpby
|
|||||||
|
|
||||||
a = mx.ones((3, 4))
|
a = mx.ones((3, 4))
|
||||||
b = mx.ones((3, 4))
|
b = mx.ones((3, 4))
|
||||||
c = axpby(a, b, 4.0, 2.0, stream=mx.cpu)
|
c_cpu = axpby(a, b, 4.0, 2.0, stream=mx.cpu)
|
||||||
|
c_gpu = axpby(a, b, 4.0, 2.0, stream=mx.gpu)
|
||||||
|
|
||||||
print(f"c shape: {c.shape}")
|
print(f"c shape: {c_cpu.shape}")
|
||||||
print(f"c dtype: {c.dtype}")
|
print(f"c dtype: {c_cpu.dtype}")
|
||||||
print(f"c correct: {mx.all(c == 6.0).item()}")
|
print(f"c_cpu correct: {mx.all(c_cpu == 6.0).item()}")
|
||||||
|
print(f"c_gpu correct: {mx.all(c_gpu == 6.0).item()}")
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ target_sources(
|
|||||||
${CMAKE_CURRENT_SOURCE_DIR}/compile.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/compile.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/device.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/device.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/dtype.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/dtype.cpp
|
||||||
|
${CMAKE_CURRENT_SOURCE_DIR}/dtype_utils.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/export.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/export.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/einsum.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/einsum.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/fast.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/fast.cpp
|
||||||
@@ -19,6 +20,11 @@ target_sources(
|
|||||||
${CMAKE_CURRENT_SOURCE_DIR}/linalg.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/linalg.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/backend/metal/metal.h)
|
${CMAKE_CURRENT_SOURCE_DIR}/backend/metal/metal.h)
|
||||||
|
|
||||||
|
# Define MLX_VERSION only in the version.cpp file.
|
||||||
|
add_library(mlx_version OBJECT ${CMAKE_CURRENT_SOURCE_DIR}/version.cpp)
|
||||||
|
target_compile_definitions(mlx_version PRIVATE MLX_VERSION="${MLX_VERSION}")
|
||||||
|
target_link_libraries(mlx PRIVATE $<BUILD_INTERFACE:mlx_version>)
|
||||||
|
|
||||||
if(MSVC)
|
if(MSVC)
|
||||||
# Disable some MSVC warnings to speed up compilation.
|
# Disable some MSVC warnings to speed up compilation.
|
||||||
target_compile_options(mlx PUBLIC /wd4068 /wd4244 /wd4267 /wd4804)
|
target_compile_options(mlx PUBLIC /wd4068 /wd4244 /wd4267 /wd4804)
|
||||||
@@ -29,24 +35,33 @@ if(WIN32)
|
|||||||
set_target_properties(mlx PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
|
set_target_properties(mlx PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/common)
|
||||||
|
|
||||||
if(MLX_BUILD_CPU)
|
if(MLX_BUILD_CPU)
|
||||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/common)
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/cpu)
|
||||||
else()
|
else()
|
||||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/no_cpu)
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/no_cpu)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/distributed)
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/distributed)
|
||||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/io)
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/io)
|
||||||
if(MLX_BUILD_ACCELERATE)
|
|
||||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/accelerate)
|
|
||||||
elseif(MLX_BUILD_CPU)
|
|
||||||
target_sources(
|
|
||||||
mlx
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/backend/common/default_primitives.cpp)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(MLX_BUILD_METAL)
|
if(MLX_BUILD_METAL)
|
||||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/metal)
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/metal)
|
||||||
else()
|
else()
|
||||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/no_metal)
|
target_sources(mlx
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/backend/metal/no_metal.cpp)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if(MLX_BUILD_CUDA)
|
||||||
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/cuda)
|
||||||
|
else()
|
||||||
|
target_sources(mlx
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/backend/cuda/no_cuda.cpp)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if(MLX_BUILD_METAL OR MLX_BUILD_CUDA)
|
||||||
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/gpu)
|
||||||
|
else()
|
||||||
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backend/no_gpu)
|
||||||
endif()
|
endif()
|
||||||
|
|||||||
@@ -4,12 +4,11 @@
|
|||||||
#include <sstream>
|
#include <sstream>
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
#include "mlx/allocator.h"
|
||||||
#include "mlx/scheduler.h"
|
|
||||||
|
|
||||||
namespace mlx::core::allocator {
|
namespace mlx::core::allocator {
|
||||||
|
|
||||||
Buffer malloc(size_t size) {
|
Buffer malloc(size_t size) {
|
||||||
auto buffer = allocator().malloc(size, /* allow_swap */ true);
|
auto buffer = allocator().malloc(size);
|
||||||
if (size && !buffer.ptr()) {
|
if (size && !buffer.ptr()) {
|
||||||
std::ostringstream msg;
|
std::ostringstream msg;
|
||||||
msg << "[malloc] Unable to allocate " << size << " bytes.";
|
msg << "[malloc] Unable to allocate " << size << " bytes.";
|
||||||
@@ -22,45 +21,4 @@ void free(Buffer buffer) {
|
|||||||
allocator().free(buffer);
|
allocator().free(buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
Buffer CommonAllocator::malloc(size_t size, bool) {
|
|
||||||
void* ptr = std::malloc(size + sizeof(size_t));
|
|
||||||
if (ptr != nullptr) {
|
|
||||||
*static_cast<size_t*>(ptr) = size;
|
|
||||||
}
|
|
||||||
return Buffer{ptr};
|
|
||||||
}
|
|
||||||
|
|
||||||
void CommonAllocator::free(Buffer buffer) {
|
|
||||||
std::free(buffer.ptr());
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t CommonAllocator::size(Buffer buffer) const {
|
|
||||||
if (buffer.ptr() == nullptr) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
return *static_cast<size_t*>(buffer.ptr());
|
|
||||||
}
|
|
||||||
|
|
||||||
Buffer malloc_or_wait(size_t size) {
|
|
||||||
auto buffer = allocator().malloc(size);
|
|
||||||
|
|
||||||
while (size && !buffer.ptr() && scheduler::n_active_tasks() > 0) {
|
|
||||||
scheduler::wait_for_one();
|
|
||||||
buffer = allocator().malloc(size);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try swapping if needed
|
|
||||||
if (size && !buffer.ptr()) {
|
|
||||||
buffer = allocator().malloc(size, /* allow_swap = */ true);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (size && !buffer.ptr()) {
|
|
||||||
std::ostringstream msg;
|
|
||||||
msg << "[malloc_or_wait] Unable to allocate " << size << " bytes.";
|
|
||||||
throw std::runtime_error(msg.str());
|
|
||||||
}
|
|
||||||
|
|
||||||
return buffer;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core::allocator
|
} // namespace mlx::core::allocator
|
||||||
|
|||||||
@@ -32,14 +32,10 @@ Buffer malloc(size_t size);
|
|||||||
|
|
||||||
void free(Buffer buffer);
|
void free(Buffer buffer);
|
||||||
|
|
||||||
// Wait for running tasks to finish and free up memory
|
|
||||||
// if allocation fails
|
|
||||||
Buffer malloc_or_wait(size_t size);
|
|
||||||
|
|
||||||
class Allocator {
|
class Allocator {
|
||||||
/** Abstract base class for a memory allocator. */
|
/** Abstract base class for a memory allocator. */
|
||||||
public:
|
public:
|
||||||
virtual Buffer malloc(size_t size, bool allow_swap = false) = 0;
|
virtual Buffer malloc(size_t size) = 0;
|
||||||
virtual void free(Buffer buffer) = 0;
|
virtual void free(Buffer buffer) = 0;
|
||||||
virtual size_t size(Buffer buffer) const = 0;
|
virtual size_t size(Buffer buffer) const = 0;
|
||||||
|
|
||||||
@@ -53,16 +49,4 @@ class Allocator {
|
|||||||
|
|
||||||
Allocator& allocator();
|
Allocator& allocator();
|
||||||
|
|
||||||
class CommonAllocator : public Allocator {
|
|
||||||
/** A general CPU allocator. */
|
|
||||||
public:
|
|
||||||
virtual Buffer malloc(size_t size, bool allow_swap = false) override;
|
|
||||||
virtual void free(Buffer buffer) override;
|
|
||||||
virtual size_t size(Buffer buffer) const override;
|
|
||||||
|
|
||||||
private:
|
|
||||||
CommonAllocator() = default;
|
|
||||||
friend Allocator& allocator();
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace mlx::core::allocator
|
} // namespace mlx::core::allocator
|
||||||
|
|||||||
@@ -25,7 +25,18 @@ array::array(
|
|||||||
std::move(shape),
|
std::move(shape),
|
||||||
dtype,
|
dtype,
|
||||||
std::move(primitive),
|
std::move(primitive),
|
||||||
std::move(inputs))) {}
|
std::move(inputs))) {
|
||||||
|
if (has_primitive() && this->primitive().stream().device == Device::gpu) {
|
||||||
|
for (auto& in : this->inputs()) {
|
||||||
|
if (in.dtype() == float64) {
|
||||||
|
throw std::invalid_argument("float64 is not supported on the GPU");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (this->dtype() == float64) {
|
||||||
|
throw std::invalid_argument("float64 is not supported on the GPU");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
std::vector<array> array::make_arrays(
|
std::vector<array> array::make_arrays(
|
||||||
std::vector<Shape> shapes,
|
std::vector<Shape> shapes,
|
||||||
@@ -33,11 +44,11 @@ std::vector<array> array::make_arrays(
|
|||||||
const std::shared_ptr<Primitive>& primitive,
|
const std::shared_ptr<Primitive>& primitive,
|
||||||
const std::vector<array>& inputs) {
|
const std::vector<array>& inputs) {
|
||||||
std::vector<array> outputs;
|
std::vector<array> outputs;
|
||||||
for (size_t i = 0; i < shapes.size(); ++i) {
|
for (int i = 0; i < std::ssize(shapes); ++i) {
|
||||||
outputs.emplace_back(std::move(shapes[i]), dtypes[i], primitive, inputs);
|
outputs.emplace_back(std::move(shapes[i]), dtypes[i], primitive, inputs);
|
||||||
}
|
}
|
||||||
// For each node in |outputs|, its siblings are the other nodes.
|
// For each node in |outputs|, its siblings are the other nodes.
|
||||||
for (size_t i = 0; i < outputs.size(); ++i) {
|
for (int i = 0; i < std::ssize(outputs); ++i) {
|
||||||
auto siblings = outputs;
|
auto siblings = outputs;
|
||||||
siblings.erase(siblings.begin() + i);
|
siblings.erase(siblings.begin() + i);
|
||||||
outputs[i].set_siblings(std::move(siblings), i);
|
outputs[i].set_siblings(std::move(siblings), i);
|
||||||
@@ -45,6 +56,18 @@ std::vector<array> array::make_arrays(
|
|||||||
return outputs;
|
return outputs;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
array array::unsafe_weak_copy(const array& other) {
|
||||||
|
auto cpy = array(other.shape(), other.dtype(), nullptr, {});
|
||||||
|
cpy.set_data(
|
||||||
|
other.buffer(),
|
||||||
|
other.data_size(),
|
||||||
|
other.strides(),
|
||||||
|
other.flags(),
|
||||||
|
[](auto) {});
|
||||||
|
cpy.array_desc_->data_ptr = other.array_desc_->data_ptr;
|
||||||
|
return cpy;
|
||||||
|
}
|
||||||
|
|
||||||
array::array(std::initializer_list<float> data)
|
array::array(std::initializer_list<float> data)
|
||||||
: array_desc_(std::make_shared<ArrayDesc>(
|
: array_desc_(std::make_shared<ArrayDesc>(
|
||||||
Shape{static_cast<ShapeElem>(data.size())},
|
Shape{static_cast<ShapeElem>(data.size())},
|
||||||
@@ -66,22 +89,26 @@ array::array(allocator::Buffer data, Shape shape, Dtype dtype, Deleter deleter)
|
|||||||
}
|
}
|
||||||
|
|
||||||
void array::detach() {
|
void array::detach() {
|
||||||
|
array_desc_->primitive = nullptr;
|
||||||
|
for (auto& s : array_desc_->siblings) {
|
||||||
|
s.array_desc_->primitive = nullptr;
|
||||||
|
}
|
||||||
for (auto& s : array_desc_->siblings) {
|
for (auto& s : array_desc_->siblings) {
|
||||||
s.array_desc_->inputs.clear();
|
s.array_desc_->inputs.clear();
|
||||||
s.array_desc_->siblings.clear();
|
s.array_desc_->siblings.clear();
|
||||||
s.array_desc_->position = 0;
|
s.array_desc_->position = 0;
|
||||||
s.array_desc_->primitive = nullptr;
|
|
||||||
}
|
}
|
||||||
array_desc_->inputs.clear();
|
array_desc_->inputs.clear();
|
||||||
array_desc_->siblings.clear();
|
array_desc_->siblings.clear();
|
||||||
array_desc_->position = 0;
|
array_desc_->position = 0;
|
||||||
array_desc_->primitive = nullptr;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool array::is_available() const {
|
bool array::is_available() const {
|
||||||
if (status() == Status::available) {
|
if (status() == Status::available) {
|
||||||
return true;
|
return true;
|
||||||
} else if (status() == Status::evaluated && event().is_signaled()) {
|
} else if (
|
||||||
|
status() == Status::evaluated &&
|
||||||
|
(!event().valid() || event().is_signaled())) {
|
||||||
set_status(Status::available);
|
set_status(Status::available);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
@@ -90,7 +117,10 @@ bool array::is_available() const {
|
|||||||
|
|
||||||
void array::wait() {
|
void array::wait() {
|
||||||
if (!is_available()) {
|
if (!is_available()) {
|
||||||
|
if (event().valid()) {
|
||||||
event().wait();
|
event().wait();
|
||||||
|
detach_event();
|
||||||
|
}
|
||||||
set_status(Status::available);
|
set_status(Status::available);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -115,8 +145,9 @@ void array::set_data(allocator::Buffer buffer, Deleter d) {
|
|||||||
array_desc_->data_size = size();
|
array_desc_->data_size = size();
|
||||||
array_desc_->flags.contiguous = true;
|
array_desc_->flags.contiguous = true;
|
||||||
array_desc_->flags.row_contiguous = true;
|
array_desc_->flags.row_contiguous = true;
|
||||||
auto max_dim = std::max_element(shape().begin(), shape().end());
|
auto max_dim =
|
||||||
array_desc_->flags.col_contiguous = size() <= 1 || size() == *max_dim;
|
static_cast<int64_t>(*std::max_element(shape().begin(), shape().end()));
|
||||||
|
array_desc_->flags.col_contiguous = size() <= 1 || size() == max_dim;
|
||||||
}
|
}
|
||||||
|
|
||||||
void array::set_data(
|
void array::set_data(
|
||||||
@@ -151,39 +182,18 @@ void array::copy_shared_buffer(const array& other) {
|
|||||||
copy_shared_buffer(other, other.strides(), other.flags(), other.data_size());
|
copy_shared_buffer(other, other.strides(), other.flags(), other.data_size());
|
||||||
}
|
}
|
||||||
|
|
||||||
void array::move_shared_buffer(
|
|
||||||
array other,
|
|
||||||
const Strides& strides,
|
|
||||||
Flags flags,
|
|
||||||
size_t data_size,
|
|
||||||
size_t offset /* = 0 */) {
|
|
||||||
array_desc_->data = std::move(other.array_desc_->data);
|
|
||||||
array_desc_->strides = strides;
|
|
||||||
array_desc_->flags = flags;
|
|
||||||
array_desc_->data_size = data_size;
|
|
||||||
auto char_offset = sizeof(char) * itemsize() * offset;
|
|
||||||
auto data_ptr = other.array_desc_->data_ptr;
|
|
||||||
other.array_desc_->data_ptr = nullptr;
|
|
||||||
array_desc_->data_ptr =
|
|
||||||
static_cast<void*>(static_cast<char*>(data_ptr) + char_offset);
|
|
||||||
}
|
|
||||||
|
|
||||||
void array::move_shared_buffer(array other) {
|
|
||||||
move_shared_buffer(other, other.strides(), other.flags(), other.data_size());
|
|
||||||
}
|
|
||||||
|
|
||||||
array::~array() {
|
array::~array() {
|
||||||
if (array_desc_ == nullptr) {
|
if (array_desc_ == nullptr) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ignore arrays that might be detached during eval
|
// Detached/detaching
|
||||||
if (status() == array::Status::scheduled) {
|
if (array_desc_->primitive == nullptr) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Break circular reference for non-detached arrays with siblings
|
// Break circular reference for non-detached arrays with siblings
|
||||||
if (auto n = siblings().size(); n > 0) {
|
if (auto n = std::ssize(siblings()); n > 0) {
|
||||||
bool do_detach = true;
|
bool do_detach = true;
|
||||||
// If all siblings have siblings.size() references except
|
// If all siblings have siblings.size() references except
|
||||||
// the one we are currently destroying (which has siblings.size() + 1)
|
// the one we are currently destroying (which has siblings.size() + 1)
|
||||||
@@ -232,8 +242,8 @@ array::ArrayDesc::ArrayDesc(
|
|||||||
std::vector<array> inputs)
|
std::vector<array> inputs)
|
||||||
: shape(std::move(shape)),
|
: shape(std::move(shape)),
|
||||||
dtype(dtype),
|
dtype(dtype),
|
||||||
status(Status::unscheduled),
|
|
||||||
primitive(std::move(primitive)),
|
primitive(std::move(primitive)),
|
||||||
|
status(Status::unscheduled),
|
||||||
inputs(std::move(inputs)) {
|
inputs(std::move(inputs)) {
|
||||||
init();
|
init();
|
||||||
}
|
}
|
||||||
@@ -265,7 +275,7 @@ array::ArrayDesc::~ArrayDesc() {
|
|||||||
ad.inputs.clear();
|
ad.inputs.clear();
|
||||||
for (auto& [_, a] : input_map) {
|
for (auto& [_, a] : input_map) {
|
||||||
bool is_deletable =
|
bool is_deletable =
|
||||||
(a.array_desc_.use_count() <= a.siblings().size() + 1);
|
(a.array_desc_.use_count() <= std::ssize(a.siblings()) + 1);
|
||||||
// An array with siblings is deletable only if all of its siblings
|
// An array with siblings is deletable only if all of its siblings
|
||||||
// are deletable
|
// are deletable
|
||||||
for (auto& s : a.siblings()) {
|
for (auto& s : a.siblings()) {
|
||||||
@@ -274,7 +284,7 @@ array::ArrayDesc::~ArrayDesc() {
|
|||||||
}
|
}
|
||||||
int is_input = (input_map.find(s.id()) != input_map.end());
|
int is_input = (input_map.find(s.id()) != input_map.end());
|
||||||
is_deletable &=
|
is_deletable &=
|
||||||
s.array_desc_.use_count() <= a.siblings().size() + is_input;
|
s.array_desc_.use_count() <= std::ssize(a.siblings()) + is_input;
|
||||||
}
|
}
|
||||||
if (is_deletable) {
|
if (is_deletable) {
|
||||||
for_deletion.push_back(std::move(a.array_desc_));
|
for_deletion.push_back(std::move(a.array_desc_));
|
||||||
|
|||||||
59
mlx/array.h
59
mlx/array.h
@@ -10,6 +10,7 @@
|
|||||||
#include "mlx/allocator.h"
|
#include "mlx/allocator.h"
|
||||||
#include "mlx/dtype.h"
|
#include "mlx/dtype.h"
|
||||||
#include "mlx/event.h"
|
#include "mlx/event.h"
|
||||||
|
#include "mlx/small_vector.h"
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
|
|
||||||
@@ -18,8 +19,8 @@ class Primitive;
|
|||||||
|
|
||||||
using Deleter = std::function<void(allocator::Buffer)>;
|
using Deleter = std::function<void(allocator::Buffer)>;
|
||||||
using ShapeElem = int32_t;
|
using ShapeElem = int32_t;
|
||||||
using Shape = std::vector<ShapeElem>;
|
using Shape = SmallVector<ShapeElem>;
|
||||||
using Strides = std::vector<int64_t>;
|
using Strides = SmallVector<int64_t>;
|
||||||
|
|
||||||
class array {
|
class array {
|
||||||
/* An array is really a node in a graph. It contains a shared ArrayDesc
|
/* An array is really a node in a graph. It contains a shared ArrayDesc
|
||||||
@@ -80,22 +81,22 @@ class array {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/** The size of the array's datatype in bytes. */
|
/** The size of the array's datatype in bytes. */
|
||||||
size_t itemsize() const {
|
int itemsize() const {
|
||||||
return size_of(dtype());
|
return size_of(dtype());
|
||||||
}
|
}
|
||||||
|
|
||||||
/** The number of elements in the array. */
|
/** The number of elements in the array. */
|
||||||
size_t size() const {
|
int64_t size() const {
|
||||||
return array_desc_->size;
|
return array_desc_->size;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** The number of bytes in the array. */
|
/** The number of bytes in the array. */
|
||||||
size_t nbytes() const {
|
int64_t nbytes() const {
|
||||||
return size() * itemsize();
|
return size() * itemsize();
|
||||||
}
|
}
|
||||||
|
|
||||||
/** The number of dimensions of the array. */
|
/** The number of dimensions of the array. */
|
||||||
size_t ndim() const {
|
int ndim() const {
|
||||||
return array_desc_->shape.size();
|
return array_desc_->shape.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -199,6 +200,13 @@ class array {
|
|||||||
const std::shared_ptr<Primitive>& primitive,
|
const std::shared_ptr<Primitive>& primitive,
|
||||||
const std::vector<array>& inputs);
|
const std::vector<array>& inputs);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a new array that refers to the same data as the input but with a
|
||||||
|
* non-owning pointer to it. Note the array is detached from the graph and has
|
||||||
|
* no inputs, siblings or primitive.
|
||||||
|
*/
|
||||||
|
static array unsafe_weak_copy(const array& other);
|
||||||
|
|
||||||
/** A unique identifier for an array. */
|
/** A unique identifier for an array. */
|
||||||
std::uintptr_t id() const {
|
std::uintptr_t id() const {
|
||||||
return reinterpret_cast<std::uintptr_t>(array_desc_.get());
|
return reinterpret_cast<std::uintptr_t>(array_desc_.get());
|
||||||
@@ -217,6 +225,10 @@ class array {
|
|||||||
// Not copyable
|
// Not copyable
|
||||||
Data(const Data& d) = delete;
|
Data(const Data& d) = delete;
|
||||||
Data& operator=(const Data& d) = delete;
|
Data& operator=(const Data& d) = delete;
|
||||||
|
Data(Data&& o) : buffer(o.buffer), d(o.d) {
|
||||||
|
o.buffer = allocator::Buffer(nullptr);
|
||||||
|
o.d = [](allocator::Buffer) {};
|
||||||
|
}
|
||||||
~Data() {
|
~Data() {
|
||||||
d(buffer);
|
d(buffer);
|
||||||
}
|
}
|
||||||
@@ -317,7 +329,7 @@ class array {
|
|||||||
* corresponding to ``arr[-1, -1, ...]``) then ``data_size = last - first``.
|
* corresponding to ``arr[-1, -1, ...]``) then ``data_size = last - first``.
|
||||||
* Note, ``data_size`` is in units of ``item_size`` (not bytes).
|
* Note, ``data_size`` is in units of ``item_size`` (not bytes).
|
||||||
**/
|
**/
|
||||||
size_t data_size() const {
|
int64_t data_size() const {
|
||||||
return array_desc_->data_size;
|
return array_desc_->data_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -328,15 +340,15 @@ class array {
|
|||||||
return array_desc_->data->buffer;
|
return array_desc_->data->buffer;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t buffer_size() const {
|
int64_t buffer_size() const {
|
||||||
return allocator::allocator().size(buffer());
|
return allocator::allocator().size(buffer());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return a copy of the shared pointer
|
// Return the shared pointer to the array::Data struct
|
||||||
// to the array::Data struct
|
const std::shared_ptr<Data>& data_shared_ptr() const {
|
||||||
std::shared_ptr<Data> data_shared_ptr() const {
|
|
||||||
return array_desc_->data;
|
return array_desc_->data;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return a raw pointer to the arrays data
|
// Return a raw pointer to the arrays data
|
||||||
template <typename T>
|
template <typename T>
|
||||||
T* data() {
|
T* data() {
|
||||||
@@ -349,15 +361,10 @@ class array {
|
|||||||
}
|
}
|
||||||
|
|
||||||
enum Status {
|
enum Status {
|
||||||
// The ouptut of a computation which has not been scheduled.
|
// The output of a computation which has not been scheduled.
|
||||||
// For example, the status of `x` in `auto x = a + b`.
|
// For example, the status of `x` in `auto x = a + b`.
|
||||||
unscheduled,
|
unscheduled,
|
||||||
|
|
||||||
// The ouptut of a computation which has been scheduled but `eval_*` has
|
|
||||||
// not yet been called on the array's primitive. A possible
|
|
||||||
// status of `x` in `auto x = a + b; eval(x);`
|
|
||||||
scheduled,
|
|
||||||
|
|
||||||
// The array's `eval_*` function has been run, but the computation is not
|
// The array's `eval_*` function has been run, but the computation is not
|
||||||
// necessarily complete. The array will have memory allocated and if it is
|
// necessarily complete. The array will have memory allocated and if it is
|
||||||
// not a tracer then it will be detached from the graph.
|
// not a tracer then it will be detached from the graph.
|
||||||
@@ -394,6 +401,10 @@ class array {
|
|||||||
array_desc_->event = std::move(e);
|
array_desc_->event = std::move(e);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void detach_event() const {
|
||||||
|
array_desc_->event = Event{};
|
||||||
|
}
|
||||||
|
|
||||||
// Mark the array as a tracer array (true) or not.
|
// Mark the array as a tracer array (true) or not.
|
||||||
void set_tracer(bool is_tracer) {
|
void set_tracer(bool is_tracer) {
|
||||||
array_desc_->is_tracer = is_tracer;
|
array_desc_->is_tracer = is_tracer;
|
||||||
@@ -419,15 +430,6 @@ class array {
|
|||||||
|
|
||||||
void copy_shared_buffer(const array& other);
|
void copy_shared_buffer(const array& other);
|
||||||
|
|
||||||
void move_shared_buffer(
|
|
||||||
array other,
|
|
||||||
const Strides& strides,
|
|
||||||
Flags flags,
|
|
||||||
size_t data_size,
|
|
||||||
size_t offset = 0);
|
|
||||||
|
|
||||||
void move_shared_buffer(array other);
|
|
||||||
|
|
||||||
void overwrite_descriptor(const array& other) {
|
void overwrite_descriptor(const array& other) {
|
||||||
array_desc_ = other.array_desc_;
|
array_desc_ = other.array_desc_;
|
||||||
}
|
}
|
||||||
@@ -528,7 +530,7 @@ array::array(
|
|||||||
Shape shape,
|
Shape shape,
|
||||||
Dtype dtype /* = TypeToDtype<T>() */)
|
Dtype dtype /* = TypeToDtype<T>() */)
|
||||||
: array_desc_(std::make_shared<ArrayDesc>(std::move(shape), dtype)) {
|
: array_desc_(std::make_shared<ArrayDesc>(std::move(shape), dtype)) {
|
||||||
if (data.size() != size()) {
|
if (std::ssize(data) != size()) {
|
||||||
throw std::invalid_argument(
|
throw std::invalid_argument(
|
||||||
"Data size and provided shape mismatch in array construction.");
|
"Data size and provided shape mismatch in array construction.");
|
||||||
}
|
}
|
||||||
@@ -594,6 +596,9 @@ void array::init(It src) {
|
|||||||
case float32:
|
case float32:
|
||||||
std::copy(src, src + size(), data<float>());
|
std::copy(src, src + size(), data<float>());
|
||||||
break;
|
break;
|
||||||
|
case float64:
|
||||||
|
std::copy(src, src + size(), data<double>());
|
||||||
|
break;
|
||||||
case bfloat16:
|
case bfloat16:
|
||||||
std::copy(src, src + size(), data<bfloat16_t>());
|
std::copy(src, src + size(), data<bfloat16_t>());
|
||||||
break;
|
break;
|
||||||
|
|||||||
@@ -1,8 +0,0 @@
|
|||||||
target_sources(
|
|
||||||
mlx
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/conv.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/matmul.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/primitives.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/quantized.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/reduce.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/softmax.cpp)
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
#include <Accelerate/Accelerate.h>
|
|
||||||
#include <simd/vector.h>
|
|
||||||
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
#include "mlx/utils.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
void Convolution::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
eval(inputs, out);
|
|
||||||
|
|
||||||
// TODO: Add accelerate based optimizations for CPU conv
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,253 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
#include <Accelerate/Accelerate.h>
|
|
||||||
|
|
||||||
#include "mlx/backend/accelerate/utils.h"
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
#include "mlx/utils.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
std::tuple<bool, size_t, array> check_transpose(const array& arr) {
|
|
||||||
auto stx = arr.strides()[arr.ndim() - 2];
|
|
||||||
auto sty = arr.strides()[arr.ndim() - 1];
|
|
||||||
if (stx == arr.shape(-1) && sty == 1) {
|
|
||||||
return std::make_tuple(false, stx, arr);
|
|
||||||
} else if (stx == 1 && sty == arr.shape(-2)) {
|
|
||||||
return std::make_tuple(true, sty, arr);
|
|
||||||
} else {
|
|
||||||
array arr_copy(arr.shape(), arr.dtype(), nullptr, {});
|
|
||||||
copy(arr, arr_copy, CopyType::General);
|
|
||||||
size_t stx = arr.shape(-1);
|
|
||||||
return std::make_tuple(false, stx, arr_copy);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void matmul_cblas_general(
|
|
||||||
const array& a_pre,
|
|
||||||
const array& b_pre,
|
|
||||||
array& out,
|
|
||||||
float alpha = 1.0f,
|
|
||||||
float beta = 0.0f) {
|
|
||||||
if (out.dtype() != float32) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[matmul_cblas] on CPU currently only supports float32");
|
|
||||||
}
|
|
||||||
|
|
||||||
auto [a_transposed, lda, a] = check_transpose(a_pre);
|
|
||||||
auto [b_transposed, ldb, b] = check_transpose(b_pre);
|
|
||||||
size_t M = a.shape(-2);
|
|
||||||
size_t N = b.shape(-1);
|
|
||||||
size_t K = a.shape(-1);
|
|
||||||
|
|
||||||
if (M == 0 || N == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (K == 0) {
|
|
||||||
std::memset(static_cast<void*>(out.data<float>()), 0, out.nbytes());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < (a.size() / (M * K)); ++i) {
|
|
||||||
cblas_sgemm(
|
|
||||||
CblasRowMajor,
|
|
||||||
a_transposed ? CblasTrans : CblasNoTrans, // transA
|
|
||||||
b_transposed ? CblasTrans : CblasNoTrans, // transB
|
|
||||||
M,
|
|
||||||
N,
|
|
||||||
K,
|
|
||||||
alpha, // alpha
|
|
||||||
a.data<float>() + elem_to_loc(M * K * i, a.shape(), a.strides()),
|
|
||||||
lda,
|
|
||||||
b.data<float>() + elem_to_loc(K * N * i, b.shape(), b.strides()),
|
|
||||||
ldb,
|
|
||||||
beta, // beta
|
|
||||||
out.data<float>() + M * N * i,
|
|
||||||
out.shape(-1) // ldc
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void matmul_cblas(const array& a_pre, const array& b_pre, array& out) {
|
|
||||||
if (out.dtype() != float32) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[matmul_cblas] on CPU currently only supports float32");
|
|
||||||
}
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
return matmul_cblas_general(a_pre, b_pre, out);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void matmul_bnns_general(
|
|
||||||
const array& a_pre,
|
|
||||||
const array& b_pre,
|
|
||||||
array& out,
|
|
||||||
float alpha = 1.0f,
|
|
||||||
float beta = 0.0f) {
|
|
||||||
// TODO: Update to utilize BNNS broadcasting
|
|
||||||
|
|
||||||
auto [a_transposed, lda, a] = check_transpose(a_pre);
|
|
||||||
auto [b_transposed, ldb, b] = check_transpose(b_pre);
|
|
||||||
size_t M = a.shape(-2);
|
|
||||||
size_t N = b.shape(-1);
|
|
||||||
size_t K = a.shape(-1);
|
|
||||||
|
|
||||||
if (M == 0 || N == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (K == 0) {
|
|
||||||
std::memset(static_cast<void*>(out.data<float>()), 0, out.nbytes());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
BNNSDataType bnns_dtype = to_bnns_dtype(out.dtype());
|
|
||||||
|
|
||||||
const BNNSLayerParametersBroadcastMatMul gemm_params{
|
|
||||||
/* float alpha = */ alpha,
|
|
||||||
/* float beta = */ beta,
|
|
||||||
/* bool transA = */ a_transposed,
|
|
||||||
/* bool transB = */ b_transposed,
|
|
||||||
/* bool quadratic = */ false,
|
|
||||||
/* bool a_is_weights = */ false,
|
|
||||||
/* bool b_is_weights = */ false,
|
|
||||||
/* BNNSNDArrayDescriptor iA_desc = */
|
|
||||||
BNNSNDArrayDescriptor{
|
|
||||||
/* BNNSNDArrayFlags flags = */ BNNSNDArrayFlagBackpropSet,
|
|
||||||
/* BNNSDataLayout layout = */ BNNSDataLayoutRowMajorMatrix,
|
|
||||||
|
|
||||||
/* size_t size[BNNS_MAX_TENSOR_DIMENSION] = */
|
|
||||||
{lda, (M * K) / lda, 0, 0, 0, 0, 0, 0},
|
|
||||||
/* size_t stride[BNNS_MAX_TENSOR_DIMENSION] = */
|
|
||||||
{1, lda, 0, 0, 0, 0, 0, 0},
|
|
||||||
|
|
||||||
/* void * _Nullable data = */ nullptr,
|
|
||||||
/* BNNSDataType data_type = */ bnns_dtype,
|
|
||||||
|
|
||||||
/* void * _Nullable table_data = */ nullptr,
|
|
||||||
/* BNNSDataType table_data_type = */ bnns_dtype,
|
|
||||||
|
|
||||||
/* float data_scale = */ 1.0,
|
|
||||||
/* float data_bias = */ 0.0,
|
|
||||||
},
|
|
||||||
/* BNNSNDArrayDescriptor iB_desc = */
|
|
||||||
BNNSNDArrayDescriptor{
|
|
||||||
/* BNNSNDArrayFlags flags = */ BNNSNDArrayFlagBackpropSet,
|
|
||||||
/* BNNSDataLayout layout = */ BNNSDataLayoutRowMajorMatrix,
|
|
||||||
|
|
||||||
/* size_t size[BNNS_MAX_TENSOR_DIMENSION] = */
|
|
||||||
{ldb, (K * N) / ldb, 0, 0, 0, 0, 0, 0},
|
|
||||||
/* size_t stride[BNNS_MAX_TENSOR_DIMENSION] = */
|
|
||||||
{1, ldb, 0, 0, 0, 0, 0, 0},
|
|
||||||
|
|
||||||
/* void * _Nullable data = */ nullptr,
|
|
||||||
/* BNNSDataType data_type = */ bnns_dtype,
|
|
||||||
|
|
||||||
/* void * _Nullable table_data = */ nullptr,
|
|
||||||
/* BNNSDataType table_data_type = */ bnns_dtype,
|
|
||||||
|
|
||||||
/* float data_scale = */ 1.0,
|
|
||||||
/* float data_bias = */ 0.0,
|
|
||||||
},
|
|
||||||
/* BNNSNDArrayDescriptor o_desc = */
|
|
||||||
BNNSNDArrayDescriptor{
|
|
||||||
/* BNNSNDArrayFlags flags = */ BNNSNDArrayFlagBackpropSet,
|
|
||||||
/* BNNSDataLayout layout = */ BNNSDataLayoutRowMajorMatrix,
|
|
||||||
|
|
||||||
/* size_t size[BNNS_MAX_TENSOR_DIMENSION] = */
|
|
||||||
{N, M, 0, 0, 0, 0, 0, 0},
|
|
||||||
/* size_t stride[BNNS_MAX_TENSOR_DIMENSION] = */
|
|
||||||
{1, N, 0, 0, 0, 0, 0, 0},
|
|
||||||
|
|
||||||
/* void * _Nullable data = */ nullptr,
|
|
||||||
/* BNNSDataType data_type = */ bnns_dtype,
|
|
||||||
|
|
||||||
/* void * _Nullable table_data = */ nullptr,
|
|
||||||
/* BNNSDataType table_data_type = */ bnns_dtype,
|
|
||||||
|
|
||||||
/* float data_scale = */ 1.0,
|
|
||||||
/* float data_bias = */ 0.0,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
auto bnns_filter =
|
|
||||||
BNNSFilterCreateLayerBroadcastMatMul(&gemm_params, nullptr);
|
|
||||||
|
|
||||||
for (int i = 0; i < (a.size() / (M * K)); ++i) {
|
|
||||||
BNNSFilterApplyTwoInput(
|
|
||||||
bnns_filter,
|
|
||||||
a.data<uint8_t>() +
|
|
||||||
elem_to_loc(M * K * i, a.shape(), a.strides()) * a.itemsize(),
|
|
||||||
b.data<uint8_t>() +
|
|
||||||
elem_to_loc(K * N * i, b.shape(), b.strides()) * b.itemsize(),
|
|
||||||
out.data<uint8_t>() + M * N * i * out.itemsize());
|
|
||||||
}
|
|
||||||
|
|
||||||
BNNSFilterDestroy(bnns_filter);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void matmul_bnns(const array& a_pre, const array& b_pre, array& out) {
|
|
||||||
// TODO: Update to utilize BNNS broadcasting
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
return matmul_bnns_general(a_pre, b_pre, out);
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
inline void mask_matrix(
|
|
||||||
T* data,
|
|
||||||
const bool* mask,
|
|
||||||
int tile_size,
|
|
||||||
const int X,
|
|
||||||
const int Y,
|
|
||||||
const size_t X_data_str,
|
|
||||||
const size_t Y_data_str,
|
|
||||||
const size_t X_mask_str,
|
|
||||||
const size_t Y_mask_str) {
|
|
||||||
int tX = (X + tile_size - 1) / tile_size;
|
|
||||||
int tY = (Y + tile_size - 1) / tile_size;
|
|
||||||
|
|
||||||
for (int i = 0; i < tX; i++) {
|
|
||||||
for (int j = 0; j < tY; j++) {
|
|
||||||
bool do_mask = mask[i * X_mask_str + j * Y_mask_str];
|
|
||||||
if (!do_mask) {
|
|
||||||
int loc_x = i * tile_size;
|
|
||||||
int loc_y = j * tile_size;
|
|
||||||
T* data_block = data + loc_x * X_data_str + loc_y * Y_data_str;
|
|
||||||
|
|
||||||
int size_x = std::min(tile_size, X - loc_x);
|
|
||||||
int size_y = std::min(tile_size, Y - loc_y);
|
|
||||||
for (int ii = 0; ii < size_x; ii++) {
|
|
||||||
for (int jj = 0; jj < size_y; jj++) {
|
|
||||||
data_block[ii * X_data_str + jj * Y_data_str] = T(0.);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
void Matmul::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
if (out.dtype() == float32) {
|
|
||||||
return matmul_cblas(inputs[0], inputs[1], out);
|
|
||||||
}
|
|
||||||
return matmul_bnns(inputs[0], inputs[1], out);
|
|
||||||
}
|
|
||||||
|
|
||||||
void AddMM::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
// Fill output with C
|
|
||||||
auto& c = inputs[2];
|
|
||||||
CopyType ctype = c.data_size() == 1 ? CopyType::Scalar : CopyType::General;
|
|
||||||
copy(c, out, ctype);
|
|
||||||
|
|
||||||
if (out.dtype() == float32) {
|
|
||||||
return matmul_cblas_general(inputs[0], inputs[1], out, alpha_, beta_);
|
|
||||||
}
|
|
||||||
return matmul_bnns_general(inputs[0], inputs[1], out, alpha_, beta_);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,603 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
#include <cmath>
|
|
||||||
|
|
||||||
#include <Accelerate/Accelerate.h>
|
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/backend/common/binary.h"
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/backend/common/unary.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
#define DEFAULT(primitive) \
|
|
||||||
void primitive::eval_cpu(const std::vector<array>& inputs, array& out) { \
|
|
||||||
primitive::eval(inputs, out); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define DEFAULT_MULTI(primitive) \
|
|
||||||
void primitive::eval_cpu( \
|
|
||||||
const std::vector<array>& inputs, std::vector<array>& outputs) { \
|
|
||||||
primitive::eval(inputs, outputs); \
|
|
||||||
}
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
// Use the default implementation for the following primitives
|
|
||||||
DEFAULT(Arange)
|
|
||||||
DEFAULT(ArgPartition)
|
|
||||||
DEFAULT(ArgReduce)
|
|
||||||
DEFAULT(ArgSort)
|
|
||||||
DEFAULT(AsStrided)
|
|
||||||
DEFAULT(BlockMaskedMM)
|
|
||||||
DEFAULT(Broadcast)
|
|
||||||
DEFAULT(BroadcastAxes)
|
|
||||||
DEFAULT(Ceil)
|
|
||||||
DEFAULT(Concatenate)
|
|
||||||
DEFAULT(Conjugate)
|
|
||||||
DEFAULT(Copy)
|
|
||||||
DEFAULT_MULTI(CustomTransforms)
|
|
||||||
DEFAULT_MULTI(Depends)
|
|
||||||
DEFAULT_MULTI(DivMod)
|
|
||||||
DEFAULT(NumberOfElements)
|
|
||||||
DEFAULT(Equal)
|
|
||||||
DEFAULT(Erf)
|
|
||||||
DEFAULT(ErfInv)
|
|
||||||
DEFAULT(ExpandDims)
|
|
||||||
DEFAULT(FFT)
|
|
||||||
DEFAULT(Floor)
|
|
||||||
DEFAULT(Gather)
|
|
||||||
DEFAULT(GatherMM)
|
|
||||||
DEFAULT(GatherQMM)
|
|
||||||
DEFAULT(Greater)
|
|
||||||
DEFAULT(GreaterEqual)
|
|
||||||
DEFAULT(Hadamard)
|
|
||||||
DEFAULT(Less)
|
|
||||||
DEFAULT(LessEqual)
|
|
||||||
DEFAULT(Load)
|
|
||||||
DEFAULT(LogicalNot)
|
|
||||||
DEFAULT(LogicalAnd)
|
|
||||||
DEFAULT(LogicalOr)
|
|
||||||
DEFAULT(LogAddExp)
|
|
||||||
DEFAULT(Maximum)
|
|
||||||
DEFAULT(Minimum)
|
|
||||||
DEFAULT(NotEqual)
|
|
||||||
DEFAULT(Pad)
|
|
||||||
DEFAULT(Partition)
|
|
||||||
DEFAULT_MULTI(QRF)
|
|
||||||
DEFAULT(RandomBits)
|
|
||||||
DEFAULT(Remainder)
|
|
||||||
DEFAULT(Round)
|
|
||||||
DEFAULT(Scatter)
|
|
||||||
DEFAULT(Select)
|
|
||||||
DEFAULT(Sigmoid)
|
|
||||||
DEFAULT(Sign)
|
|
||||||
DEFAULT(Slice)
|
|
||||||
DEFAULT(SliceUpdate)
|
|
||||||
DEFAULT_MULTI(Split)
|
|
||||||
DEFAULT(Sort)
|
|
||||||
DEFAULT(Squeeze)
|
|
||||||
DEFAULT(StopGradient)
|
|
||||||
DEFAULT_MULTI(SVD)
|
|
||||||
DEFAULT(Transpose)
|
|
||||||
DEFAULT(Inverse)
|
|
||||||
DEFAULT(Cholesky)
|
|
||||||
DEFAULT_MULTI(Eigh)
|
|
||||||
|
|
||||||
void Abs::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
auto& in = inputs[0];
|
|
||||||
if (in.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
vDSP_vabs(in.data<float>(), 1, out.data<float>(), 1, in.data_size());
|
|
||||||
} else if (in.dtype() == int32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
vDSP_vabsi(in.data<int>(), 1, out.data<int>(), 1, in.data_size());
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Add::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
|
|
||||||
if (a.dtype() == float32) {
|
|
||||||
binary_op<float>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
[](auto x, auto y) { return x + y; },
|
|
||||||
[](const auto* s, const auto* vec, auto* o, auto n) {
|
|
||||||
vDSP_vsadd((const float*)vec, 1, (const float*)s, (float*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* vec, const auto* s, auto* o, auto n) {
|
|
||||||
vDSP_vsadd((const float*)vec, 1, (const float*)s, (float*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* a, const auto* b, auto* o, auto n) {
|
|
||||||
vDSP_vadd((const float*)a, 1, (const float*)b, 1, (float*)o, 1, n);
|
|
||||||
});
|
|
||||||
} else if (a.dtype() == int32) {
|
|
||||||
binary_op<int>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
[](auto x, auto y) { return x + y; },
|
|
||||||
[](const auto* s, const auto* vec, auto* o, auto n) {
|
|
||||||
vDSP_vsaddi((const int*)vec, 1, (const int*)s, (int*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* vec, const auto* s, auto* o, auto n) {
|
|
||||||
vDSP_vsaddi((const int*)vec, 1, (const int*)s, (int*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* a, const auto* b, auto* o, auto n) {
|
|
||||||
vDSP_vaddi((const int*)a, 1, (const int*)b, 1, (int*)o, 1, n);
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ArcCos::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvacosf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ArcCosh::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvacoshf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ArcSin::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvasinf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ArcSinh::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvasinhf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ArcTan::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvatanf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ArcTan2::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
if (out.dtype() == float32 && a.flags().row_contiguous &&
|
|
||||||
b.flags().row_contiguous) {
|
|
||||||
if (a.is_donatable()) {
|
|
||||||
out.copy_shared_buffer(a);
|
|
||||||
} else if (b.is_donatable()) {
|
|
||||||
out.copy_shared_buffer(b);
|
|
||||||
} else {
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
}
|
|
||||||
int size = a.data_size();
|
|
||||||
vvatan2f(out.data<float>(), a.data<float>(), b.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ArcTanh::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvatanhf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void AsType::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
auto& in = inputs[0];
|
|
||||||
|
|
||||||
if (in.flags().contiguous) {
|
|
||||||
// Use accelerate functions if possible
|
|
||||||
if (in.dtype() == float32 && out.dtype() == uint32) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
vDSP_vfixu32(
|
|
||||||
in.data<float>(), 1, out.data<uint32_t>(), 1, in.data_size());
|
|
||||||
return;
|
|
||||||
} else if (in.dtype() == float32 && out.dtype() == int32) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
vDSP_vfix32(in.data<float>(), 1, out.data<int32_t>(), 1, in.data_size());
|
|
||||||
return;
|
|
||||||
} else if (in.dtype() == uint32 && out.dtype() == float32) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
vDSP_vfltu32(
|
|
||||||
in.data<uint32_t>(), 1, out.data<float>(), 1, in.data_size());
|
|
||||||
return;
|
|
||||||
} else if (in.dtype() == int32 && out.dtype() == float32) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
vDSP_vflt32(in.data<int32_t>(), 1, out.data<float>(), 1, in.data_size());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
|
|
||||||
void Cos::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvcosf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Cosh::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvcoshf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Divide::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
|
|
||||||
if (a.dtype() == int32) {
|
|
||||||
binary_op<int>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
[](auto x, auto y) { return x / y; },
|
|
||||||
UseDefaultBinaryOp(),
|
|
||||||
[](const auto* vec, const auto* s, auto* o, auto n) {
|
|
||||||
vDSP_vsdivi((const int*)vec, 1, (const int*)s, (int*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* a, const auto* b, auto* o, auto n) {
|
|
||||||
vDSP_vdivi((const int*)b, 1, (const int*)a, 1, (int*)o, 1, n);
|
|
||||||
});
|
|
||||||
} else if (a.dtype() == float32) {
|
|
||||||
binary_op<float>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
[](auto x, auto y) { return x / y; },
|
|
||||||
[](const auto* s, const auto* vec, auto* o, auto n) {
|
|
||||||
vDSP_svdiv((const float*)s, (const float*)vec, 1, (float*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* vec, const auto* s, auto* o, auto n) {
|
|
||||||
vDSP_vsdiv((const float*)vec, 1, (const float*)s, (float*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* a, const auto* b, auto* o, auto n) {
|
|
||||||
vDSP_vdiv((const float*)b, 1, (const float*)a, 1, (float*)o, 1, n);
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Exp::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
auto size = in.data_size();
|
|
||||||
vvexpf(out.data<float>(), in.data<float>(), reinterpret_cast<int*>(&size));
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Expm1::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
auto size = in.data_size();
|
|
||||||
vvexpm1f(
|
|
||||||
out.data<float>(), in.data<float>(), reinterpret_cast<int*>(&size));
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Full::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
auto& in = inputs[0];
|
|
||||||
assert(in.dtype() == out.dtype());
|
|
||||||
if (in.data_size() == 1 && out.dtype() == float32) {
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
vDSP_vfill(in.data<float>(), out.data<float>(), 1, out.size());
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Log::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
auto size = in.data_size();
|
|
||||||
switch (base_) {
|
|
||||||
case Base::e:
|
|
||||||
vvlogf(
|
|
||||||
out.data<float>(), in.data<float>(), reinterpret_cast<int*>(&size));
|
|
||||||
break;
|
|
||||||
case Base::two:
|
|
||||||
vvlog2f(
|
|
||||||
out.data<float>(), in.data<float>(), reinterpret_cast<int*>(&size));
|
|
||||||
break;
|
|
||||||
case Base::ten:
|
|
||||||
vvlog10f(
|
|
||||||
out.data<float>(), in.data<float>(), reinterpret_cast<int*>(&size));
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Log1p::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
auto size = in.data_size();
|
|
||||||
vvlog1pf(
|
|
||||||
out.data<float>(), in.data<float>(), reinterpret_cast<int*>(&size));
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Multiply::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
|
|
||||||
if (a.dtype() == float32) {
|
|
||||||
binary_op<float>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
[](auto x, auto y) { return x * y; },
|
|
||||||
[](const auto* s, const auto* vec, auto* o, auto n) {
|
|
||||||
vDSP_vsmul((const float*)vec, 1, (const float*)s, (float*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* vec, const auto* s, auto* o, auto n) {
|
|
||||||
vDSP_vsmul((const float*)vec, 1, (const float*)s, (float*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* a, const auto* b, auto* o, auto n) {
|
|
||||||
vDSP_vmul((const float*)a, 1, (const float*)b, 1, (float*)o, 1, n);
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Negative::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
auto& in = inputs[0];
|
|
||||||
if (in.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
vDSP_vneg(in.data<float>(), 1, out.data<float>(), 1, in.data_size());
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Power::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
if (out.dtype() == float32 && a.flags().row_contiguous &&
|
|
||||||
b.flags().row_contiguous) {
|
|
||||||
int size = a.size();
|
|
||||||
if (a.is_donatable() && a.itemsize() == out.itemsize()) {
|
|
||||||
out.copy_shared_buffer(a);
|
|
||||||
} else if (b.is_donatable() && b.itemsize() == out.itemsize()) {
|
|
||||||
out.copy_shared_buffer(b);
|
|
||||||
} else {
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
}
|
|
||||||
vvpowf(out.data<float>(), b.data<float>(), a.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Scan::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (reduce_type_ == Scan::Sum && out.dtype() == float32 &&
|
|
||||||
in.flags().row_contiguous && in.strides()[axis_] == 1 && !inclusive_) {
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
int stride = in.shape(axis_);
|
|
||||||
int count = in.size() / stride;
|
|
||||||
const float* input = in.data<float>();
|
|
||||||
float* output = out.data<float>();
|
|
||||||
float s = 1.0;
|
|
||||||
if (!reverse_) {
|
|
||||||
for (int i = 0; i < count; i++) {
|
|
||||||
vDSP_vrsum(input - 1, 1, &s, output, 1, stride);
|
|
||||||
input += stride;
|
|
||||||
output += stride;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for (int i = 0; i < count; i++) {
|
|
||||||
input += stride - 1;
|
|
||||||
output += stride - 1;
|
|
||||||
vDSP_vrsum(input + 1, -1, &s, output, -1, stride);
|
|
||||||
input++;
|
|
||||||
output++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Sin::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvsinf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Sinh::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvsinhf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Square::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
auto& in = inputs[0];
|
|
||||||
if (in.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
auto size = in.data_size();
|
|
||||||
vDSP_vsq(in.data<float>(), 1, out.data<float>(), 1, size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Sqrt::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
auto& in = inputs[0];
|
|
||||||
if (in.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
if (recip_) {
|
|
||||||
vvrsqrtf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
vvsqrtf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Subtract::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
|
|
||||||
if (a.dtype() == float32) {
|
|
||||||
binary_op<float>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
[](auto x, auto y) { return x - y; },
|
|
||||||
[](const auto* s, const auto* vec, auto* o, auto n) {
|
|
||||||
float minus_1 = -1;
|
|
||||||
vDSP_vsmsa(
|
|
||||||
(const float*)vec, 1, &minus_1, (const float*)s, (float*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* vec, const auto* s, auto* o, auto n) {
|
|
||||||
float val = -(*s);
|
|
||||||
vDSP_vsadd((const float*)vec, 1, &val, (float*)o, 1, n);
|
|
||||||
},
|
|
||||||
[](const auto* a, const auto* b, auto* o, auto n) {
|
|
||||||
vDSP_vsub((const float*)b, 1, (const float*)a, 1, (float*)o, 1, n);
|
|
||||||
});
|
|
||||||
} else if (a.dtype() == int32) {
|
|
||||||
binary_op<int>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
[](auto x, auto y) { return x - y; },
|
|
||||||
UseDefaultBinaryOp(),
|
|
||||||
[](const auto* vec, const auto* s, auto* o, auto n) {
|
|
||||||
int val = -(*s);
|
|
||||||
vDSP_vsaddi((const int*)vec, 1, &val, (int*)o, 1, n);
|
|
||||||
},
|
|
||||||
UseDefaultBinaryOp());
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Tan::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvtanf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Tanh::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 1);
|
|
||||||
const auto& in = inputs[0];
|
|
||||||
if (out.dtype() == float32 && in.flags().contiguous) {
|
|
||||||
set_unary_output_data(in, out);
|
|
||||||
int size = in.data_size();
|
|
||||||
vvtanhf(out.data<float>(), in.data<float>(), &size);
|
|
||||||
} else {
|
|
||||||
eval(inputs, out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
#include <simd/vector.h>
|
|
||||||
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
// Quantized transposed matmul specialized for 4-bit weights with group size
// 64: result[M, N] = x[M, K] . dequant(w)[N, K]^T per batch. Each uint32 in
// `w` packs 8 4-bit values; every group of 64 consecutive weights along K
// shares one (scale, bias) pair. If `batched_w` is false the same weights
// and quantization parameters are reused for all B batches.
void _qmm_t_4_64(
    float* result,
    const float* x,
    const uint32_t* w,
    const float* scales,
    const float* biases,
    int M,
    int N,
    int K,
    int B,
    bool batched_w) {
  constexpr int bits = 4;
  constexpr int group_size = 64;
  constexpr int bitmask = (1 << bits) - 1;
  constexpr int pack_factor = 32 / bits; // 8 weights per uint32
  constexpr int packs_in_group = group_size / pack_factor;

  int w_els = N * K / pack_factor; // packed uint32 words per weight matrix
  int g_els = w_els * pack_factor / group_size; // (scale, bias) pairs per matrix

  for (int i = 0; i < B; i++) {
    for (int m = 0; m < M; m++) {
      // Each output row restarts from the beginning of the weight matrix.
      const uint32_t* w_local = w;
      const float* scales_local = scales;
      const float* biases_local = biases;

      for (int n = 0; n < N; n++) {
        const simd_float16* x_local = (simd_float16*)x;
        simd_float16 sum = 0;
        for (int k = 0; k < K; k += group_size) {
          float scale = *scales_local++;
          float bias = *biases_local++;

          for (int kw = 0; kw < packs_in_group; kw += 2) {
            // TODO: vectorize this properly
            // Unpack two uint32 words (16 4-bit values) into one 16-lane
            // integer vector.
            simd_uint16 wi;
            for (int e = 0; e < 2; e++) {
              uint32_t wii = *w_local++;
              for (int p = 0; p < 8; p++) {
                wi[e * 8 + p] = wii & bitmask;
                wii >>= bits;
              }
            }
            // Dequantize: w = q * scale + bias, then accumulate the dot
            // product 16 lanes at a time.
            simd_float16 wf = simd_float(wi);
            wf *= scale;
            wf += bias;

            sum += (*x_local) * wf;
            x_local++;
          }
        }

        // Horizontal sum of the 16 partial products gives one output value.
        *result = simd_reduce_add(sum);
        result++;
      }

      x += K;
    }
    if (batched_w) {
      // Advance to the next batch's weights and quantization parameters.
      w += w_els;
      scales += g_els;
      biases += g_els;
    }
  }
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
void QuantizedMatmul::eval_cpu(const std::vector<array>& inputs, array& out) {
  assert(inputs.size() == 4);

  auto& x = inputs[0];
  auto& w = inputs[1];
  auto& scales = inputs[2];
  auto& biases = inputs[3];

  // The specialized kernel only handles the transposed float32 case with
  // 4-bit quantization, group size 64, and fully row-contiguous operands.
  bool condition =
      (transpose_ && x.flags().row_contiguous && w.flags().row_contiguous &&
       scales.flags().row_contiguous && biases.flags().row_contiguous &&
       x.dtype() == float32 && bits_ == 4 && group_size_ == 64);

  if (condition) {
    out.set_data(allocator::malloc_or_wait(out.nbytes()));
    int K = x.shape(-1); // contraction dimension
    int M = x.shape(-2); // rows of x per batch
    int N = out.shape(-1); // output columns
    int B = x.size() / K / M; // number of batches
    // w may be shared across batches (ndim <= 2) or batched itself.
    bool batched_w = w.ndim() > 2;
    _qmm_t_4_64(
        out.data<float>(),
        x.data<float>(),
        w.data<uint32_t>(),
        scales.data<float>(),
        biases.data<float>(),
        M,
        N,
        K,
        B,
        batched_w);
  } else {
    // Fall back to the common implementation.
    eval(inputs, out);
  }
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,139 +0,0 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
#include <Accelerate/Accelerate.h>
|
|
||||||
#include <simd/vector.h>
|
|
||||||
|
|
||||||
#include "mlx/backend/common/reduce.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
// Element-wise minimum with a scalar and a SIMD-vector overload, used as the
// Reduction parameter of StridedReduce.
template <typename T, typename VT>
struct MinReduction {
  T operator()(const T& a, const T& b) {
    return std::min(a, b);
  }

  VT operator()(VT a, VT b) {
    return simd_min(a, b);
  }
};
|
|
||||||
|
|
||||||
// Element-wise maximum with a scalar and a SIMD-vector overload, used as the
// Reduction parameter of StridedReduce.
template <typename T, typename VT>
struct MaxReduction {
  T operator()(const T& a, const T& b) {
    return std::max(a, b);
  }

  VT operator()(VT a, VT b) {
    return simd_max(a, b);
  }
};
|
|
||||||
|
|
||||||
// Element-wise addition with a scalar and a SIMD-vector overload, used as
// the Reduction parameter of StridedReduce.
template <typename T, typename VT>
struct SumReduction {
  T operator()(const T& a, const T& b) {
    return a + b;
  }

  VT operator()(VT a, VT b) {
    return a + b;
  }
};
|
|
||||||
|
|
||||||
// Accumulate `size` consecutive rows of length `stride` starting at `x` into
// `accum` element-wise: accum[j] = op(accum[j], row[j]) for every row. The
// inner loop processes N lanes at a time through the SIMD overload of
// Reduction and finishes any tail one element at a time.
template <typename T, typename VT, int N, typename Reduction>
struct StridedReduce {
  void operator()(const T* x, T* accum, int size, size_t stride) {
    Reduction op;
    // Compare the size_t remainder against an unsigned width to avoid a
    // signed/unsigned comparison warning (N is a non-negative constant).
    constexpr size_t simd_n = N;

    for (int i = 0; i < size; i++) {
      size_t s = stride;
      T* a = accum;
      while (s >= simd_n) {
        *(VT*)a = op((*(VT*)x), (*(VT*)a));
        x += N;
        a += N;
        s -= N;
      }
      while (s-- > 0) {
        *a = op(*a, *x);
        a++;
        x++;
      }
    }
  }
};
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
void Reduce::eval_cpu(const std::vector<array>& inputs, array& out) {
  assert(inputs.size() == 1);
  auto& in = inputs[0];

  // Accelerated float32 paths. Each passes reduction_op three kernels: a
  // strided SIMD reduction, a contiguous vDSP kernel, and a scalar fallback
  // (dispatch between them happens inside reduction_op), together with the
  // reduction's identity element.
  if (in.dtype() == float32) {
    if (reduce_type_ == Reduce::Sum) {
      reduction_op<float, float>(
          in,
          out,
          axes_,
          0, // identity for sum
          StridedReduce<
              float,
              simd_float16,
              16,
              SumReduction<float, simd_float16>>(),
          [](const auto* x, auto* accum, int size) {
            float acc;
            vDSP_sve((const float*)x, 1, &acc, size); // vector sum
            (*accum) += acc;
          },
          [](auto* accum, auto x) { *accum += x; });
      return;
    } else if (reduce_type_ == Reduce::Max) {
      reduction_op<float, float>(
          in,
          out,
          axes_,
          -std::numeric_limits<float>::infinity(), // identity for max
          StridedReduce<
              float,
              simd_float16,
              16,
              MaxReduction<float, simd_float16>>(),
          [](const auto* x, auto* accum, int size) {
            float max;
            vDSP_maxv((const float*)x, 1, &max, size);
            (*accum) = (*accum < max) ? max : *accum;
          },
          [](auto* accum, auto x) { (*accum) = (*accum < x) ? x : *accum; });
      return;
    } else if (reduce_type_ == Reduce::Min) {
      reduction_op<float, float>(
          in,
          out,
          axes_,
          std::numeric_limits<float>::infinity(), // identity for min
          StridedReduce<
              float,
              simd_float16,
              16,
              MinReduction<float, simd_float16>>(),
          [](const auto* x, auto* accum, int size) {
            float min;
            vDSP_minv((const float*)x, 1, &min, size);
            (*accum) = (*accum > min) ? min : *accum;
          },
          [](auto* accum, auto x) { (*accum) = (*accum > x) ? x : *accum; });
      return;
    }
  }
  // TODO: Add integer addition and min/max using the templates above and
  // simd_int16 and friends.
  eval(inputs, out);
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,393 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
#include <limits>
|
|
||||||
|
|
||||||
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
|
|
||||||
#include <arm_neon.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include <simd/math.h>
|
|
||||||
#include <simd/vector.h>
|
|
||||||
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Compute exp(x) in an optimizer friendly way as follows:
|
|
||||||
*
|
|
||||||
* First change the problem to computing 2**y where y = x / ln(2).
|
|
||||||
*
|
|
||||||
* Now we will compute 2**y as 2**y1 * 2**y2 where y1 is the integer part
|
|
||||||
* `ipart` and y2 is fractional part. For the integer part we perform bit
|
|
||||||
* shifting and for the fractional part we use a polynomial approximation.
|
|
||||||
*
|
|
||||||
* The algorithm and constants of the polynomial taken from
|
|
||||||
* https://github.com/akohlmey/fastermath/blob/master/src/exp.c which took them
|
|
||||||
* from Cephes math library.
|
|
||||||
*
|
|
||||||
* Note: The implementation below is a general fast exp. There could be faster
|
|
||||||
* implementations for numbers strictly < 0.
|
|
||||||
*/
|
|
||||||
inline simd_float16 simd_fast_exp(simd_float16 x_init) {
  auto x = x_init * 1.442695; // multiply with log_2(e)
  simd_float16 ipart, fpart;
  simd_int16 epart;
  // Clamp so that 2**ipart stays representable in a float32 exponent.
  x = simd_clamp(x, -80, 80);
  ipart = simd::floor(x + 0.5);
  fpart = x - ipart;

  // Degree-6 polynomial approximation of 2**fpart evaluated with Horner's
  // scheme (coefficients from Cephes, see the comment above).
  x = 1.535336188319500e-4f;
  x = x * fpart + 1.339887440266574e-3f;
  x = x * fpart + 9.618437357674640e-3f;
  x = x * fpart + 5.550332471162809e-2f;
  x = x * fpart + 2.402264791363012e-1f;
  x = x * fpart + 6.931472028550421e-1f;
  x = x * fpart + 1.000000000000000f;

  // generate 2**ipart in the floating point representation using integer
  // bitshifting (float32: exponent bias 127, 23 mantissa bits)
  epart = (simd_int(ipart) + 127) << 23;

  // Avoid suppressing NaNs: (x_init == x_init) is false only for NaN lanes,
  // so those lanes keep the original input instead of the approximation.
  simd_int16 eq = (x_init == x_init);
  return simd_bitselect(x_init, (*(simd_float16*)&epart) * x, eq);
}
|
|
||||||
|
|
||||||
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
|
|
||||||
/**
|
|
||||||
* The ARM neon equivalent of the fast exp above.
|
|
||||||
*/
|
|
||||||
inline float16x8_t neon_fast_exp(float16x8_t x) {
  x = vmulq_f16(x, vdupq_n_f16(float16_t(1.442695f))); // multiply with log_2(e)
  // Clamp so that 2**ipart stays representable in a float16 exponent.
  x = vmaxq_f16(x, vdupq_n_f16(float16_t(-14.f))); // clamp under with -14
  x = vminq_f16(x, vdupq_n_f16(float16_t(14.f))); // clamp over with 14

  // Split into integer and fractional parts: 2**x = 2**ipart * 2**fpart.
  float16x8_t ipart = vrndmq_f16(vaddq_f16(x, vdupq_n_f16(float16_t(0.5f))));
  float16x8_t fpart = vsubq_f16(x, ipart);

  // Polynomial approximation of 2**fpart (same coefficients as
  // simd_fast_exp above), evaluated with fused multiply-adds.
  x = vdupq_n_f16(float16_t(1.535336188319500e-4f));
  x = vfmaq_f16(vdupq_n_f16(float16_t(1.339887440266574e-3f)), x, fpart);
  x = vfmaq_f16(vdupq_n_f16(float16_t(9.618437357674640e-3f)), x, fpart);
  x = vfmaq_f16(vdupq_n_f16(float16_t(5.550332471162809e-2f)), x, fpart);
  x = vfmaq_f16(vdupq_n_f16(float16_t(2.402264791363012e-1f)), x, fpart);
  x = vfmaq_f16(vdupq_n_f16(float16_t(6.931472028550421e-1f)), x, fpart);
  x = vfmaq_f16(vdupq_n_f16(float16_t(1.000000000000000f)), x, fpart);

  // generate 2**ipart in the floating point representation using integer
  // bitshifting (float16: exponent bias 15, 10 mantissa bits)
  int16x8_t epart = vcvtq_s16_f16(ipart);
  epart = vaddq_s16(epart, vdupq_n_s16(15));
  epart = vshlq_n_s16(epart, 10);

  return vmulq_f16(vreinterpretq_f16_s16(epart), x);
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Implementation of folding maximum for ARM neon. This should possibly be
|
|
||||||
* refactored out of softmax.cpp at some point.
|
|
||||||
*/
|
|
||||||
inline float16_t neon_reduce_max(float16x8_t x) {
  float16x4_t y;
  // Pairwise max folds the 8 lanes down: 8 -> 4 -> 2 -> 1.
  y = vpmax_f16(vget_low_f16(x), vget_high_f16(x));
  y = vpmax_f16(y, y);
  y = vpmax_f16(y, y);
  return vget_lane_f16(y, 0);
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Implementation of folding sum for ARM neon. This should possibly be
|
|
||||||
* refactored out of softmax.cpp at some point.
|
|
||||||
*/
|
|
||||||
inline float16_t neon_reduce_add(float16x8_t x) {
  float16x4_t y;
  float16x4_t zero = vdup_n_f16(0);
  // Pairwise add folds the 8 lanes down (8 -> 4 -> 2 -> 1), padding the
  // unused lanes with zeros so they do not affect the sum.
  y = vpadd_f16(vget_low_f16(x), vget_high_f16(x));
  y = vpadd_f16(y, zero);
  y = vpadd_f16(y, zero);
  return vget_lane_f16(y, 0);
}
|
|
||||||
|
|
||||||
// Adapter exposing the elementwise and reduction operations the softmax
// kernel below needs, implemented with NEON fp16 intrinsics
// (instantiated with T = float16_t, VT = float16x8_t).
template <typename T, typename VT>
struct NeonFp16SimdOps {
  // Broadcast a scalar to all lanes.
  VT init(T a) {
    return vdupq_n_f16(a);
  }

  VT load(const T* a) {
    return vld1q_f16(a);
  }

  void store(T* dst, VT x) {
    vst1q_f16(dst, x);
  }

  VT max(VT a, VT b) {
    return vmaxq_f16(a, b);
  }

  VT exp(VT x) {
    return neon_fast_exp(x);
  }

  VT add(VT a, VT b) {
    return vaddq_f16(a, b);
  }

  // Vector minus broadcast scalar.
  VT sub(VT a, T b) {
    return vsubq_f16(a, vdupq_n_f16(b));
  }

  VT mul(VT a, VT b) {
    return vmulq_f16(a, b);
  }

  // Vector times broadcast scalar.
  VT mul(VT a, T b) {
    return vmulq_f16(a, vdupq_n_f16(b));
  }

  T reduce_max(VT x) {
    return neon_reduce_max(x);
  }

  T reduce_add(VT x) {
    return neon_reduce_add(x);
  }
};
|
|
||||||
|
|
||||||
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
|
|
||||||
|
|
||||||
// Adapter exposing the elementwise and reduction operations the softmax
// kernel below needs, implemented with the <simd/> vector types whose
// operators act lane-wise (instantiated with T = float, VT = simd_float16).
template <typename T, typename VT>
struct AccelerateSimdOps {
  // Scalar assignment broadcasts to all lanes of the simd type.
  VT init(T a) {
    return a;
  }

  VT load(const T* a) {
    return *(VT*)a;
  }

  void store(T* dst, VT x) {
    *(VT*)dst = x;
  }

  VT max(VT a, VT b) {
    return simd_max(a, b);
  }

  VT exp(VT x) {
    return simd_fast_exp(x);
  }

  VT add(VT a, VT b) {
    return a + b;
  }

  // Vector minus broadcast scalar.
  VT sub(VT a, T b) {
    return a - b;
  }

  VT mul(VT a, VT b) {
    return a * b;
  }

  // Vector times broadcast scalar.
  VT mul(VT a, T b) {
    return a * b;
  }

  T reduce_max(VT x) {
    return simd_reduce_max(x);
  }

  T reduce_add(VT x) {
    return simd_reduce_add(x);
  }
};
|
|
||||||
|
|
||||||
template <typename T, typename AccT, typename VT, typename Ops, int N>
|
|
||||||
void softmax(const array& in, array& out) {
|
|
||||||
Ops ops;
|
|
||||||
|
|
||||||
const T* in_ptr = in.data<T>();
|
|
||||||
T* out_ptr = out.data<T>();
|
|
||||||
int M = in.shape().back();
|
|
||||||
int L = in.data_size() / M;
|
|
||||||
const T* current_in_ptr;
|
|
||||||
T* current_out_ptr;
|
|
||||||
|
|
||||||
for (int i = 0; i < L; i++, in_ptr += M, out_ptr += M) {
|
|
||||||
// Find the maximum
|
|
||||||
current_in_ptr = in_ptr;
|
|
||||||
VT vmaximum = ops.init(-std::numeric_limits<float>::infinity());
|
|
||||||
size_t s = M;
|
|
||||||
while (s >= N) {
|
|
||||||
VT vals;
|
|
||||||
if constexpr (std::is_same<T, AccT>::value) {
|
|
||||||
vals = ops.load(current_in_ptr);
|
|
||||||
} else {
|
|
||||||
for (int i = 0; i < N; ++i) {
|
|
||||||
vals[i] = static_cast<AccT>(current_in_ptr[i]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
vmaximum = ops.max(vals, vmaximum);
|
|
||||||
current_in_ptr += N;
|
|
||||||
s -= N;
|
|
||||||
}
|
|
||||||
AccT maximum = ops.reduce_max(vmaximum);
|
|
||||||
while (s-- > 0) {
|
|
||||||
maximum = std::max(maximum, static_cast<AccT>(*current_in_ptr));
|
|
||||||
current_in_ptr++;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute the normalizer and the exponentials
|
|
||||||
VT vnormalizer = ops.init(0.0);
|
|
||||||
current_out_ptr = out_ptr;
|
|
||||||
current_in_ptr = in_ptr;
|
|
||||||
s = M;
|
|
||||||
while (s >= N) {
|
|
||||||
VT vexp;
|
|
||||||
if constexpr (std::is_same<T, AccT>::value) {
|
|
||||||
vexp = ops.load(current_in_ptr);
|
|
||||||
} else {
|
|
||||||
for (int i = 0; i < N; ++i) {
|
|
||||||
vexp[i] = static_cast<AccT>(current_in_ptr[i]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
vexp = ops.exp(ops.sub(vexp, maximum));
|
|
||||||
if constexpr (std::is_same<T, AccT>::value) {
|
|
||||||
ops.store(current_out_ptr, vexp);
|
|
||||||
}
|
|
||||||
vnormalizer = ops.add(vnormalizer, vexp);
|
|
||||||
current_in_ptr += N;
|
|
||||||
current_out_ptr += N;
|
|
||||||
s -= N;
|
|
||||||
}
|
|
||||||
AccT normalizer = ops.reduce_add(vnormalizer);
|
|
||||||
while (s-- > 0) {
|
|
||||||
AccT _exp = std::exp(*current_in_ptr - maximum);
|
|
||||||
if (std::is_same<T, AccT>::value) {
|
|
||||||
*current_out_ptr = _exp;
|
|
||||||
}
|
|
||||||
normalizer += _exp;
|
|
||||||
current_in_ptr++;
|
|
||||||
current_out_ptr++;
|
|
||||||
}
|
|
||||||
normalizer = 1 / normalizer;
|
|
||||||
|
|
||||||
// Normalize
|
|
||||||
current_out_ptr = out_ptr;
|
|
||||||
current_in_ptr = in_ptr;
|
|
||||||
s = M;
|
|
||||||
while (s >= N) {
|
|
||||||
if constexpr (std::is_same<T, AccT>::value) {
|
|
||||||
ops.store(current_out_ptr, ops.mul(*(VT*)current_out_ptr, normalizer));
|
|
||||||
} else {
|
|
||||||
VT vexp;
|
|
||||||
for (int i = 0; i < N; ++i) {
|
|
||||||
vexp[i] = static_cast<AccT>(current_in_ptr[i]);
|
|
||||||
}
|
|
||||||
vexp = ops.mul(ops.exp(ops.sub(vexp, maximum)), normalizer);
|
|
||||||
for (int i = 0; i < N; ++i) {
|
|
||||||
current_out_ptr[i] = vexp[i];
|
|
||||||
}
|
|
||||||
current_in_ptr += N;
|
|
||||||
}
|
|
||||||
current_out_ptr += N;
|
|
||||||
s -= N;
|
|
||||||
}
|
|
||||||
while (s-- > 0) {
|
|
||||||
if constexpr (std::is_same<T, AccT>::value) {
|
|
||||||
*current_out_ptr *= normalizer;
|
|
||||||
} else {
|
|
||||||
AccT _exp = std::exp(*current_in_ptr - maximum);
|
|
||||||
*current_out_ptr = static_cast<T>(_exp * normalizer);
|
|
||||||
current_in_ptr++;
|
|
||||||
}
|
|
||||||
current_out_ptr++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
void Softmax::eval_cpu(const std::vector<array>& inputs, array& out) {
  assert(inputs.size() == 1);

  // Make sure that the last dimension is contiguous
  auto check_input = [](array x) {
    bool no_copy = x.strides()[x.ndim() - 1] == 1;
    if (x.ndim() > 1) {
      auto s = x.strides()[x.ndim() - 2];
      // Rows may also be broadcast (stride 0) or packed back to back.
      no_copy &= (s == 0 || s == x.shape().back());
    }
    if (no_copy) {
      return x;
    } else {
      array x_copy(x.shape(), x.dtype(), nullptr, {});
      copy(x, x_copy, CopyType::General);
      return x_copy;
    }
  };
  array in = check_input(std::move(inputs[0]));
  // Share the input's layout so rows line up between input and output.
  out.set_data(
      allocator::malloc_or_wait(in.data_size() * in.itemsize()),
      in.data_size(),
      in.strides(),
      in.flags());

  switch (in.dtype()) {
    case bool_:
    case uint8:
    case uint16:
    case uint32:
    case uint64:
    case int8:
    case int16:
    case int32:
    case int64:
      throw std::invalid_argument(
          "Softmax is defined only for floating point types");
      break;
    case float32:
      // float32 uses the Accelerate/<simd/> path directly.
      softmax<
          float,
          float,
          simd_float16,
          AccelerateSimdOps<float, simd_float16>,
          16>(in, out);
      break;
    case float16:
      if (precise_) {
        // Precise mode accumulates in float32.
        softmax<
            float16_t,
            float,
            simd_float16,
            AccelerateSimdOps<float, simd_float16>,
            16>(in, out);
      } else {
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        // Fast path: compute entirely in fp16 with NEON intrinsics.
        softmax<
            float16_t,
            float16_t,
            float16x8_t,
            NeonFp16SimdOps<float16_t, float16x8_t>,
            8>(in, out);
#else // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        eval(inputs, out); // Redirect to common backend for consistency
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
      }
      break;
    case bfloat16:
      eval(inputs, out);
      break;
    case complex64:
      eval(inputs, out);
      break;
  }
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <Accelerate/Accelerate.h>
|
|
||||||
#include "mlx/dtype.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
// Map an mlx Dtype to the corresponding BNNS data type. Integer and float
// kinds are encoded as a BNNS type-class bit OR'ed with the element width in
// bits; complex types are not representable in BNNS.
BNNSDataType to_bnns_dtype(Dtype mlx_dtype) {
  uint32_t size_bits = size_of(mlx_dtype) * 8;
  switch (kindof(mlx_dtype)) {
    case Dtype::Kind::b:
      return BNNSDataTypeBoolean;
    case Dtype::Kind::u:
      return BNNSDataType(BNNSDataTypeUIntBit | size_bits);
    case Dtype::Kind::i:
      return BNNSDataType(BNNSDataTypeIntBit | size_bits);
    case Dtype::Kind::f:
      return BNNSDataType(BNNSDataTypeFloatBit | size_bits);
    case Dtype::Kind::V:
      return BNNSDataTypeBFloat16;
    case Dtype::Kind::c:
      throw std::invalid_argument("BNNS does not support complex types");
  }
  // All enumerators are handled above; this throw silences the "control
  // reaches end of non-void function" warning and guards against an
  // out-of-range Dtype::Kind value.
  throw std::invalid_argument("Unknown dtype kind in to_bnns_dtype");
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,71 +1,9 @@
|
|||||||
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
|
||||||
set(COMPILER ${CMAKE_C_COMPILER})
|
|
||||||
set(CLANG TRUE)
|
|
||||||
else()
|
|
||||||
set(COMPILER ${CMAKE_CXX_COMPILER})
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(MSVC)
|
|
||||||
set(SHELL_EXT ps1)
|
|
||||||
set(SHELL_CMD powershell -ExecutionPolicy Bypass -File)
|
|
||||||
else()
|
|
||||||
set(SHELL_EXT sh)
|
|
||||||
set(SHELL_CMD /bin/bash)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_custom_command(
|
|
||||||
OUTPUT compiled_preamble.cpp
|
|
||||||
COMMAND
|
|
||||||
${SHELL_CMD} ${CMAKE_CURRENT_SOURCE_DIR}/make_compiled_preamble.${SHELL_EXT}
|
|
||||||
${CMAKE_CURRENT_BINARY_DIR}/compiled_preamble.cpp ${COMPILER}
|
|
||||||
${PROJECT_SOURCE_DIR} ${CLANG} ${CMAKE_SYSTEM_PROCESSOR}
|
|
||||||
DEPENDS make_compiled_preamble.${SHELL_EXT}
|
|
||||||
compiled_preamble.h
|
|
||||||
${PROJECT_SOURCE_DIR}/mlx/types/half_types.h
|
|
||||||
${PROJECT_SOURCE_DIR}/mlx/types/fp16.h
|
|
||||||
${PROJECT_SOURCE_DIR}/mlx/types/bf16.h
|
|
||||||
${PROJECT_SOURCE_DIR}/mlx/types/complex.h
|
|
||||||
ops.h)
|
|
||||||
|
|
||||||
add_custom_target(cpu_compiled_preamble DEPENDS compiled_preamble.cpp)
|
|
||||||
|
|
||||||
add_dependencies(mlx cpu_compiled_preamble)
|
|
||||||
|
|
||||||
target_sources(
|
target_sources(
|
||||||
mlx
|
mlx
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arg_reduce.cpp
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/broadcasting.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/binary.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/compiled.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/compiled.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/common.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/common.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/conv.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/copy.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/eigh.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/erf.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/fft.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/hadamard.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/masked_mm.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/primitives.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/quantized.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/reduce.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/reduce_utils.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/scan.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/select.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/slicing.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/softmax.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/sort.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/threefry.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/indexing.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/load.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/load.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/qrf.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/reduce.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/svd.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/slicing.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/inverse.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp)
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/cholesky.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp
|
|
||||||
${CMAKE_CURRENT_BINARY_DIR}/compiled_preamble.cpp)
|
|
||||||
|
|
||||||
if(IOS)
|
|
||||||
target_sources(mlx PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/compiled_nocpu.cpp)
|
|
||||||
else()
|
|
||||||
target_sources(mlx PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/compiled_cpu.cpp
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/jit_compiler.cpp)
|
|
||||||
endif()
|
|
||||||
|
|||||||
@@ -1,74 +0,0 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/array.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
void arange(T start, T next, array& out, size_t size) {
|
|
||||||
auto ptr = out.data<T>();
|
|
||||||
auto step_size = next - start;
|
|
||||||
for (int i = 0; i < size; ++i) {
|
|
||||||
ptr[i] = start;
|
|
||||||
start += step_size;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
// Fill `out` with an arithmetic progression starting at `start` with
// increment `step`, dispatched on the output dtype. The templated helper
// receives (start, start + step) and recovers the step from their
// difference in the output element type.
void arange(
    const std::vector<array>& inputs,
    array& out,
    double start,
    double step) {
  assert(inputs.size() == 0); // arange takes no array inputs
  out.set_data(allocator::malloc_or_wait(out.nbytes()));
  switch (out.dtype()) {
    case bool_:
      throw std::runtime_error("Bool type unsupported for arange.");
      break;
    case uint8:
      arange<uint8_t>(start, start + step, out, out.size());
      break;
    case uint16:
      arange<uint16_t>(start, start + step, out, out.size());
      break;
    case uint32:
      arange<uint32_t>(start, start + step, out, out.size());
      break;
    case uint64:
      arange<uint64_t>(start, start + step, out, out.size());
      break;
    case int8:
      arange<int8_t>(start, start + step, out, out.size());
      break;
    case int16:
      arange<int16_t>(start, start + step, out, out.size());
      break;
    case int32:
      arange<int32_t>(start, start + step, out, out.size());
      break;
    case int64:
      arange<int64_t>(start, start + step, out, out.size());
      break;
    case float16:
      arange<float16_t>(start, start + step, out, out.size());
      break;
    case float32:
      arange<float>(start, start + step, out, out.size());
      break;
    case bfloat16:
      arange<bfloat16_t>(start, start + step, out, out.size());
      break;
    case complex64:
      arange<complex64_t>(start, start + step, out, out.size());
      break;
  }
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,112 +0,0 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
#include "utils.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
template <typename InT, typename OpT>
|
|
||||||
void arg_reduce(const array& in, array& out, const OpT& op, int axis) {
|
|
||||||
auto axis_size = in.shape()[axis];
|
|
||||||
auto axis_stride = in.strides()[axis];
|
|
||||||
Strides strides = in.strides();
|
|
||||||
Shape shape = in.shape();
|
|
||||||
strides.erase(strides.begin() + axis);
|
|
||||||
shape.erase(shape.begin() + axis);
|
|
||||||
for (uint32_t i = 0; i < out.size(); ++i) {
|
|
||||||
auto loc = elem_to_loc(i, shape, strides);
|
|
||||||
auto in_ptr = in.data<InT>() + loc;
|
|
||||||
uint32_t ind_v = 0;
|
|
||||||
InT v = (*in_ptr);
|
|
||||||
for (uint32_t j = 0; j < axis_size; ++j, in_ptr += axis_stride) {
|
|
||||||
op(j, (*in_ptr), &ind_v, &v);
|
|
||||||
}
|
|
||||||
out.data<uint32_t>()[i] = ind_v;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Select the comparison op for the requested reduction and run arg_reduce.
// Both ops use strict comparison, so the FIRST index attaining the extremum
// wins ties.
template <typename InT>
void arg_reduce_dispatch(
    const array& in,
    array& out,
    ArgReduce::ReduceType rtype,
    int axis) {
  switch (rtype) {
    case ArgReduce::ArgMin: {
      // Replace the running (index, value) only on a strictly smaller value.
      auto op = [](auto ind_x, auto x, auto ind_y, auto y) {
        if (x < (*y)) {
          (*y) = x;
          (*ind_y) = ind_x;
        }
      };
      arg_reduce<InT>(in, out, op, axis);
      break;
    }
    case ArgReduce::ArgMax: {
      // Replace the running (index, value) only on a strictly larger value.
      auto op = [](auto ind_x, auto x, auto ind_y, auto y) {
        if (x > (*y)) {
          (*y) = x;
          (*ind_y) = ind_x;
        }
      };
      arg_reduce<InT>(in, out, op, axis);
      break;
    }
  }
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
// Allocate the output and dispatch the argmin/argmax reduction on the
// input dtype.
void ArgReduce::eval(const std::vector<array>& inputs, array& out) {
  assert(inputs.size() == 1);
  auto& in = inputs[0];
  out.set_data(allocator::malloc_or_wait(out.nbytes()));

  switch (in.dtype()) {
    case bool_:
      arg_reduce_dispatch<bool>(in, out, reduce_type_, axis_);
      break;
    case uint8:
      arg_reduce_dispatch<uint8_t>(in, out, reduce_type_, axis_);
      break;
    case uint16:
      arg_reduce_dispatch<uint16_t>(in, out, reduce_type_, axis_);
      break;
    case uint32:
      arg_reduce_dispatch<uint32_t>(in, out, reduce_type_, axis_);
      break;
    case uint64:
      arg_reduce_dispatch<uint64_t>(in, out, reduce_type_, axis_);
      break;
    case int8:
      arg_reduce_dispatch<int8_t>(in, out, reduce_type_, axis_);
      break;
    case int16:
      arg_reduce_dispatch<int16_t>(in, out, reduce_type_, axis_);
      break;
    case int32:
      arg_reduce_dispatch<int32_t>(in, out, reduce_type_, axis_);
      break;
    case int64:
      arg_reduce_dispatch<int64_t>(in, out, reduce_type_, axis_);
      break;
    case float16:
      arg_reduce_dispatch<float16_t>(in, out, reduce_type_, axis_);
      break;
    case float32:
      arg_reduce_dispatch<float>(in, out, reduce_type_, axis_);
      break;
    case bfloat16:
      arg_reduce_dispatch<bfloat16_t>(in, out, reduce_type_, axis_);
      break;
    case complex64:
      arg_reduce_dispatch<complex64_t>(in, out, reduce_type_, axis_);
      break;
  }
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,331 +0,0 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
#include <cmath>
|
|
||||||
#include <sstream>
|
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/backend/common/binary.h"
|
|
||||||
#include "mlx/backend/common/binary_two.h"
|
|
||||||
#include "mlx/backend/common/ops.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
#include "mlx/utils.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
template <typename T, typename U, typename Op>
|
|
||||||
void comparison_op(const array& a, const array& b, array& out, Op op) {
|
|
||||||
DefaultScalarVector<T, U, Op> opsv(op);
|
|
||||||
DefaultVectorScalar<T, U, Op> opvs(op);
|
|
||||||
DefaultVectorVector<T, U, Op> opvv(op);
|
|
||||||
binary_op<T, U>(a, b, out, op, opsv, opvs, opvv);
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename Op>
|
|
||||||
void comparison_op(const array& a, const array& b, array& out, Op op) {
|
|
||||||
switch (a.dtype()) {
|
|
||||||
case bool_:
|
|
||||||
comparison_op<bool, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case uint8:
|
|
||||||
comparison_op<uint8_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case uint16:
|
|
||||||
comparison_op<uint16_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case uint32:
|
|
||||||
comparison_op<uint32_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case uint64:
|
|
||||||
comparison_op<uint64_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case int8:
|
|
||||||
comparison_op<int8_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case int16:
|
|
||||||
comparison_op<int16_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case int32:
|
|
||||||
comparison_op<int32_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case int64:
|
|
||||||
comparison_op<int64_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case float16:
|
|
||||||
comparison_op<float16_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case float32:
|
|
||||||
comparison_op<float, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case bfloat16:
|
|
||||||
comparison_op<bfloat16_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case complex64:
|
|
||||||
comparison_op<complex64_t, bool>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
void Add::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
binary(a, b, out, detail::Add());
|
|
||||||
}
|
|
||||||
|
|
||||||
void DivMod::eval(
|
|
||||||
const std::vector<array>& inputs,
|
|
||||||
std::vector<array>& outputs) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
auto integral_op = [](auto x, auto y) {
|
|
||||||
return std::make_pair(x / y, x % y);
|
|
||||||
};
|
|
||||||
auto float_op = [](auto x, auto y) {
|
|
||||||
return std::make_pair(std::trunc(x / y), std::fmod(x, y));
|
|
||||||
};
|
|
||||||
switch (outputs[0].dtype()) {
|
|
||||||
case bool_:
|
|
||||||
binary_op<bool>(a, b, outputs, integral_op);
|
|
||||||
case uint8:
|
|
||||||
binary_op<uint8_t>(a, b, outputs, integral_op);
|
|
||||||
break;
|
|
||||||
case uint16:
|
|
||||||
binary_op<uint16_t>(a, b, outputs, integral_op);
|
|
||||||
break;
|
|
||||||
case uint32:
|
|
||||||
binary_op<uint32_t>(a, b, outputs, integral_op);
|
|
||||||
break;
|
|
||||||
case uint64:
|
|
||||||
binary_op<uint64_t>(a, b, outputs, integral_op);
|
|
||||||
break;
|
|
||||||
case int8:
|
|
||||||
binary_op<int8_t>(a, b, outputs, integral_op);
|
|
||||||
break;
|
|
||||||
case int16:
|
|
||||||
binary_op<int16_t>(a, b, outputs, integral_op);
|
|
||||||
break;
|
|
||||||
case int32:
|
|
||||||
binary_op<int32_t>(a, b, outputs, integral_op);
|
|
||||||
break;
|
|
||||||
case int64:
|
|
||||||
binary_op<int64_t>(a, b, outputs, integral_op);
|
|
||||||
break;
|
|
||||||
case float16:
|
|
||||||
binary_op<float16_t>(a, b, outputs, float_op);
|
|
||||||
break;
|
|
||||||
case float32:
|
|
||||||
binary_op<float>(a, b, outputs, float_op);
|
|
||||||
break;
|
|
||||||
case bfloat16:
|
|
||||||
binary_op<bfloat16_t>(a, b, outputs, float_op);
|
|
||||||
break;
|
|
||||||
case complex64:
|
|
||||||
// Should never get here
|
|
||||||
throw std::runtime_error("[DivMod] Complex type not supported");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Divide::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
binary(a, b, out, detail::Divide());
|
|
||||||
}
|
|
||||||
|
|
||||||
void Remainder::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
binary(a, b, out, detail::Remainder());
|
|
||||||
}
|
|
||||||
|
|
||||||
void Equal::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
if (equal_nan_) {
|
|
||||||
comparison_op(inputs[0], inputs[1], out, detail::NaNEqual());
|
|
||||||
} else {
|
|
||||||
comparison_op(inputs[0], inputs[1], out, detail::Equal());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Greater::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
comparison_op(inputs[0], inputs[1], out, detail::Greater());
|
|
||||||
}
|
|
||||||
|
|
||||||
void GreaterEqual::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
comparison_op(inputs[0], inputs[1], out, detail::GreaterEqual());
|
|
||||||
}
|
|
||||||
|
|
||||||
void Less::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
comparison_op(inputs[0], inputs[1], out, detail::Less());
|
|
||||||
}
|
|
||||||
|
|
||||||
void LessEqual::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
comparison_op(inputs[0], inputs[1], out, detail::LessEqual());
|
|
||||||
}
|
|
||||||
|
|
||||||
void LogAddExp::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
if (out.dtype() == float32) {
|
|
||||||
binary_op<float>(a, b, out, detail::LogAddExp());
|
|
||||||
} else if (out.dtype() == float16) {
|
|
||||||
binary_op<float16_t>(a, b, out, detail::LogAddExp());
|
|
||||||
} else if (out.dtype() == bfloat16) {
|
|
||||||
binary_op<bfloat16_t>(a, b, out, detail::LogAddExp());
|
|
||||||
} else if (issubdtype(out.dtype(), inexact)) {
|
|
||||||
std::ostringstream err;
|
|
||||||
err << "[logaddexp] Does not support " << out.dtype();
|
|
||||||
throw std::invalid_argument(err.str());
|
|
||||||
} else {
|
|
||||||
throw std::invalid_argument(
|
|
||||||
"[logaddexp] Cannot compute logaddexp for arrays with"
|
|
||||||
" non floating point type.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void LogicalAnd::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2); // LogicalAnd requires two input arrays
|
|
||||||
auto& in1 = inputs[0];
|
|
||||||
auto& in2 = inputs[1];
|
|
||||||
binary(in1, in2, out, detail::LogicalAnd());
|
|
||||||
}
|
|
||||||
|
|
||||||
void LogicalOr::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2); // LogicalOr requires two input arrays
|
|
||||||
auto& in1 = inputs[0];
|
|
||||||
auto& in2 = inputs[1];
|
|
||||||
binary(in1, in2, out, detail::LogicalOr());
|
|
||||||
}
|
|
||||||
|
|
||||||
void Maximum::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
binary(a, b, out, detail::Maximum());
|
|
||||||
}
|
|
||||||
|
|
||||||
void Minimum::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
binary(a, b, out, detail::Minimum());
|
|
||||||
}
|
|
||||||
|
|
||||||
void Multiply::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
binary(a, b, out, detail::Multiply());
|
|
||||||
}
|
|
||||||
|
|
||||||
void NotEqual::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
comparison_op(inputs[0], inputs[1], out, detail::NotEqual());
|
|
||||||
}
|
|
||||||
|
|
||||||
void Power::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
binary(a, b, out, detail::Power());
|
|
||||||
}
|
|
||||||
|
|
||||||
void Subtract::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
binary(a, b, out, detail::Subtract());
|
|
||||||
}
|
|
||||||
|
|
||||||
void BitwiseBinary::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
auto& a = inputs[0];
|
|
||||||
auto& b = inputs[1];
|
|
||||||
auto dispatch_type = [&a, &b, &out](auto op) {
|
|
||||||
switch (out.dtype()) {
|
|
||||||
case bool_:
|
|
||||||
binary_op<bool>(a, b, out, op);
|
|
||||||
case uint8:
|
|
||||||
binary_op<uint8_t>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case uint16:
|
|
||||||
binary_op<uint16_t>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case uint32:
|
|
||||||
binary_op<uint32_t>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case uint64:
|
|
||||||
binary_op<uint64_t>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case int8:
|
|
||||||
binary_op<int8_t>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case int16:
|
|
||||||
binary_op<int16_t>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case int32:
|
|
||||||
binary_op<int32_t>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
case int64:
|
|
||||||
binary_op<int64_t>(a, b, out, op);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[BitwiseBinary::eval_cpu] Type not supported");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
switch (op_) {
|
|
||||||
case BitwiseBinary::And:
|
|
||||||
dispatch_type(detail::BitwiseAnd());
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::Or:
|
|
||||||
dispatch_type(detail::BitwiseOr());
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::Xor:
|
|
||||||
dispatch_type(detail::BitwiseXor());
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::LeftShift:
|
|
||||||
dispatch_type(detail::LeftShift());
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::RightShift:
|
|
||||||
dispatch_type(detail::RightShift());
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ArcTan2::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() == 2);
|
|
||||||
const auto& a = inputs[0];
|
|
||||||
const auto& b = inputs[1];
|
|
||||||
if (out.dtype() == float32) {
|
|
||||||
binary_op<float>(a, b, out, detail::ArcTan2());
|
|
||||||
} else if (out.dtype() == float16) {
|
|
||||||
binary_op<float16_t>(a, b, out, detail::ArcTan2());
|
|
||||||
} else if (out.dtype() == bfloat16) {
|
|
||||||
binary_op<bfloat16_t>(a, b, out, detail::ArcTan2());
|
|
||||||
} else if (issubdtype(out.dtype(), inexact)) {
|
|
||||||
std::ostringstream err;
|
|
||||||
err << "[arctan2] Does not support " << out.dtype();
|
|
||||||
throw std::invalid_argument(err.str());
|
|
||||||
} else {
|
|
||||||
throw std::invalid_argument(
|
|
||||||
"[arctan2] Cannot compute inverse tangent for arrays"
|
|
||||||
" with non floating point type.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +1,6 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
// Copyright © 2023 Apple Inc.
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
#include "mlx/allocator.h"
|
||||||
#include "mlx/array.h"
|
#include "mlx/array.h"
|
||||||
@@ -9,8 +8,6 @@
|
|||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
enum class BinaryOpType {
|
enum class BinaryOpType {
|
||||||
ScalarScalar,
|
ScalarScalar,
|
||||||
ScalarVector,
|
ScalarVector,
|
||||||
@@ -19,7 +16,7 @@ enum class BinaryOpType {
|
|||||||
General,
|
General,
|
||||||
};
|
};
|
||||||
|
|
||||||
BinaryOpType get_binary_op_type(const array& a, const array& b) {
|
inline BinaryOpType get_binary_op_type(const array& a, const array& b) {
|
||||||
BinaryOpType bopt;
|
BinaryOpType bopt;
|
||||||
if (a.data_size() == 1 && b.data_size() == 1) {
|
if (a.data_size() == 1 && b.data_size() == 1) {
|
||||||
bopt = BinaryOpType::ScalarScalar;
|
bopt = BinaryOpType::ScalarScalar;
|
||||||
@@ -37,29 +34,24 @@ BinaryOpType get_binary_op_type(const array& a, const array& b) {
|
|||||||
return bopt;
|
return bopt;
|
||||||
}
|
}
|
||||||
|
|
||||||
void set_binary_op_output_data(
|
inline void set_binary_op_output_data(
|
||||||
const array& a,
|
const array& a,
|
||||||
const array& b,
|
const array& b,
|
||||||
array& out,
|
array& out,
|
||||||
BinaryOpType bopt,
|
BinaryOpType bopt) {
|
||||||
bool donate_with_move = false) {
|
|
||||||
bool b_donatable = is_donatable(b, out);
|
bool b_donatable = is_donatable(b, out);
|
||||||
bool a_donatable = is_donatable(a, out);
|
bool a_donatable = is_donatable(a, out);
|
||||||
switch (bopt) {
|
switch (bopt) {
|
||||||
case BinaryOpType::ScalarScalar:
|
case BinaryOpType::ScalarScalar:
|
||||||
out.set_data(
|
out.set_data(
|
||||||
allocator::malloc_or_wait(out.itemsize()), 1, a.strides(), a.flags());
|
allocator::malloc(out.itemsize()), 1, a.strides(), a.flags());
|
||||||
break;
|
break;
|
||||||
case BinaryOpType::ScalarVector:
|
case BinaryOpType::ScalarVector:
|
||||||
if (b_donatable) {
|
if (b_donatable) {
|
||||||
if (donate_with_move) {
|
|
||||||
out.move_shared_buffer(b);
|
|
||||||
} else {
|
|
||||||
out.copy_shared_buffer(b);
|
out.copy_shared_buffer(b);
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
out.set_data(
|
out.set_data(
|
||||||
allocator::malloc_or_wait(b.data_size() * out.itemsize()),
|
allocator::malloc(b.data_size() * out.itemsize()),
|
||||||
b.data_size(),
|
b.data_size(),
|
||||||
b.strides(),
|
b.strides(),
|
||||||
b.flags());
|
b.flags());
|
||||||
@@ -67,14 +59,10 @@ void set_binary_op_output_data(
|
|||||||
break;
|
break;
|
||||||
case BinaryOpType::VectorScalar:
|
case BinaryOpType::VectorScalar:
|
||||||
if (a_donatable) {
|
if (a_donatable) {
|
||||||
if (donate_with_move) {
|
|
||||||
out.move_shared_buffer(a);
|
|
||||||
} else {
|
|
||||||
out.copy_shared_buffer(a);
|
out.copy_shared_buffer(a);
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
out.set_data(
|
out.set_data(
|
||||||
allocator::malloc_or_wait(a.data_size() * out.itemsize()),
|
allocator::malloc(a.data_size() * out.itemsize()),
|
||||||
a.data_size(),
|
a.data_size(),
|
||||||
a.strides(),
|
a.strides(),
|
||||||
a.flags());
|
a.flags());
|
||||||
@@ -82,20 +70,12 @@ void set_binary_op_output_data(
|
|||||||
break;
|
break;
|
||||||
case BinaryOpType::VectorVector:
|
case BinaryOpType::VectorVector:
|
||||||
if (a_donatable) {
|
if (a_donatable) {
|
||||||
if (donate_with_move) {
|
|
||||||
out.move_shared_buffer(a);
|
|
||||||
} else {
|
|
||||||
out.copy_shared_buffer(a);
|
out.copy_shared_buffer(a);
|
||||||
}
|
|
||||||
} else if (b_donatable) {
|
} else if (b_donatable) {
|
||||||
if (donate_with_move) {
|
|
||||||
out.move_shared_buffer(b);
|
|
||||||
} else {
|
|
||||||
out.copy_shared_buffer(b);
|
out.copy_shared_buffer(b);
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
out.set_data(
|
out.set_data(
|
||||||
allocator::malloc_or_wait(a.data_size() * out.itemsize()),
|
allocator::malloc(a.data_size() * out.itemsize()),
|
||||||
a.data_size(),
|
a.data_size(),
|
||||||
a.strides(),
|
a.strides(),
|
||||||
a.flags());
|
a.flags());
|
||||||
@@ -103,428 +83,15 @@ void set_binary_op_output_data(
|
|||||||
break;
|
break;
|
||||||
case BinaryOpType::General:
|
case BinaryOpType::General:
|
||||||
if (a_donatable && a.flags().row_contiguous && a.size() == out.size()) {
|
if (a_donatable && a.flags().row_contiguous && a.size() == out.size()) {
|
||||||
if (donate_with_move) {
|
|
||||||
out.move_shared_buffer(a);
|
|
||||||
} else {
|
|
||||||
out.copy_shared_buffer(a);
|
out.copy_shared_buffer(a);
|
||||||
}
|
|
||||||
} else if (
|
} else if (
|
||||||
b_donatable && b.flags().row_contiguous && b.size() == out.size()) {
|
b_donatable && b.flags().row_contiguous && b.size() == out.size()) {
|
||||||
if (donate_with_move) {
|
|
||||||
out.move_shared_buffer(b);
|
|
||||||
} else {
|
|
||||||
out.copy_shared_buffer(b);
|
out.copy_shared_buffer(b);
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct UseDefaultBinaryOp {};
|
|
||||||
|
|
||||||
template <typename T, typename U, typename Op>
|
|
||||||
struct DefaultVectorScalar {
|
|
||||||
Op op;
|
|
||||||
|
|
||||||
DefaultVectorScalar(Op op_) : op(op_) {}
|
|
||||||
|
|
||||||
void operator()(const T* a, const T* b, U* dst, int size) {
|
|
||||||
T scalar = *b;
|
|
||||||
while (size-- > 0) {
|
|
||||||
*dst = op(*a, scalar);
|
|
||||||
dst++;
|
|
||||||
a++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
template <typename T, typename U, typename Op>
|
|
||||||
struct DefaultScalarVector {
|
|
||||||
Op op;
|
|
||||||
|
|
||||||
DefaultScalarVector(Op op_) : op(op_) {}
|
|
||||||
|
|
||||||
void operator()(const T* a, const T* b, U* dst, int size) {
|
|
||||||
T scalar = *a;
|
|
||||||
while (size-- > 0) {
|
|
||||||
*dst = op(scalar, *b);
|
|
||||||
dst++;
|
|
||||||
b++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
template <typename T, typename U, typename Op>
|
|
||||||
struct DefaultVectorVector {
|
|
||||||
Op op;
|
|
||||||
|
|
||||||
DefaultVectorVector(Op op_) : op(op_) {}
|
|
||||||
|
|
||||||
void operator()(const T* a, const T* b, U* dst, int size) {
|
|
||||||
while (size-- > 0) {
|
|
||||||
*dst = op(*a, *b);
|
|
||||||
dst++;
|
|
||||||
a++;
|
|
||||||
b++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
template <typename T, typename U, typename Op, int D, bool Strided>
|
|
||||||
void binary_op_dims(
|
|
||||||
const T* a,
|
|
||||||
const T* b,
|
|
||||||
U* out,
|
|
||||||
Op op,
|
|
||||||
const Shape& shape,
|
|
||||||
const Strides& a_strides,
|
|
||||||
const Strides& b_strides,
|
|
||||||
const Strides& out_strides,
|
|
||||||
int axis) {
|
|
||||||
auto stride_a = a_strides[axis];
|
|
||||||
auto stride_b = b_strides[axis];
|
|
||||||
auto stride_out = out_strides[axis];
|
|
||||||
auto N = shape[axis];
|
|
||||||
|
|
||||||
for (int i = 0; i < N; i++) {
|
|
||||||
if constexpr (D > 1) {
|
|
||||||
binary_op_dims<T, U, Op, D - 1, Strided>(
|
|
||||||
a, b, out, op, shape, a_strides, b_strides, out_strides, axis + 1);
|
|
||||||
} else {
|
|
||||||
if constexpr (Strided) {
|
|
||||||
op(a, b, out, stride_out);
|
|
||||||
} else {
|
|
||||||
*out = op(*a, *b);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
out += stride_out;
|
|
||||||
a += stride_a;
|
|
||||||
b += stride_b;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T, typename U, bool Strided, typename Op>
|
|
||||||
void binary_op_dispatch_dims(
|
|
||||||
const array& a,
|
|
||||||
const array& b,
|
|
||||||
array& out,
|
|
||||||
Op op,
|
|
||||||
int dim,
|
|
||||||
const Shape& shape,
|
|
||||||
const Strides& a_strides,
|
|
||||||
const Strides& b_strides,
|
|
||||||
const Strides& out_strides) {
|
|
||||||
const T* a_ptr = a.data<T>();
|
|
||||||
const T* b_ptr = b.data<T>();
|
|
||||||
U* out_ptr = out.data<U>();
|
|
||||||
switch (dim) {
|
|
||||||
case 1:
|
|
||||||
binary_op_dims<T, U, Op, 1, Strided>(
|
|
||||||
a_ptr,
|
|
||||||
b_ptr,
|
|
||||||
out_ptr,
|
|
||||||
op,
|
|
||||||
shape,
|
|
||||||
a_strides,
|
|
||||||
b_strides,
|
|
||||||
out_strides,
|
|
||||||
0);
|
|
||||||
return;
|
|
||||||
case 2:
|
|
||||||
binary_op_dims<T, U, Op, 2, Strided>(
|
|
||||||
a_ptr,
|
|
||||||
b_ptr,
|
|
||||||
out_ptr,
|
|
||||||
op,
|
|
||||||
shape,
|
|
||||||
a_strides,
|
|
||||||
b_strides,
|
|
||||||
out_strides,
|
|
||||||
0);
|
|
||||||
return;
|
|
||||||
case 3:
|
|
||||||
binary_op_dims<T, U, Op, 3, Strided>(
|
|
||||||
a_ptr,
|
|
||||||
b_ptr,
|
|
||||||
out_ptr,
|
|
||||||
op,
|
|
||||||
shape,
|
|
||||||
a_strides,
|
|
||||||
b_strides,
|
|
||||||
out_strides,
|
|
||||||
0);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
ContiguousIterator a_it(shape, a_strides, dim - 3);
|
|
||||||
ContiguousIterator b_it(shape, b_strides, dim - 3);
|
|
||||||
auto stride = out_strides[dim - 4];
|
|
||||||
for (int64_t elem = 0; elem < a.size(); elem += stride) {
|
|
||||||
binary_op_dims<T, U, Op, 3, Strided>(
|
|
||||||
a_ptr + a_it.loc,
|
|
||||||
b_ptr + b_it.loc,
|
|
||||||
out_ptr + elem,
|
|
||||||
op,
|
|
||||||
shape,
|
|
||||||
a_strides,
|
|
||||||
b_strides,
|
|
||||||
out_strides,
|
|
||||||
dim - 3);
|
|
||||||
a_it.step();
|
|
||||||
b_it.step();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template <
|
|
||||||
typename T,
|
|
||||||
typename U,
|
|
||||||
typename Op,
|
|
||||||
typename OpSV,
|
|
||||||
typename OpVS,
|
|
||||||
typename OpVV>
|
|
||||||
void binary_op(
|
|
||||||
const array& a,
|
|
||||||
const array& b,
|
|
||||||
array& out,
|
|
||||||
Op op,
|
|
||||||
OpSV opsv,
|
|
||||||
OpVS opvs,
|
|
||||||
OpVV opvv) {
|
|
||||||
auto bopt = get_binary_op_type(a, b);
|
|
||||||
set_binary_op_output_data(a, b, out, bopt);
|
|
||||||
|
|
||||||
// The full computation is scalar scalar so call the base op once
|
|
||||||
if (bopt == BinaryOpType::ScalarScalar) {
|
|
||||||
*(out.data<U>()) = op(*a.data<T>(), *b.data<T>());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// The full computation is scalar vector so delegate to the op
|
|
||||||
if (bopt == BinaryOpType::ScalarVector) {
|
|
||||||
opsv(a.data<T>(), b.data<T>(), out.data<U>(), b.data_size());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// The full computation is vector scalar so delegate to the op
|
|
||||||
if (bopt == BinaryOpType::VectorScalar) {
|
|
||||||
opvs(a.data<T>(), b.data<T>(), out.data<U>(), a.data_size());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// The full computation is vector vector so delegate to the op
|
|
||||||
if (bopt == BinaryOpType::VectorVector) {
|
|
||||||
opvv(a.data<T>(), b.data<T>(), out.data<U>(), out.size());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// General computation so let's try to optimize
|
|
||||||
auto [new_shape, new_strides] = collapse_contiguous_dims(
|
|
||||||
a.shape(), {a.strides(), b.strides(), out.strides()});
|
|
||||||
const auto& a_strides = new_strides[0];
|
|
||||||
const auto& b_strides = new_strides[1];
|
|
||||||
const auto& strides = new_strides[2];
|
|
||||||
|
|
||||||
// Get the left-most dim such that the array is row contiguous after
|
|
||||||
auto leftmost_rc_dim = [&strides](const auto& arr_strides) {
|
|
||||||
int d = arr_strides.size() - 1;
|
|
||||||
for (; d >= 0 && arr_strides[d] == strides[d]; d--) {
|
|
||||||
}
|
|
||||||
return d + 1;
|
|
||||||
};
|
|
||||||
auto a_rc_dim = leftmost_rc_dim(a_strides);
|
|
||||||
auto b_rc_dim = leftmost_rc_dim(b_strides);
|
|
||||||
|
|
||||||
// Get the left-most dim such that the array is a broadcasted "scalar" after
|
|
||||||
auto leftmost_s_dim = [](const auto& arr_strides) {
|
|
||||||
int d = arr_strides.size() - 1;
|
|
||||||
for (; d >= 0 && arr_strides[d] == 0; d--) {
|
|
||||||
}
|
|
||||||
return d + 1;
|
|
||||||
};
|
|
||||||
auto a_s_dim = leftmost_s_dim(a_strides);
|
|
||||||
auto b_s_dim = leftmost_s_dim(b_strides);
|
|
||||||
|
|
||||||
auto ndim = new_shape.size();
|
|
||||||
|
|
||||||
// Case 1: LxM and FxM where L and F are broadcastable and M is row contiguous
|
|
||||||
int dim = ndim;
|
|
||||||
if (int d = std::max(a_rc_dim, b_rc_dim); d < ndim) {
|
|
||||||
bopt = BinaryOpType::VectorVector;
|
|
||||||
dim = d;
|
|
||||||
// Case 2: LxM and Fx1 where L and F are broadcastable and M is row
|
|
||||||
// contiguous
|
|
||||||
} else if (int d = std::max(a_rc_dim, b_s_dim); d < ndim) {
|
|
||||||
bopt = BinaryOpType::VectorScalar;
|
|
||||||
dim = d;
|
|
||||||
// Case 3: Lx1 and FxM where L and F are broadcastable and M is row
|
|
||||||
// contiguous
|
|
||||||
} else if (int d = std::max(a_s_dim, b_rc_dim); d < ndim) {
|
|
||||||
bopt = BinaryOpType::ScalarVector;
|
|
||||||
dim = d;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Can be sure dim > 0 since otherwise we would have used one of the fully
|
|
||||||
// contiguous methods above. Except for the case that the flags do not
|
|
||||||
// correspond to the underlying contiguity.
|
|
||||||
if (dim == 0 || strides[dim - 1] < 16) {
|
|
||||||
bopt = BinaryOpType::General;
|
|
||||||
dim = ndim;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (bopt) {
|
|
||||||
case BinaryOpType::VectorVector:
|
|
||||||
binary_op_dispatch_dims<T, U, true>(
|
|
||||||
a, b, out, opvv, dim, new_shape, a_strides, b_strides, strides);
|
|
||||||
break;
|
|
||||||
case BinaryOpType::VectorScalar:
|
|
||||||
binary_op_dispatch_dims<T, U, true>(
|
|
||||||
a, b, out, opvs, dim, new_shape, a_strides, b_strides, strides);
|
|
||||||
break;
|
|
||||||
case BinaryOpType::ScalarVector:
|
|
||||||
binary_op_dispatch_dims<T, U, true>(
|
|
||||||
a, b, out, opsv, dim, new_shape, a_strides, b_strides, strides);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
binary_op_dispatch_dims<T, U, false>(
|
|
||||||
a, b, out, op, dim, new_shape, a_strides, b_strides, strides);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T, typename Op, typename OpSV, typename OpVS, typename OpVV>
|
|
||||||
void binary_op(
|
|
||||||
const array& a,
|
|
||||||
const array& b,
|
|
||||||
array& out,
|
|
||||||
Op op,
|
|
||||||
OpSV opsv,
|
|
||||||
OpVS opvs,
|
|
||||||
OpVV opvv) {
|
|
||||||
// TODO: The following mess of constexpr evaluations can probably be achieved
|
|
||||||
// with template specializations and overloading. Would it be simpler?
|
|
||||||
|
|
||||||
if constexpr (std::is_same<decltype(opsv), UseDefaultBinaryOp>::value) {
|
|
||||||
if constexpr (std::is_same<decltype(opvs), UseDefaultBinaryOp>::value) {
|
|
||||||
if constexpr (std::is_same<decltype(opvv), UseDefaultBinaryOp>::value) {
|
|
||||||
// All ops are UseDefaultBinaryOp (why oh why would someone call that?)
|
|
||||||
binary_op<T, T>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
op,
|
|
||||||
DefaultScalarVector<T, T, Op>(op),
|
|
||||||
DefaultVectorScalar<T, T, Op>(op),
|
|
||||||
DefaultVectorVector<T, T, Op>(op));
|
|
||||||
} else {
|
|
||||||
// opsv and opvs were UseDefaultBinaryOp
|
|
||||||
binary_op<T, T>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
op,
|
|
||||||
DefaultScalarVector<T, T, Op>(op),
|
|
||||||
DefaultVectorScalar<T, T, Op>(op),
|
|
||||||
opvv);
|
|
||||||
}
|
|
||||||
} else if constexpr (std::is_same<decltype(opvv), UseDefaultBinaryOp>::
|
|
||||||
value) {
|
|
||||||
// opsv and opvv were UseDefaultBinaryOp
|
|
||||||
binary_op<T, T>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
op,
|
|
||||||
DefaultScalarVector<T, T, Op>(op),
|
|
||||||
opvs,
|
|
||||||
DefaultVectorVector<T, T, Op>(op));
|
|
||||||
} else {
|
|
||||||
// opsv was UseDefaultBinaryOp
|
|
||||||
binary_op<T, T>(
|
|
||||||
a, b, out, op, DefaultScalarVector<T, T, Op>(op), opvs, opvv);
|
|
||||||
}
|
|
||||||
} else if constexpr (std::is_same<decltype(opvs), UseDefaultBinaryOp>::
|
|
||||||
value) {
|
|
||||||
if (std::is_same<decltype(opvv), UseDefaultBinaryOp>::value) {
|
|
||||||
// opvs and opvv were UseDefaultBinaryOp
|
|
||||||
binary_op<T, T>(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
op,
|
|
||||||
opsv,
|
|
||||||
DefaultVectorScalar<T, T, Op>(op),
|
|
||||||
DefaultVectorVector<T, T, Op>(op));
|
|
||||||
} else {
|
|
||||||
// opvs was UseDefaultBinaryOp
|
|
||||||
binary_op<T, T>(
|
|
||||||
a, b, out, op, opsv, DefaultVectorScalar<T, T, Op>(op), opvv);
|
|
||||||
}
|
|
||||||
} else if constexpr (std::is_same<decltype(opvv), UseDefaultBinaryOp>::
|
|
||||||
value) {
|
|
||||||
// opvv was UseDefaultBinaryOp
|
|
||||||
binary_op<T, T>(
|
|
||||||
a, b, out, op, opsv, opvs, DefaultVectorVector<T, T, Op>(op));
|
|
||||||
} else {
|
|
||||||
// All ops provided
|
|
||||||
binary_op<T, T>(a, b, out, op, opsv, opvs, opvv);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T, typename Op>
|
|
||||||
void binary_op(const array& a, const array& b, array& out, Op op) {
|
|
||||||
DefaultScalarVector<T, T, Op> opsv(op);
|
|
||||||
DefaultVectorScalar<T, T, Op> opvs(op);
|
|
||||||
DefaultVectorVector<T, T, Op> opvv(op);
|
|
||||||
binary_op<T, T>(a, b, out, op, opsv, opvs, opvv);
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename... Ops>
|
|
||||||
void binary(const array& a, const array& b, array& out, Ops... ops) {
|
|
||||||
switch (out.dtype()) {
|
|
||||||
case bool_:
|
|
||||||
binary_op<bool>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case uint8:
|
|
||||||
binary_op<uint8_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case uint16:
|
|
||||||
binary_op<uint16_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case uint32:
|
|
||||||
binary_op<uint32_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case uint64:
|
|
||||||
binary_op<uint64_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case int8:
|
|
||||||
binary_op<int8_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case int16:
|
|
||||||
binary_op<int16_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case int32:
|
|
||||||
binary_op<int32_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case int64:
|
|
||||||
binary_op<int64_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case float16:
|
|
||||||
binary_op<float16_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case float32:
|
|
||||||
binary_op<float>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case bfloat16:
|
|
||||||
binary_op<bfloat16_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
case complex64:
|
|
||||||
binary_op<complex64_t>(a, b, out, ops...);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
24
mlx/backend/common/broadcasting.cpp
Normal file
24
mlx/backend/common/broadcasting.cpp
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
// Copyright © 2024 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/common/utils.h"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
void broadcast(const array& in, array& out) {
|
||||||
|
if (out.size() == 0) {
|
||||||
|
out.set_data(nullptr);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
Strides strides(out.ndim(), 0);
|
||||||
|
int diff = out.ndim() - in.ndim();
|
||||||
|
for (int i = in.ndim() - 1; i >= 0; --i) {
|
||||||
|
strides[i + diff] = (in.shape()[i] == 1) ? 0 : in.strides()[i];
|
||||||
|
}
|
||||||
|
auto flags = in.flags();
|
||||||
|
if (out.size() > in.size()) {
|
||||||
|
flags.row_contiguous = flags.col_contiguous = false;
|
||||||
|
}
|
||||||
|
out.copy_shared_buffer(in, strides, flags, in.data_size());
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
11
mlx/backend/common/broadcasting.h
Normal file
11
mlx/backend/common/broadcasting.h
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
// Copyright © 2024 Apple Inc.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "mlx/array.h"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
void broadcast(const array& in, array& out);
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
157
mlx/backend/common/buffer_cache.h
Normal file
157
mlx/backend/common/buffer_cache.h
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <cassert>
|
||||||
|
#include <functional>
|
||||||
|
#include <map>
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
class BufferCache {
|
||||||
|
public:
|
||||||
|
BufferCache(
|
||||||
|
size_t page_size,
|
||||||
|
std::function<size_t(T*)> get_size,
|
||||||
|
std::function<void(T*)> free)
|
||||||
|
: page_size_(page_size),
|
||||||
|
get_size_(std::move(get_size)),
|
||||||
|
free_(std::move(free)) {}
|
||||||
|
|
||||||
|
~BufferCache() {
|
||||||
|
clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
BufferCache(const BufferCache&) = delete;
|
||||||
|
BufferCache& operator=(const BufferCache&) = delete;
|
||||||
|
|
||||||
|
T* reuse_from_cache(size_t size) {
|
||||||
|
// Find the closest buffer in pool.
|
||||||
|
auto it = buffer_pool_.lower_bound(size);
|
||||||
|
if (it == buffer_pool_.end() ||
|
||||||
|
it->first >= std::min(2 * size, size + 2 * page_size_)) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect from the cache.
|
||||||
|
T* buf = it->second->buf;
|
||||||
|
pool_size_ -= it->first;
|
||||||
|
|
||||||
|
// Remove from record.
|
||||||
|
remove_from_list(it->second);
|
||||||
|
buffer_pool_.erase(it);
|
||||||
|
return buf;
|
||||||
|
}
|
||||||
|
|
||||||
|
void recycle_to_cache(T* buf) {
|
||||||
|
assert(buf);
|
||||||
|
// Add to cache.
|
||||||
|
BufferHolder* bh = new BufferHolder(buf);
|
||||||
|
add_at_head(bh);
|
||||||
|
size_t size = get_size_(buf);
|
||||||
|
pool_size_ += size;
|
||||||
|
buffer_pool_.emplace(size, bh);
|
||||||
|
}
|
||||||
|
|
||||||
|
int release_cached_buffers(size_t min_bytes_to_free) {
|
||||||
|
if (min_bytes_to_free >= 0.9 * pool_size_) {
|
||||||
|
return clear();
|
||||||
|
} else {
|
||||||
|
int n_release = 0;
|
||||||
|
size_t total_bytes_freed = 0;
|
||||||
|
|
||||||
|
while (tail_ && (total_bytes_freed < min_bytes_to_free)) {
|
||||||
|
// Release buffer.
|
||||||
|
size_t size = get_size_(tail_->buf);
|
||||||
|
total_bytes_freed += size;
|
||||||
|
free_(tail_->buf);
|
||||||
|
n_release++;
|
||||||
|
|
||||||
|
// Remove from record.
|
||||||
|
auto its = buffer_pool_.equal_range(size);
|
||||||
|
auto it = std::find_if(its.first, its.second, [this](const auto& el) {
|
||||||
|
return el.second == tail_;
|
||||||
|
});
|
||||||
|
assert(it != buffer_pool_.end());
|
||||||
|
buffer_pool_.erase(it);
|
||||||
|
remove_from_list(tail_);
|
||||||
|
}
|
||||||
|
|
||||||
|
pool_size_ -= total_bytes_freed;
|
||||||
|
return n_release;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int clear() {
|
||||||
|
int n_release = 0;
|
||||||
|
for (auto& [size, holder] : buffer_pool_) {
|
||||||
|
free_(holder->buf);
|
||||||
|
n_release++;
|
||||||
|
delete holder;
|
||||||
|
}
|
||||||
|
buffer_pool_.clear();
|
||||||
|
pool_size_ = 0;
|
||||||
|
head_ = nullptr;
|
||||||
|
tail_ = nullptr;
|
||||||
|
return n_release;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t cache_size() const {
|
||||||
|
return pool_size_;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t page_size() const {
|
||||||
|
return page_size_;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
struct BufferHolder {
|
||||||
|
public:
|
||||||
|
explicit BufferHolder(T* buf_) : buf(buf_) {}
|
||||||
|
|
||||||
|
BufferHolder* prev{nullptr};
|
||||||
|
BufferHolder* next{nullptr};
|
||||||
|
T* buf;
|
||||||
|
};
|
||||||
|
|
||||||
|
void add_at_head(BufferHolder* to_add) {
|
||||||
|
if (!head_) {
|
||||||
|
head_ = to_add;
|
||||||
|
tail_ = to_add;
|
||||||
|
} else {
|
||||||
|
head_->prev = to_add;
|
||||||
|
to_add->next = head_;
|
||||||
|
head_ = to_add;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void remove_from_list(BufferHolder* to_remove) {
|
||||||
|
if (to_remove->prev && to_remove->next) { // if middle
|
||||||
|
to_remove->prev->next = to_remove->next;
|
||||||
|
to_remove->next->prev = to_remove->prev;
|
||||||
|
} else if (to_remove->prev && to_remove == tail_) { // if tail
|
||||||
|
tail_ = to_remove->prev;
|
||||||
|
tail_->next = nullptr;
|
||||||
|
} else if (to_remove == head_ && to_remove->next) { // if head
|
||||||
|
head_ = to_remove->next;
|
||||||
|
head_->prev = nullptr;
|
||||||
|
} else if (to_remove == head_ && to_remove == tail_) { // if only element
|
||||||
|
head_ = nullptr;
|
||||||
|
tail_ = nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
delete to_remove;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::multimap<size_t, BufferHolder*> buffer_pool_;
|
||||||
|
BufferHolder* head_{nullptr};
|
||||||
|
BufferHolder* tail_{nullptr};
|
||||||
|
size_t pool_size_{0};
|
||||||
|
|
||||||
|
const size_t page_size_;
|
||||||
|
std::function<size_t(T*)> get_size_;
|
||||||
|
std::function<void(T*)> free_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
@@ -1,74 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/backend/common/lapack.h"
|
|
||||||
#include "mlx/linalg.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
void cholesky_impl(const array& a, array& factor, bool upper) {
|
|
||||||
// Lapack uses the column-major convention. We take advantage of the fact that
|
|
||||||
// the matrix should be symmetric:
|
|
||||||
// (A)ᵀ = A
|
|
||||||
// and that a column-major lower triangular matrix is a row-major upper
|
|
||||||
// triangular matrix, so uplo is the opposite of what we would expect from
|
|
||||||
// upper
|
|
||||||
|
|
||||||
char uplo = (upper) ? 'L' : 'U';
|
|
||||||
|
|
||||||
// The decomposition is computed in place, so just copy the input to the
|
|
||||||
// output.
|
|
||||||
copy(
|
|
||||||
a,
|
|
||||||
factor,
|
|
||||||
a.flags().row_contiguous ? CopyType::Vector : CopyType::General);
|
|
||||||
|
|
||||||
const int N = a.shape(-1);
|
|
||||||
const size_t num_matrices = a.size() / (N * N);
|
|
||||||
|
|
||||||
float* matrix = factor.data<float>();
|
|
||||||
|
|
||||||
for (int i = 0; i < num_matrices; i++) {
|
|
||||||
// Compute Cholesky factorization.
|
|
||||||
int info;
|
|
||||||
MLX_LAPACK_FUNC(spotrf)
|
|
||||||
(
|
|
||||||
/* uplo = */ &uplo,
|
|
||||||
/* n = */ &N,
|
|
||||||
/* a = */ matrix,
|
|
||||||
/* lda = */ &N,
|
|
||||||
/* info = */ &info);
|
|
||||||
|
|
||||||
// TODO: We do nothing when the matrix is not positive semi-definite
|
|
||||||
// because throwing an error would result in a crash. If we figure out how
|
|
||||||
// to catch errors from the implementation we should throw.
|
|
||||||
if (info < 0) {
|
|
||||||
std::stringstream msg;
|
|
||||||
msg << "[cholesky] Cholesky decomposition failed with error code "
|
|
||||||
<< info;
|
|
||||||
throw std::runtime_error(msg.str());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Zero out the upper/lower triangle while advancing the pointer to the
|
|
||||||
// next matrix at the same time.
|
|
||||||
for (int row = 0; row < N; row++) {
|
|
||||||
if (upper) {
|
|
||||||
std::fill(matrix, matrix + row, 0);
|
|
||||||
} else {
|
|
||||||
std::fill(matrix + row + 1, matrix + N, 0);
|
|
||||||
}
|
|
||||||
matrix += N;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Cholesky::eval(const std::vector<array>& inputs, array& output) {
|
|
||||||
if (inputs[0].dtype() != float32) {
|
|
||||||
throw std::runtime_error("[Cholesky::eval] only supports float32.");
|
|
||||||
}
|
|
||||||
cholesky_impl(inputs[0], output, upper_);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
// Copyright © 2024 Apple Inc.
|
// Copyright © 2024 Apple Inc.
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
|
|
||||||
|
#include "mlx/backend/common/broadcasting.h"
|
||||||
#include "mlx/backend/common/utils.h"
|
#include "mlx/backend/common/utils.h"
|
||||||
#include "mlx/primitives.h"
|
#include "mlx/primitives.h"
|
||||||
|
|
||||||
@@ -20,8 +21,8 @@ void AsStrided::eval(const std::vector<array>& inputs, array& out) {
|
|||||||
|
|
||||||
// Compute the flags given the shape and strides
|
// Compute the flags given the shape and strides
|
||||||
bool row_contiguous = true, col_contiguous = true;
|
bool row_contiguous = true, col_contiguous = true;
|
||||||
size_t r = 1, c = 1;
|
int64_t r = 1, c = 1;
|
||||||
for (int i = strides_.size() - 1, j = 0; i >= 0; i--, j++) {
|
for (int i = std::ssize(strides_) - 1, j = 0; i >= 0; i--, j++) {
|
||||||
row_contiguous &= (r == strides_[i]) || (shape_[i] == 1);
|
row_contiguous &= (r == strides_[i]) || (shape_[i] == 1);
|
||||||
col_contiguous &= (c == strides_[j]) || (shape_[j] == 1);
|
col_contiguous &= (c == strides_[j]) || (shape_[j] == 1);
|
||||||
r *= shape_[i];
|
r *= shape_[i];
|
||||||
@@ -39,24 +40,7 @@ void AsStrided::eval(const std::vector<array>& inputs, array& out) {
|
|||||||
// rely on data_size anyway.
|
// rely on data_size anyway.
|
||||||
size_t data_size = out.size();
|
size_t data_size = out.size();
|
||||||
|
|
||||||
return move_or_copy(in, out, strides_, flags, data_size, offset_);
|
return out.copy_shared_buffer(in, strides_, flags, data_size, offset_);
|
||||||
}
|
|
||||||
|
|
||||||
void broadcast(const array& in, array& out) {
|
|
||||||
if (out.size() == 0) {
|
|
||||||
out.set_data(nullptr);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
Strides strides(out.ndim(), 0);
|
|
||||||
int diff = out.ndim() - in.ndim();
|
|
||||||
for (int i = in.ndim() - 1; i >= 0; --i) {
|
|
||||||
strides[i + diff] = (in.shape()[i] == 1) ? 0 : in.strides()[i];
|
|
||||||
}
|
|
||||||
auto flags = in.flags();
|
|
||||||
if (out.size() > in.size()) {
|
|
||||||
flags.row_contiguous = flags.col_contiguous = false;
|
|
||||||
}
|
|
||||||
move_or_copy(in, out, strides, flags, in.data_size());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Broadcast::eval(const std::vector<array>& inputs, array& out) {
|
void Broadcast::eval(const std::vector<array>& inputs, array& out) {
|
||||||
@@ -69,16 +53,17 @@ void BroadcastAxes::eval(const std::vector<array>& inputs, array& out) {
|
|||||||
|
|
||||||
void Copy::eval(const std::vector<array>& inputs, array& out) {
|
void Copy::eval(const std::vector<array>& inputs, array& out) {
|
||||||
assert(inputs.size() == 1);
|
assert(inputs.size() == 1);
|
||||||
move_or_copy(inputs[0], out);
|
out.copy_shared_buffer(inputs[0]);
|
||||||
}
|
}
|
||||||
|
|
||||||
void CustomTransforms::eval(
|
void CustomTransforms::eval(
|
||||||
const std::vector<array>& inputs,
|
const std::vector<array>& inputs,
|
||||||
std::vector<array>& outputs) {
|
std::vector<array>& outputs) {
|
||||||
assert(inputs.size() > outputs.size());
|
assert(inputs.size() > outputs.size());
|
||||||
for (int i = 0, j = inputs.size() - outputs.size(); i < outputs.size();
|
for (int i = 0, j = std::ssize(inputs) - std::ssize(outputs);
|
||||||
|
i < std::ssize(outputs);
|
||||||
i++, j++) {
|
i++, j++) {
|
||||||
move_or_copy(inputs[j], outputs[i]);
|
outputs[i].copy_shared_buffer(inputs[j]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -86,8 +71,8 @@ void Depends::eval(
|
|||||||
const std::vector<array>& inputs,
|
const std::vector<array>& inputs,
|
||||||
std::vector<array>& outputs) {
|
std::vector<array>& outputs) {
|
||||||
assert(inputs.size() > outputs.size());
|
assert(inputs.size() > outputs.size());
|
||||||
for (int i = 0; i < outputs.size(); i++) {
|
for (int i = 0; i < std::ssize(outputs); i++) {
|
||||||
move_or_copy(inputs[i], outputs[i]);
|
outputs[i].copy_shared_buffer(inputs[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -98,12 +83,12 @@ void ExpandDims::eval(const std::vector<array>& inputs, array& out) {
|
|||||||
for (auto ax : axes_) {
|
for (auto ax : axes_) {
|
||||||
strides.insert(strides.begin() + ax, 1);
|
strides.insert(strides.begin() + ax, 1);
|
||||||
}
|
}
|
||||||
move_or_copy(in, out, strides, in.flags(), in.data_size());
|
out.copy_shared_buffer(in, strides, in.flags(), in.data_size());
|
||||||
}
|
}
|
||||||
|
|
||||||
void NumberOfElements::eval(const std::vector<array>& inputs, array& out) {
|
void NumberOfElements::eval(const std::vector<array>& inputs, array& out) {
|
||||||
assert(inputs.size() == 1);
|
assert(inputs.size() == 1);
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
|
||||||
double numel = 1;
|
double numel = 1;
|
||||||
for (auto ax : axes_) {
|
for (auto ax : axes_) {
|
||||||
@@ -151,6 +136,9 @@ void NumberOfElements::eval(const std::vector<array>& inputs, array& out) {
|
|||||||
case bfloat16:
|
case bfloat16:
|
||||||
*out.data<bfloat16_t>() = static_cast<bfloat16_t>(numel);
|
*out.data<bfloat16_t>() = static_cast<bfloat16_t>(numel);
|
||||||
break;
|
break;
|
||||||
|
case float64:
|
||||||
|
*out.data<double>() = static_cast<double>(numel);
|
||||||
|
break;
|
||||||
case complex64:
|
case complex64:
|
||||||
*out.data<complex64_t>() = static_cast<complex64_t>(numel);
|
*out.data<complex64_t>() = static_cast<complex64_t>(numel);
|
||||||
break;
|
break;
|
||||||
@@ -207,7 +195,7 @@ void shared_buffer_reshape(
|
|||||||
auto max_dim = std::max_element(out.shape().begin(), out.shape().end());
|
auto max_dim = std::max_element(out.shape().begin(), out.shape().end());
|
||||||
flags.col_contiguous = out.size() <= 1 || out.size() == *max_dim;
|
flags.col_contiguous = out.size() <= 1 || out.size() == *max_dim;
|
||||||
}
|
}
|
||||||
move_or_copy(in, out, out_strides, flags, in.data_size());
|
out.copy_shared_buffer(in, out_strides, flags, in.data_size());
|
||||||
}
|
}
|
||||||
|
|
||||||
void Split::eval(
|
void Split::eval(
|
||||||
@@ -219,11 +207,11 @@ void Split::eval(
|
|||||||
|
|
||||||
auto compute_new_flags = [](const auto& shape,
|
auto compute_new_flags = [](const auto& shape,
|
||||||
const auto& strides,
|
const auto& strides,
|
||||||
size_t in_data_size,
|
int64_t in_data_size,
|
||||||
auto flags) {
|
auto flags) {
|
||||||
size_t data_size = 1;
|
int64_t data_size = 1;
|
||||||
size_t f_stride = 1;
|
int64_t f_stride = 1;
|
||||||
size_t b_stride = 1;
|
int64_t b_stride = 1;
|
||||||
flags.row_contiguous = true;
|
flags.row_contiguous = true;
|
||||||
flags.col_contiguous = true;
|
flags.col_contiguous = true;
|
||||||
for (int i = 0, ri = shape.size() - 1; ri >= 0; i++, ri--) {
|
for (int i = 0, ri = shape.size() - 1; ri >= 0; i++, ri--) {
|
||||||
@@ -253,7 +241,7 @@ void Split::eval(
|
|||||||
|
|
||||||
std::vector<int> indices(1, 0);
|
std::vector<int> indices(1, 0);
|
||||||
indices.insert(indices.end(), indices_.begin(), indices_.end());
|
indices.insert(indices.end(), indices_.begin(), indices_.end());
|
||||||
for (int i = 0; i < indices.size(); i++) {
|
for (int i = 0; i < std::ssize(indices); i++) {
|
||||||
size_t offset = indices[i] * in.strides()[axis_];
|
size_t offset = indices[i] * in.strides()[axis_];
|
||||||
auto [new_flags, data_size] = compute_new_flags(
|
auto [new_flags, data_size] = compute_new_flags(
|
||||||
outputs[i].shape(), in.strides(), in.data_size(), in.flags());
|
outputs[i].shape(), in.strides(), in.data_size(), in.flags());
|
||||||
@@ -267,25 +255,25 @@ void Squeeze::eval(const std::vector<array>& inputs, array& out) {
|
|||||||
const auto& in = inputs[0];
|
const auto& in = inputs[0];
|
||||||
Strides strides;
|
Strides strides;
|
||||||
for (int i = 0, j = 0; i < in.ndim(); ++i) {
|
for (int i = 0, j = 0; i < in.ndim(); ++i) {
|
||||||
if (j < axes_.size() && i == axes_[j]) {
|
if (j < std::ssize(axes_) && i == axes_[j]) {
|
||||||
j++;
|
j++;
|
||||||
} else {
|
} else {
|
||||||
strides.push_back(in.strides(i));
|
strides.push_back(in.strides(i));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
move_or_copy(in, out, strides, in.flags(), in.data_size());
|
out.copy_shared_buffer(in, strides, in.flags(), in.data_size());
|
||||||
}
|
}
|
||||||
|
|
||||||
void StopGradient::eval(const std::vector<array>& inputs, array& out) {
|
void StopGradient::eval(const std::vector<array>& inputs, array& out) {
|
||||||
assert(inputs.size() == 1);
|
assert(inputs.size() == 1);
|
||||||
move_or_copy(inputs[0], out);
|
out.copy_shared_buffer(inputs[0]);
|
||||||
}
|
}
|
||||||
|
|
||||||
void Transpose::eval(const std::vector<array>& inputs, array& out) {
|
void Transpose::eval(const std::vector<array>& inputs, array& out) {
|
||||||
assert(inputs.size() == 1);
|
assert(inputs.size() == 1);
|
||||||
Strides out_strides(out.ndim());
|
Strides out_strides(out.ndim());
|
||||||
auto& in = inputs[0];
|
auto& in = inputs[0];
|
||||||
for (int ax = 0; ax < axes_.size(); ++ax) {
|
for (int ax = 0; ax < std::ssize(axes_); ++ax) {
|
||||||
out_strides[ax] = in.strides()[axes_[ax]];
|
out_strides[ax] = in.strides()[axes_[ax]];
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -312,7 +300,7 @@ void Transpose::eval(const std::vector<array>& inputs, array& out) {
|
|||||||
b_stride *= out.shape(ri);
|
b_stride *= out.shape(ri);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
move_or_copy(in, out, out_strides, flags, in.data_size());
|
out.copy_shared_buffer(in, out_strides, flags, in.data_size());
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
// Copyright © 2023-2024 Apple Inc.
|
||||||
|
|
||||||
#include "mlx/backend/common/compiled.h"
|
#include "mlx/backend/common/compiled.h"
|
||||||
#include "mlx/graph_utils.h"
|
#include "mlx/backend/common/utils.h"
|
||||||
#include "mlx/primitives.h"
|
|
||||||
#include "mlx/utils.h"
|
#include "mlx/utils.h"
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
@@ -15,6 +14,8 @@ void print_constant(std::ostream& os, const array& x) {
|
|||||||
return print_float_constant<float16_t>(os, x);
|
return print_float_constant<float16_t>(os, x);
|
||||||
case bfloat16:
|
case bfloat16:
|
||||||
return print_float_constant<bfloat16_t>(os, x);
|
return print_float_constant<bfloat16_t>(os, x);
|
||||||
|
case float64:
|
||||||
|
return print_float_constant<double>(os, x);
|
||||||
case complex64:
|
case complex64:
|
||||||
return print_complex_constant<complex64_t>(os, x);
|
return print_complex_constant<complex64_t>(os, x);
|
||||||
case int8:
|
case int8:
|
||||||
@@ -51,6 +52,8 @@ std::string get_type_string(Dtype d) {
|
|||||||
return "float16_t";
|
return "float16_t";
|
||||||
case bfloat16:
|
case bfloat16:
|
||||||
return "bfloat16_t";
|
return "bfloat16_t";
|
||||||
|
case float64:
|
||||||
|
return "double";
|
||||||
case complex64:
|
case complex64:
|
||||||
return "complex64_t";
|
return "complex64_t";
|
||||||
case bool_:
|
case bool_:
|
||||||
@@ -79,55 +82,6 @@ std::string get_type_string(Dtype d) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string build_lib_name(
|
|
||||||
const std::vector<array>& inputs,
|
|
||||||
const std::vector<array>& outputs,
|
|
||||||
const std::vector<array>& tape,
|
|
||||||
const std::unordered_set<uintptr_t>& constant_ids) {
|
|
||||||
NodeNamer namer;
|
|
||||||
std::ostringstream os;
|
|
||||||
std::ostringstream constant_hasher;
|
|
||||||
|
|
||||||
// Fill the input names. This is not really necessary, I just like having A,
|
|
||||||
// B, C, ... as the inputs.
|
|
||||||
for (auto& x : inputs) {
|
|
||||||
namer.get_name(x);
|
|
||||||
}
|
|
||||||
|
|
||||||
// The primitives describing the tape. For unary and binary primitives this
|
|
||||||
// must be enough to describe the full computation.
|
|
||||||
for (auto& a : tape) {
|
|
||||||
// name and type of output
|
|
||||||
os << namer.get_name(a) << kindof(a.dtype()) << a.itemsize();
|
|
||||||
// computation performed
|
|
||||||
a.primitive().print(os);
|
|
||||||
// name of inputs to the function
|
|
||||||
for (auto& inp : a.inputs()) {
|
|
||||||
os << namer.get_name(inp);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
os << "_";
|
|
||||||
|
|
||||||
for (auto& x : inputs) {
|
|
||||||
if (constant_ids.find(x.id()) != constant_ids.end()) {
|
|
||||||
os << "C";
|
|
||||||
print_constant(constant_hasher, x);
|
|
||||||
} else {
|
|
||||||
os << (is_scalar(x) ? "S" : "V");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
os << "_";
|
|
||||||
for (auto& x : inputs) {
|
|
||||||
if (constant_ids.find(x.id()) != constant_ids.end()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
os << kindof(x.dtype()) << x.itemsize();
|
|
||||||
}
|
|
||||||
os << "_" << std::hash<std::string>{}(constant_hasher.str());
|
|
||||||
|
|
||||||
return os.str();
|
|
||||||
}
|
|
||||||
|
|
||||||
bool compiled_check_contiguity(
|
bool compiled_check_contiguity(
|
||||||
const std::vector<array>& inputs,
|
const std::vector<array>& inputs,
|
||||||
const Shape& shape) {
|
const Shape& shape) {
|
||||||
@@ -159,16 +113,14 @@ bool compiled_check_contiguity(
|
|||||||
void compiled_allocate_outputs(
|
void compiled_allocate_outputs(
|
||||||
const std::vector<array>& inputs,
|
const std::vector<array>& inputs,
|
||||||
std::vector<array>& outputs,
|
std::vector<array>& outputs,
|
||||||
const std::vector<array>& inputs_,
|
const std::function<bool(size_t)>& is_constant,
|
||||||
const std::unordered_set<uintptr_t>& constant_ids_,
|
bool contiguous) {
|
||||||
bool contiguous,
|
|
||||||
bool move_buffers /* = false */) {
|
|
||||||
if (contiguous) {
|
if (contiguous) {
|
||||||
int o = 0;
|
int o = 0;
|
||||||
Strides strides;
|
Strides strides;
|
||||||
size_t data_size;
|
size_t data_size;
|
||||||
array::Flags flags;
|
array::Flags flags;
|
||||||
for (int i = 0; i < inputs.size() && o < outputs.size(); ++i) {
|
for (int i = 0; i < std::ssize(inputs) && o < std::ssize(outputs); ++i) {
|
||||||
auto& in = inputs[i];
|
auto& in = inputs[i];
|
||||||
// Conditions for donation
|
// Conditions for donation
|
||||||
// - Correct size
|
// - Correct size
|
||||||
@@ -176,14 +128,9 @@ void compiled_allocate_outputs(
|
|||||||
// - Donatable
|
// - Donatable
|
||||||
// - Not a constant
|
// - Not a constant
|
||||||
if (in.itemsize() == outputs[o].itemsize() && !is_scalar(in) &&
|
if (in.itemsize() == outputs[o].itemsize() && !is_scalar(in) &&
|
||||||
in.is_donatable() &&
|
in.is_donatable() && is_constant(i)) {
|
||||||
constant_ids_.find(inputs_[i].id()) == constant_ids_.end()) {
|
|
||||||
if (move_buffers) {
|
|
||||||
outputs[o++].move_shared_buffer(in);
|
|
||||||
} else {
|
|
||||||
outputs[o++].copy_shared_buffer(in);
|
outputs[o++].copy_shared_buffer(in);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
// Get representative input flags to properly set non-donated outputs
|
// Get representative input flags to properly set non-donated outputs
|
||||||
if (strides.empty() && in.size() == outputs[0].size()) {
|
if (strides.empty() && in.size() == outputs[0].size()) {
|
||||||
strides = in.strides();
|
strides = in.strides();
|
||||||
@@ -191,16 +138,16 @@ void compiled_allocate_outputs(
|
|||||||
data_size = in.data_size();
|
data_size = in.data_size();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (; o < outputs.size(); ++o) {
|
for (; o < std::ssize(outputs); ++o) {
|
||||||
outputs[o].set_data(
|
outputs[o].set_data(
|
||||||
allocator::malloc_or_wait(data_size * outputs[o].itemsize()),
|
allocator::malloc(data_size * outputs[o].itemsize()),
|
||||||
data_size,
|
data_size,
|
||||||
strides,
|
strides,
|
||||||
flags);
|
flags);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
int o = 0;
|
int o = 0;
|
||||||
for (int i = 0; i < inputs.size() && o < outputs.size(); ++i) {
|
for (int i = 0; i < std::ssize(inputs) && o < std::ssize(outputs); ++i) {
|
||||||
auto& in = inputs[i];
|
auto& in = inputs[i];
|
||||||
// Conditions for donation
|
// Conditions for donation
|
||||||
// - Row contiguous
|
// - Row contiguous
|
||||||
@@ -209,21 +156,86 @@ void compiled_allocate_outputs(
|
|||||||
// - Not a constant
|
// - Not a constant
|
||||||
if (in.flags().row_contiguous && in.size() == outputs[o].size() &&
|
if (in.flags().row_contiguous && in.size() == outputs[o].size() &&
|
||||||
in.itemsize() == outputs[o].itemsize() && in.is_donatable() &&
|
in.itemsize() == outputs[o].itemsize() && in.is_donatable() &&
|
||||||
constant_ids_.find(inputs_[i].id()) == constant_ids_.end()) {
|
is_constant(i)) {
|
||||||
if (move_buffers) {
|
|
||||||
outputs[o].move_shared_buffer(
|
|
||||||
in, outputs[o].strides(), in.flags(), in.data_size());
|
|
||||||
} else {
|
|
||||||
outputs[o].copy_shared_buffer(
|
outputs[o].copy_shared_buffer(
|
||||||
in, outputs[o].strides(), in.flags(), in.data_size());
|
in, outputs[o].strides(), in.flags(), in.data_size());
|
||||||
}
|
|
||||||
o++;
|
o++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (; o < outputs.size(); ++o) {
|
for (; o < std::ssize(outputs); ++o) {
|
||||||
outputs[o].set_data(allocator::malloc_or_wait(outputs[o].nbytes()));
|
outputs[o].set_data(allocator::malloc(outputs[o].nbytes()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::tuple<bool, Shape, std::vector<Strides>> compiled_collapse_contiguous_dims(
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
const array& out,
|
||||||
|
const std::function<bool(size_t)>& is_constant) {
|
||||||
|
const Shape& shape = out.shape();
|
||||||
|
bool contiguous = compiled_check_contiguity(inputs, shape);
|
||||||
|
if (contiguous) {
|
||||||
|
return {true, shape, {}};
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<Strides> strides_vec{out.strides()};
|
||||||
|
for (size_t i = 0; i < inputs.size(); ++i) {
|
||||||
|
// Skip constants.
|
||||||
|
if (is_constant(i)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip scalar inputs.
|
||||||
|
const auto& x = inputs[i];
|
||||||
|
if (is_scalar(x)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Broadcast the inputs to the output shape.
|
||||||
|
Strides xstrides;
|
||||||
|
int j = 0;
|
||||||
|
for (; j < shape.size() - x.ndim(); ++j) {
|
||||||
|
if (shape[j] == 1) {
|
||||||
|
xstrides.push_back(out.strides()[j]);
|
||||||
|
} else {
|
||||||
|
xstrides.push_back(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (int i = 0; i < x.ndim(); ++i, ++j) {
|
||||||
|
if (x.shape(i) == 1) {
|
||||||
|
if (shape[j] == 1) {
|
||||||
|
xstrides.push_back(out.strides()[j]);
|
||||||
|
} else {
|
||||||
|
xstrides.push_back(0);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
xstrides.push_back(x.strides()[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
strides_vec.push_back(std::move(xstrides));
|
||||||
|
}
|
||||||
|
|
||||||
|
auto tup = collapse_contiguous_dims(shape, strides_vec, INT32_MAX);
|
||||||
|
return {false, std::move(std::get<0>(tup)), std::move(std::get<1>(tup))};
|
||||||
|
}
|
||||||
|
|
||||||
|
bool compiled_use_large_index(
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
const std::vector<array>& outputs,
|
||||||
|
bool contiguous) {
|
||||||
|
if (contiguous) {
|
||||||
|
int64_t max_size = 0;
|
||||||
|
for (const auto& in : inputs) {
|
||||||
|
max_size = std::max(max_size, in.data_size());
|
||||||
|
}
|
||||||
|
return max_size > UINT32_MAX;
|
||||||
|
} else {
|
||||||
|
int64_t max_size = 0;
|
||||||
|
for (const auto& o : outputs) {
|
||||||
|
max_size = std::max(max_size, o.size());
|
||||||
|
}
|
||||||
|
return max_size > UINT32_MAX;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -1,9 +1,8 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
// Copyright © 2023-2024 Apple Inc.
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <functional>
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
#include <sstream>
|
|
||||||
#include <unordered_set>
|
|
||||||
|
|
||||||
#include "mlx/array.h"
|
#include "mlx/array.h"
|
||||||
#include "mlx/primitives.h"
|
#include "mlx/primitives.h"
|
||||||
@@ -14,19 +13,17 @@ inline bool is_static_cast(const Primitive& p) {
|
|||||||
return (typeid(p) == typeid(Broadcast) || typeid(p) == typeid(AsType));
|
return (typeid(p) == typeid(Broadcast) || typeid(p) == typeid(AsType));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string build_lib_name(
|
|
||||||
const std::vector<array>& inputs,
|
|
||||||
const std::vector<array>& outputs,
|
|
||||||
const std::vector<array>& tape,
|
|
||||||
const std::unordered_set<uintptr_t>& constant_ids);
|
|
||||||
|
|
||||||
std::string get_type_string(Dtype d);
|
std::string get_type_string(Dtype d);
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
void print_float_constant(std::ostream& os, const array& x) {
|
void print_float_constant(std::ostream& os, const array& x) {
|
||||||
auto old_precision = os.precision();
|
auto old_precision = os.precision();
|
||||||
os << std::setprecision(std::numeric_limits<float>::digits10 + 1)
|
if constexpr (std::is_same_v<T, double>) {
|
||||||
<< x.item<T>() << std::setprecision(old_precision);
|
os << std::setprecision(std::numeric_limits<double>::digits10 + 1);
|
||||||
|
} else {
|
||||||
|
os << std::setprecision(std::numeric_limits<float>::digits10 + 1);
|
||||||
|
}
|
||||||
|
os << x.item<T>() << std::setprecision(old_precision);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
@@ -60,9 +57,19 @@ bool compiled_check_contiguity(
|
|||||||
void compiled_allocate_outputs(
|
void compiled_allocate_outputs(
|
||||||
const std::vector<array>& inputs,
|
const std::vector<array>& inputs,
|
||||||
std::vector<array>& outputs,
|
std::vector<array>& outputs,
|
||||||
const std::vector<array>& inputs_,
|
const std::function<bool(size_t)>& is_constant,
|
||||||
const std::unordered_set<uintptr_t>& constant_ids_,
|
bool contiguous);
|
||||||
bool contiguous,
|
|
||||||
bool move_buffers = false);
|
// Collapse contiguous dims ignoring scalars and constants.
|
||||||
|
std::tuple<bool, Shape, std::vector<Strides>> compiled_collapse_contiguous_dims(
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
const array& out,
|
||||||
|
const std::function<bool(size_t)>& is_constant);
|
||||||
|
|
||||||
|
// Return whether the kernel should use large index.
|
||||||
|
bool compiled_use_large_index(
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
const std::vector<array>& outputs,
|
||||||
|
bool contiguous);
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -2,7 +2,6 @@
|
|||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "mlx/array.h"
|
|
||||||
#include "mlx/backend/common/utils.h"
|
#include "mlx/backend/common/utils.h"
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
@@ -23,17 +22,25 @@ enum class CopyType {
|
|||||||
GeneralGeneral
|
GeneralGeneral
|
||||||
};
|
};
|
||||||
|
|
||||||
void copy(const array& src, array& dst, CopyType ctype);
|
inline bool set_copy_output_data(const array& in, array& out, CopyType ctype) {
|
||||||
void copy_inplace(const array& src, array& dst, CopyType ctype);
|
if (ctype == CopyType::Vector) {
|
||||||
|
// If the input is donateable, we are doing a vector copy and the types
|
||||||
void copy_inplace(
|
// have the same size, then the input buffer can hold the output.
|
||||||
const array& src,
|
if (is_donatable(in, out)) {
|
||||||
array& dst,
|
out.copy_shared_buffer(in);
|
||||||
const Shape& data_shape,
|
return true;
|
||||||
const Strides& i_strides,
|
} else {
|
||||||
const Strides& o_strides,
|
out.set_data(
|
||||||
int64_t i_offset,
|
allocator::malloc(in.data_size() * out.itemsize()),
|
||||||
int64_t o_offset,
|
in.data_size(),
|
||||||
CopyType ctype);
|
in.strides(),
|
||||||
|
in.flags());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -1,198 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#include <cstring>
|
|
||||||
|
|
||||||
#include "mlx/array.h"
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/backend/common/lapack.h"
|
|
||||||
#include "mlx/backend/common/utils.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
#define DEFAULT(primitive) \
|
|
||||||
void primitive::eval_cpu(const std::vector<array>& inputs, array& out) { \
|
|
||||||
primitive::eval(inputs, out); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define DEFAULT_MULTI(primitive) \
|
|
||||||
void primitive::eval_cpu( \
|
|
||||||
const std::vector<array>& inputs, std::vector<array>& outputs) { \
|
|
||||||
primitive::eval(inputs, outputs); \
|
|
||||||
}
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
DEFAULT(Abs)
|
|
||||||
DEFAULT(Add)
|
|
||||||
DEFAULT(Arange)
|
|
||||||
DEFAULT(ArcCos)
|
|
||||||
DEFAULT(ArcCosh)
|
|
||||||
DEFAULT(ArcSin)
|
|
||||||
DEFAULT(ArcSinh)
|
|
||||||
DEFAULT(ArcTan)
|
|
||||||
DEFAULT(ArcTan2)
|
|
||||||
DEFAULT(ArcTanh)
|
|
||||||
DEFAULT(ArgPartition)
|
|
||||||
DEFAULT(ArgReduce)
|
|
||||||
DEFAULT(ArgSort)
|
|
||||||
DEFAULT(AsType)
|
|
||||||
DEFAULT(AsStrided)
|
|
||||||
DEFAULT(Broadcast)
|
|
||||||
DEFAULT(BroadcastAxes)
|
|
||||||
DEFAULT(BlockMaskedMM)
|
|
||||||
DEFAULT(GatherMM)
|
|
||||||
DEFAULT(GatherQMM)
|
|
||||||
DEFAULT_MULTI(DivMod)
|
|
||||||
DEFAULT(Ceil)
|
|
||||||
DEFAULT(Concatenate)
|
|
||||||
DEFAULT(Conjugate)
|
|
||||||
DEFAULT(Convolution)
|
|
||||||
DEFAULT(Copy)
|
|
||||||
DEFAULT(Cos)
|
|
||||||
DEFAULT(Cosh)
|
|
||||||
DEFAULT_MULTI(CustomTransforms)
|
|
||||||
DEFAULT_MULTI(Depends)
|
|
||||||
DEFAULT(Divide)
|
|
||||||
DEFAULT(NumberOfElements)
|
|
||||||
DEFAULT(Remainder)
|
|
||||||
DEFAULT(Equal)
|
|
||||||
DEFAULT(Erf)
|
|
||||||
DEFAULT(ErfInv)
|
|
||||||
DEFAULT(Exp)
|
|
||||||
DEFAULT(ExpandDims)
|
|
||||||
DEFAULT(Expm1)
|
|
||||||
DEFAULT(FFT)
|
|
||||||
DEFAULT(Floor)
|
|
||||||
DEFAULT(Full)
|
|
||||||
DEFAULT(Gather)
|
|
||||||
DEFAULT(Greater)
|
|
||||||
DEFAULT(GreaterEqual)
|
|
||||||
DEFAULT(Hadamard)
|
|
||||||
DEFAULT(Less)
|
|
||||||
DEFAULT(LessEqual)
|
|
||||||
DEFAULT(Load)
|
|
||||||
DEFAULT(Log)
|
|
||||||
DEFAULT(Log1p)
|
|
||||||
DEFAULT(LogicalNot)
|
|
||||||
DEFAULT(LogicalAnd)
|
|
||||||
DEFAULT(LogicalOr)
|
|
||||||
DEFAULT(LogAddExp)
|
|
||||||
DEFAULT(Maximum)
|
|
||||||
DEFAULT(Minimum)
|
|
||||||
DEFAULT(Multiply)
|
|
||||||
DEFAULT(Negative)
|
|
||||||
DEFAULT(NotEqual)
|
|
||||||
DEFAULT(Pad)
|
|
||||||
DEFAULT(Partition)
|
|
||||||
DEFAULT(Power)
|
|
||||||
DEFAULT_MULTI(QRF)
|
|
||||||
DEFAULT(QuantizedMatmul)
|
|
||||||
DEFAULT(RandomBits)
|
|
||||||
DEFAULT(Reduce)
|
|
||||||
DEFAULT(Round)
|
|
||||||
DEFAULT(Scan)
|
|
||||||
DEFAULT(Scatter)
|
|
||||||
DEFAULT(Select)
|
|
||||||
DEFAULT(Sigmoid)
|
|
||||||
DEFAULT(Sign)
|
|
||||||
DEFAULT(Sin)
|
|
||||||
DEFAULT(Sinh)
|
|
||||||
DEFAULT(Slice)
|
|
||||||
DEFAULT(SliceUpdate)
|
|
||||||
DEFAULT(Softmax)
|
|
||||||
DEFAULT(Sort)
|
|
||||||
DEFAULT_MULTI(Split)
|
|
||||||
DEFAULT(Square)
|
|
||||||
DEFAULT(Squeeze)
|
|
||||||
DEFAULT(Sqrt)
|
|
||||||
DEFAULT(StopGradient)
|
|
||||||
DEFAULT(Subtract)
|
|
||||||
DEFAULT_MULTI(SVD)
|
|
||||||
DEFAULT(Tan)
|
|
||||||
DEFAULT(Tanh)
|
|
||||||
DEFAULT(Transpose)
|
|
||||||
DEFAULT(Inverse)
|
|
||||||
DEFAULT(Cholesky)
|
|
||||||
DEFAULT_MULTI(Eigh)
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
inline void matmul_common_general(
|
|
||||||
const array& a_pre,
|
|
||||||
const array& b_pre,
|
|
||||||
array& out,
|
|
||||||
float alpha = 1.0f,
|
|
||||||
float beta = 0.0f) {
|
|
||||||
auto check_transpose = [](const array& arr) {
|
|
||||||
auto stx = arr.strides()[arr.ndim() - 2];
|
|
||||||
auto sty = arr.strides()[arr.ndim() - 1];
|
|
||||||
if (stx == arr.shape(-1) && sty == 1) {
|
|
||||||
return std::make_tuple(false, stx, arr);
|
|
||||||
} else if (stx == 1 && sty == arr.shape(-2)) {
|
|
||||||
return std::make_tuple(true, sty, arr);
|
|
||||||
} else {
|
|
||||||
array arr_copy(arr.shape(), arr.dtype(), nullptr, {});
|
|
||||||
copy(arr, arr_copy, CopyType::General);
|
|
||||||
stx = arr.shape(-1);
|
|
||||||
return std::make_tuple(false, stx, arr_copy);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
auto [a_transposed, lda, a] = check_transpose(a_pre);
|
|
||||||
auto [b_transposed, ldb, b] = check_transpose(b_pre);
|
|
||||||
size_t M = a.shape(-2);
|
|
||||||
size_t N = b.shape(-1);
|
|
||||||
size_t K = a.shape(-1);
|
|
||||||
if (M == 0 || N == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (K == 0) {
|
|
||||||
std::memset(static_cast<void*>(out.data<float>()), 0, out.nbytes());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < (a.size() / (M * K)); ++i) {
|
|
||||||
cblas_sgemm(
|
|
||||||
CblasRowMajor,
|
|
||||||
a_transposed ? CblasTrans : CblasNoTrans, // transA
|
|
||||||
b_transposed ? CblasTrans : CblasNoTrans, // transB
|
|
||||||
M,
|
|
||||||
N,
|
|
||||||
K,
|
|
||||||
alpha, // alpha
|
|
||||||
a.data<float>() + elem_to_loc(M * K * i, a.shape(), a.strides()),
|
|
||||||
lda,
|
|
||||||
b.data<float>() + elem_to_loc(K * N * i, b.shape(), b.strides()),
|
|
||||||
ldb,
|
|
||||||
beta, // beta
|
|
||||||
out.data<float>() + M * N * i,
|
|
||||||
out.shape(-1) // ldc
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
void Matmul::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
if (out.dtype() != float32) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[Matmul::eval_cpu] Currently only supports float32.");
|
|
||||||
}
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
return matmul_common_general(inputs[0], inputs[1], out);
|
|
||||||
}
|
|
||||||
|
|
||||||
void AddMM::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
if (out.dtype() != float32) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[AddMM::eval_cpu] Currently only supports float32.");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill output with C
|
|
||||||
auto& c = inputs[2];
|
|
||||||
CopyType ctype = c.data_size() == 1 ? CopyType::Scalar : CopyType::General;
|
|
||||||
copy(c, out, ctype);
|
|
||||||
|
|
||||||
return matmul_common_general(inputs[0], inputs[1], out, alpha_, beta_);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/array.h"
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/backend/common/lapack.h"
|
|
||||||
#include "mlx/linalg.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
void ssyevd(
|
|
||||||
char jobz,
|
|
||||||
char uplo,
|
|
||||||
float* a,
|
|
||||||
int N,
|
|
||||||
float* w,
|
|
||||||
float* work,
|
|
||||||
int lwork,
|
|
||||||
int* iwork,
|
|
||||||
int liwork) {
|
|
||||||
int info;
|
|
||||||
MLX_LAPACK_FUNC(ssyevd)
|
|
||||||
(
|
|
||||||
/* jobz = */ &jobz,
|
|
||||||
/* uplo = */ &uplo,
|
|
||||||
/* n = */ &N,
|
|
||||||
/* a = */ a,
|
|
||||||
/* lda = */ &N,
|
|
||||||
/* w = */ w,
|
|
||||||
/* work = */ work,
|
|
||||||
/* lwork = */ &lwork,
|
|
||||||
/* iwork = */ iwork,
|
|
||||||
/* liwork = */ &liwork,
|
|
||||||
/* info = */ &info);
|
|
||||||
if (info != 0) {
|
|
||||||
std::stringstream msg;
|
|
||||||
msg << "[Eigh::eval_cpu] Eigenvalue decomposition failed with error code "
|
|
||||||
<< info;
|
|
||||||
throw std::runtime_error(msg.str());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
void Eigh::eval(const std::vector<array>& inputs, std::vector<array>& outputs) {
|
|
||||||
const auto& a = inputs[0];
|
|
||||||
auto& values = outputs[0];
|
|
||||||
|
|
||||||
auto vectors = compute_eigenvectors_
|
|
||||||
? outputs[1]
|
|
||||||
: array(a.shape(), a.dtype(), nullptr, {});
|
|
||||||
|
|
||||||
values.set_data(allocator::malloc_or_wait(values.nbytes()));
|
|
||||||
|
|
||||||
copy(
|
|
||||||
a,
|
|
||||||
vectors,
|
|
||||||
a.flags().row_contiguous ? CopyType::Vector : CopyType::General);
|
|
||||||
|
|
||||||
if (compute_eigenvectors_) {
|
|
||||||
// Set the strides and flags so the eigenvectors
|
|
||||||
// are in the columns of the output
|
|
||||||
auto flags = vectors.flags();
|
|
||||||
auto strides = vectors.strides();
|
|
||||||
auto ndim = a.ndim();
|
|
||||||
std::swap(strides[ndim - 1], strides[ndim - 2]);
|
|
||||||
|
|
||||||
if (a.size() > 1) {
|
|
||||||
flags.row_contiguous = false;
|
|
||||||
if (ndim > 2) {
|
|
||||||
flags.col_contiguous = false;
|
|
||||||
} else {
|
|
||||||
flags.col_contiguous = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
vectors.move_shared_buffer(vectors, strides, flags, vectors.data_size());
|
|
||||||
}
|
|
||||||
|
|
||||||
auto vec_ptr = vectors.data<float>();
|
|
||||||
auto eig_ptr = values.data<float>();
|
|
||||||
|
|
||||||
char jobz = compute_eigenvectors_ ? 'V' : 'N';
|
|
||||||
auto N = a.shape(-1);
|
|
||||||
|
|
||||||
// Work query
|
|
||||||
int lwork;
|
|
||||||
int liwork;
|
|
||||||
{
|
|
||||||
float work;
|
|
||||||
int iwork;
|
|
||||||
ssyevd(jobz, uplo_[0], nullptr, N, nullptr, &work, -1, &iwork, -1);
|
|
||||||
lwork = static_cast<int>(work);
|
|
||||||
liwork = iwork;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto work_buf = array::Data{allocator::malloc_or_wait(sizeof(float) * lwork)};
|
|
||||||
auto iwork_buf = array::Data{allocator::malloc_or_wait(sizeof(int) * liwork)};
|
|
||||||
for (size_t i = 0; i < a.size() / (N * N); ++i) {
|
|
||||||
ssyevd(
|
|
||||||
jobz,
|
|
||||||
uplo_[0],
|
|
||||||
vec_ptr,
|
|
||||||
N,
|
|
||||||
eig_ptr,
|
|
||||||
static_cast<float*>(work_buf.buffer.raw_ptr()),
|
|
||||||
lwork,
|
|
||||||
static_cast<int*>(iwork_buf.buffer.raw_ptr()),
|
|
||||||
liwork);
|
|
||||||
vec_ptr += N * N;
|
|
||||||
eig_ptr += N;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
|
||||||
|
|
||||||
#include <cmath>
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
/* Approximation to the inverse error function.
|
|
||||||
* Based on code from:
|
|
||||||
* https://stackoverflow.com/questions/27229371/inverse-error-function-in-c#answer-49743348
|
|
||||||
*/
|
|
||||||
float erfinv(float a) {
|
|
||||||
auto t = std::fma(a, 0.0f - a, 1.0f);
|
|
||||||
t = std::log(t);
|
|
||||||
float p;
|
|
||||||
if (std::abs(t) > 6.125f) { // maximum ulp error = 2.35793
|
|
||||||
p = 3.03697567e-10f; // 0x1.4deb44p-32
|
|
||||||
p = std::fma(p, t, 2.93243101e-8f); // 0x1.f7c9aep-26
|
|
||||||
p = std::fma(p, t, 1.22150334e-6f); // 0x1.47e512p-20
|
|
||||||
p = std::fma(p, t, 2.84108955e-5f); // 0x1.dca7dep-16
|
|
||||||
p = std::fma(p, t, 3.93552968e-4f); // 0x1.9cab92p-12
|
|
||||||
p = std::fma(p, t, 3.02698812e-3f); // 0x1.8cc0dep-9
|
|
||||||
p = std::fma(p, t, 4.83185798e-3f); // 0x1.3ca920p-8
|
|
||||||
p = std::fma(p, t, -2.64646143e-1f); // -0x1.0eff66p-2
|
|
||||||
p = std::fma(p, t, 8.40016484e-1f); // 0x1.ae16a4p-1
|
|
||||||
} else { // maximum ulp error = 2.35002
|
|
||||||
p = 5.43877832e-9f; // 0x1.75c000p-28
|
|
||||||
p = std::fma(p, t, 1.43285448e-7f); // 0x1.33b402p-23
|
|
||||||
p = std::fma(p, t, 1.22774793e-6f); // 0x1.499232p-20
|
|
||||||
p = std::fma(p, t, 1.12963626e-7f); // 0x1.e52cd2p-24
|
|
||||||
p = std::fma(p, t, -5.61530760e-5f); // -0x1.d70bd0p-15
|
|
||||||
p = std::fma(p, t, -1.47697632e-4f); // -0x1.35be90p-13
|
|
||||||
p = std::fma(p, t, 2.31468678e-3f); // 0x1.2f6400p-9
|
|
||||||
p = std::fma(p, t, 1.15392581e-2f); // 0x1.7a1e50p-7
|
|
||||||
p = std::fma(p, t, -2.32015476e-1f); // -0x1.db2aeep-3
|
|
||||||
p = std::fma(p, t, 8.86226892e-1f); // 0x1.c5bf88p-1
|
|
||||||
}
|
|
||||||
return a * p;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,87 +0,0 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
|
||||||
|
|
||||||
#include <numeric>
|
|
||||||
|
|
||||||
#include "mlx/3rdparty/pocketfft.h"
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
void FFT::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
auto& in = inputs[0];
|
|
||||||
std::vector<std::ptrdiff_t> strides_in(
|
|
||||||
in.strides().begin(), in.strides().end());
|
|
||||||
for (auto& s : strides_in) {
|
|
||||||
s *= in.itemsize();
|
|
||||||
}
|
|
||||||
std::vector<std::ptrdiff_t> strides_out(
|
|
||||||
out.strides().begin(), out.strides().end());
|
|
||||||
for (auto& s : strides_out) {
|
|
||||||
s *= out.itemsize();
|
|
||||||
}
|
|
||||||
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
|
|
||||||
std::vector<size_t> shape;
|
|
||||||
if (out.dtype() == float32) {
|
|
||||||
shape.insert(shape.end(), out.shape().begin(), out.shape().end());
|
|
||||||
} else {
|
|
||||||
shape.insert(shape.end(), in.shape().begin(), in.shape().end());
|
|
||||||
}
|
|
||||||
|
|
||||||
float scale = 1.0f;
|
|
||||||
if (inverse_) {
|
|
||||||
size_t nelem = std::accumulate(
|
|
||||||
axes_.begin(), axes_.end(), 1, [&shape](auto x, auto y) {
|
|
||||||
return x * shape[y];
|
|
||||||
});
|
|
||||||
scale /= nelem;
|
|
||||||
}
|
|
||||||
if (in.dtype() == complex64 && out.dtype() == complex64) {
|
|
||||||
auto in_ptr =
|
|
||||||
reinterpret_cast<const std::complex<float>*>(in.data<complex64_t>());
|
|
||||||
auto out_ptr =
|
|
||||||
reinterpret_cast<std::complex<float>*>(out.data<complex64_t>());
|
|
||||||
pocketfft::c2c(
|
|
||||||
shape,
|
|
||||||
strides_in,
|
|
||||||
strides_out,
|
|
||||||
axes_,
|
|
||||||
!inverse_,
|
|
||||||
in_ptr,
|
|
||||||
out_ptr,
|
|
||||||
scale);
|
|
||||||
} else if (in.dtype() == float32 && out.dtype() == complex64) {
|
|
||||||
auto in_ptr = in.data<float>();
|
|
||||||
auto out_ptr =
|
|
||||||
reinterpret_cast<std::complex<float>*>(out.data<complex64_t>());
|
|
||||||
pocketfft::r2c(
|
|
||||||
shape,
|
|
||||||
strides_in,
|
|
||||||
strides_out,
|
|
||||||
axes_,
|
|
||||||
!inverse_,
|
|
||||||
in_ptr,
|
|
||||||
out_ptr,
|
|
||||||
scale);
|
|
||||||
} else if (in.dtype() == complex64 && out.dtype() == float32) {
|
|
||||||
auto in_ptr =
|
|
||||||
reinterpret_cast<const std::complex<float>*>(in.data<complex64_t>());
|
|
||||||
auto out_ptr = out.data<float>();
|
|
||||||
pocketfft::c2r(
|
|
||||||
shape,
|
|
||||||
strides_in,
|
|
||||||
strides_out,
|
|
||||||
axes_,
|
|
||||||
!inverse_,
|
|
||||||
in_ptr,
|
|
||||||
out_ptr,
|
|
||||||
scale);
|
|
||||||
} else {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[FFT] Received unexpected input and output type combination.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -99,6 +99,10 @@ inline std::pair<int, int> decompose_hadamard(int n) {
|
|||||||
"[hadamard] Only supports n = m*2^k where m in (1, 12, 20, 28).");
|
"[hadamard] Only supports n = m*2^k where m in (1, 12, 20, 28).");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (n > (1 << 26)) {
|
||||||
|
throw std::invalid_argument(
|
||||||
|
"[hadamard] Only supports n = m*2^k where k <= 26");
|
||||||
|
}
|
||||||
return {n, m};
|
return {n, m};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,393 +0,0 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
|
||||||
#include <algorithm>
|
|
||||||
#include <cassert>
|
|
||||||
#include <cmath>
|
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/backend/common/utils.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
template <typename IdxT>
|
|
||||||
inline size_t offset_neg_idx(IdxT idx, size_t size) {
|
|
||||||
return (idx < 0) ? idx + size : idx;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <>
|
|
||||||
inline size_t offset_neg_idx(bool idx, size_t) {
|
|
||||||
return idx;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <>
|
|
||||||
inline size_t offset_neg_idx(uint32_t idx, size_t) {
|
|
||||||
return idx;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T, typename IdxT>
|
|
||||||
void gather(
|
|
||||||
const array& src,
|
|
||||||
const std::vector<array>& inds,
|
|
||||||
array& out,
|
|
||||||
const std::vector<int>& axes,
|
|
||||||
const Shape& slice_sizes) {
|
|
||||||
// If the array is row contiguous then we can do a contiguous copy given
|
|
||||||
// two conditions on the slice size:
|
|
||||||
// - Any number of leading ones in the slice sizes are allowed
|
|
||||||
// - All other slice sizes match the corresponding dimension except the
|
|
||||||
// first non-singleton slice size
|
|
||||||
// If the array is col contiguous then the reverse is the case:
|
|
||||||
// - Any number of trailing ones in the slice sizes are allowed
|
|
||||||
// - All other slice sizes match the corresponding dimension except the
|
|
||||||
// first non-singleton slice size from the end
|
|
||||||
|
|
||||||
bool can_copy = false;
|
|
||||||
if (src.flags().row_contiguous) {
|
|
||||||
can_copy = true;
|
|
||||||
|
|
||||||
// Ignore leading 1s
|
|
||||||
int i = 0;
|
|
||||||
for (; i < slice_sizes.size() && slice_sizes[i] == 1; ++i)
|
|
||||||
;
|
|
||||||
|
|
||||||
// Check the remaining
|
|
||||||
i++;
|
|
||||||
for (; i < src.ndim() && can_copy; ++i) {
|
|
||||||
can_copy = (src.shape(i) == slice_sizes[i]);
|
|
||||||
}
|
|
||||||
} else if (src.flags().col_contiguous) {
|
|
||||||
can_copy = true;
|
|
||||||
|
|
||||||
// Ignore trailing 1s
|
|
||||||
int i = slice_sizes.size() - 1;
|
|
||||||
for (; i >= 0 && slice_sizes[i] == 1; --i)
|
|
||||||
;
|
|
||||||
|
|
||||||
// Skip the next slice size and check the remaining
|
|
||||||
i--;
|
|
||||||
for (; i >= 0 && can_copy; --i) {
|
|
||||||
can_copy = (src.shape(i) == slice_sizes[i]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
size_t slice_size = 1;
|
|
||||||
for (auto s : slice_sizes) {
|
|
||||||
slice_size *= s;
|
|
||||||
}
|
|
||||||
size_t ind_size = slice_size == 0 ? 0 : out.size() / slice_size;
|
|
||||||
const T* src_ptr = src.data<T>();
|
|
||||||
T* dst_ptr = out.data<T>();
|
|
||||||
size_t out_idx = 0;
|
|
||||||
|
|
||||||
std::vector<ContiguousIterator> its(inds.begin(), inds.end());
|
|
||||||
ContiguousIterator src_it;
|
|
||||||
if (!can_copy && src.ndim() > 0) {
|
|
||||||
src_it = ContiguousIterator(slice_sizes, src.strides(), src.ndim());
|
|
||||||
}
|
|
||||||
for (int idx = 0; idx < ind_size; idx++) {
|
|
||||||
size_t src_idx = 0;
|
|
||||||
for (int ii = 0; ii < inds.size(); ++ii) {
|
|
||||||
auto ax = axes[ii];
|
|
||||||
auto idx_loc = its[ii].loc;
|
|
||||||
its[ii].step();
|
|
||||||
auto idx_val =
|
|
||||||
offset_neg_idx(inds[ii].data<IdxT>()[idx_loc], src.shape(ax));
|
|
||||||
src_idx += (idx_val * src.strides()[ax]);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (slice_size == 1) {
|
|
||||||
dst_ptr[out_idx++] = src_ptr[src_idx];
|
|
||||||
} else if (can_copy) {
|
|
||||||
std::copy(
|
|
||||||
src_ptr + src_idx, src_ptr + src_idx + slice_size, dst_ptr + out_idx);
|
|
||||||
out_idx += slice_size;
|
|
||||||
} else {
|
|
||||||
for (int jj = 0; jj < slice_size; jj++) {
|
|
||||||
dst_ptr[out_idx++] = src_ptr[src_idx + src_it.loc];
|
|
||||||
src_it.step();
|
|
||||||
}
|
|
||||||
src_it.reset();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename IdxT>
|
|
||||||
void dispatch_gather(
|
|
||||||
const array& src,
|
|
||||||
const std::vector<array>& inds,
|
|
||||||
array& out,
|
|
||||||
const std::vector<int>& axes,
|
|
||||||
const Shape& size) {
|
|
||||||
switch (out.dtype()) {
|
|
||||||
case bool_:
|
|
||||||
gather<bool, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case uint8:
|
|
||||||
gather<uint8_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case uint16:
|
|
||||||
gather<uint16_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case uint32:
|
|
||||||
gather<uint32_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case uint64:
|
|
||||||
gather<uint64_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case int8:
|
|
||||||
gather<int8_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case int16:
|
|
||||||
gather<int16_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case int32:
|
|
||||||
gather<int32_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case int64:
|
|
||||||
gather<int64_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case float16:
|
|
||||||
gather<float16_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case float32:
|
|
||||||
gather<float, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case bfloat16:
|
|
||||||
gather<bfloat16_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
case complex64:
|
|
||||||
gather<complex64_t, IdxT>(src, inds, out, axes, size);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Gather::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
|
|
||||||
auto& src = inputs[0];
|
|
||||||
std::vector<array> inds(inputs.begin() + 1, inputs.end());
|
|
||||||
|
|
||||||
if (inds.empty()) {
|
|
||||||
dispatch_gather<bool>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (inds[0].dtype()) {
|
|
||||||
case bool_:
|
|
||||||
dispatch_gather<bool>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case uint8:
|
|
||||||
dispatch_gather<uint8_t>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case uint16:
|
|
||||||
dispatch_gather<uint16_t>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case uint32:
|
|
||||||
dispatch_gather<uint32_t>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case uint64:
|
|
||||||
dispatch_gather<uint64_t>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case int8:
|
|
||||||
dispatch_gather<int8_t>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case int16:
|
|
||||||
dispatch_gather<int16_t>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case int32:
|
|
||||||
dispatch_gather<int32_t>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case int64:
|
|
||||||
dispatch_gather<int64_t>(src, inds, out, axes_, slice_sizes_);
|
|
||||||
break;
|
|
||||||
case float16:
|
|
||||||
case float32:
|
|
||||||
case bfloat16:
|
|
||||||
case complex64:
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[Gather::eval] Cannot gather with floating point indices.");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename InT, typename IdxT, typename OpT>
|
|
||||||
void scatter(
|
|
||||||
const array& updates,
|
|
||||||
array& out,
|
|
||||||
const std::vector<array>& inds,
|
|
||||||
const std::vector<int>& axes,
|
|
||||||
const OpT& op) {
|
|
||||||
int nind = inds.size();
|
|
||||||
auto inds_ndim = updates.ndim() - out.ndim();
|
|
||||||
size_t n_updates = nind ? inds[0].size() : 1;
|
|
||||||
|
|
||||||
Shape update_shape(
|
|
||||||
updates.shape().begin() + inds_ndim, updates.shape().end());
|
|
||||||
size_t update_size = 1;
|
|
||||||
for (auto us : update_shape) {
|
|
||||||
update_size *= us;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<ContiguousIterator> its(inds.begin(), inds.end());
|
|
||||||
ContiguousIterator update_it(updates);
|
|
||||||
ContiguousIterator out_it(update_shape, out.strides(), out.ndim());
|
|
||||||
|
|
||||||
for (int i = 0; i < n_updates; ++i) {
|
|
||||||
size_t out_offset = 0;
|
|
||||||
for (int j = 0; j < nind; ++j) {
|
|
||||||
auto ax = axes[j];
|
|
||||||
auto idx_loc = its[j].loc;
|
|
||||||
its[j].step();
|
|
||||||
auto idx_val =
|
|
||||||
offset_neg_idx(inds[j].data<IdxT>()[idx_loc], out.shape(ax));
|
|
||||||
out_offset += (idx_val * out.strides()[ax]);
|
|
||||||
}
|
|
||||||
update_it.seek(i * update_size);
|
|
||||||
for (int j = 0; j < update_size; ++j) {
|
|
||||||
op(updates.data<InT>()[update_it.loc],
|
|
||||||
out.data<InT>() + out_offset + out_it.loc);
|
|
||||||
update_it.step();
|
|
||||||
out_it.step();
|
|
||||||
}
|
|
||||||
out_it.reset();
|
|
||||||
update_it.reset();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename InT, typename IdxT>
|
|
||||||
void dispatch_scatter_inds(
|
|
||||||
array& out,
|
|
||||||
const std::vector<array>& indices,
|
|
||||||
const array& updates,
|
|
||||||
const std::vector<int>& axes,
|
|
||||||
Scatter::ReduceType rtype) {
|
|
||||||
switch (rtype) {
|
|
||||||
case Scatter::None:
|
|
||||||
scatter<InT, IdxT>(
|
|
||||||
updates, out, indices, axes, [](auto x, auto* y) { (*y) = x; });
|
|
||||||
break;
|
|
||||||
case Scatter::Sum:
|
|
||||||
scatter<InT, IdxT>(
|
|
||||||
updates, out, indices, axes, [](auto x, auto* y) { (*y) += x; });
|
|
||||||
break;
|
|
||||||
case Scatter::Prod:
|
|
||||||
scatter<InT, IdxT>(
|
|
||||||
updates, out, indices, axes, [](auto x, auto* y) { (*y) *= x; });
|
|
||||||
break;
|
|
||||||
case Scatter::Max:
|
|
||||||
scatter<InT, IdxT>(updates, out, indices, axes, [](auto x, auto* y) {
|
|
||||||
(*y) = (*y > x) ? *y : x;
|
|
||||||
});
|
|
||||||
break;
|
|
||||||
case Scatter::Min:
|
|
||||||
scatter<InT, IdxT>(updates, out, indices, axes, [](auto x, auto* y) {
|
|
||||||
(*y) = (*y < x) ? *y : x;
|
|
||||||
});
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename InT>
|
|
||||||
void dispatch_scatter(
|
|
||||||
array& out,
|
|
||||||
const std::vector<array>& inds,
|
|
||||||
const array& updates,
|
|
||||||
const std::vector<int>& axes,
|
|
||||||
Scatter::ReduceType rtype) {
|
|
||||||
if (inds.empty()) {
|
|
||||||
dispatch_scatter_inds<InT, bool>(out, inds, updates, axes, rtype);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (inds[0].dtype()) {
|
|
||||||
case bool_:
|
|
||||||
dispatch_scatter_inds<InT, bool>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case uint8:
|
|
||||||
dispatch_scatter_inds<InT, uint8_t>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case uint16:
|
|
||||||
dispatch_scatter_inds<InT, uint16_t>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case uint32:
|
|
||||||
dispatch_scatter_inds<InT, uint32_t>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case uint64:
|
|
||||||
dispatch_scatter_inds<InT, uint64_t>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case int8:
|
|
||||||
dispatch_scatter_inds<InT, int8_t>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case int16:
|
|
||||||
dispatch_scatter_inds<InT, int16_t>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case int32:
|
|
||||||
dispatch_scatter_inds<InT, int32_t>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case int64:
|
|
||||||
dispatch_scatter_inds<InT, int64_t>(out, inds, updates, axes, rtype);
|
|
||||||
break;
|
|
||||||
case float16:
|
|
||||||
case float32:
|
|
||||||
case bfloat16:
|
|
||||||
case complex64:
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[Scatter::eval_cpu] Cannot scatter with floating point indices.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Scatter::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
assert(inputs.size() >= 2);
|
|
||||||
|
|
||||||
auto& src = inputs[0];
|
|
||||||
std::vector<array> inds(inputs.begin() + 1, inputs.end() - 1);
|
|
||||||
auto& updates = inputs.back();
|
|
||||||
|
|
||||||
// Copy src into out (copy allocates memory for out)
|
|
||||||
copy(src, out, CopyType::General);
|
|
||||||
|
|
||||||
switch (src.dtype()) {
|
|
||||||
case bool_:
|
|
||||||
dispatch_scatter<bool>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case uint8:
|
|
||||||
dispatch_scatter<uint8_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case uint16:
|
|
||||||
dispatch_scatter<uint16_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case uint32:
|
|
||||||
dispatch_scatter<uint32_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case uint64:
|
|
||||||
dispatch_scatter<uint64_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case int8:
|
|
||||||
dispatch_scatter<int8_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case int16:
|
|
||||||
dispatch_scatter<int16_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case int32:
|
|
||||||
dispatch_scatter<int32_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case int64:
|
|
||||||
dispatch_scatter<int64_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case float16:
|
|
||||||
dispatch_scatter<float16_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case float32:
|
|
||||||
dispatch_scatter<float>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case bfloat16:
|
|
||||||
dispatch_scatter<bfloat16_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
case complex64:
|
|
||||||
dispatch_scatter<complex64_t>(out, inds, updates, axes_, reduce_type_);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,120 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/backend/common/lapack.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
int strtri_wrapper(char uplo, char diag, float* matrix, int N) {
|
|
||||||
int info;
|
|
||||||
MLX_LAPACK_FUNC(strtri)
|
|
||||||
(
|
|
||||||
/* uplo = */ &uplo,
|
|
||||||
/* diag = */ &diag,
|
|
||||||
/* N = */ &N,
|
|
||||||
/* a = */ matrix,
|
|
||||||
/* lda = */ &N,
|
|
||||||
/* info = */ &info);
|
|
||||||
return info;
|
|
||||||
}
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
// Invert the i-th N x N float matrix stored in `inv` in place, using an LU
// factorization (sgetrf) followed by sgetri.
//
//   inv — batch of square matrices; matrix i starts at offset N * N * i.
//   N   — matrix dimension.
//   i   — batch index of the matrix to invert.
//
// Throws std::runtime_error with the LAPACK status code on failure.
//
// Uses MLX_LAPACK_FUNC (like strtri_wrapper above) instead of calling
// sgetrf_/sgetri_ directly, so the LAPACK >= 3.9.1 symbol-naming scheme
// handled in lapack.h applies uniformly.
void general_inv(array& inv, int N, int i) {
  int info;
  auto ipiv = array::Data{allocator::malloc_or_wait(sizeof(int) * N)};
  float* matrix = inv.data<float>() + N * N * i;

  // Compute LU factorization (in place).
  MLX_LAPACK_FUNC(sgetrf)
  (/* m = */ &N,
   /* n = */ &N,
   /* a = */ matrix,
   /* lda = */ &N,
   /* ipiv = */ static_cast<int*>(ipiv.buffer.raw_ptr()),
   /* info = */ &info);

  if (info != 0) {
    std::stringstream ss;
    ss << "inverse_impl: LU factorization failed with error code " << info;
    throw std::runtime_error(ss.str());
  }

  // Workspace-size query: lwork == -1 asks LAPACK to report the optimal
  // workspace size in `workspace_size` without doing any actual work.
  static const int lwork_query = -1;
  float workspace_size = 0;

  MLX_LAPACK_FUNC(sgetri)
  (/* m = */ &N,
   /* a = */ nullptr,
   /* lda = */ &N,
   /* ipiv = */ nullptr,
   /* work = */ &workspace_size,
   /* lwork = */ &lwork_query,
   /* info = */ &info);

  if (info != 0) {
    std::stringstream ss;
    ss << "inverse_impl: LU workspace calculation failed with error code "
       << info;
    throw std::runtime_error(ss.str());
  }

  // LAPACK returns the workspace size as a float; cast explicitly.
  const int lwork = static_cast<int>(workspace_size);
  auto scratch = array::Data{allocator::malloc_or_wait(sizeof(float) * lwork)};

  // Compute the inverse from the LU factors.
  MLX_LAPACK_FUNC(sgetri)
  (/* m = */ &N,
   /* a = */ matrix,
   /* lda = */ &N,
   /* ipiv = */ static_cast<int*>(ipiv.buffer.raw_ptr()),
   /* work = */ static_cast<float*>(scratch.buffer.raw_ptr()),
   /* lwork = */ &lwork,
   /* info = */ &info);

  if (info != 0) {
    std::stringstream ss;
    ss << "inverse_impl: inversion failed with error code " << info;
    throw std::runtime_error(ss.str());
  }
}
|
|
||||||
|
|
||||||
// Invert the i-th N x N triangular float matrix of `inv` in place.
//
// Note the uplo flag is deliberately flipped ('L' when `upper` is true):
// the data is row-major but LAPACK assumes column-major, so LAPACK sees
// the transpose and the triangle it must read is the opposite one (this
// relies on the (A⁻¹)ᵀ = (Aᵀ)⁻¹ identity used throughout inverse_impl).
void tri_inv(array& inv, int N, int i, bool upper) {
  const char uplo = upper ? 'L' : 'U';
  const char diag = 'N'; // non-unit diagonal
  float* matrix = inv.data<float>() + N * N * i;
  if (int info = strtri_wrapper(uplo, diag, matrix, N); info != 0) {
    std::stringstream ss;
    ss << "inverse_impl: triangular inversion failed with error code " << info;
    throw std::runtime_error(ss.str());
  }
}
|
|
||||||
|
|
||||||
void inverse_impl(const array& a, array& inv, bool tri, bool upper) {
|
|
||||||
// Lapack uses the column-major convention. We take advantage of the following
|
|
||||||
// identity to avoid transposing (see
|
|
||||||
// https://math.stackexchange.com/a/340234):
|
|
||||||
// (A⁻¹)ᵀ = (Aᵀ)⁻¹
|
|
||||||
|
|
||||||
// The inverse is computed in place, so just copy the input to the output.
|
|
||||||
copy(a, inv, a.flags().row_contiguous ? CopyType::Vector : CopyType::General);
|
|
||||||
|
|
||||||
const int N = a.shape(-1);
|
|
||||||
const size_t num_matrices = a.size() / (N * N);
|
|
||||||
|
|
||||||
for (int i = 0; i < num_matrices; i++) {
|
|
||||||
if (tri) {
|
|
||||||
tri_inv(inv, N, i, upper);
|
|
||||||
} else {
|
|
||||||
general_inv(inv, N, i);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void Inverse::eval(const std::vector<array>& inputs, array& output) {
|
|
||||||
if (inputs[0].dtype() != float32) {
|
|
||||||
throw std::runtime_error("[Inverse::eval] only supports float32.");
|
|
||||||
}
|
|
||||||
inverse_impl(inputs[0], output, tri_, upper_);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
// Required for Visual Studio.
|
|
||||||
// https://github.com/OpenMathLib/OpenBLAS/blob/develop/docs/install.md
|
|
||||||
#ifdef _MSC_VER
|
|
||||||
#include <complex>
|
|
||||||
#define LAPACK_COMPLEX_CUSTOM
|
|
||||||
#define lapack_complex_float std::complex<float>
|
|
||||||
#define lapack_complex_double std::complex<double>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef ACCELERATE_NEW_LAPACK
|
|
||||||
#include <Accelerate/Accelerate.h>
|
|
||||||
#else
|
|
||||||
#include <cblas.h>
|
|
||||||
#include <lapack.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(LAPACK_GLOBAL) || defined(LAPACK_NAME)
|
|
||||||
|
|
||||||
// This is to work around a change in the function signatures of lapack >= 3.9.1
|
|
||||||
// where functions taking char* also include a strlen argument, see a similar
|
|
||||||
// change in OpenCV:
|
|
||||||
// https://github.com/opencv/opencv/blob/1eb061f89de0fb85c4c75a2deeb0f61a961a63ad/cmake/OpenCVFindLAPACK.cmake#L57
|
|
||||||
#define MLX_LAPACK_FUNC(f) LAPACK_##f
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
#define MLX_LAPACK_FUNC(f) f##_
|
|
||||||
|
|
||||||
#endif
|
|
||||||
@@ -1,12 +1,10 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
// Copyright © 2023 Apple Inc.
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <cassert>
|
|
||||||
#include <utility>
|
#include <utility>
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
|
||||||
#include "mlx/backend/common/load.h"
|
|
||||||
#include "mlx/primitives.h"
|
#include "mlx/primitives.h"
|
||||||
|
#include "mlx/scheduler.h"
|
||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
@@ -29,33 +27,31 @@ void swap_endianness(uint8_t* data_bytes, size_t N) {
|
|||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
|
|
||||||
void load(
|
void Load::eval_cpu(const std::vector<array>& /* inputs */, array& out) {
|
||||||
array& out,
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
size_t offset,
|
auto read_task = [out_ptr = out.data<char>(),
|
||||||
const std::shared_ptr<io::Reader>& reader,
|
size = out.size(),
|
||||||
bool swap_endianness_) {
|
itemsize = out.itemsize(),
|
||||||
reader->read(out.data<char>(), out.nbytes(), offset);
|
offset = offset_,
|
||||||
|
reader = reader_,
|
||||||
|
swap_endianness_ = swap_endianness_]() mutable {
|
||||||
|
reader->read(out_ptr, size * itemsize, offset);
|
||||||
if (swap_endianness_) {
|
if (swap_endianness_) {
|
||||||
switch (out.itemsize()) {
|
switch (itemsize) {
|
||||||
case 2:
|
case 2:
|
||||||
swap_endianness<2>(out.data<uint8_t>(), out.data_size());
|
swap_endianness<2>(reinterpret_cast<uint8_t*>(out_ptr), size);
|
||||||
break;
|
break;
|
||||||
case 4:
|
case 4:
|
||||||
swap_endianness<4>(out.data<uint8_t>(), out.data_size());
|
swap_endianness<4>(reinterpret_cast<uint8_t*>(out_ptr), size);
|
||||||
break;
|
break;
|
||||||
case 8:
|
case 8:
|
||||||
swap_endianness<8>(out.data<uint8_t>(), out.data_size());
|
swap_endianness<8>(reinterpret_cast<uint8_t*>(out_ptr), size);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
};
|
||||||
|
auto fut = io::thread_pool().enqueue(std::move(read_task)).share();
|
||||||
void Load::eval(const std::vector<array>& inputs, array& out) {
|
scheduler::enqueue(stream(), [fut = std::move(fut)]() { fut.wait(); });
|
||||||
assert(inputs.size() == 0);
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
|
|
||||||
load(out, offset_, reader_, swap_endianness_);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -1,14 +0,0 @@
|
|||||||
// Copyright © 2024 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/array.h"
|
|
||||||
#include "mlx/io/load.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
void load(
|
|
||||||
array& out,
|
|
||||||
size_t offset,
|
|
||||||
const std::shared_ptr<io::Reader>& reader,
|
|
||||||
bool swap_endianess);
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,300 +0,0 @@
|
|||||||
// Copyright © 2024 Apple Inc.
|
|
||||||
|
|
||||||
#include <cstring>
|
|
||||||
|
|
||||||
#include "mlx/array.h"
|
|
||||||
#include "mlx/backend/common/copy.h"
|
|
||||||
#include "mlx/backend/common/lapack.h"
|
|
||||||
#include "mlx/backend/common/utils.h"
|
|
||||||
#include "mlx/primitives.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
// Apply a tile-level mask to an (X x Y) matrix in place.
//
//   data                     — matrix being masked, with element strides
//                              X_data_str (rows) / Y_data_str (columns).
//   mask                     — one entry per (block_size x block_size) tile,
//                              read with strides X_mask_str / Y_mask_str,
//                              starting at mask_offset.
//
// For a bool mask, tiles whose entry is false are zeroed. For any other
// mask type, each tile is multiplied elementwise by its mask value; tiles
// whose mask value compares equal to 1 are skipped as a no-op.
template <typename T, typename mask_t>
inline void mask_matrix(
    T* data,
    const mask_t* mask,
    int block_size,
    const int X,
    const int Y,
    const int64_t X_data_str,
    const int64_t Y_data_str,
    const int64_t X_mask_str,
    const int64_t Y_mask_str,
    const size_t mask_offset) {
  // Number of tiles per dimension, rounded up to cover ragged edges.
  int tX = (X + block_size - 1) / block_size;
  int tY = (Y + block_size - 1) / block_size;

  for (int i = 0; i < tX; i++) {
    for (int j = 0; j < tY; j++) {
      mask_t do_mask = mask[mask_offset + i * X_mask_str + j * Y_mask_str];
      // Mask value 1 (or true) leaves the tile untouched.
      if (do_mask != 1) {
        int loc_x = i * block_size;
        int loc_y = j * block_size;
        T* data_block = data + loc_x * X_data_str + loc_y * Y_data_str;

        // Clamp the tile extent at the matrix boundary.
        int size_x = std::min(block_size, X - loc_x);
        int size_y = std::min(block_size, Y - loc_y);
        for (int ii = 0; ii < size_x; ii++) {
          for (int jj = 0; jj < size_y; jj++) {
            if constexpr (std::is_same_v<mask_t, bool>) {
              data_block[ii * X_data_str + jj * Y_data_str] = T(0.);
            } else {
              data_block[ii * X_data_str + jj * Y_data_str] *= do_mask;
            }
          }
        }
      }
    }
  }
}
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
// Block-masked matrix multiply: out = (mask_a ⊙ A) @ (mask_b ⊙ B), then
// optionally masked again by an output mask, where masks act on
// block_size_ x block_size_ tiles (see mask_matrix above).
//
// inputs layout: {A, B[, out_mask][, a_mask, b_mask]} — the presence of
// the operand/output masks is inferred from inputs.size() below.
void BlockMaskedMM::eval(const std::vector<array>& inputs, array& out) {
  if (out.dtype() != float32) {
    throw std::runtime_error(
        "[BlockMaskedMM::eval] Currently only supports float32.");
  }
  out.set_data(allocator::malloc_or_wait(out.nbytes()));

  auto& a_pre = inputs[0];
  auto& b_pre = inputs[1];

  // Detect a (possibly transposed) row-major layout so the matrix can be
  // handed to BLAS directly; otherwise copy to a dense row-major buffer.
  // `do_copy` forces a copy even for BLAS-compatible layouts (needed when
  // operand masks will mutate the data in place). `expand_all` forces the
  // general copy path (used for non-boolean masks that scale the data).
  // Returns {transposed, leading dimension, array to use}.
  auto check_transpose =
      [](const array& arr, bool do_copy, bool expand_all = false) {
        auto stx = arr.strides()[arr.ndim() - 2];
        auto sty = arr.strides()[arr.ndim() - 1];
        if (!expand_all && stx == arr.shape(-1) && sty == 1) {
          if (do_copy) {
            array arr_copy(arr.shape(), arr.dtype(), nullptr, {});
            copy(arr, arr_copy, CopyType::Vector);
            return std::make_tuple(false, stx, arr_copy);
          }
          return std::make_tuple(false, stx, arr);
        } else if (!expand_all && stx == 1 && sty == arr.shape(-2)) {
          if (do_copy) {
            array arr_copy(arr.shape(), arr.dtype(), nullptr, {});
            copy(arr, arr_copy, CopyType::Vector);
            return std::make_tuple(true, sty, arr_copy);
          }
          return std::make_tuple(true, sty, arr);
        } else {
          array arr_copy(arr.shape(), arr.dtype(), nullptr, {});
          copy(arr, arr_copy, CopyType::General);
          int64_t stx = arr.shape(-1);
          return std::make_tuple(false, stx, arr_copy);
        }
      };

  // Which masks are present, by argument count:
  //   3 args -> out mask only; 4 -> op masks only; 5 -> both.
  bool has_op_mask = inputs.size() > 3;
  bool has_out_mask = inputs.size() == 3 || inputs.size() == 5;
  auto [a_transposed, lda, a] =
      check_transpose(a_pre, has_op_mask, inputs.back().dtype() != bool_);
  auto [b_transposed, ldb, b] =
      check_transpose(b_pre, has_op_mask, inputs.back().dtype() != bool_);

  size_t M = a.shape(-2);
  size_t N = b.shape(-1);
  size_t K = a.shape(-1);

  // Degenerate output: nothing to compute.
  if (M == 0 || N == 0) {
    return;
  }

  // Zero inner dimension: the product is all zeros.
  if (K == 0) {
    std::memset(static_cast<void*>(out.data<float>()), 0, out.nbytes());
    return;
  }

  // Apply `mask` (bool or float) to the batch_idx-th (X x Y) matrix at
  // `data`, dispatching on the mask dtype.
  auto mask_array = [](const array& mask,
                       float* data,
                       int block_size,
                       int batch_idx,
                       int X,
                       int Y,
                       size_t X_data_str,
                       size_t Y_data_str) {
    // Offset of this batch's mask matrix, honoring broadcast strides.
    auto mask_offset = elem_to_loc(
        mask.shape(-1) * mask.shape(-2) * batch_idx,
        mask.shape(),
        mask.strides());

    auto X_mask_str = mask.strides()[mask.ndim() - 2];
    auto Y_mask_str = mask.strides()[mask.ndim() - 1];

    if (mask.dtype() == bool_) {
      return mask_matrix(
          data,
          mask.data<bool>(),
          block_size,
          X,
          Y,
          X_data_str,
          Y_data_str,
          X_mask_str,
          Y_mask_str,
          mask_offset);
    } else {
      return mask_matrix(
          data,
          mask.data<float>(),
          block_size,
          X,
          Y,
          X_data_str,
          Y_data_str,
          X_mask_str,
          Y_mask_str,
          mask_offset);
    }
  };

  // NOTE(review): `i` is a signed int compared against a size_t expression
  // (-Wsign-compare); consider a size_t loop index.
  for (int i = 0; i < (out.size() / (M * size_t(N))); ++i) {
    // Adjust pointer
    float* ai =
        a.data<float>() + elem_to_loc(M * K * i, a.shape(), a.strides());
    float* bi =
        b.data<float>() + elem_to_loc(K * N * i, b.shape(), b.strides());
    float* ci = out.data<float>() + M * N * i;

    // Zero out blocks in a and b if needed
    if (has_op_mask) {
      auto& a_mask = inputs[inputs.size() - 2];
      mask_array(
          a_mask,
          ai,
          block_size_,
          i,
          M,
          K,
          a_transposed ? 1 : lda,
          a_transposed ? lda : 1);

      auto& b_mask = inputs[inputs.size() - 1];
      mask_array(
          b_mask,
          bi,
          block_size_,
          i,
          K,
          N,
          b_transposed ? 1 : ldb,
          b_transposed ? ldb : 1);
    }

    // Do matmul
    cblas_sgemm(
        CblasRowMajor,
        a_transposed ? CblasTrans : CblasNoTrans, // transA
        b_transposed ? CblasTrans : CblasNoTrans, // transB
        M,
        N,
        K,
        1.0, // alpha
        ai,
        lda,
        bi,
        ldb,
        0.0, // beta
        ci,
        out.shape(-1) // ldc
    );

    // Zero out blocks in out
    if (has_out_mask) {
      mask_array(inputs[2], ci, block_size_, i, M, N, N, 1);
    }
  }
}
|
|
||||||
|
|
||||||
void GatherMM::eval(const std::vector<array>& inputs, array& out) {
|
|
||||||
if (out.dtype() != float32) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"[GatherMM::eval] Currently only supports float32.");
|
|
||||||
}
|
|
||||||
out.set_data(allocator::malloc_or_wait(out.nbytes()));
|
|
||||||
|
|
||||||
auto& a_pre = inputs[0];
|
|
||||||
auto& b_pre = inputs[1];
|
|
||||||
|
|
||||||
auto check_transpose = [](const array& arr) {
|
|
||||||
auto stx = arr.strides()[arr.ndim() - 2];
|
|
||||||
auto sty = arr.strides()[arr.ndim() - 1];
|
|
||||||
if (stx == arr.shape(-1) && sty == 1) {
|
|
||||||
return std::make_tuple(false, stx, arr);
|
|
||||||
} else if (stx == 1 && sty == arr.shape(-2)) {
|
|
||||||
return std::make_tuple(true, sty, arr);
|
|
||||||
} else {
|
|
||||||
array arr_copy(arr.shape(), arr.dtype(), nullptr, {});
|
|
||||||
copy(arr, arr_copy, CopyType::General);
|
|
||||||
int64_t stx = arr.shape(-1);
|
|
||||||
return std::make_tuple(false, stx, arr_copy);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
auto [a_transposed, lda, a] = check_transpose(a_pre);
|
|
||||||
auto [b_transposed, ldb, b] = check_transpose(b_pre);
|
|
||||||
|
|
||||||
size_t M = a.shape(-2);
|
|
||||||
size_t N = b.shape(-1);
|
|
||||||
size_t K = a.shape(-1);
|
|
||||||
|
|
||||||
if (M == 0 || N == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (K == 0) {
|
|
||||||
std::memset(static_cast<void*>(out.data<float>()), 0, out.nbytes());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get batch dims
|
|
||||||
auto batch_size_out = out.size() / (M * N);
|
|
||||||
size_t matrix_stride_out = M * N;
|
|
||||||
|
|
||||||
auto get_batch_dims = [](const auto& v) {
|
|
||||||
return decltype(v){v.begin(), v.end() - 2};
|
|
||||||
};
|
|
||||||
|
|
||||||
auto& lhs_indices = inputs[2];
|
|
||||||
auto& rhs_indices = inputs[3];
|
|
||||||
|
|
||||||
auto batch_shape = get_batch_dims(out.shape());
|
|
||||||
int batch_ndim = batch_shape.size();
|
|
||||||
|
|
||||||
auto batch_shape_A = get_batch_dims(a.shape());
|
|
||||||
auto batch_strides_A = get_batch_dims(a.strides());
|
|
||||||
auto batch_shape_B = get_batch_dims(b.shape());
|
|
||||||
auto batch_strides_B = get_batch_dims(b.strides());
|
|
||||||
|
|
||||||
const uint32_t* lhs_indices_ptr = lhs_indices.data<uint32_t>();
|
|
||||||
const uint32_t* rhs_indices_ptr = rhs_indices.data<uint32_t>();
|
|
||||||
|
|
||||||
for (int i = 0; i < batch_size_out; i++) {
|
|
||||||
// Get index
|
|
||||||
uint32_t indx_A = lhs_indices_ptr[elem_to_loc(i, lhs_indices)];
|
|
||||||
uint32_t indx_B = rhs_indices_ptr[elem_to_loc(i, rhs_indices)];
|
|
||||||
|
|
||||||
cblas_sgemm(
|
|
||||||
CblasRowMajor,
|
|
||||||
a_transposed ? CblasTrans : CblasNoTrans, // transA
|
|
||||||
b_transposed ? CblasTrans : CblasNoTrans, // transB
|
|
||||||
M,
|
|
||||||
N,
|
|
||||||
K,
|
|
||||||
1.0f, // alpha
|
|
||||||
a.data<float>() + elem_to_loc(indx_A, batch_shape_A, batch_strides_A),
|
|
||||||
lda,
|
|
||||||
b.data<float>() + elem_to_loc(indx_B, batch_shape_B, batch_strides_B),
|
|
||||||
ldb,
|
|
||||||
0.0f, // beta
|
|
||||||
out.data<float>() + matrix_stride_out * i,
|
|
||||||
out.shape(-1) // ldc
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
67
mlx/backend/common/matmul.h
Normal file
67
mlx/backend/common/matmul.h
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "mlx/backend/common/utils.h"
|
||||||
|
#include "mlx/utils.h"
|
||||||
|
|
||||||
|
#include <sstream>
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
inline std::tuple<Shape, Strides, Strides> collapse_batches(
|
||||||
|
const array& a,
|
||||||
|
const array& b) {
|
||||||
|
if (a.ndim() == 2) {
|
||||||
|
return {Shape{1}, Strides{0}, Strides{0}};
|
||||||
|
}
|
||||||
|
|
||||||
|
Shape A_bshape{a.shape().begin(), a.shape().end() - 2};
|
||||||
|
Strides A_bstride{a.strides().begin(), a.strides().end() - 2};
|
||||||
|
Strides B_bstride{b.strides().begin(), b.strides().end() - 2};
|
||||||
|
|
||||||
|
auto [batch_shape, batch_strides] =
|
||||||
|
collapse_contiguous_dims(A_bshape, std::vector{A_bstride, B_bstride});
|
||||||
|
|
||||||
|
auto a_batch_strides = batch_strides[0];
|
||||||
|
auto b_batch_strides = batch_strides[1];
|
||||||
|
|
||||||
|
if (batch_shape.empty()) {
|
||||||
|
batch_shape.push_back(1);
|
||||||
|
a_batch_strides.push_back(0);
|
||||||
|
b_batch_strides.push_back(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
return std::make_tuple(batch_shape, a_batch_strides, b_batch_strides);
|
||||||
|
}
|
||||||
|
|
||||||
|
inline std::tuple<Shape, Strides, Strides, Strides>
|
||||||
|
collapse_batches(const array& a, const array& b, const array& c) {
|
||||||
|
if (a.ndim() == 2) {
|
||||||
|
return {Shape{1}, Strides{0}, Strides{0}, Strides{0}};
|
||||||
|
}
|
||||||
|
|
||||||
|
Shape A_bshape{a.shape().begin(), a.shape().end() - 2};
|
||||||
|
Strides A_bstride{a.strides().begin(), a.strides().end() - 2};
|
||||||
|
Strides B_bstride{b.strides().begin(), b.strides().end() - 2};
|
||||||
|
Strides C_bstride{c.strides().begin(), c.strides().end() - 2};
|
||||||
|
|
||||||
|
auto [batch_shape, batch_strides] = collapse_contiguous_dims(
|
||||||
|
A_bshape, std::vector{A_bstride, B_bstride, C_bstride});
|
||||||
|
|
||||||
|
auto A_batch_stride = batch_strides[0];
|
||||||
|
auto B_batch_stride = batch_strides[1];
|
||||||
|
auto C_batch_stride = batch_strides[2];
|
||||||
|
|
||||||
|
if (batch_shape.empty()) {
|
||||||
|
batch_shape.push_back(1);
|
||||||
|
A_batch_stride.push_back(0);
|
||||||
|
B_batch_stride.push_back(0);
|
||||||
|
C_batch_stride.push_back(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
return std::make_tuple(
|
||||||
|
batch_shape, A_batch_stride, B_batch_stride, C_batch_stride);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
@@ -1,680 +0,0 @@
|
|||||||
// Copyright © 2023-2024 Apple Inc.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <cmath>
|
|
||||||
#include <complex>
|
|
||||||
|
|
||||||
namespace mlx::core::detail {
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
constexpr float inf = std::numeric_limits<float>::infinity();
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
typedef union {
|
|
||||||
int i;
|
|
||||||
float f;
|
|
||||||
} IntOrFloat;
|
|
||||||
|
|
||||||
// Fast approximate expf. Splits x * log2(e) into integer and fractional
// parts, evaluates a degree-6 polynomial for 2^fpart, and builds 2^ipart
// directly from the IEEE-754 exponent bits.
inline float fast_exp(float x) {
  if (x == -std::numeric_limits<float>::infinity()) {
    return 0.0f;
  } else if (x == std::numeric_limits<float>::infinity() || std::isnan(x)) {
    return x;
  }
  // Float literals throughout: the previous `1.442695` / `0.5` double
  // literals silently promoted the computation to double and back
  // (performance-type-promotion-in-math-fn).
  x *= 1.442695f; // multiply with log_2(e)
  x = std::max(-80.f, std::min(x, 80.f));
  float ipart = std::floor(x + 0.5f);
  float fpart = x - ipart;

  // Polynomial approximation of 2^fpart for fpart in [-0.5, 0.5].
  x = 1.535336188319500e-4f;
  x = x * fpart + 1.339887440266574e-3f;
  x = x * fpart + 9.618437357674640e-3f;
  x = x * fpart + 5.550332471162809e-2f;
  x = x * fpart + 2.402264791363012e-1f;
  x = x * fpart + 6.931472028550421e-1f;
  x = x * fpart + 1.000000000000000f;

  // Generate 2**ipart in the floating point representation using integer
  // bitshifting. memcpy instead of a union read: reading the inactive
  // member of a union is UB in C++ (strict aliasing); memcpy compiles to
  // the same bit move.
  int ebits = (static_cast<int>(ipart) + 127) << 23;
  float epart;
  std::memcpy(&epart, &ebits, sizeof(epart));

  return epart * x;
}
|
|
||||||
|
|
||||||
// Single-precision polynomial approximation of erf(a).
// Two branches on |a|: a central polynomial in a^2 and a tail branch that
// goes through exp; maximum error is just under 1 ulp in either branch.
inline float fast_erf(float a) {
  float abs_a = std::abs(a);
  float sq = a * a;
  if (abs_a > 0.927734375f) {
    // Tail branch: maximum error 0.99527 ulp.
    float lo = std::fma(
        -1.72853470e-5f, abs_a, 3.83197126e-4f); // -0x1.220000p-16,0x1.91cfb2p-12
    float hi = std::fma(
        -3.88396438e-3f, abs_a, 2.42546219e-2f); // -0x1.fd1438p-9, 0x1.8d6342p-6
    float p = std::fma(lo, sq, hi);
    p = std::fma(p, abs_a, -1.06777877e-1f); // -0x1.b55cb8p-4
    p = std::fma(p, abs_a, -6.34846687e-1f); // -0x1.450aa0p-1
    p = std::fma(p, abs_a, -1.28717512e-1f); // -0x1.079d0cp-3
    p = std::fma(p, abs_a, -abs_a);
    // TODO, replace with expm1 when implemented
    p = 1.0f - std::exp(p);
    return std::copysign(p, a);
  }
  // Central branch: maximum error 0.98929 ulp.
  float p = -5.96761703e-4f; // -0x1.38e000p-11
  p = std::fma(p, sq, 4.99119423e-3f); // 0x1.471a58p-8
  p = std::fma(p, sq, -2.67681349e-2f); // -0x1.b691b2p-6
  p = std::fma(p, sq, 1.12819925e-1f); // 0x1.ce1c44p-4
  p = std::fma(p, sq, -3.76125336e-1f); // -0x1.812700p-2
  p = std::fma(p, sq, 1.28379166e-1f); // 0x1.06eba8p-3
  return std::fma(p, a, a);
}
|
|
||||||
|
|
||||||
// Single-precision polynomial approximation of the inverse error function.
// Evaluates a polynomial in w = log(1 - a*a), with the branch chosen by
// the magnitude of w; maximum ulp error is ~2.36 in either branch.
inline float fast_erfinv(float a) {
  auto w = std::fma(a, 0.0f - a, 1.0f); // 1 - a*a
  w = std::log(w);
  float poly;
  if (std::abs(w) > 6.125f) { // maximum ulp error = 2.35793
    poly = 3.03697567e-10f; // 0x1.4deb44p-32
    poly = std::fma(poly, w, 2.93243101e-8f); // 0x1.f7c9aep-26
    poly = std::fma(poly, w, 1.22150334e-6f); // 0x1.47e512p-20
    poly = std::fma(poly, w, 2.84108955e-5f); // 0x1.dca7dep-16
    poly = std::fma(poly, w, 3.93552968e-4f); // 0x1.9cab92p-12
    poly = std::fma(poly, w, 3.02698812e-3f); // 0x1.8cc0dep-9
    poly = std::fma(poly, w, 4.83185798e-3f); // 0x1.3ca920p-8
    poly = std::fma(poly, w, -2.64646143e-1f); // -0x1.0eff66p-2
    poly = std::fma(poly, w, 8.40016484e-1f); // 0x1.ae16a4p-1
  } else { // maximum ulp error = 2.35002
    poly = 5.43877832e-9f; // 0x1.75c000p-28
    poly = std::fma(poly, w, 1.43285448e-7f); // 0x1.33b402p-23
    poly = std::fma(poly, w, 1.22774793e-6f); // 0x1.499232p-20
    poly = std::fma(poly, w, 1.12963626e-7f); // 0x1.e52cd2p-24
    poly = std::fma(poly, w, -5.61530760e-5f); // -0x1.d70bd0p-15
    poly = std::fma(poly, w, -1.47697632e-4f); // -0x1.35be90p-13
    poly = std::fma(poly, w, 2.31468678e-3f); // 0x1.2f6400p-9
    poly = std::fma(poly, w, 1.15392581e-2f); // 0x1.7a1e50p-7
    poly = std::fma(poly, w, -2.32015476e-1f); // -0x1.db2aeep-3
    poly = std::fma(poly, w, 8.86226892e-1f); // 0x1.c5bf88p-1
  }
  return a * poly;
}
|
|
||||||
|
|
||||||
struct Abs {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::abs(x);
|
|
||||||
}
|
|
||||||
uint8_t operator()(uint8_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint16_t operator()(uint16_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint32_t operator()(uint32_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint64_t operator()(uint64_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
bool operator()(bool x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ArcCos {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::acos(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ArcCosh {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::acosh(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ArcSin {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::asin(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ArcSinh {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::asinh(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ArcTan {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::atan(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ArcTan2 {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T y, T x) {
|
|
||||||
return std::atan2(y, x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ArcTanh {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::atanh(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Ceil {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::ceil(x);
|
|
||||||
}
|
|
||||||
int8_t operator()(int8_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
int16_t operator()(int16_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
int32_t operator()(int32_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
int64_t operator()(int64_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint8_t operator()(uint8_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint16_t operator()(uint16_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint32_t operator()(uint32_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint64_t operator()(uint64_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
bool operator()(bool x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Conjugate {
|
|
||||||
complex64_t operator()(complex64_t x) {
|
|
||||||
return std::conj(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Cos {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::cos(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Cosh {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::cosh(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Erf {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return static_cast<T>(fast_erf(static_cast<float>(x)));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ErfInv {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return static_cast<T>(fast_erfinv(static_cast<float>(x)));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Exp {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return fast_exp(x);
|
|
||||||
}
|
|
||||||
|
|
||||||
complex64_t operator()(complex64_t x) {
|
|
||||||
return std::exp(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Expm1 {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return expm1(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Floor {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::floor(x);
|
|
||||||
}
|
|
||||||
int8_t operator()(int8_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
int16_t operator()(int16_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
int32_t operator()(int32_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
int64_t operator()(int64_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint8_t operator()(uint8_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint16_t operator()(uint16_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint32_t operator()(uint32_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
uint64_t operator()(uint64_t x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
bool operator()(bool x) {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Imag {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::imag(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Log {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::log(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Log2 {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::log2(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Log10 {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::log10(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Log1p {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return log1p(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct LogicalNot {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return !x;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Negative {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return -x;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Real {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::real(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Round {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::rint(x);
|
|
||||||
}
|
|
||||||
|
|
||||||
complex64_t operator()(complex64_t x) {
|
|
||||||
return {std::rint(x.real()), std::rint(x.imag())};
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Logistic function: 1 / (1 + exp(-x)), computed with the fast exp
// approximation defined in this file.
struct Sigmoid {
  template <typename T>
  T operator()(T x) {
    const T one = T(1.0);
    return one / (one + fast_exp(-x));
  }
};
|
|
||||||
|
|
||||||
struct Sign {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return (x > T(0)) - (x < T(0));
|
|
||||||
}
|
|
||||||
uint8_t operator()(uint8_t x) {
|
|
||||||
return x != 0;
|
|
||||||
}
|
|
||||||
uint16_t operator()(uint16_t x) {
|
|
||||||
return x != 0;
|
|
||||||
}
|
|
||||||
uint32_t operator()(uint32_t x) {
|
|
||||||
return x != 0;
|
|
||||||
}
|
|
||||||
uint64_t operator()(uint64_t x) {
|
|
||||||
return x != 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
complex64_t operator()(complex64_t x) {
|
|
||||||
return x == complex64_t(0) ? x : x / std::abs(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Sin {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::sin(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Sinh {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::sinh(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Square {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return x * x;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Sqrt {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::sqrt(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Rsqrt {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return static_cast<decltype(x)>(1.0) / std::sqrt(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Tan {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::tan(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Tanh {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x) {
|
|
||||||
return std::tanh(x);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Add {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x, T y) {
|
|
||||||
return x + y;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Elementwise division.
struct Divide {
  template <typename T>
  T operator()(T lhs, T rhs) {
    return lhs / rhs;
  }
};
|
|
||||||
|
|
||||||
struct Remainder {
|
|
||||||
template <typename T>
|
|
||||||
std::enable_if_t<std::is_integral_v<T> & !std::is_signed_v<T>, T> operator()(
|
|
||||||
T numerator,
|
|
||||||
T denominator) {
|
|
||||||
return numerator % denominator;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
std::enable_if_t<std::is_integral_v<T> & std::is_signed_v<T>, T> operator()(
|
|
||||||
T numerator,
|
|
||||||
T denominator) {
|
|
||||||
auto r = numerator % denominator;
|
|
||||||
if (r != 0 && (r < 0 != denominator < 0))
|
|
||||||
r += denominator;
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
std::enable_if_t<!std::is_integral_v<T>, T> operator()(
|
|
||||||
T numerator,
|
|
||||||
T denominator) {
|
|
||||||
auto r = std::fmod(numerator, denominator);
|
|
||||||
if (r != 0 && (r < 0 != denominator < 0)) {
|
|
||||||
r += denominator;
|
|
||||||
}
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
|
|
||||||
complex64_t operator()(complex64_t numerator, complex64_t denominator) {
|
|
||||||
return numerator % denominator;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Elementwise equality comparison.
struct Equal {
  template <typename T>
  bool operator()(T lhs, T rhs) {
    return lhs == rhs;
  }
};
|
|
||||||
|
|
||||||
// Equality that additionally treats two NaNs as equal.
struct NaNEqual {
  template <typename T>
  bool operator()(T a, T b) {
    if constexpr (std::is_integral_v<T>) {
      // Integers have no NaN; std::isnan on them also fails to compile
      // under MSVC, so short-circuit to plain equality.
      return a == b;
    } else {
      if (a == b) {
        return true;
      }
      return std::isnan(a) && std::isnan(b);
    }
  }
};
|
|
||||||
|
|
||||||
// Elementwise greater-than comparison.
struct Greater {
  template <typename T>
  bool operator()(T lhs, T rhs) {
    return lhs > rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise greater-than-or-equal comparison.
struct GreaterEqual {
  template <typename T>
  bool operator()(T lhs, T rhs) {
    return lhs >= rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise less-than comparison.
struct Less {
  template <typename T>
  bool operator()(T lhs, T rhs) {
    return lhs < rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise less-than-or-equal comparison.
struct LessEqual {
  template <typename T>
  bool operator()(T lhs, T rhs) {
    return lhs <= rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise maximum. The floating-point overload propagates a NaN
// first argument instead of silently comparing it.
struct Maximum {
  template <typename T>
  std::enable_if_t<std::is_integral_v<T>, T> operator()(T a, T b) {
    if (a > b) {
      return a;
    }
    return b;
  }

  template <typename T>
  std::enable_if_t<!std::is_integral_v<T>, T> operator()(T a, T b) {
    // NaN in the first operand wins: comparisons with NaN are false,
    // so without this guard the result would always be b.
    if (std::isnan(a)) {
      return a;
    }
    return a > b ? a : b;
  }
};
|
|
||||||
|
|
||||||
// Elementwise minimum. The floating-point overload propagates a NaN
// first argument instead of silently comparing it.
struct Minimum {
  template <typename T>
  std::enable_if_t<std::is_integral_v<T>, T> operator()(T a, T b) {
    if (a < b) {
      return a;
    }
    return b;
  }

  template <typename T>
  std::enable_if_t<!std::is_integral_v<T>, T> operator()(T a, T b) {
    // NaN in the first operand wins (comparisons with NaN are false).
    if (std::isnan(a)) {
      return a;
    }
    return a < b ? a : b;
  }
};
|
|
||||||
|
|
||||||
struct LogAddExp {
|
|
||||||
template <typename T>
|
|
||||||
T operator()(T x, T y) {
|
|
||||||
constexpr float inf = std::numeric_limits<float>::infinity();
|
|
||||||
auto maxval = Maximum()(x, y);
|
|
||||||
auto minval = Minimum()(x, y);
|
|
||||||
return (minval == -inf || maxval == inf)
|
|
||||||
? maxval
|
|
||||||
: static_cast<decltype(x)>(
|
|
||||||
maxval + std::log1p(fast_exp(minval - maxval)));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Elementwise multiplication.
struct Multiply {
  template <typename T>
  T operator()(T lhs, T rhs) {
    return lhs * rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise inequality comparison.
struct NotEqual {
  template <typename T>
  bool operator()(T lhs, T rhs) {
    return lhs != rhs;
  }
};
|
|
||||||
|
|
||||||
struct Power {
  // Floating-point / complex power via std::pow.
  template <typename T>
  std::enable_if_t<!std::is_integral_v<T>, T> operator()(T base, T exp) {
    return std::pow(base, exp);
  }

  // Integral power by binary exponentiation (repeated squaring).
  template <typename T>
  std::enable_if_t<std::is_integral_v<T>, T> operator()(T base, T exp) {
    if constexpr (std::is_signed_v<T>) {
      // Guard negative exponents: the squaring loop below never
      // terminates for them (exp >>= 1 preserves the sign bit, e.g.
      // -1 >> 1 == -1) while base *= base overflows. Define the result
      // by truncation toward zero: |base| > 1 gives 0, |base| == 1
      // stays exact, base == 0 falls through to 0.
      if (exp < 0) {
        if (base == 1) {
          return 1;
        }
        if (base == -1) {
          return (exp & 1) ? static_cast<T>(-1) : static_cast<T>(1);
        }
        return 0;
      }
    }
    T res = 1;
    while (exp) {
      if (exp & 1) {
        res *= base;
      }
      exp >>= 1;
      base *= base;
    }
    return res;
  }
};
|
|
||||||
|
|
||||||
// Elementwise subtraction.
struct Subtract {
  template <typename T>
  T operator()(T lhs, T rhs) {
    return lhs - rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise logical AND (short-circuiting on the second operand).
struct LogicalAnd {
  template <typename T>
  T operator()(T lhs, T rhs) {
    return lhs && rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise logical OR (short-circuiting on the second operand).
struct LogicalOr {
  template <typename T>
  T operator()(T lhs, T rhs) {
    return lhs || rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise conditional: picks on_true when condition holds,
// on_false otherwise.
struct Select {
  template <typename T>
  T operator()(bool condition, T on_true, T on_false) {
    if (condition) {
      return on_true;
    }
    return on_false;
  }
};
|
|
||||||
|
|
||||||
// Elementwise bitwise AND.
struct BitwiseAnd {
  template <typename T>
  T operator()(T lhs, T rhs) {
    return lhs & rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise bitwise OR.
struct BitwiseOr {
  template <typename T>
  T operator()(T lhs, T rhs) {
    return lhs | rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise bitwise XOR.
struct BitwiseXor {
  template <typename T>
  T operator()(T lhs, T rhs) {
    return lhs ^ rhs;
  }
};
|
|
||||||
|
|
||||||
// Elementwise left bit-shift.
struct LeftShift {
  template <typename T>
  T operator()(T value, T amount) {
    return value << amount;
  }
};
|
|
||||||
|
|
||||||
// Elementwise right bit-shift.
struct RightShift {
  template <typename T>
  T operator()(T value, T amount) {
    return value >> amount;
  }
};
|
|
||||||
|
|
||||||
} // namespace mlx::core::detail
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user