Mirror of https://github.com/ml-explore/mlx-examples.git (synced 2025-10-30 10:08:08 +08:00)

Commit Graph
Branches: awq, awq-tq, dist-eval, distributed-layers, flux-dist-improv, flux-qlora, load-gguf, main, openlm, packed-quants
- c68aa3c7c3 | Stable lm 2 (#666) | Awni Hannun | 2024-04-08 14:18:55 -07:00
- 1e2f7f50b6 | fix for empty initial string (#665) | Awni Hannun | 2024-04-08 10:40:05 -07:00
- c386dd5f5a | Fix for cohere plus (#650) | Awni Hannun | 2024-04-05 14:11:24 -07:00
- 2bd64b78cf | Save lora config (#636) | Awni Hannun | 2024-04-02 13:52:53 -07:00
- d661440dbb | Add support for qwen2moe (#640) | Prince Canuma | 2024-04-02 20:33:29 +02:00
- 78c431dc25 | cleanup whisper a little (#639) | Awni Hannun | 2024-03-30 13:13:58 -07:00
- f6283ef7ce | Configurable LR schedulers (#604) | Chime Ogbuji | 2024-03-29 16:41:10 -04:00
- b80adbcc3e | DBRX (#628) | Awni Hannun | 2024-03-28 21:03:53 -07:00
- 297a908e3d | fix(mlx-lm): type hints in gguf.py (#621) | Anchen | 2024-03-27 01:56:01 +11:00
- 0ab01b4626 | fix(mlx-lm): sorted probs in top_p implementation. (#610) | Anchen | 2024-03-26 09:07:55 +11:00
- bbfcc103d7 | cast around lora adapters (#613) | Awni Hannun | 2024-03-24 19:34:51 -07:00
- 5a52899405 | Partially stream de-tokenization (#609) | Awni Hannun | 2024-03-23 15:32:33 -07:00
- 494cdf8e96 | chore: fix loar for moe model (#608) | Anchen | 2024-03-24 01:22:11 +11:00
- b8a348c1b8 | Switch to fast RMS/LN Norm (#603) | Awni Hannun | 2024-03-23 07:13:51 -07:00
- fbed720d6f | chore(mlx-lm): fix the top_p implementation. (#602) | Anchen | 2024-03-22 06:18:23 +11:00
- fe96ef342f | feat(mlx-lm): export the GGUF (fp16) format model weights from fuse.py (#555) | Anchen | 2024-03-22 04:34:11 +11:00
- 8f906c859a | chore(mlx-lm): enable to apply default chat template (#577) | Anchen | 2024-03-21 15:39:39 +11:00
- d2a99172a6 | Add dropout parameter to lora configuration (#599) | Ivan Fioravanti | 2024-03-20 16:44:40 +01:00
- 949f63f309 | chore(mlx-lm): fix print_trainable_parameters for quant models (#581) | Anchen | 2024-03-21 02:41:03 +11:00
- 373dd6f2a2 | Set finish_reason in response (#592) | Matt Wronkiewicz | 2024-03-19 20:21:26 -07:00
- 6c3d4c8ba2 | add dequantize option to mlx_lm/convert.py (#547) | Alwin Arrasyid | 2024-03-20 09:50:08 +07:00
- 6f2fd5daea | Add mlx-lm version information to HF model card (#596) | Chime Ogbuji | 2024-03-19 22:42:03 -04:00
- 39d5ca6427 | LoRA: report last train info (#595) | madroid | 2024-03-20 08:29:50 +08:00
- 4680ef4413 | Enable more BERT models (#580) | yzimmermann | 2024-03-20 01:21:33 +01:00
- b0bcd86a40 | Support for OpenAI’s fine-tuning dataset format (#548) | madroid | 2024-03-20 07:45:46 +08:00
- e05e502c34 | Fix scaling when embeddings are tied (#591) | Abdul Fatir | 2024-03-18 21:41:07 +01:00
- e4b19bb9e1 | Make attention faster for a some models (#574) | Awni Hannun | 2024-03-14 21:35:54 -07:00
- 3f3741d229 | Fix requirements and image2image strength/steps mismatch (#585) | Angelos Katharopoulos | 2024-03-14 12:22:54 -07:00
- e2205beb66 | Update server.py to add --trust-remote-code to server (#578) | sweetcard | 2024-03-14 22:05:19 +08:00
- 2cd793dd69 | feat: add update_config functionality (#531) | Sugato Ray | 2024-03-14 09:36:05 -04:00
- 485180ae91 | LoRA: some minor optimizations (#573) | madroid | 2024-03-14 11:26:30 +08:00
- d4e1de1d5b | add peak_memory info to training callback (#572) | madroid | 2024-03-14 11:17:10 +08:00
- 376bb9cc44 | bert encoder inherits from nn.Module now (#571) | Race | 2024-03-13 10:24:21 -07:00
- 14fe868825 | version (#570) | Awni Hannun | 2024-03-13 10:09:36 -07:00
- 76c3244cc5 | Add support for Cohere's Command-R (#565) | Prince Canuma | 2024-03-13 15:03:36 +01:00
- 3535408c99 | chore(mlx-lm): fix tie_word_embeddings for qwen2 (#566) | Anchen | 2024-03-13 15:34:32 +11:00
- 39084e81c2 | Some improvements to LoRA (#528) | Awni Hannun | 2024-03-12 20:02:03 -07:00
- e56d9015ef | LoRA on all linear transformer block layers (#546) | Chime Ogbuji | 2024-03-12 10:37:40 -04:00
- fe5edee360 | Fix image2image for SDXL (#563) | devonthomas35 | 2024-03-11 12:18:47 -07:00
- d0fa6cfcae | feat: stable-diffusion t2i add --seed (#558) | zweifisch | 2024-03-10 21:12:54 +08:00
- ad3cf5ed98 | dropout 0 as default (#549) | Awni Hannun | 2024-03-08 13:07:10 -08:00
- 3a9e6c3f70 | Stable diffusion XL (#516) | Angelos Katharopoulos | 2024-03-08 10:24:19 -08:00
- 8c2cf665ed | YAML configuration for mlx_lm.lora (#503) | Chime Ogbuji | 2024-03-08 10:57:52 -05:00
- 8b05bb6d18 | [mlx-lm] Use sdpa in llama / mistral model (#515) | Awni Hannun | 2024-03-07 17:41:23 -08:00
- 7cdd1b69ac | Enable unit testing in Circle and start some MLX LM tests (#545) | Awni Hannun | 2024-03-07 09:31:57 -08:00
- ef32379bc6 | Update README.md (#530) | amcox886 | 2024-03-07 14:23:43 +00:00
- 8a178f8716 | chore: enable tie_word_embeddings config for qwen2 (#544) | Anchen | 2024-03-08 01:11:35 +11:00
- b8e5eda4fd | Refactoring of mlx_lm example (#501) | Y4hL | 2024-03-06 16:24:31 +02:00
- 710c552731 | add huggingface repo url print (#534) | Madroid Ma | 2024-03-06 13:51:31 +08:00
- 5de7c2ac33 | Add tips on porting LLMs from HuggingFace (#523) | Muhtasham Oblokulov | 2024-03-06 02:43:15 +01:00
- 3fdf85e79d | Starcoder2: Update config and change GQA to use repeat (#520) | Prince Canuma | 2024-03-03 15:12:03 +01:00
- 1e3daea3bb | chore(mlx-lm): add missing model_type for starcoder2 (#522) | Anchen | 2024-03-04 01:07:45 +11:00
- 3655bfc3bd | chore(mlx-lm): fix broken server.py script (#519) | Anchen | 2024-03-04 01:04:54 +11:00
- 81e2a80026 | Add Starcoder 2 (#502) | Muhtasham Oblokulov | 2024-03-03 04:39:23 +01:00
- 5b1043a458 | llms: convert() add 'revision' argument (#506) | Miller Liang | 2024-03-02 22:28:26 +08:00
- a429263905 | LlaVA in MLX (#461) | Noah Kasmanoff | 2024-03-01 13:28:35 -05:00
- 261f1280f6 | Update to StableLM code (#514) | Ashish | 2024-03-01 10:53:38 -07:00
- 3acc1ec84e | fix: string indentation with textwrap.dedent (#510) | Sugato Ray | 2024-03-01 01:23:01 -05:00
- f03c8a7b44 | LoRA: adapter file Support path information (#505) | Madroid Ma | 2024-03-01 14:20:49 +08:00
- ae48563378 | Remove gc (#509) | Awni Hannun | 2024-02-29 09:40:04 -08:00
- 13794a05da | chore(mlx-lm): add adapter support in generate.py (#494) | Anchen | 2024-02-29 02:49:25 +11:00
- ab0f1dd1b6 | Add metadata when saving safetensors (#496) | Alex Ishida | 2024-02-29 00:29:00 +09:00
- ea92f623d6 | Prevent llms/mlx_lm from serving the local directory as a webserver (#498) | Y4hL | 2024-02-28 05:40:42 +02:00
- 676e574eff | Add missing import (#497) | Y4hL | 2024-02-27 23:27:08 +02:00
- 95f82e67a2 | Fix import warning (#479) | Awni Hannun | 2024-02-27 08:47:56 -08:00
- 82f3f31d93 | chore(mlx-lm): refactor server.py to utilize generate_step from utils for consistency (#491) | Anchen | 2024-02-28 01:25:24 +11:00
- 19a21bfce4 | chore: add /v1/completions for server (#489) | Anchen | 2024-02-27 15:59:33 +11:00
- e5dfef5d9a | LoRA: Extract the run function for easy use in scripts file (#482) | Madroid Ma | 2024-02-27 11:35:04 +08:00
- ccb278bcbd | Add top-p sampling for text generation (#486) | peterjc123 | 2024-02-26 22:18:11 +08:00
- 47dd6bd17f | chore(clip): update the clip example to make it compatible with HF format (#472) | Anchen | 2024-02-24 01:49:53 +11:00
- f24edfa9dc | [mlx-lm] Add precompiled normalizations (#451) | Awni Hannun | 2024-02-22 12:40:55 -08:00
- 97c09a863d | bump version and include in package (#475) | Awni Hannun | 2024-02-21 09:40:36 -08:00
- ab9172baac | Gemma support (#474) | Awni Hannun | 2024-02-21 08:47:13 -08:00
- cfeef6d9d2 | Typo: SGD->AdamW (#471) | Kosti | 2024-02-20 23:47:17 +00:00
- 838990b33b | fix: remove custom rope (#470) | Juan B. Rodriguez | 2024-02-20 16:46:16 -05:00
- dc4f2e0a6b | Lazy loading models for faster convert and merge (#462) | Angelos Katharopoulos | 2024-02-20 13:36:55 -08:00
- 8eee4399f4 | LoRA: Add printing and callbacks for learning rate during training (#457) | Madroid Ma | 2024-02-21 05:07:21 +08:00
- 15ecf692b9 | Bug fix in lora.py (#468) | Sergey Shumov | 2024-02-20 12:53:30 -08:00
- 20b39c7fee | update protobuf (#467) | Awni Hannun | 2024-02-20 11:46:36 -08:00
- 8fd953ee2b | Support for slerp merging models (#455) | Awni Hannun | 2024-02-19 20:37:15 -08:00
- 8c9148a8fd | Make it easier to know in which file we have bad JSON data (#458) | Ovid | 2024-02-20 05:11:45 +01:00
- 88458c4e40 | feat(mlx-lm): add openAI like api server (#429) | Anchen | 2024-02-19 09:01:28 +11:00
- cc671cd1c7 | Mixtral: Fix non-default arg follows default exception (#450) | devonthomas35 | 2024-02-18 13:30:26 -08:00
- b05907c87e | Change argument name in lora.py (#453) | Ivan Fioravanti | 2024-02-18 15:04:49 +01:00
- e4d5630698 | Basic CircleCI (#449) | Awni Hannun | 2024-02-16 22:13:55 -08:00
- 21e19b5b5a | Add Repetitive penalty to LLM inference - mlx-lm (#399) | vishal-14069 | 2024-02-17 00:58:17 -05:00
- 0ba466369f | LoRA: add training callbacks (#414) | Madroid Ma | 2024-02-16 22:04:57 +08:00
- 726b1ddec0 | fix: check LoRA layers number error (#446) | Madroid Ma | 2024-02-16 22:03:33 +08:00
- 837a02092d | Update black version to 24.2.0 (#445) | Nripesh Niketan | 2024-02-16 18:02:52 +04:00
- f71e965d57 | Change gqa to use repeat instead of concatenate (#443) | Angelos Katharopoulos | 2024-02-14 17:40:11 -08:00
- 06ddb8414d | Fix Qwen2 and SD (#441) | Awni Hannun | 2024-02-14 13:43:12 -08:00
- e446598f62 | Passing parameterized loss and batching to trainer (#391) | Chime Ogbuji | 2024-02-13 10:03:25 -05:00
- 954aa50c54 | LoRA: Improve validation error for LoRA layer count exceeding model layer (#427) | Madroid Ma | 2024-02-13 22:56:27 +08:00
- d4666615bb | Lazy import + refactor Lora layer addition (#426) | Awni Hannun | 2024-02-12 10:51:02 -08:00
- 4576946151 | Add checkpoints directory for adapter weights (#431) | Ivan Fioravanti | 2024-02-12 19:50:05 +01:00
- 70465b8cda | clean up loss function extraction (#433) | Lee Harrold | 2024-02-12 08:46:00 -05:00
- f1ef378a58 | Feat: update pre-commit rev (#432) | Nripesh Niketan | 2024-02-11 19:23:27 +04:00
- f45a1ab83c | Update a few examples to use compile (#420) | Awni Hannun | 2024-02-08 13:00:41 -08:00
- da7adae5ec | fix(mlx-m): lazy load hf_olmo (#424) | Anchen | 2024-02-09 04:02:43 +11:00
- 9b387007ab | Example of a Convolutional Variational Autoencoder (CVAE) on MNIST (#264) | Markus Enzweiler | 2024-02-07 05:02:27 +01:00