mirror of https://github.com/vladmandic/automatic
Compare commits
733 Commits
2026-01-22
...
master
| Author | SHA1 | Date |
|---|---|---|
|
|
0eb4a98e07 | |
|
|
155dabc840 | |
|
|
2fcabc8047 | |
|
|
d98d05ca2d | |
|
|
27a62cfa70 | |
|
|
d97191f342 | |
|
|
fbf1a962f2 | |
|
|
d7904b239f | |
|
|
90b5e7de30 | |
|
|
08c28ab257 | |
|
|
0c94a169ea | |
|
|
32b69bdd3d | |
|
|
b2e071dc52 | |
|
|
470a0d816e | |
|
|
ffeda702c5 | |
|
|
bfd9a0c0f5 | |
|
|
25af3242c3 | |
|
|
dc6f20ec8f | |
|
|
a809b616e6 | |
|
|
88bde026f7 | |
|
|
e49d6262e9 | |
|
|
ac9aacac66 | |
|
|
2177609e54 | |
|
|
ee3b141297 | |
|
|
24f4490a59 | |
|
|
d2a47ee0ed | |
|
|
01d53edb25 | |
|
|
4cafae9350 | |
|
|
b659a06c60 | |
|
|
9d0ecde462 | |
|
|
fdc2f46457 | |
|
|
1ed2811c80 | |
|
|
f5c037a735 | |
|
|
668a94141d | |
|
|
999cbe5d3a | |
|
|
5d83552630 | |
|
|
fa97cf6232 | |
|
|
d3880632ed | |
|
|
7a51017ac3 | |
|
|
1310264d43 | |
|
|
75cc035354 | |
|
|
61c10d6591 | |
|
|
3dd09fde08 | |
|
|
ec1341348b | |
|
|
ca2d49497b | |
|
|
a370bfc987 | |
|
|
849ab8fe1e | |
|
|
61e7663eb3 | |
|
|
95dadab5c3 | |
|
|
59b9ca50ee | |
|
|
eeb9b6291b | |
|
|
8d6ec348b2 | |
|
|
a1b03a383c | |
|
|
ba362ad3ca | |
|
|
7f07d4cb31 | |
|
|
715b1b0699 | |
|
|
b2b6fdf9d5 | |
|
|
8ef8074467 | |
|
|
06fe4c7f20 | |
|
|
005eb789a3 | |
|
|
b44493d171 | |
|
|
133bf81ad0 | |
|
|
ba8f7b06b2 | |
|
|
b86e357b8e | |
|
|
611dfe4301 | |
|
|
274aeef65e | |
|
|
fa1bc15e64 | |
|
|
1e9bef8d56 | |
|
|
fe7e4b40ff | |
|
|
6eadf40f33 | |
|
|
64400a0cde | |
|
|
e0f6766b8a | |
|
|
cef81ba7cf | |
|
|
716bbd759b | |
|
|
20dd097235 | |
|
|
b5e9fbc7b3 | |
|
|
db6b724dfa | |
|
|
1c5edb913c | |
|
|
58f9902ffc | |
|
|
107e9f78a7 | |
|
|
4b6b577d14 | |
|
|
d812726570 | |
|
|
775ff59fdb | |
|
|
a6029afb04 | |
|
|
31e1461a65 | |
|
|
9003e67ec4 | |
|
|
27205c295e | |
|
|
4a297d70f2 | |
|
|
397631fb4c | |
|
|
48e8b3a513 | |
|
|
51ec59d06b | |
|
|
1ac3359e20 | |
|
|
bec694a7d2 | |
|
|
c3cce0ad76 | |
|
|
b3837aa944 | |
|
|
2675e68813 | |
|
|
a51755dcf4 | |
|
|
375591ecdd | |
|
|
8b6de3c215 | |
|
|
a255e5bdcf | |
|
|
c37e788c0b | |
|
|
12af43e4de | |
|
|
688485018e | |
|
|
d16676518c | |
|
|
0b74796d94 | |
|
|
b90558430e | |
|
|
7a47b6bbdb | |
|
|
400d284711 | |
|
|
18568db41c | |
|
|
10942032a3 | |
|
|
11f3921832 | |
|
|
81cc9383b0 | |
|
|
78dbe18150 | |
|
|
66d4d6ca3a | |
|
|
ff247d8fd2 | |
|
|
d429e11d55 | |
|
|
62e629a3ac | |
|
|
53839e464c | |
|
|
acf475ee45 | |
|
|
14c3cf9172 | |
|
|
fd1646cc37 | |
|
|
a6f6a37dea | |
|
|
876e3b9897 | |
|
|
1003926646 | |
|
|
09ab19c438 | |
|
|
c4ebef29a9 | |
|
|
3f830589d1 | |
|
|
f0bb0a921a | |
|
|
62d2229520 | |
|
|
4f0fb7cc29 | |
|
|
fb853af4b0 | |
|
|
670ca3a644 | |
|
|
cff7f8d4a9 | |
|
|
903869b337 | |
|
|
641321d7d2 | |
|
|
92960de8d6 | |
|
|
6ab9b7bc62 | |
|
|
4f8260e670 | |
|
|
2d81bcdc69 | |
|
|
abe25e7b07 | |
|
|
d3f925e8e5 | |
|
|
481d974b91 | |
|
|
b75a1f971f | |
|
|
a5977f09f5 | |
|
|
53598d40ab | |
|
|
3e228afa78 | |
|
|
de86927c1b | |
|
|
d474f28cb3 | |
|
|
bc22e0f0dc | |
|
|
dbd2293586 | |
|
|
f5fbc06fe4 | |
|
|
c1e67a9c71 | |
|
|
047e66f610 | |
|
|
eb4a1ec781 | |
|
|
0ee33ea60e | |
|
|
312d909739 | |
|
|
9033095be6 | |
|
|
73b854e4cf | |
|
|
41fddce2e9 | |
|
|
09b9ae32c1 | |
|
|
3261e2eeae | |
|
|
57990fd8c0 | |
|
|
a6cae50a67 | |
|
|
9719290ceb | |
|
|
091f31d4bf | |
|
|
0d248c45e7 | |
|
|
deb45c49b3 | |
|
|
e87fcd1bc5 | |
|
|
b2b4e13bab | |
|
|
e572364766 | |
|
|
afe3786f5f | |
|
|
9a5e908ddf | |
|
|
a54e9b3311 | |
|
|
8e10ec3fec | |
|
|
783f20b5d7 | |
|
|
5e4819c3e9 | |
|
|
79bf0f8c1d | |
|
|
93947d3901 | |
|
|
ab7c780c9f | |
|
|
8311add607 | |
|
|
f86b0e69ef | |
|
|
295fe6fbd3 | |
|
|
7588ec0636 | |
|
|
c10472f37a | |
|
|
24c4f2599e | |
|
|
f83e9e7943 | |
|
|
4b998e3866 | |
|
|
760b0121bc | |
|
|
d679fbb8fb | |
|
|
6a282d904b | |
|
|
813b61eda8 | |
|
|
0ec1e9bca2 | |
|
|
9cf2ab08a0 | |
|
|
e7f2b63814 | |
|
|
0be7678ea4 | |
|
|
c220e2858c | |
|
|
202862fb64 | |
|
|
91d21eefb6 | |
|
|
ef0573a409 | |
|
|
87659823fc | |
|
|
d34896028d | |
|
|
97a16e5eb9 | |
|
|
6e3c187b3f | |
|
|
615c74fd05 | |
|
|
867960f4ef | |
|
|
4f40a9c8dd | |
|
|
5a5e3cc381 | |
|
|
e4cb96f6a4 | |
|
|
423c3ef8b7 | |
|
|
ee2fc75619 | |
|
|
cb943919eb | |
|
|
fb2f9ea650 | |
|
|
c187aa706a | |
|
|
59a07a33cd | |
|
|
39ce3d7ea3 | |
|
|
84edba5d94 | |
|
|
4e1c1a6844 | |
|
|
d588ced2ef | |
|
|
ea1abfe2ce | |
|
|
4c256976df | |
|
|
1acc327192 | |
|
|
03e686c988 | |
|
|
6427b799e9 | |
|
|
742eeb8691 | |
|
|
de7959397c | |
|
|
6a9b9297c1 | |
|
|
ee9e93e62f | |
|
|
3ac68a2401 | |
|
|
936216e20d | |
|
|
12fd4d30f1 | |
|
|
7a409a9e83 | |
|
|
bbdaaae682 | |
|
|
9013ba7bf1 | |
|
|
2fb9e2115e | |
|
|
729eca460e | |
|
|
c86674047a | |
|
|
d432945a29 | |
|
|
9eced4ff86 | |
|
|
edfbae0783 | |
|
|
e0faa149dd | |
|
|
35803746df | |
|
|
cb224651a0 | |
|
|
7a014f0612 | |
|
|
cb76c164e7 | |
|
|
4effd92663 | |
|
|
928448b654 | |
|
|
3addc16a5a | |
|
|
7af0d6ea07 | |
|
|
cbefbcf772 | |
|
|
a72e3c4f29 | |
|
|
f4ec0bab4a | |
|
|
6821d92a92 | |
|
|
242325c11d | |
|
|
f9abe07035 | |
|
|
e8ba35e7e9 | |
|
|
d4e87c6ff3 | |
|
|
1b834f8ad6 | |
|
|
09e41b65d3 | |
|
|
756b4599be | |
|
|
278c2a1b6b | |
|
|
4bd6e42c62 | |
|
|
af1531af5a | |
|
|
f89afe0add | |
|
|
121c51289b | |
|
|
14ec021eae | |
|
|
f6ed1a92a4 | |
|
|
f91fd63a60 | |
|
|
ee6cadfa9c | |
|
|
1b699557b8 | |
|
|
2485753c6b | |
|
|
0838b938e1 | |
|
|
f484f80293 | |
|
|
51a10320c7 | |
|
|
9a7f1e7978 | |
|
|
041029d184 | |
|
|
686ac0375d | |
|
|
09c82064e1 | |
|
|
f43b459e9b | |
|
|
0d27d9ea09 | |
|
|
897925da2a | |
|
|
f1a4faadb8 | |
|
|
7a225b4766 | |
|
|
0b83da416e | |
|
|
eaf735f0ed | |
|
|
9013f2bbe1 | |
|
|
2b2a044ff9 | |
|
|
56d0307f13 | |
|
|
80ab63db2c | |
|
|
fde3554341 | |
|
|
2ef12ddffb | |
|
|
860e9864d3 | |
|
|
886341b1cc | |
|
|
6f13f6131e | |
|
|
7c87bb2d1e | |
|
|
939eea1cd9 | |
|
|
b526d98cb0 | |
|
|
d3798cc6a4 | |
|
|
24e3416e02 | |
|
|
5451bbeec4 | |
|
|
565beebd90 | |
|
|
a3668e9f0c | |
|
|
72895b5192 | |
|
|
0e0b607cfa | |
|
|
49e2c6f53a | |
|
|
6436b833f5 | |
|
|
d5a22d3951 | |
|
|
954d079472 | |
|
|
643256f21d | |
|
|
5b85a6d10b | |
|
|
0bdbf300ac | |
|
|
de5b9f27f2 | |
|
|
3bea120c08 | |
|
|
a42d2d41e1 | |
|
|
1890515a08 | |
|
|
3586ae90bc | |
|
|
a23abb356e | |
|
|
cbc1b7c994 | |
|
|
07954ee849 | |
|
|
a93358b3ac | |
|
|
ee1b76b1c9 | |
|
|
ac5ae390d8 | |
|
|
62dcf352eb | |
|
|
b388f7c582 | |
|
|
a2624bebdc | |
|
|
86fba0b544 | |
|
|
eb94fa3f38 | |
|
|
adff14c257 | |
|
|
195a9568ac | |
|
|
d9207aa2e1 | |
|
|
64e193b60b | |
|
|
b2f95b91bf | |
|
|
f35e644d44 | |
|
|
e202d43801 | |
|
|
30d21f420f | |
|
|
b02a63c60c | |
|
|
76b184a9fe | |
|
|
d9e628574a | |
|
|
e5f9707e48 | |
|
|
afc517ad87 | |
|
|
3a8ef75068 | |
|
|
b27c48a3a6 | |
|
|
9fbe70195f | |
|
|
20e8b8ab18 | |
|
|
9fa6304cbc | |
|
|
1ff587a191 | |
|
|
a5fd4ee15b | |
|
|
eacd09e09f | |
|
|
61b98de9e2 | |
|
|
609c2bed44 | |
|
|
acab240e3b | |
|
|
952fdedd73 | |
|
|
020118a89f | |
|
|
7657c5af29 | |
|
|
c68ab0f75e | |
|
|
2f30e466e1 | |
|
|
c2087de473 | |
|
|
1b4a45e2a6 | |
|
|
feffbe0851 | |
|
|
af8622240c | |
|
|
61415ca305 | |
|
|
3f8812a76f | |
|
|
7c09e3dfe1 | |
|
|
b2f29911d2 | |
|
|
87cbfc7d39 | |
|
|
045df139c0 | |
|
|
7045c26f1b | |
|
|
3bc75d3832 | |
|
|
f879c2d09a | |
|
|
eb9d9f6bb0 | |
|
|
0d9f6b98c8 | |
|
|
9df9ed1b05 | |
|
|
540f34e41f | |
|
|
d54bdefc2a | |
|
|
04757adc78 | |
|
|
1ddd0bf33a | |
|
|
5ff73b61a4 | |
|
|
95a8c1125b | |
|
|
ff4b5c33dc | |
|
|
9ad68c2ff4 | |
|
|
6c5f7c298a | |
|
|
2da67ce7d3 | |
|
|
77e639fe3c | |
|
|
c9918ed59b | |
|
|
403e9c2241 | |
|
|
afff46f2ac | |
|
|
543b6802f2 | |
|
|
37be4239dc | |
|
|
9d3e7e7ed1 | |
|
|
5e572badf7 | |
|
|
d869c700b1 | |
|
|
6790690ff2 | |
|
|
a485347896 | |
|
|
b111e7d782 | |
|
|
554c8fbf2f | |
|
|
0160f6b3ef | |
|
|
78efbc7e85 | |
|
|
2d9c3275f1 | |
|
|
8ea9d428d5 | |
|
|
a1d46b3ecd | |
|
|
a96b5234d7 | |
|
|
d79d4edddb | |
|
|
87e4505acd | |
|
|
c25bc9b4d8 | |
|
|
7ee8c85c46 | |
|
|
9a63ec758a | |
|
|
0c9c86c3f9 | |
|
|
58fe1f59ff | |
|
|
316b940b6b | |
|
|
fce682f1d6 | |
|
|
9de84792b4 | |
|
|
bd39638df1 | |
|
|
47543663f9 | |
|
|
6a72aa8083 | |
|
|
7db3eac944 | |
|
|
cc8739e2fe | |
|
|
49f5a89961 | |
|
|
f34df6faac | |
|
|
494e4a7a7b | |
|
|
4d10449ae1 | |
|
|
403441feea | |
|
|
f78f0eb4a5 | |
|
|
216558185b | |
|
|
76a20cf43d | |
|
|
4006b3e093 | |
|
|
d65a2d1ebc | |
|
|
e5c494f999 | |
|
|
22aa9eab01 | |
|
|
2d28e7438c | |
|
|
a3074baf8b | |
|
|
bfe014f5da | |
|
|
7aded79e8a | |
|
|
e9eadd06db | |
|
|
6fdd3a53cf | |
|
|
d6bbfe3dc2 | |
|
|
ab44e59bd1 | |
|
|
859072c683 | |
|
|
84c95442ee | |
|
|
e307ef5e1b | |
|
|
71afd3fb6f | |
|
|
0d0996878d | |
|
|
bcd2e76597 | |
|
|
3dde41faa5 | |
|
|
73b90c5228 | |
|
|
6876d2b84d | |
|
|
dd776936d6 | |
|
|
254198360f | |
|
|
792d2a4d99 | |
|
|
43f134d9f9 | |
|
|
2b460dd624 | |
|
|
bce97efbf8 | |
|
|
82e91439bf | |
|
|
b1679152a0 | |
|
|
31ec4cb6e0 | |
|
|
e683884d5f | |
|
|
9f9d67713d | |
|
|
6971f3438c | |
|
|
63c5e493be | |
|
|
c25c35ebb3 | |
|
|
ae8a6257c4 | |
|
|
ccd3e2e489 | |
|
|
88db926ecd | |
|
|
3ee816888e | |
|
|
cf5e1e0df2 | |
|
|
da1cf2f996 | |
|
|
0ed64ec195 | |
|
|
3a4efcc444 | |
|
|
1b4f94660f | |
|
|
41f206dec9 | |
|
|
78c58e0d70 | |
|
|
8563e2a853 | |
|
|
b4e5b563c6 | |
|
|
73a5d55022 | |
|
|
8561da6f8c | |
|
|
967974ade7 | |
|
|
3ae9909b2a | |
|
|
dc8ecb0a64 | |
|
|
162651cbdb | |
|
|
76aa949a26 | |
|
|
2c4d0751d9 | |
|
|
80014fac7c | |
|
|
139e331d80 | |
|
|
8d67debdfd | |
|
|
6c20e49897 | |
|
|
e2cdbe47fa | |
|
|
57659ab642 | |
|
|
17b03ed8e4 | |
|
|
443a73b740 | |
|
|
bf7a72f12e | |
|
|
fba942b25e | |
|
|
0c45e58e80 | |
|
|
588222f2d1 | |
|
|
d78c5c1cd0 | |
|
|
f4b5abde68 | |
|
|
61b031ada5 | |
|
|
5183ebec58 | |
|
|
83fa8e39ba | |
|
|
7825f44581 | |
|
|
0559651b1b | |
|
|
3208067259 | |
|
|
a04ba1e482 | |
|
|
5fc46c042e | |
|
|
f431141d2f | |
|
|
f3c4fae440 | |
|
|
ef797169a3 | |
|
|
ec7934799e | |
|
|
6b89cc8463 | |
|
|
385532154f | |
|
|
bc66d8aff8 | |
|
|
fe0fba0884 | |
|
|
7eb9b1cc5c | |
|
|
d602a093fb | |
|
|
bd61633e14 | |
|
|
684d77d871 | |
|
|
e907a0a573 | |
|
|
363cb175aa | |
|
|
42d8ad498e | |
|
|
4e7b5c0b70 | |
|
|
e3ca883cbd | |
|
|
0d2e9fbf62 | |
|
|
d0f9e25906 | |
|
|
480b58e994 | |
|
|
b454fa9748 | |
|
|
175771666b | |
|
|
54c18d3f32 | |
|
|
4162fd84db | |
|
|
d426502d44 | |
|
|
93f8f66305 | |
|
|
3d0edb4f8f | |
|
|
356505cab2 | |
|
|
8d764951cf | |
|
|
d01f45519f | |
|
|
1e23495039 | |
|
|
34a18c9098 | |
|
|
5000401647 | |
|
|
33de04a0c7 | |
|
|
c8597ca84e | |
|
|
a2ee885e28 | |
|
|
8ff7074da5 | |
|
|
935a4fcb03 | |
|
|
3ba1cfb540 | |
|
|
89dd406a1d | |
|
|
bf1e763156 | |
|
|
c9e21a51db | |
|
|
3aac1f6510 | |
|
|
d8362182bc | |
|
|
bf36047ca6 | |
|
|
38e52b8570 | |
|
|
a0f9447d04 | |
|
|
e8ff09a2d2 | |
|
|
9ebd05dc7b | |
|
|
04d7dbc1b8 | |
|
|
56d0aade56 | |
|
|
e199f2d351 | |
|
|
c288f0de14 | |
|
|
8e4aa8dbe1 | |
|
|
1e668fe86d | |
|
|
5e2ab3057f | |
|
|
6eb1d0a2bf | |
|
|
ef75acffb4 | |
|
|
4db4ff00ca | |
|
|
54fac3090c | |
|
|
ade7c2e5f9 | |
|
|
f84cb6ac64 | |
|
|
171ead0fa2 | |
|
|
0c0911eddc | |
|
|
1696d0cd29 | |
|
|
8bfa612902 | |
|
|
d7da93e814 | |
|
|
6d30df7ed6 | |
|
|
137504de36 | |
|
|
f4bb78b51c | |
|
|
e7d382dc29 | |
|
|
6520784487 | |
|
|
339dff59f8 | |
|
|
40cbd33cbd | |
|
|
059cbaf470 | |
|
|
9df9b84752 | |
|
|
dd80e15cfd | |
|
|
e5ebc4e5a2 | |
|
|
3860737df7 | |
|
|
59654d68ea | |
|
|
605d87cb2d | |
|
|
5009b70ed8 | |
|
|
2d6cc5addb | |
|
|
c3c3930cce | |
|
|
a0c7df892c | |
|
|
0587a600c8 | |
|
|
0d240b1a8f | |
|
|
d3dff5df15 | |
|
|
05df49a3cc | |
|
|
fe96285635 | |
|
|
2ec79a1807 | |
|
|
be019a3b4a | |
|
|
b9f1acdbc2 | |
|
|
2c80557d2c | |
|
|
d9a2a21c8c | |
|
|
d7ca4f63a7 | |
|
|
f439d51ea7 | |
|
|
bf1414385f | |
|
|
c4de8fb1cc | |
|
|
915bcab3c2 | |
|
|
df30ba28ac | |
|
|
40df9ce689 | |
|
|
4f7555d9e1 | |
|
|
ced81d630d | |
|
|
90cf83b90e | |
|
|
4b3f395cdb | |
|
|
dd1704075c | |
|
|
0920492294 | |
|
|
4ba913e072 | |
|
|
a23c7d5733 | |
|
|
af9fe036a3 | |
|
|
e03ce70082 | |
|
|
a3ad11ef93 | |
|
|
f97edb9950 | |
|
|
8cbd5afb98 | |
|
|
1d369b032c | |
|
|
20aeb8b793 | |
|
|
f5630fdf63 | |
|
|
7eb776a594 | |
|
|
504e0cbbdd | |
|
|
ee8bd982e5 | |
|
|
4cf899b3c0 | |
|
|
d2f0ca39de | |
|
|
695e949627 | |
|
|
b4919f9960 | |
|
|
c0b5858697 | |
|
|
682bbc3ccf | |
|
|
9caab3d6cd | |
|
|
c3e915badd | |
|
|
cc03ebc584 | |
|
|
bb188314b0 | |
|
|
8bc48c5c5b | |
|
|
9dc536b25b | |
|
|
2aa5820dea | |
|
|
5465ba2279 | |
|
|
867354bd29 | |
|
|
900d567f59 | |
|
|
a8211bec84 | |
|
|
a74b1f53a9 | |
|
|
9e9e1e2236 | |
|
|
2925577ec2 | |
|
|
c25b7ac58f | |
|
|
fa0670fcd9 | |
|
|
9c1f317980 | |
|
|
58351b1f53 | |
|
|
ccff480d24 | |
|
|
8e3671c169 | |
|
|
1629c21452 | |
|
|
8f0e46516d | |
|
|
264a9f02d7 | |
|
|
575c147ab4 | |
|
|
668600cd69 | |
|
|
8e227a1863 | |
|
|
eeb176c0d0 | |
|
|
a0f5c064e2 | |
|
|
93fc65b2ea | |
|
|
3ca4ebf2d0 | |
|
|
2cb68b7432 | |
|
|
ef416f9628 | |
|
|
b0cd31a0cb | |
|
|
22240b93b5 | |
|
|
a89d97a1f3 | |
|
|
b15f319195 | |
|
|
df3ff41ebb | |
|
|
1003074144 | |
|
|
4edf742c21 | |
|
|
727fd92d76 | |
|
|
7ce0cc2520 | |
|
|
f47c7d17af | |
|
|
856a48a4f8 | |
|
|
e63422ba16 | |
|
|
ad15f733b5 | |
|
|
c1f3fc594e | |
|
|
947dd7b2b3 | |
|
|
5613cb383a | |
|
|
7fc18befc5 | |
|
|
58347af998 | |
|
|
5e8ea52177 | |
|
|
603560c079 | |
|
|
ea3098a26a | |
|
|
33d4a4999d | |
|
|
7bd73d6e75 | |
|
|
a7c32caae3 | |
|
|
a468c5d8f8 | |
|
|
09fdda05a4 | |
|
|
a4671045b6 | |
|
|
e4be2942bb | |
|
|
bfc5445025 | |
|
|
82361e6633 | |
|
|
58c3aecc00 | |
|
|
e7d7894130 | |
|
|
b8381f31ca | |
|
|
3343d2e05f | |
|
|
6cd2c6a5f5 | |
|
|
50c65ed990 | |
|
|
8d6bfcd827 | |
|
|
cc9c2c31e5 | |
|
|
651e7177c4 | |
|
|
08f24b211a | |
|
|
b8bac68915 | |
|
|
a673ed2411 | |
|
|
578a16c65d | |
|
|
9cce5676e4 | |
|
|
64157f7ff5 | |
|
|
69685f198d | |
|
|
a47e2ff62f | |
|
|
51e148cd2c | |
|
|
1f2b0aa56d | |
|
|
0310dc8fd6 | |
|
|
6700edaf7d | |
|
|
65d8c9e7f2 | |
|
|
747ec86eb9 | |
|
|
b9b36ed962 | |
|
|
418f27266e | |
|
|
3298f3db9a | |
|
|
e6eeb22a81 | |
|
|
fe20635d0f | |
|
|
2f8976e28d | |
|
|
6344db1b09 | |
|
|
c2c32d7847 | |
|
|
849f045301 | |
|
|
cb0aa2fb97 | |
|
|
2fae55a7f9 | |
|
|
a344a13863 | |
|
|
26c679f9e7 | |
|
|
6b10f0df4f | |
|
|
5abb794462 | |
|
|
becb19319d | |
|
|
656e86a962 | |
|
|
09b8fe9761 | |
|
|
db97c42320 | |
|
|
694e2f0427 |
|
|
@ -66,13 +66,14 @@ body:
|
|||
If unsure if this is a right place to ask your question, perhaps post on [Discussions](https://github.com/vladmandic/automatic/discussions)
|
||||
Or reach-out to us on [Discord](https://discord.gg/WqMzTUDC)
|
||||
- type: dropdown
|
||||
id: backend
|
||||
id: os
|
||||
attributes:
|
||||
label: Backend
|
||||
description: What is the backend you're using?
|
||||
label: Operating system
|
||||
description: What is the OS you're using?
|
||||
options:
|
||||
- Diffusers
|
||||
- Original
|
||||
- Windows
|
||||
- Linux
|
||||
- MacOS
|
||||
default: 0
|
||||
validations:
|
||||
required: true
|
||||
|
|
|
|||
|
|
@ -0,0 +1,30 @@
|
|||
name: Model request
|
||||
description: Request support for a new model
|
||||
title: "[Model]: "
|
||||
labels: ["enhancement"]
|
||||
|
||||
body:
|
||||
- type: textarea
|
||||
id: name
|
||||
attributes:
|
||||
label: Model name
|
||||
description: Enter model name
|
||||
value:
|
||||
- type: textarea
|
||||
id: type
|
||||
attributes:
|
||||
label: Model type
|
||||
description: Describe model type
|
||||
value:
|
||||
- type: textarea
|
||||
id: url
|
||||
attributes:
|
||||
label: Model URL
|
||||
description: Enter URL to the model page
|
||||
value:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Reason
|
||||
description: Enter a reason why you would like to see this model supported
|
||||
value:
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
# SD.Next: AGENTS.md Project Guidelines
|
||||
|
||||
SD.Next is a complex codebase with specific patterns and conventions.
|
||||
General app structure is:
|
||||
- Python backend server
|
||||
Uses Torch for model inference, FastAPI for API routes and Gradio for creation of UI components.
|
||||
- JavaScript/CSS frontend
|
||||
|
||||
## Tools
|
||||
|
||||
- `venv` for Python environment management, activated with `source venv/bin/activate` (Linux) or `venv\Scripts\activate` (Windows).
|
||||
venv MUST be activated before running any Python commands or scripts to ensure correct dependencies and environment variables.
|
||||
- `python` 3.10+.
|
||||
- `pyproject.toml` for Python configuration, including linting and type checking settings.
|
||||
- `eslint` configured for both core and UI code.
|
||||
- `pnpm` for managing JavaScript dependencies and scripts, with key commands defined in `package.json`.
|
||||
- `ruff` and `pylint` for Python linting, with configurations in `pyproject.toml` and executed via `pnpm ruff` and `pnpm pylint`.
|
||||
- `pre-commit` hooks which also check line-endings and other formatting issues, configured in `.pre-commit-config.yaml`.
|
||||
|
||||
## Project Structure
|
||||
|
||||
- Entry/startup flow: `webui.sh` -> `launch.py` -> `webui.py` -> modules under `modules/`.
|
||||
- Install: `installer.py` takes care of installing dependencies and setting up the environment.
|
||||
- Core runtime state is centralized in `modules/shared.py` (shared.opts, model state, backend/device state).
|
||||
- API/server routes are under `modules/api/`.
|
||||
- UI codebase is split between base JS in `javascript/` and actual UI in `extensions-builtin/sdnext-modernui/`.
|
||||
- Model and pipeline logic is split between `modules/sd_*` and `pipelines/`.
|
||||
- Additional plug-ins live in `scripts/` and are used only when specified.
|
||||
- Extensions live in `extensions-builtin/` and `extensions/` and are loaded dynamically.
|
||||
- Tests and CLI scripts are under `test/` and `cli/`, with some API smoke checks in `test/full-test.sh`.
|
||||
|
||||
## Code Style
|
||||
|
||||
- Prefer existing project patterns over strict generic style rules;
|
||||
this codebase intentionally allows patterns often flagged in default linters such as allowing long lines, etc.
|
||||
|
||||
## Build And Test
|
||||
|
||||
- Activate environment: `source venv/bin/activate` (always ensure this is active when working with Python code).
|
||||
- Test startup: `python launch.py --test`
|
||||
- Full startup: `python launch.py`
|
||||
- Full lint sequence: `pnpm lint`
|
||||
- Python checks individually: `pnpm ruff`, `pnpm pylint`
|
||||
- JS checks: `pnpm eslint` and `pnpm eslint-ui`
|
||||
|
||||
## Conventions
|
||||
|
||||
- Keep PR-ready changes targeted to `dev` branch.
|
||||
- Use conventions from `CONTRIBUTING`.
|
||||
- Do not include unrelated edits or submodule changes when preparing contributions.
|
||||
- Use existing CLI/API tool patterns in `cli/` and `test/` when adding automation scripts.
|
||||
- Respect environment-driven behavior (`SD_*` flags and options) instead of hardcoding platform/model assumptions.
|
||||
- For startup/init edits, preserve error handling and partial-failure tolerance in parallel scans and extension loading.
|
||||
|
||||
## Pitfalls
|
||||
|
||||
- Initialization order matters: startup paths in `launch.py` and `webui.py` are sensitive to import/load timing.
|
||||
- Shared mutable global state can create subtle regressions; prefer narrow, explicit changes.
|
||||
- Device/backend-specific code paths (**CUDA/ROCm/IPEX/DirectML/OpenVINO**) should not assume one platform.
|
||||
- Scripts and extension loading is dynamic; failures may appear only when specific extensions or models are present.
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
description: "Use when editing Python core runtime code, startup flow, model loading, API internals, backend/device logic, or shared state in modules and pipelines."
|
||||
name: "Core Runtime Guidelines"
|
||||
applyTo: "launch.py, webui.py, installer.py, modules/**/*.py, pipelines/**/*.py, scripts/**/*.py, extensions-builtin/**/*.py"
|
||||
---
|
||||
# Core Runtime Guidelines
|
||||
|
||||
- Preserve startup ordering and import timing in `launch.py` and `webui.py`; avoid moving initialization steps unless required.
|
||||
- Treat `modules/shared.py` as the source of truth for global runtime state (`shared.opts`, model references, backend/device flags).
|
||||
- Prefer narrow changes with explicit side effects; avoid introducing new cross-module mutable globals.
|
||||
- Keep platform paths neutral: do not assume CUDA-only behavior and preserve ROCm/IPEX/DirectML/OpenVINO compatibility branches.
|
||||
- Keep extension and script loading resilient: when adding startup scans/hooks, preserve partial-failure tolerance and logging.
|
||||
- Follow existing API/server patterns under `modules/api/` and reuse shared queue/state helpers rather than ad-hoc request handling.
|
||||
- Reuse established model-loading and pipeline patterns (`modules/sd_*`, `pipelines/`) instead of creating parallel abstractions.
|
||||
- For substantial Python changes, run at least relevant checks: `npm run ruff` and `npm run pylint` (or narrower equivalents when appropriate).
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
description: "Use when editing frontend UI code, JavaScript, HTML, CSS, localization files, or built-in UI extensions including modernui and kanvas."
|
||||
name: "UI And Frontend Guidelines"
|
||||
applyTo: "javascript/**/*.js, html/**/*.html, html/**/*.css, html/**/*.js, extensions-builtin/sdnext-modernui/**/*, extensions-builtin/sdnext-kanvas/**/*"
|
||||
---
|
||||
# UI And Frontend Guidelines
|
||||
|
||||
- Preserve existing UI behavior and wiring between Gradio/Python endpoints and frontend handlers; do not change payload shapes without backend alignment.
|
||||
- Follow existing project lint and style patterns; prefer consistency with nearby files over introducing new frameworks or architecture.
|
||||
- Keep localization-friendly UI text changes synchronized with locale resources in `html/locale_*.json` when user-facing strings are added or changed.
|
||||
- Avoid bundling unrelated visual refactors with functional fixes; keep UI PRs scoped and reviewable.
|
||||
- For extension UI work, respect each extension's boundaries and avoid cross-extension coupling.
|
||||
- Validate JavaScript changes with `pnpm eslint`; for modern UI extension changes also run `pnpm eslint-ui`.
|
||||
- Maintain mobile compatibility when touching layout or interaction behavior.
|
||||
|
|
@ -1,7 +1,10 @@
|
|||
# defaults
|
||||
venv/
|
||||
__pycache__
|
||||
.ruff_cache
|
||||
/cache.json
|
||||
.vscode/
|
||||
.idea/
|
||||
**/.DS_Store
|
||||
/*.json
|
||||
/*.yaml
|
||||
/params.txt
|
||||
|
|
@ -9,17 +12,15 @@ __pycache__
|
|||
/user.css
|
||||
/webui-user.bat
|
||||
/webui-user.sh
|
||||
/html/extensions.json
|
||||
/html/themes.json
|
||||
config_states
|
||||
/data/metadata.json
|
||||
/data/extensions.json
|
||||
/data/cache.json
|
||||
/data/themes.json
|
||||
/data/installer.json
|
||||
/data/rocm.json
|
||||
node_modules
|
||||
pnpm-lock.yaml
|
||||
package-lock.json
|
||||
venv
|
||||
.history
|
||||
cache
|
||||
**/.DS_Store
|
||||
tunableop_results*.csv
|
||||
|
||||
# all models and temp files
|
||||
*.log
|
||||
|
|
@ -42,30 +43,30 @@ tunableop_results*.csv
|
|||
/*.mp3
|
||||
/*.lnk
|
||||
/*.swp
|
||||
!webui.bat
|
||||
!webui.sh
|
||||
!package.json
|
||||
!requirements.txt
|
||||
|
||||
# all dynamic stuff
|
||||
/extensions/**/*
|
||||
/outputs/**/*
|
||||
/embeddings/**/*
|
||||
/models/**/*
|
||||
/interrogate/**/*
|
||||
/train/log/**/*
|
||||
/textual_inversion/**/*
|
||||
/detected_maps/**/*
|
||||
/tmp
|
||||
/log
|
||||
/cert
|
||||
.vscode/
|
||||
.idea/
|
||||
/localizations
|
||||
.*/
|
||||
cache
|
||||
tunableop_results*.csv
|
||||
|
||||
# force included
|
||||
!webui.bat
|
||||
!webui.sh
|
||||
!package.json
|
||||
!requirements.txt
|
||||
!constraints.txt
|
||||
!/data
|
||||
!/models/VAE-approx
|
||||
!/models/VAE-approx/model.pt
|
||||
!/models/Reference
|
||||
!/models/Reference/**/*
|
||||
|
||||
# copilot instructions
|
||||
!/.github/
|
||||
!/.github/copilot-instructions.md
|
||||
!/.github/instructions/
|
||||
!/.github/instructions/**/*
|
||||
|
|
|
|||
|
|
@ -10,13 +10,9 @@
|
|||
path = extensions-builtin/sd-extension-chainner
|
||||
url = https://github.com/vladmandic/sd-extension-chainner
|
||||
ignore = dirty
|
||||
[submodule "extensions-builtin/stable-diffusion-webui-rembg"]
|
||||
path = extensions-builtin/stable-diffusion-webui-rembg
|
||||
url = https://github.com/vladmandic/sd-extension-rembg
|
||||
ignore = dirty
|
||||
[submodule "extensions-builtin/sdnext-modernui"]
|
||||
path = extensions-builtin/sdnext-modernui
|
||||
url = https://github.com/BinaryQuantumSoul/sdnext-modernui
|
||||
[submodule "extensions-builtin/sdnext-kanvas"]
|
||||
path = extensions-builtin/sdnext-kanvas
|
||||
url = https://github.com/vladmandic/sdnext-kanvas
|
||||
path = extensions-builtin/sdnext-kanvas
|
||||
url = https://github.com/vladmandic/sdnext-kanvas
|
||||
|
|
|
|||
|
|
@ -13,20 +13,17 @@ repos:
|
|||
rev: v6.0.0
|
||||
hooks:
|
||||
- id: check-added-large-files
|
||||
- id: check-case-conflict
|
||||
- id: check-merge-conflict
|
||||
- id: check-symlinks
|
||||
- id: check-illegal-windows-names
|
||||
- id: check-merge-conflict
|
||||
- id: detect-private-key
|
||||
- id: check-builtin-literals
|
||||
- id: check-case-conflict
|
||||
- id: check-illegal-windows-names
|
||||
- id: check-merge-conflict
|
||||
- id: check-symlinks
|
||||
- id: check-yaml
|
||||
- id: check-json
|
||||
- id: check-toml
|
||||
- id: check-xml
|
||||
- id: check-yaml
|
||||
- id: debug-statements
|
||||
- id: detect-private-key
|
||||
- id: end-of-file-fixer
|
||||
- id: mixed-line-ending
|
||||
- id: check-executables-have-shebangs
|
||||
|
|
@ -41,3 +38,13 @@ repos:
|
|||
.*\.md|
|
||||
.github/ISSUE_TEMPLATE/.*\.yml
|
||||
)$
|
||||
- repo: https://github.com/oliv5/pre-commit-indents-to-spaces
|
||||
rev: v0.0.3
|
||||
hooks:
|
||||
- id: indents-to-spaces
|
||||
args: ["--spaces=4"]
|
||||
types: [python]
|
||||
- id: indents-to-spaces
|
||||
args: ["--spaces=2"]
|
||||
types: [file]
|
||||
files: \.(json|js|mjs|css|html|md|yaml|toml|sh)$
|
||||
|
|
|
|||
283
.pylintrc
283
.pylintrc
|
|
@ -1,283 +0,0 @@
|
|||
[MAIN]
|
||||
analyse-fallback-blocks=no
|
||||
clear-cache-post-run=no
|
||||
extension-pkg-allow-list=
|
||||
prefer-stubs=yes
|
||||
extension-pkg-whitelist=
|
||||
fail-on=
|
||||
fail-under=10
|
||||
ignore=CVS
|
||||
ignore-paths=/usr/lib/.*$,
|
||||
venv,
|
||||
.git,
|
||||
.ruff_cache,
|
||||
.vscode,
|
||||
modules/apg,
|
||||
modules/cfgzero,
|
||||
modules/control/proc,
|
||||
modules/control/units,
|
||||
modules/dml,
|
||||
modules/facelib,
|
||||
modules/flash_attn_triton_amd,
|
||||
modules/ggml,
|
||||
modules/hidiffusion,
|
||||
modules/hijack/ddpm_edit.py,
|
||||
modules/intel,
|
||||
modules/intel/ipex,
|
||||
modules/framepack/pipeline,
|
||||
modules/onnx_impl,
|
||||
modules/pag,
|
||||
modules/postprocess/aurasr_arch.py,
|
||||
modules/prompt_parser_xhinker.py,
|
||||
modules/ras,
|
||||
modules/seedvr,
|
||||
modules/rife,
|
||||
modules/schedulers,
|
||||
modules/taesd,
|
||||
modules/teacache,
|
||||
modules/todo,
|
||||
pipelines/bria,
|
||||
pipelines/flex2,
|
||||
pipelines/f_lite,
|
||||
pipelines/hidream,
|
||||
pipelines/hdm,
|
||||
pipelines/meissonic,
|
||||
pipelines/omnigen2,
|
||||
pipelines/segmoe,
|
||||
pipelines/xomni,
|
||||
pipelines/chrono,
|
||||
scripts/consistory,
|
||||
scripts/ctrlx,
|
||||
scripts/daam,
|
||||
scripts/demofusion,
|
||||
scripts/freescale,
|
||||
scripts/infiniteyou,
|
||||
scripts/instantir,
|
||||
scripts/lbm,
|
||||
scripts/layerdiffuse,
|
||||
scripts/mod,
|
||||
scripts/pixelsmith,
|
||||
scripts/differential_diffusion.py,
|
||||
scripts/pulid,
|
||||
scripts/xadapter,
|
||||
repositories,
|
||||
extensions-builtin/sd-extension-chainner/nodes,
|
||||
extensions-builtin/sd-webui-agent-scheduler,
|
||||
extensions-builtin/sdnext-modernui/node_modules,
|
||||
extensions-builtin/sdnext-kanvas/node_modules,
|
||||
ignore-patterns=.*test*.py$,
|
||||
.*_model.py$,
|
||||
.*_arch.py$,
|
||||
.*_model_arch.py*,
|
||||
.*_model_arch_v2.py$,
|
||||
ignored-modules=
|
||||
jobs=8
|
||||
limit-inference-results=100
|
||||
load-plugins=
|
||||
persistent=no
|
||||
py-version=3.10
|
||||
recursive=no
|
||||
source-roots=
|
||||
unsafe-load-any-extension=no
|
||||
|
||||
[BASIC]
|
||||
argument-naming-style=snake_case
|
||||
attr-naming-style=snake_case
|
||||
bad-names=foo, bar, baz, toto, tutu, tata
|
||||
bad-names-rgxs=
|
||||
class-attribute-naming-style=any
|
||||
class-const-naming-style=UPPER_CASE
|
||||
class-naming-style=PascalCase
|
||||
const-naming-style=snake_case
|
||||
docstring-min-length=-1
|
||||
function-naming-style=snake_case
|
||||
good-names=i,j,k,e,ex,ok,p,x,y,id
|
||||
good-names-rgxs=
|
||||
include-naming-hint=no
|
||||
inlinevar-naming-style=any
|
||||
method-naming-style=snake_case
|
||||
module-naming-style=snake_case
|
||||
name-group=
|
||||
no-docstring-rgx=^_
|
||||
property-classes=abc.abstractproperty
|
||||
variable-naming-style=snake_case
|
||||
|
||||
[CLASSES]
|
||||
check-protected-access-in-special-methods=no
|
||||
defining-attr-methods=__init__, __new__,
|
||||
exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit
|
||||
valid-classmethod-first-arg=cls
|
||||
valid-metaclass-classmethod-first-arg=mcs
|
||||
|
||||
[DESIGN]
|
||||
exclude-too-few-public-methods=
|
||||
ignored-parents=
|
||||
max-args=199
|
||||
max-attributes=99
|
||||
max-bool-expr=99
|
||||
max-branches=199
|
||||
max-locals=99
|
||||
max-parents=99
|
||||
max-public-methods=99
|
||||
max-returns=99
|
||||
max-statements=199
|
||||
min-public-methods=1
|
||||
|
||||
[EXCEPTIONS]
|
||||
overgeneral-exceptions=builtins.BaseException,builtins.Exception
|
||||
|
||||
[FORMAT]
|
||||
expected-line-ending-format=
|
||||
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
|
||||
indent-after-paren=4
|
||||
indent-string=' '
|
||||
max-line-length=200
|
||||
max-module-lines=9999
|
||||
single-line-class-stmt=no
|
||||
single-line-if-stmt=no
|
||||
|
||||
[IMPORTS]
|
||||
allow-any-import-level=
|
||||
allow-reexport-from-package=no
|
||||
allow-wildcard-with-all=no
|
||||
deprecated-modules=
|
||||
ext-import-graph=
|
||||
import-graph=
|
||||
int-import-graph=
|
||||
known-standard-library=
|
||||
known-third-party=enchant
|
||||
preferred-modules=
|
||||
|
||||
[LOGGING]
|
||||
logging-format-style=new
|
||||
logging-modules=logging
|
||||
|
||||
[MESSAGES CONTROL]
|
||||
confidence=HIGH,
|
||||
CONTROL_FLOW,
|
||||
INFERENCE,
|
||||
INFERENCE_FAILURE,
|
||||
UNDEFINED
|
||||
# disable=C,R,W
|
||||
disable=abstract-method,
|
||||
bad-inline-option,
|
||||
bare-except,
|
||||
broad-exception-caught,
|
||||
chained-comparison,
|
||||
consider-iterating-dictionary,
|
||||
consider-merging-isinstance,
|
||||
consider-using-dict-items,
|
||||
consider-using-enumerate,
|
||||
consider-using-from-import,
|
||||
consider-using-generator,
|
||||
consider-using-get,
|
||||
consider-using-in,
|
||||
consider-using-max-builtin,
|
||||
consider-using-min-builtin,
|
||||
consider-using-sys-exit,
|
||||
cyclic-import,
|
||||
dangerous-default-value,
|
||||
deprecated-pragma,
|
||||
duplicate-code,
|
||||
file-ignored,
|
||||
import-error,
|
||||
import-outside-toplevel,
|
||||
invalid-name,
|
||||
line-too-long,
|
||||
locally-disabled,
|
||||
logging-fstring-interpolation,
|
||||
missing-class-docstring,
|
||||
missing-function-docstring,
|
||||
missing-module-docstring,
|
||||
no-else-raise,
|
||||
no-else-return,
|
||||
not-callable,
|
||||
pointless-string-statement,
|
||||
raw-checker-failed,
|
||||
simplifiable-if-expression,
|
||||
suppressed-message,
|
||||
too-few-public-methods,
|
||||
too-many-instance-attributes,
|
||||
too-many-locals,
|
||||
too-many-nested-blocks,
|
||||
too-many-positional-arguments,
|
||||
too-many-statements,
|
||||
unidiomatic-typecheck,
|
||||
unknown-option-value,
|
||||
unnecessary-dict-index-lookup,
|
||||
unnecessary-dunder-call,
|
||||
unnecessary-lambda-assigment,
|
||||
unnecessary-lambda,
|
||||
unused-wildcard-import,
|
||||
unpacking-non-sequence,
|
||||
unsubscriptable-object,
|
||||
useless-return,
|
||||
use-dict-literal,
|
||||
use-symbolic-message-instead,
|
||||
useless-suppression,
|
||||
wrong-import-position,
|
||||
enable=c-extension-no-member
|
||||
|
||||
[METHOD_ARGS]
|
||||
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
|
||||
|
||||
[MISCELLANEOUS]
|
||||
notes=FIXME,
|
||||
XXX,
|
||||
TODO
|
||||
notes-rgx=
|
||||
|
||||
[REFACTORING]
|
||||
max-nested-blocks=5
|
||||
never-returning-functions=sys.exit,argparse.parse_error
|
||||
|
||||
[REPORTS]
|
||||
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
|
||||
msg-template=
|
||||
reports=no
|
||||
score=no
|
||||
|
||||
[SIMILARITIES]
|
||||
ignore-comments=yes
|
||||
ignore-docstrings=yes
|
||||
ignore-imports=yes
|
||||
ignore-signatures=yes
|
||||
min-similarity-lines=4
|
||||
|
||||
[SPELLING]
|
||||
max-spelling-suggestions=4
|
||||
spelling-dict=
|
||||
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
|
||||
spelling-ignore-words=
|
||||
spelling-private-dict-file=
|
||||
spelling-store-unknown-words=no
|
||||
|
||||
[STRING]
|
||||
check-quote-consistency=no
|
||||
check-str-concat-over-line-jumps=no
|
||||
|
||||
[TYPECHECK]
|
||||
contextmanager-decorators=contextlib.contextmanager
|
||||
generated-members=numpy.*,logging.*,torch.*,cv2.*
|
||||
ignore-none=yes
|
||||
ignore-on-opaque-inference=yes
|
||||
ignored-checks-for-mixins=no-member,
|
||||
not-async-context-manager,
|
||||
not-context-manager,
|
||||
attribute-defined-outside-init
|
||||
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
|
||||
missing-member-hint=yes
|
||||
missing-member-hint-distance=1
|
||||
missing-member-max-choices=1
|
||||
mixin-class-rgx=.*[Mm]ixin
|
||||
signature-mutators=
|
||||
|
||||
[VARIABLES]
|
||||
additional-builtins=
|
||||
allow-global-unused-variables=yes
|
||||
allowed-redefined-builtins=
|
||||
callbacks=cb_,
|
||||
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
|
||||
ignored-argument-names=_.*|^ignored_|^unused_
|
||||
init-import=no
|
||||
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
|
||||
109
.ruff.toml
109
.ruff.toml
|
|
@ -1,109 +0,0 @@
|
|||
exclude = [
|
||||
"venv",
|
||||
".git",
|
||||
".ruff_cache",
|
||||
".vscode",
|
||||
|
||||
"modules/cfgzero",
|
||||
"modules/facelib",
|
||||
"modules/flash_attn_triton_amd",
|
||||
"modules/hidiffusion",
|
||||
"modules/intel/ipex",
|
||||
"modules/pag",
|
||||
"modules/schedulers",
|
||||
"modules/teacache",
|
||||
"modules/seedvr",
|
||||
|
||||
"modules/control/proc",
|
||||
"modules/control/units",
|
||||
"modules/control/units/xs_pipe.py",
|
||||
"modules/postprocess/aurasr_arch.py",
|
||||
|
||||
"pipelines/meissonic",
|
||||
"pipelines/omnigen2",
|
||||
"pipelines/hdm",
|
||||
"pipelines/segmoe",
|
||||
"pipelines/xomni",
|
||||
"pipelines/chrono",
|
||||
|
||||
"scripts/lbm",
|
||||
"scripts/daam",
|
||||
"scripts/xadapter",
|
||||
"scripts/pulid",
|
||||
"scripts/instantir",
|
||||
"scripts/freescale",
|
||||
"scripts/consistory",
|
||||
|
||||
"repositories",
|
||||
|
||||
"extensions-builtin/Lora",
|
||||
"extensions-builtin/sd-extension-chainner/nodes",
|
||||
"extensions-builtin/sd-webui-agent-scheduler",
|
||||
"extensions-builtin/sdnext-modernui/node_modules",
|
||||
]
|
||||
line-length = 250
|
||||
indent-width = 4
|
||||
target-version = "py310"
|
||||
|
||||
[lint]
|
||||
select = [
|
||||
"F",
|
||||
"E",
|
||||
"W",
|
||||
"C",
|
||||
"B",
|
||||
"I",
|
||||
"YTT",
|
||||
"ASYNC",
|
||||
"RUF",
|
||||
"AIR",
|
||||
"NPY",
|
||||
"C4",
|
||||
"T10",
|
||||
"EXE",
|
||||
"ISC",
|
||||
"ICN",
|
||||
"RSE",
|
||||
"TCH",
|
||||
"TID",
|
||||
"INT",
|
||||
"PLE",
|
||||
]
|
||||
ignore = [
|
||||
"B006", # Do not use mutable data structures for argument defaults
|
||||
"B008", # Do not perform function call in argument defaults
|
||||
"B905", # Strict zip() usage
|
||||
"C420", # Unnecessary dict comprehension for iterable; use `dict.fromkeys` instead
|
||||
"C408", # Unnecessary `dict` call
|
||||
"I001", # Import block is un-sorted or un-formatted
|
||||
"E402", # Module level import not at top of file
|
||||
"E501", # Line too long
|
||||
"E721", # Do not compare types, use `isinstance()`
|
||||
"E731", # Do not assign a `lambda` expression, use a `def`
|
||||
"E741", # Ambiguous variable name
|
||||
"F401", # Imported by unused
|
||||
"EXE001", # file with shebang is not marked executable
|
||||
"NPY002", # replace legacy random
|
||||
"RUF005", # Consider iterable unpacking
|
||||
"RUF008", # Do not use mutable default values for dataclass
|
||||
"RUF010", # Use explicit conversion flag
|
||||
"RUF012", # Mutable class attributes
|
||||
"RUF013", # PEP 484 prohibits implicit `Optional`
|
||||
"RUF015", # Prefer `next(...)` over single element slice
|
||||
"RUF046", # Value being cast to `int` is already an integer
|
||||
"RUF059", # Unpacked variables are not used
|
||||
"RUF051", # Prefer pop over del
|
||||
]
|
||||
fixable = ["ALL"]
|
||||
unfixable = []
|
||||
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
|
||||
|
||||
[format]
|
||||
quote-style = "double"
|
||||
indent-style = "space"
|
||||
skip-magic-trailing-comma = false
|
||||
line-ending = "auto"
|
||||
docstring-code-format = false
|
||||
|
||||
[lint.mccabe]
|
||||
max-complexity = 150
|
||||
|
|
@ -11,7 +11,6 @@
|
|||
"env": { "USED_VSCODE_COMMAND_PICKARGS": "1" },
|
||||
"args": [
|
||||
"--uv",
|
||||
"--quick",
|
||||
"--log", "vscode.log",
|
||||
"${command:pickArgs}"]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
{
|
||||
"files.eol": "\n",
|
||||
"python.analysis.extraPaths": [".", "./modules", "./scripts", "./pipelines"],
|
||||
"python.analysis.typeCheckingMode": "off",
|
||||
"editor.formatOnSave": false,
|
||||
|
|
@ -11,5 +12,8 @@
|
|||
"css",
|
||||
"json",
|
||||
"markdown"
|
||||
],
|
||||
"githubPullRequests.ignoredPullRequestBranches": [
|
||||
"master"
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,60 @@
|
|||
# SD.Next: AGENTS.md Project Guidelines
|
||||
|
||||
SD.Next is a complex codebase with specific patterns and conventions.
|
||||
General app structure is:
|
||||
- Python backend server
|
||||
Uses Torch for model inference, FastAPI for API routes and Gradio for creation of UI components.
|
||||
- JavaScript/CSS frontend
|
||||
|
||||
## Tools
|
||||
|
||||
- `venv` for Python environment management, activated with `source venv/bin/activate` (Linux) or `venv\Scripts\activate` (Windows).
|
||||
venv MUST be activated before running any Python commands or scripts to ensure correct dependencies and environment variables.
|
||||
- `python` 3.10+.
|
||||
- `pyproject.toml` for Python configuration, including linting and type checking settings.
|
||||
- `eslint` configured for both core and UI code.
|
||||
- `pnpm` for managing JavaScript dependencies and scripts, with key commands defined in `package.json`.
|
||||
- `ruff` and `pylint` for Python linting, with configurations in `pyproject.toml` and executed via `pnpm ruff` and `pnpm pylint`.
|
||||
- `pre-commit` hooks which also check line-endings and other formatting issues, configured in `.pre-commit-config.yaml`.
|
||||
|
||||
## Project Structure
|
||||
|
||||
- Entry/startup flow: `webui.sh` -> `launch.py` -> `webui.py` -> modules under `modules/`.
|
||||
- Install: `installer.py` takes care of installing dependencies and setting up the environment.
|
||||
- Core runtime state is centralized in `modules/shared.py` (shared.opts, model state, backend/device state).
|
||||
- API/server routes are under `modules/api/`.
|
||||
- UI codebase is split between base JS in `javascript/` and actual UI in `extensions-builtin/sdnext-modernui/`.
|
||||
- Model and pipeline logic is split between `modules/sd_*` and `pipelines/`.
|
||||
- Additional plug-ins live in `scripts/` and are used only when specified.
|
||||
- Extensions live in `extensions-builtin/` and `extensions/` and are loaded dynamically.
|
||||
- Tests and CLI scripts are under `test/` and `cli/`, with some API smoke checks in `test/full-test.sh`.
|
||||
|
||||
## Code Style
|
||||
|
||||
- Prefer existing project patterns over strict generic style rules;
|
||||
this codebase intentionally allows patterns often flagged in default linters such as allowing long lines, etc.
|
||||
|
||||
## Build And Test
|
||||
|
||||
- Activate environment: `source venv/bin/activate` (always ensure this is active when working with Python code).
|
||||
- Test startup: `python launch.py --test`
|
||||
- Full startup: `python launch.py`
|
||||
- Full lint sequence: `pnpm lint`
|
||||
- Python checks individually: `pnpm ruff`, `pnpm pylint`
|
||||
- JS checks: `pnpm eslint` and `pnpm eslint-ui`
|
||||
|
||||
## Conventions
|
||||
|
||||
- Keep PR-ready changes targeted to `dev` branch.
|
||||
- Use conventions from `CONTRIBUTING`.
|
||||
- Do not include unrelated edits or submodule changes when preparing contributions.
|
||||
- Use existing CLI/API tool patterns in `cli/` and `test/` when adding automation scripts.
|
||||
- Respect environment-driven behavior (`SD_*` flags and options) instead of hardcoding platform/model assumptions.
|
||||
- For startup/init edits, preserve error handling and partial-failure tolerance in parallel scans and extension loading.
|
||||
|
||||
## Pitfalls
|
||||
|
||||
- Initialization order matters: startup paths in `launch.py` and `webui.py` are sensitive to import/load timing.
|
||||
- Shared mutable global state can create subtle regressions; prefer narrow, explicit changes.
|
||||
- Device/backend-specific code paths (**CUDA/ROCm/IPEX/DirectML/OpenVINO**) should not assume one platform.
|
||||
- Scripts and extension loading is dynamic; failures may appear only when specific extensions or models are present.
|
||||
317
CHANGELOG.md
317
CHANGELOG.md
|
|
@ -1,7 +1,306 @@
|
|||
# Change Log for SD.Next
|
||||
|
||||
## Update for 2026-04-04
|
||||
|
||||
- **Models**
|
||||
- [AiArtLab SDXS-1B](https://huggingface.co/AiArtLab/sdxs-1b) Simple Diffusion XS *(training still in progress)*
|
||||
this model combines Qwen3.5-1.8B text encoder with SDXL-style UNET with only 1.6B parameters and custom 32ch VAE
|
||||
- **Compute**
|
||||
- **ROCm** further work on advanced configuration and tuning, thanks @resonantsky
|
||||
see *main interface -> scripts -> rocm advanced config*
|
||||
- **Internal**
|
||||
- additional typing and typechecks, thanks @awsr
|
||||
- Prohibit python==3.14 unless `--experimental`
|
||||
- **Fixes**
|
||||
- UI CSS fixes, thanks @awsr
|
||||
- detect/warn if space in system path
|
||||
- add `ftfy` to requirements
|
||||
|
||||
## Update for 2026-04-01
|
||||
|
||||
### Highlights for 2026-04-01
|
||||
|
||||
This release brings massive code refactoring to modernize codebase and removal of some obsolete features. Leaner & Faster!
|
||||
And since it's a bit quieter period when it comes to new models, notable additions would be: *FireRed-Image-Edit*, *SkyWorks-UniPic-3* and new versions of *Anima-Preview*, *Flux-Klein-KV* image models and *LTX 2.3* video model
|
||||
|
||||
If you're on Windows platform, we have a brand new [All-in-one Installer & Launcher](https://github.com/vladmandic/sdnext-launcher): simply download [exe or zip](https://github.com/vladmandic/sdnext-launcher/releases) and done!
|
||||
|
||||
And we have a new (optional) React-based **UI** [Enso](https://github.com/CalamitousFelicitousness/enso)!
|
||||
|
||||
*What else*? Really a lot!
|
||||
New color grading module, updated localization with new languages and improved translations, new CivitAI integration module, new finetunes loader, several new upscalers, improvements to LLM/VLM in captioning and prompt enhance, a lot of new control preprocessors, new realtime server info panel, some new UI themes
|
||||
And major work on API hardening: *security, rate limits, secrets handling, new endpoints*, etc.
|
||||
But also many smaller quality-of-life improvements - for full details, see [ChangeLog](https://github.com/vladmandic/automatic/blob/master/CHANGELOG.md)
|
||||
|
||||
*Note*: Purely due to size of changes, clean install is recommended!
|
||||
Just how big? Some stats: *~530 commits over 880 files*
|
||||
|
||||
[ReadMe](https://github.com/vladmandic/automatic/blob/master/README.md) | [ChangeLog](https://github.com/vladmandic/automatic/blob/master/CHANGELOG.md) | [Docs](https://vladmandic.github.io/sdnext-docs/) | [WiKi](https://github.com/vladmandic/automatic/wiki) | [Discord](https://discord.com/invite/sd-next-federal-batch-inspectors-1101998836328697867) | [Sponsor](https://github.com/sponsors/vladmandic)
|
||||
|
||||
### Details for 2026-04-01
|
||||
|
||||
- **Models**
|
||||
- [Google Flash 3.1 Image](https://ai.google.dev/gemini-api/docs/models/gemini-3-flash-preview) a.k.a. *Nano Banana 2*
|
||||
- [FireRed Image Edit](https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.0) *1.0 and 1.1*
|
||||
*Note*: FireRed is a fine-tune of Qwen-Image-Edit regardless of its claim as a new base-model
|
||||
- [Skyworks UniPic-3](https://huggingface.co/Skywork/Unipic3), *Consistency and DMD* variants to reference/community section
|
||||
*Note*: UniPic-3 is a fine-tune of Qwen-Image-Edit with new distillation regardless of its claim of major changes
|
||||
- [Anima Preview-v2](https://huggingface.co/circlestone-labs/Anima)
|
||||
- [FLUX.2-Klein-KV](https://huggingface.co/black-forest-labs/FLUX.2-klein-9b-kv), thanks @liutyi
|
||||
- [LTX-Video 2.3](https://huggingface.co/Lightricks/LTX-2.3) in *Full and Distilled* variants and in both original *FP16 and SDNQ-4bit* quantization
|
||||
*note* ltx-2.3 is a massive 22B parameters and full model is very large (72GB) so use of pre-quantized variant (32GB) is highly recommended
|
||||
- **Image manipulation**
|
||||
- new **Color grading** module
|
||||
apply basic corrections to your images: brightness,contrast,saturation,shadows,highlights
|
||||
move to professional photo corrections: hue,gamma,sharpness,temperature
|
||||
correct tone: shadows,midtones,highlights
|
||||
add effects: vignette,grain
|
||||
apply professional lut-table using .cube file
|
||||
*hint* color grading is available as step during generate or as processing item for already existing images
|
||||
- **Upscaling**
|
||||
add support for [spandrel](https://github.com/chaiNNer-org/spandrel) engine with support for new upscaling model families
|
||||
add two new ai upscalers: *RealPLKSR NomosWebPhoto* and *RealPLKSR AnimeSharpV2*
|
||||
add two new **interpolation** methods: *HQX* and *ICB*
|
||||
use high-quality [sharpfin](https://github.com/drhead/Sharpfin) accelerated library
|
||||
extend `chainner` support for additional models
|
||||
- update **Latent corrections** *(former HDR Corrections)*
|
||||
expand allowed models
|
||||
- **Captioning / Prompt Enhance**
|
||||
- new models: **Qwen-3.5**, **Mistral-3** in multiple variations
|
||||
- new models: multiple *heretic* and *abliterated* finetunes for **Qwen, Gemma, Mistral**
|
||||
- **captioning** and **prompt enhance**: add support for all cloud-based Gemini models
|
||||
*3.1/3.0/2.5 pro/flash/flash-lite*
|
||||
- improve captioning and prompt enhance memory handling/offloading
|
||||
- **Features**
|
||||
- **Secrets** handling: new `secrets.json` and special handling for tokens/keys/passwords
|
||||
used to be treated like any other `config.json` param which can cause security issues
|
||||
- **Control**: many new **pre-processors**
|
||||
*anyline, depth_anything v2, dsine, lotus, marigold normals, oneformer, rtmlib pose, sam2, stablenormal, teed, vitpose*
|
||||
- pipelines: add **ZImageInpaint**
|
||||
- rewritten **CivitAI** module
|
||||
browse/discover mode with sort, period, type/base dropdowns; URL paste; subfolder sorting; auto-browse; dynamic dropdowns
|
||||
- **HiRes**: allow using different lora in refiner prompt
|
||||
- **Nunchaku** models are now listed in networks tab as reference models
|
||||
instead of being used implicitly via quantization
|
||||
- improve image **Metadata** parser for foreign metadata (e.g. XMP)
|
||||
- new **CeeTeeDees image-to-image batch inference**, thanks @resonantsky
|
||||
available in *main interface -> scripts*
|
||||
- **Compute**
|
||||
- **ROCm** advanced configuration and tuning, thanks @resonantsky
|
||||
see *main interface -> scripts -> rocm advanced config*
|
||||
- **ROCm** support for additional AMD GPUs: `gfx103X`, thanks @crashingalexsan
|
||||
- **Cuda** update to `torch=2.11` with `cuda=13.0`
|
||||
- **Ipex** update to `torch==2.11`
|
||||
- **ROCm/Linux** update to `torch==2.11` with `rocm==7.2`
|
||||
- **OpenVINO** update to `torch==2.11` and `openvino==2026.0`
|
||||
- *note* **Cuda** `torch==2.10` removed support for `rtx1000` series and older GPUs
|
||||
use following before first startup to force installation of `torch==2.9.1` with `cuda==12.6`:
|
||||
> `set TORCH_COMMAND='torch==2.9.1 torchvision==0.24.1 torchaudio==2.9.1 --index-url https://download.pytorch.org/whl/cu126'`
|
||||
- *note* **Cuda** `cuda==13.0` requires newer nVidia drivers
|
||||
use following before first startup to force installation of `torch==2.11.0` with `cuda==12.8`:
|
||||
> `set TORCH_COMMAND='torch torchvision --index-url https://download.pytorch.org/whl/cu128'`
|
||||
- update installer and support `nunchaku==1.2.1`
|
||||
- **UI**
|
||||
- **Enso** new React-based UI, developed by @CalamitousFelicitousness!
|
||||
with WYSIWYG infinite canvas workspace, command palette, and numerous quality of life improvements across the board
|
||||
enable using `--enso` flag and access using `/enso` endpoint (e.g. <http://localhost:7860/enso>)
|
||||
see [Enso Docs](https://vladmandic.github.io/sdnext-docs/Enso/) and [Enso Home](https://github.com/CalamitousFelicitousness/enso) for details
|
||||
*note* Enso is work-in-progress and alpha-ready
|
||||
- legacy panels **T2I** and **I2I** are disabled by default
|
||||
you can re-enable them in *settings -> ui -> hide legacy tabs*
|
||||
- new panel: **Server Info** with detailed runtime information
|
||||
- rename **Scripts** to **Extras** and reorganize to split internal functionality vs external extensions
|
||||
- **Networks** add **UNet/DiT**
|
||||
- **Localization** improved translation quality and new translations locales:
|
||||
*en, en1, en2, en3, en4, hr, es, it, fr, de, pt, ru, zh, ja, ko, hi, ar, bn, ur, id, vi, tr, sr, po, he, xx, yy, qq, tlh*
|
||||
yes, this now includes stuff like *latin, esperanto, arabic, hebrew, klingon* and a lot more!
|
||||
and also introduce some pseudo-locales such as: *techno-babbel*, *for-n00bs*
|
||||
*hint*: click on locale icon in bottom-left corner to cycle through available locales, or set default in *settings -> ui*
|
||||
- **Server settings** new section in *settings*
|
||||
- **Kanvas** add paste image from clipboard
|
||||
- **Themes** add *CTD-NT64Light*, *CTD-NT64Medium* and *CTD-NT64Dark*, thanks @resonantsky
|
||||
- **Themes** add *Vlad-Neomorph*
|
||||
- **Gallery** add option to auto-refresh gallery, thanks @awsr
|
||||
- **Token counters** add per-section display for supported models, thanks @awsr
|
||||
- **Colour grading** add hints for all the functions
|
||||
- **Docs / Wiki**
|
||||
- updates to compute sections: *AMD-ROCm, AMD-MIOpen, ZLUDA, OpenVINO, nVidia*
|
||||
- updates to core sections: *Installation, Python, Schedulers, Launcher, SDNQ, Video*
|
||||
- added Enso page
|
||||
- **API**
|
||||
- prototype **v2 API** (`/sdapi/v2/`)
|
||||
job-based generation with queue, per-job WebSocket progress, file uploads with TTL, model/network enumeration
|
||||
and a plethora of other improvements *(work-in-progress)*
|
||||
for the time being ships with Enso, which must be enabled with `--enso` flag on startup for v2 API to be available
|
||||
- **rate limiting**: global for all endpoints, guards against abuse and denial-of-service type of attacks
|
||||
configurable in *settings -> server settings*
|
||||
- new `/sdapi/v1/upload` endpoint with support for both POST with form-data or PUT using raw-bytes
|
||||
- new `/sdapi/v1/torch` endpoint for torch info (backend, version, etc.)
|
||||
- new `/sdapi/v1/gpu` endpoint for GPU info
|
||||
- new `/sdapi/v1/rembg` endpoint for background removal
|
||||
- new `/sdapi/v1/unet` endpoint to list available unets/dits
|
||||
- use rate limiting for api logging
|
||||
- **Obsoleted**
|
||||
- removed support for additional quantization engines: *BitsAndBytes, TorchAO, Optimum-Quanto, NNCF*
|
||||
*note*: SDNQ is quantization engine of choice for SD.Next
|
||||
- removed `flux_enhance` script
|
||||
- **Internal**
|
||||
- `python==3.13` full support
|
||||
- `python==3.14` initial support
|
||||
see [docs](https://vladmandic.github.io/sdnext-docs/Python/) for details
|
||||
- remove hard-dependencies:
|
||||
`clip, numba, skimage, torchsde, omegaconf, antlr, patch-ng, astunparse, addict, inflection, jsonmerge, kornia`,
|
||||
`resize-right, voluptuous, yapf, sqlalchemy, invisible-watermark, pi-heif, ftfy, blendmodes, PyWavelets, imp`
|
||||
these are now installed on-demand when needed
|
||||
- bump `huggingface_hub==1.5.0`
|
||||
- bump `transformers==5.3.0`
|
||||
- installer introduce `constraints.txt`
|
||||
- refactor to/from *image/tensor* logic
|
||||
- refactor reorganize `cli` scripts
|
||||
- refactor move tests to dedicated `/test/`
|
||||
- refactor all image handling to `modules/image/`
|
||||
- refactor many params that were server-global are now ui params that are handled per-request
|
||||
*schedulers, todo, tome, etc.*
|
||||
- refactor error handling during `torch.compile`
|
||||
- refactor move `rembg` to core instead of extensions
|
||||
- remove face restoration
|
||||
- unified command line parsing
|
||||
- reinstall `nunchaku` with `--reinstall` flag
|
||||
- use explicit icon image references in `gallery`, thanks @awsr
|
||||
- launch use threads to async execute non-critical tasks
|
||||
- switch from deprecated `pkg_resources` to `importlib`
|
||||
- modernize typing and type annotations
|
||||
- improve `pydantic==2.x` compatibility
|
||||
- refactor entire logging into separate `modules/logger`
|
||||
- replace `timestamp` based startup checks with state caching
|
||||
- split monolithic `shared` module and introduce `ui_definitions`
|
||||
- modularize all imports and avoid re-imports
|
||||
- use `threading` for deferrable operations
|
||||
- use `threading` for io-independent parallel operations
|
||||
- remove requirements: `clip`, `open-clip`
|
||||
- add new build of `insightface`, thanks @hameerabbasi
|
||||
- reduce use of generators with ui interactor
|
||||
- better subprocess execute, thanks @awsr
|
||||
- better wslopen handling, thanks @awsr
|
||||
- refactor for PEP-484 compliance, thanks @awsr
|
||||
- detect active `venv`
|
||||
- **Obsolete**
|
||||
- remove `normalbae` pre-processor
|
||||
- remove `dwpose` pre-processor
|
||||
- remove `hdm` model support
|
||||
- remove `xadapter` script
|
||||
- remove `codeformer` and `gfpgan` face restorers
|
||||
- **Checks**
|
||||
- switch to `pyproject.toml` for tool configs
|
||||
- update `lint` rules, thanks @awsr
|
||||
- add `ty` to optional lint tooling
|
||||
- add `pyright` to optional lint tooling
|
||||
- **Fixes**
|
||||
- ui `gallery` cache recursive cleanup, thanks @awsr
|
||||
- ui main results pane sizing
|
||||
- ui connection monitor
|
||||
- handle `clip` installer doing unwanted `setuptools` update
|
||||
- cleanup for `uv` installer fallback
|
||||
- add `metadata` restore to always-on scripts
|
||||
- improve `wildcard` weights parsing, thanks @Tillerz
|
||||
- model detection for `anima`
|
||||
- handle `lora` unwanted unload
|
||||
- improve `preview` error handler
|
||||
- handle `gallery` over remote/unsecure connections
|
||||
- fix `ltx2-i2v`
|
||||
- handle missing `preview` image
|
||||
- kandinsky 5 t2i/i2i model type detection
|
||||
- kanvas notify core on image size change
|
||||
- command arg `--reinstall` stricter enforcement
|
||||
- handle `api` state reset
|
||||
- processing upscaler refresh button
|
||||
- simplify and validate `rembg` dependencies
|
||||
- improve video generation progress tracking
|
||||
- handle startup with bad `scripts` more gracefully
|
||||
- thread-safety for `error-limiter`, thanks @awsr
|
||||
- add `lora` support for flux2-klein
|
||||
- fix `lora` change when used with `sdnq`
|
||||
- multiple `sdnq` fixes
|
||||
- handle `taesd` init errors
|
||||
|
||||
## Update for 2026-02-04
|
||||
|
||||
### Highlights for 2026-02-04
|
||||
|
||||
Refresh release two weeks after prior release, yet we still somehow managed to pack in *~150 commits*!
|
||||
Highlights would be two new models: **Z-Image-Base** and **Anima**, *captioning* support for **tagger** models and a massive addition of new **schedulers**
|
||||
Also here are updates to `torch` and additional GPU archs support for `ROCm` backends, plus a lot of internal improvements and fixes.
|
||||
|
||||
[ReadMe](https://github.com/vladmandic/automatic/blob/master/README.md) | [ChangeLog](https://github.com/vladmandic/automatic/blob/master/CHANGELOG.md) | [Docs](https://vladmandic.github.io/sdnext-docs/) | [WiKi](https://github.com/vladmandic/automatic/wiki) | [Discord](https://discord.com/invite/sd-next-federal-batch-inspectors-1101998836328697867) | [Sponsor](https://github.com/sponsors/vladmandic)
|
||||
|
||||
### Details for 2026-02-04
|
||||
|
||||
- **Models**
|
||||
- [Tongyi-MAI Z-Image Base](https://tongyi-mai.github.io/Z-Image-blog/)
|
||||
yup, it's finally here, the full base model of **Z-Image**
|
||||
- [CircleStone Anima](https://huggingface.co/circlestone-labs/Anima)
|
||||
2B anime optimized model based on a modified Cosmos-Predict, using Qwen3-0.6B as a text encoder
|
||||
- **Features**
|
||||
- **caption** tab support for Booru tagger models, thanks @CalamitousFelicitousness
|
||||
- add SmilingWolf WD14/WaifuDiffusion tagger models, thanks @CalamitousFelicitousness
|
||||
- support comments in wildcard files, using `#`
|
||||
- support aliases in metadata skip params, thanks @CalamitousFelicitousness
|
||||
- ui gallery improve cache cleanup and add manual option, thanks @awsr
|
||||
- selectable options to add system info to metadata, thanks @Athari
|
||||
see *settings -> image metadata*
|
||||
- **Schedulers**
|
||||
- schedulers documentation has new home: <https://vladmandic.github.io/sdnext-docs/Schedulers/>
|
||||
- add 13(!) new scheduler families
|
||||
not a port, but more of inspired-by [res4lyf](https://github.com/ClownsharkBatwing/RES4LYF) library
|
||||
all schedulers should be compatible with both `epsilon` and `flow` prediction style!
|
||||
*note*: each family may have multiple actual schedulers, so the list total is 56(!) new schedulers
|
||||
- core family: *RES*
|
||||
- exponential: *DEIS, ETD, Lawson, ABNorsett*
|
||||
- integrators: *Runge-Kutta, Linear-RK, Specialized-RK, Lobatto, Radau-IIA, Gauss-Legendre*
|
||||
- flow: *PEC, Riemannian, Euclidean, Hyperbolic, Lorentzian, Langevin-Dynamics*
|
||||
- add 3 additional schedulers: *CogXDDIM, DDIMParallel, DDPMParallel*
|
||||
not originally intended to be a general purpose schedulers, but they work quite nicely and produce good results
|
||||
- image metadata: always log scheduler class used
|
||||
- **API**
|
||||
- add `/sdapi/v1/xyz-grid` to enumerate xyz-grid axis options and their choices
|
||||
see `/cli/api-xyzenum.py` for example usage
|
||||
- add `/sdapi/v1/sampler` to get current sampler config
|
||||
- modify `/sdapi/v1/samplers` to enumerate available samplers possible options
|
||||
see `/cli/api-samplers.py` for example usage
|
||||
- **Internal**
|
||||
- tagged release history: <https://github.com/vladmandic/sdnext/tags>
|
||||
each major for the past year is now tagged for easier reference
|
||||
- **torch** update
|
||||
*note*: may cause slow first startup/generate
|
||||
**cuda**: update to `torch==2.10.0`
|
||||
**xpu**: update to `torch==2.10.0`
|
||||
**rocm**: update to `torch==2.10.0`
|
||||
**openvino**: update to `torch==2.10.0` and `openvino==2025.4.1`
|
||||
- rocm: expand available gfx archs, thanks @crashingalexsan
|
||||
- rocm: set `MIOPEN_FIND_MODE=2` by default, thanks @crashingalexsan
|
||||
- relocate all json data files to `data/` folder
|
||||
existing data files are auto-migrated on startup
|
||||
- refactor and improve connection monitor, thanks @awsr
|
||||
- further work on type consistency and type checking, thanks @awsr
|
||||
- log captured exceptions
|
||||
- improve temp folder handling and cleanup
|
||||
- remove torch errors/warnings on fast server shutdown
|
||||
- add ui placeholders for future agent-scheduler work, thanks @ryanmeador
|
||||
- implement abort system on repeated errors, thanks @awsr
|
||||
currently used by lora and textual-inversion loaders
|
||||
- update package requirements
|
||||
- **Fixes**
|
||||
- add video ui elem_ids, thanks @ryanmeador
|
||||
- use base steps as-is for non sd/sdxl models
|
||||
- ui css fixes for modernui
|
||||
- support lora inside prompt selector
|
||||
- framepack video save
|
||||
- metadata save for manual saves
|
||||
|
||||
## Update for 2026-01-22
|
||||
|
||||
Bugfix refresh
|
||||
|
||||
- add `SD_DEVICE_DEBUG` env variable to trace rocm/xpu/directml init failures
|
||||
- fix detailer double save
|
||||
- fix lora load when using peft/diffusers loader
|
||||
|
|
@ -43,7 +342,7 @@ For full list of changes, see full changelog.
|
|||
available in both *original* and *sdnq-dynamic prequantized* variants
|
||||
thanks @CalamitousFelicitousness
|
||||
*note*: model requires pre-release versions of `transformers` package:
|
||||
> pip install --upgrade git+https://github.com/huggingface/transformers.git
|
||||
> pip install --upgrade git+<https://github.com/huggingface/transformers.git>
|
||||
> ./webui.sh --experimental
|
||||
- [Nunchaku Z-Image Turbo](https://huggingface.co/nunchaku-tech/nunchaku-z-image-turbo)
|
||||
nunchaku optimized z-image turbo
|
||||
|
|
@ -137,7 +436,7 @@ End of year release update, just two weeks after previous one, with several new
|
|||
- **Models**
|
||||
- [LongCat Image](https://github.com/meituan-longcat/LongCat-Image) in *Image* and *Image Edit* variants
|
||||
LongCat is a new 8B diffusion base model using Qwen-2.5 as text encoder
|
||||
- [Qwen-Image-Edit 2511](Qwen/Qwen-Image-Edit-2511) in *base* and *pre-quantized* variants
|
||||
- [Qwen-Image-Edit 2511](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) in *base* and *pre-quantized* variants
|
||||
Key enhancements: mitigate image drift, improved character consistency, enhanced industrial design generation, and strengthened geometric reasoning ability
|
||||
- [Qwen-Image-Layered](https://huggingface.co/Qwen/Qwen-Image-Layered) in *base* and *pre-quantized* variants
|
||||
Qwen-Image-Layered, a model capable of decomposing an image into multiple RGBA layers
|
||||
|
|
@ -204,9 +503,9 @@ Plus a lot of internal improvements and fixes
|
|||
**Z-Image** is a powerful and highly efficient image generation model with 6B parameters and using Qwen-3 as text encoder
|
||||
unlike most of new models that are far larger, Z-Image architecture allows it to run with good performance even on mid-range hardware
|
||||
*note*: initial release is *Turbo* variant only with *Base* and *Edit* variants to follow
|
||||
- [Kandinsky 5.0 Lite]() is a new 6B model using Qwen-2.5 as text encoder
|
||||
- [Kandinsky 5.0 Lite](https://huggingface.co/kandinskylab/Kandinsky-5.0-I2V-Lite-5s-Diffusers) is a new 6B model using Qwen-2.5 as text encoder
|
||||
it comes in text-to-image and image-edit variants
|
||||
- **Google Gemini Nano Banana** [2.5 Flash](https://blog.google/products/gemini/gemini-nano-banana-examples/) and [3.0 Pro](https://deepmind.google/models/gemini-image/pro/)
|
||||
- **Google Gemini Nano Banana** [2.5 Flash](https://blog.google/products/gemini/gemini-nano-banana-examples/) and [3.0 Pro](https://deepmind.google/models/gemini-image/pro/)
|
||||
first cloud-based model directly supported in SD.Next UI
|
||||
*note*: need to set `GOOGLE_API_KEY` environment variable with your key to use this model
|
||||
- [Photoroom PRX 1024 Beta](https://huggingface.co/Photoroom/prx-1024-t2i-beta)
|
||||
|
|
@ -238,7 +537,7 @@ Plus a lot of internal improvements and fixes
|
|||
- ui indicator of model capabilities
|
||||
- support for *prefill* style of prompting/answering
|
||||
- support for *reasoning* mode for supported models
|
||||
with option to output answer-only or reasoning-process
|
||||
with option to output answer-only or reasoning-process
|
||||
- additional debug logging
|
||||
- **Other Features**
|
||||
- **wildcards**: allow recursive inline wildcards using curly braces syntax
|
||||
|
|
@ -297,9 +596,11 @@ Plus a lot of internal improvements and fixes
|
|||
- control: safe load non-sparse controlnet
|
||||
- control: fix marigold preprocessor with bfloat16
|
||||
- auth: fix password being shown in clear text during login
|
||||
- github: better handling of forks
|
||||
- firefox: remove obsolete checks, thanks @awsr
|
||||
- runai streamer: cleanup logging, thanks @CalamitousFelicitousness
|
||||
- gradio: event handlers, thanks @awsr
|
||||
- seedvr: handle non-cuda environments, thanks @resonantsky
|
||||
|
||||
## Update for 2025-11-06
|
||||
|
||||
|
|
@ -562,7 +863,7 @@ Highlight are:
|
|||
requires qwen-image-edit-2509 or its variant as multi-image edits are not available in original qwen-image
|
||||
in ui control tab: inputs -> separate init image
|
||||
add image for *input media* and *control media*
|
||||
can be
|
||||
can be
|
||||
- [Cache-DiT](https://github.com/vipshop/cache-dit)
|
||||
cache-dit is a unified, flexible and training-free cache acceleration framework
|
||||
compatible with many dit-based models such as FLUX.1, Qwen, HunyuanImage, Wan2.2, Chroma, etc.
|
||||
|
|
@ -777,7 +1078,7 @@ And check out new **history** tab in the right panel, it now shows visualization
|
|||
- update openvino to `openvino==2025.3.0`
|
||||
- add deprecation warning for `python==3.9`
|
||||
- allow setting denoise strength to 0 in control/img2img
|
||||
this allows to run workflows which only refine or detail existing image without changing it
|
||||
this allows to run workflows which only refine or detail existing image without changing it
|
||||
- **Fixes**
|
||||
- normalize path handling when deleting images
|
||||
- unified compile upscalers
|
||||
|
|
@ -863,7 +1164,7 @@ New release two weeks after the last one and its a big one with over 150 commits
|
|||
- Several new models: [Qwen-Image](https://qwenlm.github.io/blog/qwen-image/) (plus *Lightning* variant) and [FLUX.1-Krea-Dev](https://www.krea.ai/blog/flux-krea-open-source-release)
|
||||
- Several updated models: [Chroma](https://huggingface.co/lodestones/Chroma), [SkyReels-V2](https://huggingface.co/Skywork/SkyReels-V2-DF-14B-720P-Diffusers), [Wan-VACE](https://huggingface.co/Wan-AI/Wan2.1-VACE-14B-diffusers), [HunyuanDiT](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers-Distilled)
|
||||
- Plus continuing with major **UI** work with new embedded **Docs/Wiki** search, redesigned real-time **hints**, **wildcards** UI selector, built-in **GPU monitor**, **CivitAI** integration and more!
|
||||
- On the compute side, new profiles for high-vram GPUs, offloading improvements, parallel-load for large models, support for new `torch` release and improved quality when using low-bit quantization!
|
||||
- On the compute side, new profiles for high-vram GPUs, offloading improvements, parallel-load for large models, support for new `torch` release and improved quality when using low-bit quantization!
|
||||
- [SD.Next Model Samples Gallery](https://vladmandic.github.io/sd-samples/compare.html): pre-generated image gallery with 60 models (45 base and 15 finetunes) and 40 different styles resulting in 2,400 high resolution images!
|
||||
gallery additionally includes model details such as typical load and inference times as well as sizes and types of each model component (*e.g. unet, transformer, text-encoder, vae*)
|
||||
- And (*as always*) many bugfixes and improvements to existing features!
|
||||
|
|
|
|||
121
README.md
121
README.md
|
|
@ -1,8 +1,14 @@
|
|||
<div align="center">
|
||||
<img src="https://github.com/vladmandic/sdnext/raw/master/html/logo-transparent.png" width=200 alt="SD.Next">
|
||||
<img src="https://github.com/vladmandic/sdnext/raw/master/html/logo-transparent.png" width=200 alt="SD.Next: AI art generator logo">
|
||||
|
||||
# SD.Next: All-in-one WebUI for AI generative image and video creation
|
||||
# SD.Next: All-in-one WebUI
|
||||
|
||||
SD.Next is a powerful, open-source WebUI app for AI image and video generation, built on Stable Diffusion and supporting dozens of advanced models. Create, caption, and process images and videos with a modern, cross-platform interface—perfect for artists, researchers, and AI enthusiasts.
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
[](https://discord.gg/VjvR2tabEX)
|
||||
|
|
@ -17,65 +23,63 @@
|
|||
## Table of contents
|
||||
|
||||
- [Documentation](https://vladmandic.github.io/sdnext-docs/)
|
||||
- [SD.Next Features](#sdnext-features)
|
||||
- [Model support](#model-support)
|
||||
- [Platform support](#platform-support)
|
||||
- [SD.Features](#features--capabilities)
|
||||
- [Supported AI Models](#supported-ai-models)
|
||||
- [Supported Platforms & Hardware](#supported-platforms--hardware)
|
||||
- [Getting started](#getting-started)
|
||||
|
||||
## SD.Next Features
|
||||
### Screenshot: Desktop interface
|
||||
|
||||
All individual features are not listed here, instead check [ChangeLog](CHANGELOG.md) for full list of changes
|
||||
- Fully localized:
|
||||
▹ **English | Chinese | Russian | Spanish | German | French | Italian | Portuguese | Japanese | Korean**
|
||||
- Multiple UIs!
|
||||
▹ **Standard | Modern**
|
||||
- Multiple [diffusion models](https://vladmandic.github.io/sdnext-docs/Model-Support/)!
|
||||
- Built-in Control for Text, Image, Batch and Video processing!
|
||||
- Multi-platform!
|
||||
▹ **Windows | Linux | MacOS | nVidia CUDA | AMD ROCm | Intel Arc / IPEX XPU | DirectML | OpenVINO | ONNX+Olive | ZLUDA**
|
||||
<div align="center">
|
||||
<img src="https://github.com/vladmandic/sdnext/raw/dev/html/screenshot-robot.jpg" alt="SD.Next: AI art generator desktop interface screenshot" width="90%">
|
||||
</div>
|
||||
|
||||
### Screenshot: Mobile interface
|
||||
|
||||
<div align="center">
|
||||
<img src="https://github.com/user-attachments/assets/ced9fe0c-d2c2-46d1-94a7-8f9f2307ce38" alt="SD.Next: AI art generator mobile interface screenshot" width="35%">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<br>
|
||||
|
||||
## Features & Capabilities
|
||||
|
||||
SD.Next is feature-rich with a focus on performance, flexibility, and user experience. Key features include:
|
||||
- [Multi-platform](#supported-platforms--hardware)!
|
||||
- Many [diffusion models](https://vladmandic.github.io/sdnext-docs/Model-Support/)!
|
||||
- Fully localized to ~15 languages and with support for many [UI themes](https://vladmandic.github.io/sdnext-docs/Themes/)!
|
||||
- [Desktop](#screenshot-desktop-interface) and [Mobile](#screenshot-mobile-interface) support!
|
||||
- Platform specific auto-detection and tuning performed on install
|
||||
- Optimized processing with latest `torch` developments with built-in support for model compile and quantize
|
||||
Compile backends: *Triton | StableFast | DeepCache | OneDiff | TeaCache | etc.*
|
||||
Quantization methods: *SDNQ | BitsAndBytes | Optimum-Quanto | TorchAO / LayerWise*
|
||||
- **Interrogate/Captioning** with 150+ **OpenCLiP** models and 20+ built-in **VLMs**
|
||||
- Built-in queue management
|
||||
- Built in installer with automatic updates and dependency management
|
||||
- Mobile compatible
|
||||
|
||||
### Unique features
|
||||
|
||||
SD.Next includes many features not found in other WebUIs, such as:
|
||||
- **SDNQ**: State-of-the-Art quantization engine
|
||||
Use pre-quantized or run with quantization on-the-fly for up to 4x VRAM reduction with no or minimal quality and performance impact
|
||||
- **Balanced Offload**: Dynamically balance CPU and GPU memory to run larger models on limited hardware
|
||||
- **Captioning** with 150+ **OpenCLiP** models, **Tagger** with **WaifuDiffusion** and **DeepDanbooru** models, and 25+ built-in **VLMs**
|
||||
- **Image Processing** with full image correction color-grading suite of tools
|
||||
|
||||
<br>
|
||||
|
||||
**Desktop** interface
|
||||
<div align="center">
|
||||
<img src="https://github.com/user-attachments/assets/d6119a63-6ee5-4597-95f6-29ed0701d3b5" alt="screenshot-modernui-desktop" width="90%">
|
||||
</div>
|
||||
|
||||
**Mobile** interface
|
||||
<div align="center">
|
||||
<img src="https://github.com/user-attachments/assets/ced9fe0c-d2c2-46d1-94a7-8f9f2307ce38" alt="screenshot-modernui-mobile" width="35%">
|
||||
</div>
|
||||
|
||||
For screenshots and information on other available themes, see [Themes](https://vladmandic.github.io/sdnext-docs/Themes/)
|
||||
|
||||
<br>
|
||||
|
||||
## Model support
|
||||
## Supported AI Models
|
||||
|
||||
SD.Next supports a broad range of models: [supported models](https://vladmandic.github.io/sdnext-docs/Model-Support/) and [model specs](https://vladmandic.github.io/sdnext-docs/Models/)
|
||||
|
||||
## Platform support
|
||||
## Supported Platforms & Hardware
|
||||
|
||||
- *nVidia* GPUs using **CUDA** libraries on both *Windows and Linux*
|
||||
- *AMD* GPUs using **ROCm** libraries on *Linux*
|
||||
Support will be extended to *Windows* once AMD releases ROCm for Windows
|
||||
- *AMD* GPUs using **ROCm** libraries on both *Linux and Windows*
|
||||
- *AMD* GPUs on Windows using **ZLUDA** libraries
|
||||
- *Intel Arc* GPUs using **OneAPI** with *IPEX XPU* libraries on both *Windows and Linux*
|
||||
- Any *CPU/GPU* or device compatible with **OpenVINO** libraries on both *Windows and Linux*
|
||||
- Any GPU compatible with *DirectX* on *Windows* using **DirectML** libraries
|
||||
This includes support for AMD GPUs that are not supported by native ROCm libraries
|
||||
- Any GPU or device compatible with **OpenVINO** libraries on both *Windows and Linux*
|
||||
- *Apple M1/M2* on *OSX* using built-in support in Torch with **MPS** optimizations
|
||||
- *ONNX/Olive*
|
||||
- *AMD* GPUs on Windows using **ZLUDA** libraries
|
||||
|
||||
Plus Docker container recipes for: [CUDA, ROCm, Intel IPEX and OpenVINO](https://vladmandic.github.io/sdnext-docs/Docker/)
|
||||
Plus **Docker** container recipes for: [CUDA, ROCm, Intel IPEX and OpenVINO](https://vladmandic.github.io/sdnext-docs/Docker/)
|
||||
|
||||
## Getting started
|
||||
|
||||
|
|
@ -88,21 +92,37 @@ Plus Docker container recipes for: [CUDA, ROCm, Intel IPEX and OpenVINO](https:/
|
|||
> And for platform specific information, check out
|
||||
> [WSL](https://vladmandic.github.io/sdnext-docs/WSL/) | [Intel Arc](https://vladmandic.github.io/sdnext-docs/Intel-ARC/) | [DirectML](https://vladmandic.github.io/sdnext-docs/DirectML/) | [OpenVINO](https://vladmandic.github.io/sdnext-docs/OpenVINO/) | [ONNX & Olive](https://vladmandic.github.io/sdnext-docs/ONNX-Runtime/) | [ZLUDA](https://vladmandic.github.io/sdnext-docs/ZLUDA/) | [AMD ROCm](https://vladmandic.github.io/sdnext-docs/AMD-ROCm/) | [MacOS](https://vladmandic.github.io/sdnext-docs/MacOS-Python/) | [nVidia](https://vladmandic.github.io/sdnext-docs/nVidia/) | [Docker](https://vladmandic.github.io/sdnext-docs/Docker/)
|
||||
|
||||
### Quick Start
|
||||
|
||||
```shell
|
||||
git clone https://github.com/vladmandic/sdnext
|
||||
cd sdnext
|
||||
./webui.sh # Linux/Mac
|
||||
webui.bat # Windows
|
||||
webui.ps1 # PowerShell
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> If you run into issues, check out [troubleshooting](https://vladmandic.github.io/sdnext-docs/Troubleshooting/) and [debugging](https://vladmandic.github.io/sdnext-docs/Debug/) guides
|
||||
|
||||
|
||||
## Community & Support
|
||||
|
||||
If you're unsure how to use a feature, best place to start is [Docs](https://vladmandic.github.io/sdnext-docs/) and if it's not there,
|
||||
check [ChangeLog](https://vladmandic.github.io/sdnext-docs/CHANGELOG/) for when feature was first introduced as it will always have a short note on how to use it
|
||||
|
||||
And for any question, reach out on [Discord](https://discord.gg/VjvR2tabEX) or open an [issue](https://github.com/vladmandic/sdnext/issues) or [discussion](https://github.com/vladmandic/sdnext/discussions)
|
||||
|
||||
### Contributing
|
||||
|
||||
Please see [Contributing](CONTRIBUTING) for details on how to contribute to this project
|
||||
And for any question, reach out on [Discord](https://discord.gg/VjvR2tabEX) or open an [issue](https://github.com/vladmandic/sdnext/issues) or [discussion](https://github.com/vladmandic/sdnext/discussions)
|
||||
|
||||
### Credits
|
||||
## License & Credits
|
||||
|
||||
- SD.Next is licensed under the [Apache License 2.0](LICENSE.txt)
|
||||
- Main credit goes to [Automatic1111 WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) for the original codebase
|
||||
- Additional credits are listed in [Credits](https://github.com/AUTOMATIC1111/stable-diffusion-webui/#credits)
|
||||
- Licenses for modules are listed in [Licenses](html/licenses.html)
|
||||
|
||||
### Evolution
|
||||
## Evolution
|
||||
|
||||
<a href="https://star-history.com/#vladmandic/sdnext&Date">
|
||||
<picture width=640>
|
||||
|
|
@ -113,9 +133,4 @@ And for any question, reach out on [Discord](https://discord.gg/VjvR2tabEX) or o
|
|||
|
||||
- [OSS Stats](https://ossinsight.io/analyze/vladmandic/sdnext#overview)
|
||||
|
||||
### Docs
|
||||
|
||||
If you're unsure how to use a feature, best place to start is [Docs](https://vladmandic.github.io/sdnext-docs/) and if it's not there,
|
||||
check [ChangeLog](https://vladmandic.github.io/sdnext-docs/CHANGELOG/) for when feature was first introduced as it will always have a short note on how to use it
|
||||
|
||||
<br>
|
||||
|
|
|
|||
231
TODO.md
231
TODO.md
|
|
@ -1,137 +1,150 @@
|
|||
# TODO
|
||||
|
||||
## Project Board
|
||||
|
||||
- <https://github.com/users/vladmandic/projects>
|
||||
<https://github.com/huggingface/diffusers/pull/13317>
|
||||
|
||||
## Internal
|
||||
|
||||
- Feature: Move `nunchaku` models to reference instead of internal decision
|
||||
- Update: `transformers==5.0.0`
|
||||
- Feature: Unify *huggingface* and *diffusers* model folders
|
||||
- Reimplement `llama` remover for Kanvas
|
||||
- Deploy: Create executable for SD.Next
|
||||
- Feature: implement `unload_auxiliary_models`
|
||||
- Feature: RIFE update
|
||||
- Feature: RIFE in processing
|
||||
- Feature: SeedVR2 in processing
|
||||
- Feature: Add video models to `Reference`
|
||||
- Deploy: Lite vs Expert mode
|
||||
- Engine: [mmgp](https://github.com/deepbeepmeep/mmgp)
|
||||
- Engine: `TensorRT` acceleration
|
||||
- Feature: Auto handle scheduler `prediction_type`
|
||||
- Feature: Cache models in memory
|
||||
- Feature: JSON image metadata
|
||||
- Validate: Control tab add overrides handling
|
||||
- Feature: Integrate natural language image search
|
||||
[ImageDB](https://github.com/vladmandic/imagedb)
|
||||
- Feature: Remote Text-Encoder support
|
||||
- Refactor: move sampler options to settings to config
|
||||
- Feature: Multi-user support
|
||||
- Feature: Settings profile manager
|
||||
- Feature: Video tab add full API support
|
||||
- Refactor: Unify *huggingface* and *diffusers* model folders
|
||||
- Refactor: [GGUF](https://huggingface.co/docs/diffusers/main/en/quantization/gguf)
|
||||
- Feature: LoRA add OMI format support for SD35/FLUX.1
|
||||
- Refactor: remove `CodeFormer`
|
||||
- Refactor: remove `GFPGAN`
|
||||
- UI: Lite vs Expert mode
|
||||
- Video tab: add full API support
|
||||
- Control tab: add overrides handling
|
||||
- Engine: `TensorRT` acceleration
|
||||
- Engine: [mmgp](https://github.com/deepbeepmeep/mmgp)
|
||||
- Engine: [sharpfin](https://github.com/drhead/sharpfin) instead of `torchvision`
|
||||
- Reimplement `llama` remover for Kanvas
|
||||
- Integrate: [Depth3D](https://github.com/vladmandic/sd-extension-depth3d)
|
||||
|
||||
## OnHold
|
||||
|
||||
- Feature: LoRA add OMI format support for SD35/FLUX.1, on-hold
|
||||
- Feature: Remote Text-Encoder support, sidelined for the moment
|
||||
|
||||
## Modular
|
||||
|
||||
*Pending finalization of modular pipelines implementation and development of compatibility layer*
|
||||
|
||||
- Switch to modular pipelines
|
||||
- Feature: Transformers unified cache handler
|
||||
- Refactor: [Modular pipelines and guiders](https://github.com/huggingface/diffusers/issues/11915)
|
||||
- [MagCache](https://github.com/lllyasviel/FramePack/pull/673/files)
|
||||
- [MagCache](https://github.com/huggingface/diffusers/pull/12744)
|
||||
- [SmoothCache](https://github.com/huggingface/diffusers/issues/11135)
|
||||
|
||||
## Features
|
||||
|
||||
- [Flux.2 TinyVAE](https://huggingface.co/fal/FLUX.2-Tiny-AutoEncoder)
|
||||
- [IPAdapter composition](https://huggingface.co/ostris/ip-composition-adapter)
|
||||
- [IPAdapter negative guidance](https://github.com/huggingface/diffusers/discussions/7167)
|
||||
- [STG](https://github.com/huggingface/diffusers/blob/main/examples/community/README.md#spatiotemporal-skip-guidance)
|
||||
- [Video Inpaint Pipeline](https://github.com/huggingface/diffusers/pull/12506)
|
||||
- [Sonic Inpaint](https://github.com/ubc-vision/sonic)
|
||||
|
||||
### New models / Pipelines
|
||||
## New models / Pipelines
|
||||
|
||||
TODO: Investigate which models are diffusers-compatible and prioritize!
|
||||
|
||||
- [Bria FiboEdit](https://github.com/huggingface/diffusers/commit/d7a1c31f4f85bae5a9e01cdce49bd7346bd8ccd6)
|
||||
- [LTXVideo 0.98 LongMulti](https://github.com/huggingface/diffusers/pull/12614)
|
||||
- [Cosmos-Predict-2.5](https://huggingface.co/nvidia/Cosmos-Predict2.5-2B)
|
||||
- [NewBie Image Exp0.1](https://github.com/huggingface/diffusers/pull/12803)
|
||||
- [Sana-I2V](https://github.com/huggingface/diffusers/pull/12634#issuecomment-3540534268)
|
||||
- [Bria FIBO](https://huggingface.co/briaai/FIBO)
|
||||
- [Bytedance Lynx](https://github.com/bytedance/lynx)
|
||||
- [ByteDance OneReward](https://github.com/bytedance/OneReward)
|
||||
- [ByteDance USO](https://github.com/bytedance/USO)
|
||||
- [Chroma Radiance](https://huggingface.co/lodestones/Chroma1-Radiance)
|
||||
- [Chroma Zeta](https://huggingface.co/lodestones/Zeta-Chroma)
|
||||
- [DiffSynth Studio](https://github.com/modelscope/DiffSynth-Studio)
|
||||
- [DiffusionForcing](https://github.com/kwsong0113/diffusion-forcing-transformer)
|
||||
- [Dream0 guidance](https://huggingface.co/ByteDance/DreamO)
|
||||
- [HunyuanAvatar](https://huggingface.co/tencent/HunyuanVideo-Avatar)
|
||||
- [HunyuanCustom](https://github.com/Tencent-Hunyuan/HunyuanCustom)
|
||||
- [Inf-DiT](https://github.com/zai-org/Inf-DiT)
|
||||
- [Krea Realtime Video](https://huggingface.co/krea/krea-realtime-video)
|
||||
- [LanDiff](https://github.com/landiff/landiff)
|
||||
- [Liquid](https://github.com/FoundationVision/Liquid)
|
||||
- [LongCat-Video](https://huggingface.co/meituan-longcat/LongCat-Video)
|
||||
- [LucyEdit](https://github.com/huggingface/diffusers/pull/12340)
|
||||
- [Lumina-DiMOO](https://huggingface.co/Alpha-VLLM/Lumina-DiMOO)
|
||||
- [Magi](https://github.com/SandAI-org/MAGI-1) ([diffusers PR](https://github.com/huggingface/diffusers/pull/11713))
|
||||
- [Ming](https://github.com/inclusionAI/Ming)
|
||||
- [MUG-V 10B](https://huggingface.co/MUG-V/MUG-V-inference)
|
||||
- [Ovi](https://github.com/character-ai/Ovi)
|
||||
- [Phantom HuMo](https://github.com/Phantom-video/Phantom)
|
||||
- [SD3 UltraEdit](https://github.com/HaozheZhao/UltraEdit)
|
||||
- [SelfForcing](https://github.com/guandeh17/Self-Forcing)
|
||||
- [SEVA](https://github.com/huggingface/diffusers/pull/11440)
|
||||
- [Step1X](https://github.com/stepfun-ai/Step1X-Edit)
|
||||
- [Wan-2.2 Animate](https://github.com/huggingface/diffusers/pull/12526)
|
||||
- [Wan-2.2 S2V](https://github.com/huggingface/diffusers/pull/12258)
|
||||
- [WAN-CausVid-Plus t2v](https://github.com/goatWu/CausVid-Plus/)
|
||||
- [WAN-CausVid](https://huggingface.co/lightx2v/Wan2.1-T2V-14B-CausVid)
|
||||
- [WAN-StepDistill](https://huggingface.co/lightx2v/Wan2.1-T2V-14B-StepDistill-CfgDistill)
|
||||
- [Wan2.2-Animate-14B](https://huggingface.co/Wan-AI/Wan2.2-Animate-14B)
|
||||
- [WAN2GP](https://github.com/deepbeepmeep/Wan2GP)
|
||||
### Image-Base
|
||||
|
||||
### Asyncio
|
||||
- [Chroma Zeta](https://huggingface.co/lodestones/Zeta-Chroma): Image and video generator for creative effects and professional filters
|
||||
- [Chroma Radiance](https://huggingface.co/lodestones/Chroma1-Radiance): Pixel-space model eliminating VAE artifacts for high visual fidelity
|
||||
- [Bria FIBO](https://huggingface.co/briaai/FIBO): Fully JSON based
|
||||
- [Liquid](https://github.com/FoundationVision/Liquid): Unified vision-language auto-regressive generation paradigm
|
||||
- [Lumina-DiMOO](https://huggingface.co/Alpha-VLLM/Lumina-DiMOO): Foundational multi-modal generation and understanding via discrete diffusion
|
||||
- [nVidia Cosmos-Predict-2.5](https://huggingface.co/nvidia/Cosmos-Predict2.5-2B): Physics-aware world foundation model for consistent scene prediction
|
||||
- [Liquid (unified multimodal generator)](https://github.com/FoundationVision/Liquid): Auto-regressive generation paradigm across vision and language
|
||||
- [Lumina-DiMOO](https://huggingface.co/Alpha-VLLM/Lumina-DiMOO): foundational multi-modal multi-task generation and understanding
|
||||
|
||||
- Policy system is deprecated and will be removed in **Python 3.16**
|
||||
- [Python 3.14 removals - asyncio](https://docs.python.org/3.14/whatsnew/3.14.html#id10)
|
||||
- https://docs.python.org/3.14/library/asyncio-policy.html
|
||||
- Affected files:
|
||||
- [`webui.py`](webui.py)
|
||||
- [`cli/sdapi.py`](cli/sdapi.py)
|
||||
- Migration:
|
||||
- [asyncio.run](https://docs.python.org/3.14/library/asyncio-runner.html#asyncio.run)
|
||||
- [asyncio.Runner](https://docs.python.org/3.14/library/asyncio-runner.html#asyncio.Runner)
|
||||
### Image-Edit
|
||||
|
||||
#### rmtree
|
||||
- [Bria FIBO-Edit](https://huggingface.co/briaai/Fibo-Edit-RMBG): Fully JSON-based instruction-following image editing framework
|
||||
- [Meituan LongCat-Image-Edit-Turbo](https://huggingface.co/meituan-longcat/LongCat-Image-Edit-Turbo): 6B instruction-following image editing with high visual consistency
|
||||
- [VIBE Image-Edit](https://huggingface.co/iitolstykh/VIBE-Image-Edit): (Sana+Qwen-VL) Fast visual instruction-based image editing framework
|
||||
- [LucyEdit](https://github.com/huggingface/diffusers/pull/12340): Instruction-guided video editing while preserving motion and identity
|
||||
- [Step1X-Edit](https://github.com/stepfun-ai/Step1X-Edit): Multimodal image editing decoding MLLM tokens via DiT
|
||||
- [OneReward](https://github.com/bytedance/OneReward): Reinforcement learning grounded generative reward model for image editing
|
||||
- [ByteDance DreamO](https://huggingface.co/ByteDance/DreamO): image customization framework for IP adaptation and virtual try-on
|
||||
- [nVidia Cosmos-Transfer-2.5](https://github.com/huggingface/diffusers/pull/13066)
|
||||
|
||||
- `onerror` deprecated and replaced with `onexc` in **Python 3.12**
|
||||
``` python
|
||||
def excRemoveReadonly(func, path, exc: BaseException):
|
||||
import stat
|
||||
shared.log.debug(f'Exception during cleanup: {func} {path} {type(exc).__name__}')
|
||||
if func in (os.rmdir, os.remove, os.unlink) and isinstance(exc, PermissionError):
|
||||
shared.log.debug(f'Retrying cleanup: {path}')
|
||||
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
|
||||
func(path)
|
||||
# ...
|
||||
try:
|
||||
shutil.rmtree(found.path, ignore_errors=False, onexc=excRemoveReadonly)
|
||||
```
|
||||
### Video
|
||||
|
||||
- [LTX-Condition](https://github.com/huggingface/diffusers/pull/13058)
|
||||
- [LTX-Distilled](https://github.com/huggingface/diffusers/pull/12934)
|
||||
- [OpenMOSS MOVA](https://huggingface.co/OpenMOSS-Team/MOVA-720p): Unified foundation model for synchronized high-fidelity video and audio
|
||||
- [Wan family (Wan2.1 / Wan2.2 variants)](https://huggingface.co/Wan-AI/Wan2.2-Animate-14B): MoE-based foundational tools for cinematic T2V/I2V/TI2V
|
||||
example: [Wan2.1-T2V-14B-CausVid](https://huggingface.co/lightx2v/Wan2.1-T2V-14B-CausVid)
|
||||
distill / step-distill examples: [Wan2.1-StepDistill-CfgDistill](https://huggingface.co/lightx2v/Wan2.1-T2V-14B-StepDistill-CfgDistill)
|
||||
- [Krea Realtime Video](https://huggingface.co/krea/krea-realtime-video): (Wan2.1) Distilled real-time video diffusion using self-forcing techniques
|
||||
- [MAGI-1 (autoregressive video)](https://github.com/SandAI-org/MAGI-1): Autoregressive video generation allowing infinite and timeline control
|
||||
- [MUG-V 10B (video generation)](https://huggingface.co/MUG-V/MUG-V-inference): large-scale DiT-based video generation system trained via flow-matching
|
||||
- [Ovi (audio/video generation)](https://github.com/character-ai/Ovi): (Wan2.2) Speech-to-video with synchronized sound effects and music
|
||||
- [HunyuanVideo-Avatar / HunyuanCustom](https://huggingface.co/tencent/HunyuanVideo-Avatar): (HunyuanVideo) MM-DiT based dynamic emotion-controllable dialogue generation
|
||||
- [Sana Image→Video (Sana-I2V)](https://github.com/huggingface/diffusers/pull/12634#issuecomment-3540534268): (Sana) Compact Linear DiT framework for efficient high-resolution video
|
||||
- [Wan-2.2 S2V (diffusers PR)](https://github.com/huggingface/diffusers/pull/12258): (Wan2.2) Audio-driven cinematic speech-to-video generation
|
||||
- [LongCat-Video](https://huggingface.co/meituan-longcat/LongCat-Video): Unified framework for minutes-long coherent video generation via Block Sparse Attention
|
||||
- [LTXVideo / LTXVideo LongMulti (diffusers PR)](https://github.com/huggingface/diffusers/pull/12614): Real-time DiT-based generation with production-ready camera controls
|
||||
- [DiffSynth-Studio (ModelScope)](https://github.com/modelscope/DiffSynth-Studio): (Wan2.2) Comprehensive training and quantization tools for Wan video models
|
||||
- [Phantom (Phantom HuMo)](https://github.com/Phantom-video/Phantom): Human-centric video generation framework focused on subject ID consistency
|
||||
- [CausVid-Plus / WAN-CausVid-Plus](https://github.com/goatWu/CausVid-Plus/): (Wan2.1)Causal diffusion for high-quality temporally consistent long videos
|
||||
- [Wan2GP (workflow/GUI for Wan)](https://github.com/deepbeepmeep/Wan2GP): (Wan)Web-based UI focused on running complex video models for GPU-poor setups
|
||||
- [LivePortrait](https://github.com/KwaiVGI/LivePortrait): Efficient portrait animation system with high stitching and retargeting control
|
||||
- [Magi (SandAI)](https://github.com/SandAI-org/MAGI-1): High-quality autoregressive video generation framework
|
||||
- [Ming (inclusionAI)](https://github.com/inclusionAI/Ming): Unified multimodal model for processing text, audio, image, and video
|
||||
|
||||
### Other/Unsorted
|
||||
|
||||
- [DiffusionForcing](https://github.com/kwsong0113/diffusion-forcing-transformer): Full-sequence diffusion with autoregressive next-token prediction
|
||||
- [Self-Forcing](https://github.com/guandeh17/Self-Forcing): Framework for improving temporal consistency in long-horizon video generation
|
||||
- [SEVA](https://github.com/huggingface/diffusers/pull/11440): Stable Virtual Camera for novel view synthesis and 3D-consistent video
|
||||
- [ByteDance USO](https://github.com/bytedance/USO): Unified Style-Subject Optimized framework for personalized image generation
|
||||
- [ByteDance Lynx](https://github.com/bytedance/lynx): State-of-the-art high-fidelity personalized video generation based on DiT
|
||||
- [LanDiff](https://github.com/landiff/landiff): Coarse-to-fine text-to-video integrating Language and Diffusion Models
|
||||
- [Video Inpaint Pipeline](https://github.com/huggingface/diffusers/pull/12506): Unified inpainting pipeline implementation within Diffusers library
|
||||
- [Sonic Inpaint](https://github.com/ubc-vision/sonic): Audio-driven portrait animation system focused on global audio perception
|
||||
- [Make-It-Count](https://github.com/Litalby1/make-it-count): CountGen method for precise numerical control of objects via object identity features
|
||||
- [ControlNeXt](https://github.com/dvlab-research/ControlNeXt/): Lightweight architecture for efficient controllable image and video generation
|
||||
- [MS-Diffusion](https://github.com/MS-Diffusion/MS-Diffusion): Layout-guided multi-subject image personalization framework
|
||||
- [UniRef](https://github.com/FoundationVision/UniRef): Unified model for segmentation tasks designed as foundation model plug-in
|
||||
- [FlashFace](https://github.com/ali-vilab/FlashFace): High-fidelity human image customization and face swapping framework
|
||||
- [ReNO](https://github.com/ExplainableML/ReNO): Reward-based Noise Optimization to improve text-to-image quality during inference
|
||||
|
||||
### Not Planned
|
||||
|
||||
- [LoRAdapter](https://github.com/CompVis/LoRAdapter): Not recently updated
|
||||
- [SD3 UltraEdit](https://github.com/HaozheZhao/UltraEdit): Based on SD3
|
||||
- [PowerPaint](https://github.com/open-mmlab/PowerPaint): Based on SD15
|
||||
- [FreeCustom](https://github.com/aim-uofa/FreeCustom): Based on SD15
|
||||
- [AnyDoor](https://github.com/ali-vilab/AnyDoor): Based on SD21
|
||||
- [AnyText2](https://github.com/tyxsspa/AnyText2): Based on SD15
|
||||
- [DragonDiffusion](https://github.com/MC-E/DragonDiffusion): Based on SD15
|
||||
- [DenseDiffusion](https://github.com/naver-ai/DenseDiffusion): Based on SD15
|
||||
- [IC-Light](https://github.com/lllyasviel/IC-Light): Based on SD15
|
||||
|
||||
## Code TODO
|
||||
|
||||
> npm run todo
|
||||
|
||||
- fc: autodetect distilled based on model
|
||||
- fc: autodetect tensor format based on model
|
||||
- hypertile: vae breaks when using non-standard sizes
|
||||
- install: switch to pytorch source when it becomes available
|
||||
- loader: load receipe
|
||||
- loader: save receipe
|
||||
- lora: add other quantization types
|
||||
- lora: add t5 key support for sd35/f1
|
||||
- lora: maybe force imediate quantization
|
||||
- model load: force-reloading entire model as loading transformers only leads to massive memory usage
|
||||
- model load: implement model in-memory caching
|
||||
- modernui: monkey-patch for missing tabs.select event
|
||||
- modules/lora/lora_extract.py:188:9: W0511: TODO: lora: support pre-quantized flux
|
||||
- modules/modular_guiders.py:65:58: W0511: TODO: guiders
|
||||
- processing: remove duplicate mask params
|
||||
- resize image: enable full VAE mode for resize-latent
|
||||
|
||||
```code
|
||||
installer.py:TODO rocm: switch to pytorch source when it becomes available
|
||||
modules/control/run.py:TODO modernui: monkey-patch for missing tabs.select event
|
||||
modules/history.py:TODO: apply metadata, preview, load/save
|
||||
modules/image/resize.py:TODO resize image: enable full VAE mode for resize-latent
|
||||
modules/lora/lora_apply.py:TODO lora: add other quantization types
|
||||
modules/lora/lora_apply.py:TODO lora: maybe force imediate quantization
|
||||
modules/lora/lora_extract.py:TODO: lora: support pre-quantized flux
|
||||
modules/lora/lora_load.py:TODO lora: add t5 key support for sd35/f1
|
||||
modules/masking.py:TODO: additional masking algorithms
|
||||
modules/modular_guiders.py:TODO: guiders
|
||||
modules/processing_class.py:TODO processing: remove duplicate mask params
|
||||
modules/sd_hijack_hypertile.py:TODO hypertile: vae breaks when using non-standard sizes
|
||||
modules/sd_models.py:TODO model load: implement model in-memory caching
|
||||
modules/sd_samplers_diffusers.py:TODO enso-required
|
||||
modules/sd_unet.py:TODO model load: force-reloading entire model as loading transformers only leads to massive memory usage
|
||||
modules/transformer_cache.py:TODO fc: autodetect distilled based on model
|
||||
modules/transformer_cache.py:TODO fc: autodetect tensor format based on model
|
||||
modules/ui_models_load.py:TODO loader: load receipe
|
||||
modules/ui_models_load.py:TODO loader: save receipe
|
||||
modules/video_models/video_save.py:TODO audio set time-base
|
||||
```
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
#!/usr/bin/env python
|
||||
"""
|
||||
use clip to interrogate image(s)
|
||||
use clip to caption image(s)
|
||||
"""
|
||||
|
||||
import io
|
||||
|
|
@ -43,64 +43,63 @@ def print_summary():
|
|||
log.info({ 'keyword stats': keywords })
|
||||
|
||||
|
||||
async def interrogate(f):
|
||||
async def caption(f):
|
||||
if not filetype.is_image(f):
|
||||
log.info({ 'interrogate skip': f })
|
||||
log.info({ 'caption skip': f })
|
||||
return
|
||||
json = Map({ 'image': encode(f) })
|
||||
log.info({ 'interrogate': f })
|
||||
log.info({ 'caption': f })
|
||||
# run clip
|
||||
json.model = 'clip'
|
||||
res = await sdapi.post('/sdapi/v1/interrogate', json)
|
||||
caption = ""
|
||||
res = await sdapi.post('/sdapi/v1/caption', json)
|
||||
result = ""
|
||||
style = ""
|
||||
if 'caption' in res:
|
||||
caption = res.caption
|
||||
log.info({ 'interrogate caption': caption })
|
||||
if ', by' in caption:
|
||||
style = caption.split(', by')[1].strip()
|
||||
log.info({ 'interrogate style': style })
|
||||
for word in caption.split(' '):
|
||||
result = res.caption
|
||||
log.info({ 'caption result': result })
|
||||
if ', by' in result:
|
||||
style = result.split(', by')[1].strip()
|
||||
log.info({ 'caption style': style })
|
||||
for word in result.split(' '):
|
||||
if word not in exclude:
|
||||
stats['captions'][word] = stats['captions'][word] + 1 if word in stats['captions'] else 1
|
||||
else:
|
||||
log.error({ 'interrogate clip error': res })
|
||||
# run booru
|
||||
json.model = 'deepdanbooru'
|
||||
res = await sdapi.post('/sdapi/v1/interrogate', json)
|
||||
log.error({ 'caption clip error': res })
|
||||
# run tagger (DeepBooru)
|
||||
tagger_req = Map({'image': json.image, 'model': 'deepbooru', 'show_scores': True})
|
||||
res = await sdapi.post('/sdapi/v1/tagger', tagger_req)
|
||||
keywords = {}
|
||||
if 'caption' in res:
|
||||
for term in res.caption.split(', '):
|
||||
term = term.replace('(', '').replace(')', '').replace('\\', '').split(':')
|
||||
if len(term) < 2:
|
||||
continue
|
||||
keywords[term[0]] = term[1]
|
||||
keywords = dict(sorted(keywords.items(), key=lambda x:x[1], reverse=True))
|
||||
for word in keywords.items():
|
||||
stats['keywords'][word[0]] = stats['keywords'][word[0]] + 1 if word[0] in stats['keywords'] else 1
|
||||
log.info({ 'interrogate keywords': keywords })
|
||||
if 'scores' in res and res.scores:
|
||||
keywords = dict(sorted(res.scores.items(), key=lambda x: x[1], reverse=True))
|
||||
for word in keywords:
|
||||
stats['keywords'][word] = stats['keywords'][word] + 1 if word in stats['keywords'] else 1
|
||||
log.info({'caption keywords': keywords})
|
||||
elif 'tags' in res:
|
||||
for tag in res.tags.split(', '):
|
||||
stats['keywords'][tag] = stats['keywords'][tag] + 1 if tag in stats['keywords'] else 1
|
||||
log.info({'caption tags': res.tags})
|
||||
else:
|
||||
log.error({ 'interrogate booru error': res })
|
||||
return caption, keywords, style
|
||||
log.error({'caption tagger error': res})
|
||||
return result, keywords, style
|
||||
|
||||
|
||||
async def main():
|
||||
sys.argv.pop(0)
|
||||
await sdapi.session()
|
||||
if len(sys.argv) == 0:
|
||||
log.error({ 'interrogate': 'no files specified' })
|
||||
log.error({ 'caption': 'no files specified' })
|
||||
for arg in sys.argv:
|
||||
if os.path.exists(arg):
|
||||
if os.path.isfile(arg):
|
||||
await interrogate(arg)
|
||||
await caption(arg)
|
||||
elif os.path.isdir(arg):
|
||||
for root, _dirs, files in os.walk(arg):
|
||||
for f in files:
|
||||
_caption, _keywords, _style = await interrogate(os.path.join(root, f))
|
||||
_caption, _keywords, _style = await caption(os.path.join(root, f))
|
||||
else:
|
||||
log.error({ 'interrogate unknown file type': arg })
|
||||
log.error({ 'caption unknown file type': arg })
|
||||
else:
|
||||
log.error({ 'interrogate file missing': arg })
|
||||
log.error({ 'caption file missing': arg })
|
||||
await sdapi.close()
|
||||
print_summary()
|
||||
|
||||
|
|
@ -24,7 +24,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def get(endpoint: str, dct: dict = None):
|
||||
def get(endpoint: str, dct: dict | None = None):
|
||||
req = requests.get(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ def generate(x: int, y: int): # pylint: disable=redefined-outer-name
|
|||
return images
|
||||
|
||||
|
||||
def merge(images: list[Image.Image], horizontal: bool, labels: list[str] = None):
|
||||
def merge(images: list[Image.Image], horizontal: bool, labels: list[str] | None = None):
|
||||
rows = 1 if horizontal else len(images)
|
||||
cols = math.ceil(len(images) / rows)
|
||||
w = max([i.size[0] for i in images])
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def get(endpoint: str, dct: dict = None):
|
||||
def get(endpoint: str, dct: dict | None = None):
|
||||
req = requests.get(f'{sd_url}{endpoint}', json=dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
@ -32,7 +32,7 @@ def get(endpoint: str, dct: dict = None):
|
|||
return req.json()
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def post(endpoint: str, payload: dict = None):
|
||||
def post(endpoint: str, payload: dict | None = None):
|
||||
if 'sdapi' not in endpoint:
|
||||
endpoint = f'sdapi/v1/{endpoint}'
|
||||
if 'http' not in endpoint:
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def get(endpoint: str, dct: dict = None):
|
||||
def get(endpoint: str, dct: dict | None = None):
|
||||
req = requests.get(f'{sd_url}{endpoint}', json=dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
@ -34,7 +34,7 @@ def get(endpoint: str, dct: dict = None):
|
|||
return req.json()
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def get(endpoint: str, dct: dict = None):
|
||||
def get(endpoint: str, dct: dict | None = None):
|
||||
req = requests.get(f'{sd_url}{endpoint}', json=dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
@ -34,7 +34,7 @@ def get(endpoint: str, dct: dict = None):
|
|||
return req.json()
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ async function main() {
|
|||
} else {
|
||||
const json = await res.json();
|
||||
console.log('result:', json.info);
|
||||
for (const i in json.images) { // eslint-disable-line guard-for-in
|
||||
for (const i in json.images) {
|
||||
const file = args.output || `/tmp/test-${i}.jpg`;
|
||||
const data = atob(json.images[i]);
|
||||
fs.writeFileSync(file, data, 'binary');
|
||||
|
|
|
|||
|
|
@ -0,0 +1,35 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
get list of all samplers and details of current sampler
|
||||
"""
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import urllib3
|
||||
import requests
|
||||
|
||||
|
||||
url = "http://127.0.0.1:7860"
|
||||
user = ""
|
||||
password = ""
|
||||
|
||||
log_format = '%(asctime)s %(levelname)s: %(message)s'
|
||||
logging.basicConfig(level = logging.INFO, format = log_format)
|
||||
log = logging.getLogger("sd")
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
|
||||
log.info('available samplers')
|
||||
auth = requests.auth.HTTPBasicAuth(user, password) if len(user) > 0 and len(password) > 0 else None
|
||||
req = requests.get(f'{url}/sdapi/v1/samplers', verify=False, auth=auth, timeout=60)
|
||||
if req.status_code != 200:
|
||||
log.error({ 'url': req.url, 'request': req.status_code, 'reason': req.reason })
|
||||
exit(1)
|
||||
res = req.json()
|
||||
for item in res:
|
||||
log.info(item)
|
||||
|
||||
log.info('current sampler')
|
||||
req = requests.get(f'{url}/sdapi/v1/sampler', verify=False, auth=auth, timeout=60)
|
||||
res = req.json()
|
||||
log.info(res)
|
||||
|
|
@ -41,7 +41,7 @@ async function main() {
|
|||
} else {
|
||||
const json = await res.json();
|
||||
console.log('result:', json.info);
|
||||
for (const i in json.images) { // eslint-disable-line guard-for-in
|
||||
for (const i in json.images) {
|
||||
const f = `/tmp/test-${i}.jpg`;
|
||||
fs.writeFileSync(f, atob(json.images[i]), 'binary');
|
||||
console.log('image saved:', f);
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def get(endpoint: str, dct: dict = None):
|
||||
def get(endpoint: str, dct: dict | None = None):
|
||||
req = requests.get(f'{sd_url}{endpoint}', json=dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
@ -32,7 +32,7 @@ def get(endpoint: str, dct: dict = None):
|
|||
return req.json()
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def get(endpoint: str, dct: dict = None):
|
||||
def get(endpoint: str, dct: dict | None = None):
|
||||
req = requests.get(f'{sd_url}{endpoint}', json=dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
@ -32,7 +32,7 @@ def get(endpoint: str, dct: dict = None):
|
|||
return req.json()
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ def auth():
|
|||
return None
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -0,0 +1,42 @@
|
|||
#!/usr/bin/env python
|
||||
import os
|
||||
import logging
|
||||
import requests
|
||||
import urllib3
|
||||
|
||||
|
||||
sd_url = os.environ.get('SDAPI_URL', "http://127.0.0.1:7860")
|
||||
sd_username = os.environ.get('SDAPI_USR', None)
|
||||
sd_password = os.environ.get('SDAPI_PWD', None)
|
||||
options = {
|
||||
"save_images": True,
|
||||
"send_images": True,
|
||||
}
|
||||
|
||||
logging.basicConfig(level = logging.INFO, format = '%(asctime)s %(levelname)s: %(message)s')
|
||||
log = logging.getLogger(__name__)
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
|
||||
|
||||
def auth():
|
||||
if sd_username is not None and sd_password is not None:
|
||||
return requests.auth.HTTPBasicAuth(sd_username, sd_password)
|
||||
return None
|
||||
|
||||
|
||||
def get(endpoint: str, dct: dict | None = None):
|
||||
req = requests.get(f'{sd_url}{endpoint}', json = dct, timeout=300, verify=False, auth=auth())
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
else:
|
||||
return req.json()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
options = get('/sdapi/v1/xyz-grid')
|
||||
log.info(f'api-xyzgrid-options: {len(options)}')
|
||||
for option in options:
|
||||
log.info(f' {option}')
|
||||
details = get('/sdapi/v1/xyz-grid?option=upscaler')
|
||||
for choice in details[0]['choices']:
|
||||
log.info(f' {choice}')
|
||||
|
|
@ -99,10 +99,10 @@ def search_civitai(
|
|||
types:str = '', # (Checkpoint, TextualInversion, Hypernetwork, AestheticGradient, LORA, Controlnet, Poses)
|
||||
sort:str = '', # (Highest Rated, Most Downloaded, Newest)
|
||||
period:str = '', # (AllTime, Year, Month, Week, Day)
|
||||
nsfw:bool = None, # optional:bool
|
||||
nsfw:bool | None = None, # optional:bool
|
||||
limit:int = 0,
|
||||
base:list[str] = [], # list
|
||||
token:str = None,
|
||||
token:str | None = None,
|
||||
exact:bool = True,
|
||||
):
|
||||
import requests
|
||||
|
|
@ -113,6 +113,11 @@ def search_civitai(
|
|||
return []
|
||||
|
||||
t0 = time.time()
|
||||
import re
|
||||
url_match = re.match(r'https?://civitai\.com/models/(\d+)', query.strip())
|
||||
if url_match:
|
||||
query = url_match.group(1)
|
||||
log.info(f'CivitAI: extracted model id={query} from URL')
|
||||
dct = { 'query': query }
|
||||
if len(tag) > 0:
|
||||
dct['tag'] = tag
|
||||
|
|
@ -164,7 +169,7 @@ def search_civitai(
|
|||
return exact_models if len(exact_models) > 0 else models
|
||||
|
||||
|
||||
def models_to_dct(all_models:list, model_id:int=None):
|
||||
def models_to_dct(all_models:list, model_id:int | None=None):
|
||||
dct = []
|
||||
for model in all_models:
|
||||
if model_id is not None and model.id != model_id:
|
||||
|
|
|
|||
|
|
@ -1,301 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# pylint: disable=no-member
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import time
|
||||
import logging
|
||||
import importlib
|
||||
import asyncio
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from util import Map, log
|
||||
from sdapi import get, post, close
|
||||
from generate import generate # pylint: disable=import-error
|
||||
grid = importlib.import_module('image-grid').grid
|
||||
|
||||
|
||||
options = Map({
|
||||
# used by extra networks
|
||||
'prompt': 'photo of <keyword> <embedding>, photograph, posing, pose, high detailed, intricate, elegant, sharp focus, skin texture, looking forward, facing camera, 135mm, shot on dslr, canon 5d, 4k, modelshoot style, cinematic lighting',
|
||||
# used by models
|
||||
'prompts': [
|
||||
('photo citiscape', 'cityscape during night, photorealistic, high detailed, sharp focus, depth of field, 4k'),
|
||||
('photo car', 'photo of a sports car, high detailed, sharp focus, dslr, cinematic lighting, realistic'),
|
||||
('photo woman', 'portrait photo of beautiful woman, high detailed, dslr, 35mm'),
|
||||
('photo naked', 'full body photo of beautiful sexy naked woman, high detailed, dslr, 35mm'),
|
||||
|
||||
('photo taylor', 'portrait photo of beautiful woman taylor swift, high detailed, sharp focus, depth of field, dslr, 35mm <lora:taylor-swift:1>'),
|
||||
('photo ti-mia', 'portrait photo of beautiful woman "ti-mia", naked, high detailed, dslr, 35mm'),
|
||||
('photo ti-vlado', 'portrait photo of man "ti-vlado", high detailed, dslr, 35mm'),
|
||||
('photo lora-vlado', 'portrait photo of man vlado, high detailed, dslr, 35mm <lora:vlado-original:1>'),
|
||||
|
||||
('wlop', 'a stunning portrait of sexy teen girl in a wet t-shirt, vivid color palette, digital painting, octane render, highly detailed, particles, light effect, volumetric lighting, art by wlop'),
|
||||
('greg rutkowski', 'beautiful woman, high detailed, sharp focus, depth of field, 4k, art by greg rutkowski'),
|
||||
('carne griffiths', 'beautiful woman taylor swift, high detailed, sharp focus, depth of field, art by carne griffiths <lora:taylor-swift:1>'),
|
||||
('carne griffiths', 'man vlado, high detailed, sharp focus, depth of field, art by carne griffiths <lora:vlado-full:1>'),
|
||||
],
|
||||
# save format
|
||||
'format': '.jpg',
|
||||
# used by generate script
|
||||
'paths': {
|
||||
"root": "/mnt/c/Users/mandi/OneDrive/Generative/Generate",
|
||||
"generate": "image",
|
||||
"upscale": "upscale",
|
||||
"grid": "grid",
|
||||
},
|
||||
# generate params
|
||||
'generate': {
|
||||
'detailer': True,
|
||||
'prompt': '',
|
||||
'negative_prompt': 'foggy, blurry, blurred, duplicate, ugly, mutilated, mutation, mutated, out of frame, bad anatomy, disfigured, deformed, censored, low res, low resolution, watermark, text, poorly drawn face, poorly drawn hands, signature',
|
||||
'steps': 20,
|
||||
'batch_size': 2,
|
||||
'n_iter': 1,
|
||||
'seed': -1,
|
||||
'sampler_name': 'UniPC',
|
||||
'cfg_scale': 6,
|
||||
'width': 512,
|
||||
'height': 512,
|
||||
},
|
||||
'lora': {
|
||||
'strength': 1.0,
|
||||
},
|
||||
})
|
||||
|
||||
|
||||
def preview_exists(folder, model):
|
||||
model = os.path.splitext(model)[0]
|
||||
for suffix in ['', '.preview']:
|
||||
for ext in ['.jpg', '.png', '.webp']:
|
||||
fn = os.path.join(folder, f'{model}{suffix}{ext}')
|
||||
if os.path.exists(fn):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
async def preview_models(params):
|
||||
data = await get('/sdapi/v1/sd-models')
|
||||
allmodels = [m['title'] for m in data]
|
||||
models = []
|
||||
excluded = []
|
||||
for m in allmodels: # loop through all registered models
|
||||
ok = True
|
||||
for e in params.exclude: # check if model is excluded
|
||||
if e in m:
|
||||
excluded.append(m)
|
||||
ok = False
|
||||
break
|
||||
if ok:
|
||||
short = m.split(' [')[0]
|
||||
short = short.replace('.ckpt', '').replace('.safetensors', '')
|
||||
models.append(short)
|
||||
if len(params.input) > 0: # check if model is included in cmd line
|
||||
filtered = []
|
||||
for m in params.input:
|
||||
if m in models:
|
||||
filtered.append(m)
|
||||
else:
|
||||
log.error({ 'model not found': m })
|
||||
return
|
||||
models = filtered
|
||||
log.info({ 'models preview' })
|
||||
log.info({ 'models': len(models), 'excluded': len(excluded) })
|
||||
opt = await get('/sdapi/v1/options')
|
||||
log.info({ 'total jobs': len(models) * options.generate.batch_size, 'per-model': options.generate.batch_size })
|
||||
log.info(json.dumps(options, indent=2))
|
||||
for model in models:
|
||||
if preview_exists(opt['ckpt_dir'], model) and len(params.input) == 0: # if model preview exists and not manually included
|
||||
log.info({ 'model preview exists': model })
|
||||
continue
|
||||
fn = os.path.join(opt['ckpt_dir'], os.path.splitext(model)[0] + options.format)
|
||||
log.info({ 'model load': model })
|
||||
|
||||
opt['sd_model_checkpoint'] = model
|
||||
del opt['sd_lora']
|
||||
del opt['sd_lyco']
|
||||
await post('/sdapi/v1/options', opt)
|
||||
opt = await get('/sdapi/v1/options')
|
||||
images = []
|
||||
labels = []
|
||||
t0 = time.time()
|
||||
for label, p in options.prompts:
|
||||
options.generate.prompt = p
|
||||
log.info({ 'model generating': model, 'label': label, 'prompt': options.generate.prompt })
|
||||
data = await generate(options = options, quiet=True)
|
||||
if 'image' in data:
|
||||
for img in data['image']:
|
||||
images.append(img)
|
||||
labels.append(label)
|
||||
else:
|
||||
log.error({ 'model': model, 'error': data })
|
||||
t1 = time.time()
|
||||
if len(images) == 0:
|
||||
log.error({ 'model': model, 'error': 'no images generated' })
|
||||
continue
|
||||
image = grid(images = images, labels = labels, border = 8)
|
||||
log.info({ 'saving preview': fn, 'images': len(images), 'size': [image.width, image.height] })
|
||||
image.save(fn)
|
||||
t = t1 - t0
|
||||
its = 1.0 * options.generate.steps * len(images) / t
|
||||
log.info({ 'model preview created': model, 'image': fn, 'images': len(images), 'grid': [image.width, image.height], 'time': round(t, 2), 'its': round(its, 2) })
|
||||
|
||||
opt = await get('/sdapi/v1/options')
|
||||
if opt['sd_model_checkpoint'] != params.model:
|
||||
log.info({ 'model set default': params.model })
|
||||
opt['sd_model_checkpoint'] = params.model
|
||||
del opt['sd_lora']
|
||||
del opt['sd_lyco']
|
||||
await post('/sdapi/v1/options', opt)
|
||||
|
||||
|
||||
async def lora(params):
|
||||
opt = await get('/sdapi/v1/options')
|
||||
folder = opt['lora_dir']
|
||||
if not os.path.exists(folder):
|
||||
log.error({ 'lora directory not found': folder })
|
||||
return
|
||||
models1 = list(Path(folder).glob('**/*.safetensors'))
|
||||
models2 = list(Path(folder).glob('**/*.ckpt'))
|
||||
models = [os.path.splitext(f)[0] for f in models1 + models2]
|
||||
log.info({ 'loras': len(models) })
|
||||
for model in models:
|
||||
if preview_exists('', model) and len(params.input) == 0: # if model preview exists and not manually included
|
||||
log.info({ 'lora preview exists': model })
|
||||
continue
|
||||
fn = model + options.format
|
||||
model = os.path.basename(model)
|
||||
images = []
|
||||
labels = []
|
||||
t0 = time.time()
|
||||
keywords = re.sub(r'\d', '', model)
|
||||
keywords = keywords.replace('-v', ' ').replace('-', ' ').strip().split(' ')
|
||||
keyword = '\"' + '\" \"'.join(keywords) + '\"'
|
||||
options.generate.prompt = options.prompt.replace('<keyword>', keyword)
|
||||
options.generate.prompt = options.generate.prompt.replace('<embedding>', '')
|
||||
options.generate.prompt += f' <lora:{model}:{options.lora.strength}>'
|
||||
log.info({ 'lora generating': model, 'keyword': keyword, 'prompt': options.generate.prompt })
|
||||
data = await generate(options = options, quiet=True)
|
||||
if 'image' in data:
|
||||
for img in data['image']:
|
||||
images.append(img)
|
||||
labels.append(keyword)
|
||||
else:
|
||||
log.error({ 'lora': model, 'keyword': keyword, 'error': data })
|
||||
t1 = time.time()
|
||||
if len(images) == 0:
|
||||
log.error({ 'model': model, 'error': 'no images generated' })
|
||||
continue
|
||||
image = grid(images = images, labels = labels, border = 8)
|
||||
log.info({ 'saving preview': fn, 'images': len(images), 'size': [image.width, image.height] })
|
||||
image.save(fn)
|
||||
t = t1 - t0
|
||||
its = 1.0 * options.generate.steps * len(images) / t
|
||||
log.info({ 'lora preview created': model, 'image': fn, 'images': len(images), 'grid': [image.width, image.height], 'time': round(t, 2), 'its': round(its, 2) })
|
||||
|
||||
|
||||
async def lyco(params):
|
||||
opt = await get('/sdapi/v1/options')
|
||||
folder = opt['lyco_dir']
|
||||
if not os.path.exists(folder):
|
||||
log.error({ 'lyco directory not found': folder })
|
||||
return
|
||||
models1 = list(Path(folder).glob('**/*.safetensors'))
|
||||
models2 = list(Path(folder).glob('**/*.ckpt'))
|
||||
models = [os.path.splitext(f)[0] for f in models1 + models2]
|
||||
log.info({ 'lycos': len(models) })
|
||||
for model in models:
|
||||
if preview_exists('', model) and len(params.input) == 0: # if model preview exists and not manually included
|
||||
log.info({ 'lyco preview exists': model })
|
||||
continue
|
||||
fn = model + options.format
|
||||
model = os.path.basename(model)
|
||||
images = []
|
||||
labels = []
|
||||
t0 = time.time()
|
||||
keywords = re.sub(r'\d', '', model)
|
||||
keywords = keywords.replace('-v', ' ').replace('-', ' ').strip().split(' ')
|
||||
keyword = '\"' + '\" \"'.join(keywords) + '\"'
|
||||
options.generate.prompt = options.prompt.replace('<keyword>', keyword)
|
||||
options.generate.prompt = options.generate.prompt.replace('<embedding>', '')
|
||||
options.generate.prompt += f' <lyco:{model}:{options.lora.strength}>'
|
||||
log.info({ 'lyco generating': model, 'keyword': keyword, 'prompt': options.generate.prompt })
|
||||
data = await generate(options = options, quiet=True)
|
||||
if 'image' in data:
|
||||
for img in data['image']:
|
||||
images.append(img)
|
||||
labels.append(keyword)
|
||||
else:
|
||||
log.error({ 'lyco': model, 'keyword': keyword, 'error': data })
|
||||
t1 = time.time()
|
||||
if len(images) == 0:
|
||||
log.error({ 'model': model, 'error': 'no images generated' })
|
||||
continue
|
||||
image = grid(images = images, labels = labels, border = 8)
|
||||
log.info({ 'saving preview': fn, 'images': len(images), 'size': [image.width, image.height] })
|
||||
image.save(fn)
|
||||
t = t1 - t0
|
||||
its = 1.0 * options.generate.steps * len(images) / t
|
||||
log.info({ 'lyco preview created': model, 'image': fn, 'images': len(images), 'grid': [image.width, image.height], 'time': round(t, 2), 'its': round(its, 2) })
|
||||
|
||||
|
||||
async def embedding(params):
|
||||
opt = await get('/sdapi/v1/options')
|
||||
folder = opt['embeddings_dir']
|
||||
if not os.path.exists(folder):
|
||||
log.error({ 'embeddings directory not found': folder })
|
||||
return
|
||||
models = [os.path.splitext(f)[0] for f in Path(folder).glob('**/*.pt')]
|
||||
log.info({ 'embeddings': len(models) })
|
||||
for model in models:
|
||||
if preview_exists(folder, model) and len(params.input) == 0: # if model preview exists and not manually included
|
||||
log.info({ 'embedding preview exists': model })
|
||||
continue
|
||||
fn = os.path.join(folder, model + '.preview' + options.format)
|
||||
images = []
|
||||
labels = []
|
||||
t0 = time.time()
|
||||
keyword = '\"' + re.sub(r'\d', '', model) + '\"'
|
||||
options.generate.batch_size = 4
|
||||
options.generate.prompt = options.prompt.replace('<keyword>', keyword)
|
||||
options.generate.prompt = options.generate.prompt.replace('<embedding>', '')
|
||||
log.info({ 'embedding generating': model, 'keyword': keyword, 'prompt': options.generate.prompt })
|
||||
data = await generate(options = options, quiet=True)
|
||||
if 'image' in data:
|
||||
for img in data['image']:
|
||||
images.append(img)
|
||||
labels.append(keyword)
|
||||
else:
|
||||
log.error({ 'embeding': model, 'keyword': keyword, 'error': data })
|
||||
t1 = time.time()
|
||||
if len(images) == 0:
|
||||
log.error({ 'model': model, 'error': 'no images generated' })
|
||||
continue
|
||||
image = grid(images = images, labels = labels, border = 8)
|
||||
log.info({ 'saving preview': fn, 'images': len(images), 'size': [image.width, image.height] })
|
||||
image.save(fn)
|
||||
t = t1 - t0
|
||||
its = 1.0 * options.generate.steps * len(images) / t
|
||||
log.info({ 'embeding preview created': model, 'image': fn, 'images': len(images), 'grid': [image.width, image.height], 'time': round(t, 2), 'its': round(its, 2) })
|
||||
|
||||
|
||||
async def create_previews(params):
|
||||
await preview_models(params)
|
||||
await lora(params)
|
||||
await lyco(params)
|
||||
await embedding(params)
|
||||
await close()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser(description = 'generate model previews')
|
||||
parser.add_argument('--model', default='best/icbinp-icantbelieveIts-final.safetensors [73f48afbdc]', help="model used to create extra network previews")
|
||||
parser.add_argument('--exclude', default=['sd-v20', 'sd-v21', 'inpainting', 'pix2pix'], help="exclude models with keywords")
|
||||
parser.add_argument('--debug', default = False, action='store_true', help = 'print extra debug information')
|
||||
parser.add_argument('input', type = str, nargs = '*')
|
||||
args = parser.parse_args()
|
||||
if args.debug:
|
||||
log.setLevel(logging.DEBUG)
|
||||
log.debug({ 'debug': True })
|
||||
log.debug({ 'args': args.__dict__ })
|
||||
asyncio.run(create_previews(args))
|
||||
|
|
@ -33,7 +33,7 @@ def pil_to_b64(img: Image, size: int, quality: int):
|
|||
return f'data:image/jpeg;base64,{b64encoded}'
|
||||
|
||||
|
||||
def post(endpoint: str, dct: dict = None):
|
||||
def post(endpoint: str, dct: dict | None = None):
|
||||
req = requests.post(endpoint, json = dct, timeout=300, verify=False)
|
||||
if req.status_code != 200:
|
||||
return { 'error': req.status_code, 'reason': req.reason, 'url': req.url }
|
||||
|
|
|
|||
|
|
@ -24,10 +24,7 @@
|
|||
{
|
||||
"upscaler_1": "SwinIR_4x",
|
||||
"upscaler_2": "None",
|
||||
"upscaling_resize": 0,
|
||||
"gfpgan_visibility": 0,
|
||||
"codeformer_visibility": 0,
|
||||
"codeformer_weight": 0.5
|
||||
"upscaling_resize": 0
|
||||
},
|
||||
"options":
|
||||
{
|
||||
|
|
|
|||
|
|
@ -65,8 +65,6 @@ def exif(info, i = None, op = 'generate'):
|
|||
seed = ', '.join([str(x) for x in seed]) # int list to str list to single str
|
||||
template = '{prompt} | negative {negative_prompt} | seed {s} | steps {steps} | cfgscale {cfg_scale} | sampler {sampler_name} | batch {batch_size} | timestamp {job_timestamp} | model {model} | vae {vae}'.format(s = seed, model = sd.options['sd_model_checkpoint'], vae = sd.options['sd_vae'], **info) # pylint: disable=consider-using-f-string
|
||||
if op == 'upscale':
|
||||
template += ' | faces gfpgan' if sd.upscale.gfpgan_visibility > 0 else ''
|
||||
template += ' | faces codeformer' if sd.upscale.codeformer_visibility > 0 else ''
|
||||
template += ' | upscale {resize}x {upscaler}'.format(resize = sd.upscale.upscaling_resize, upscaler = sd.upscale.upscaler_1) if sd.upscale.upscaler_1 != 'None' else '' # pylint: disable=consider-using-f-string
|
||||
template += ' | upscale {resize}x {upscaler}'.format(resize = sd.upscale.upscaling_resize, upscaler = sd.upscale.upscaler_2) if sd.upscale.upscaler_2 != 'None' else '' # pylint: disable=consider-using-f-string
|
||||
if op == 'grid':
|
||||
|
|
@ -221,7 +219,7 @@ def args(): # parse cmd arguments
|
|||
global random # pylint: disable=global-statement
|
||||
parser = argparse.ArgumentParser(description = 'sd pipeline')
|
||||
parser.add_argument('--config', type = str, default = 'generate.json', required = False, help = 'configuration file')
|
||||
parser.add_argument('--random', type = str, default = 'random.json', required = False, help = 'prompt file with randomized sections')
|
||||
parser.add_argument('--random', type = str, default = 'generate-random.json', required = False, help = 'prompt file with randomized sections')
|
||||
parser.add_argument('--max', type = int, default = 1, required = False, help = 'maximum number of generated images')
|
||||
parser.add_argument('--prompt', type = str, default = 'dynamic', required = False, help = 'prompt')
|
||||
parser.add_argument('--negative', type = str, default = 'dynamic', required = False, help = 'negative prompt')
|
||||
|
|
@ -309,7 +307,6 @@ def args(): # parse cmd arguments
|
|||
sd.generate.height = params.height if params.height > 0 else sd.generate.height
|
||||
sd.generate.steps = params.steps if params.steps > 0 else sd.generate.steps
|
||||
sd.upscale.upscaling_resize = params.upscale if params.upscale > 0 else sd.upscale.upscaling_resize
|
||||
sd.upscale.codeformer_visibility = 1 if params.detailer else sd.upscale.codeformer_visibility
|
||||
sd.options.sd_vae = params.vae if params.vae != '' else sd.options.sd_vae
|
||||
sd.options.sd_model_checkpoint = params.model if params.model != '' else sd.options.sd_model_checkpoint
|
||||
sd.upscale.upscaler_1 = 'SwinIR_4x' if params.upscale > 1 else sd.upscale.upscaler_1
|
||||
|
|
|
|||
|
|
@ -14,7 +14,6 @@ if __name__ == "__main__":
|
|||
full=True,
|
||||
limit=100,
|
||||
sort="downloads",
|
||||
direction=-1,
|
||||
)
|
||||
res = sorted(res, key=lambda x: x.id)
|
||||
exact = [m for m in res if keyword.lower() in m.id.lower()]
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ class ImageDB:
|
|||
def __init__(self,
|
||||
name:str='db',
|
||||
fmt:str='json',
|
||||
cache_dir:str=None,
|
||||
cache_dir:str | None=None,
|
||||
dtype:torch.dtype=torch.float16,
|
||||
device:torch.device=torch.device('cpu'),
|
||||
model:str='openai/clip-vit-large-patch14', # 'facebook/dinov2-small'
|
||||
|
|
@ -123,8 +123,8 @@ class ImageDB:
|
|||
self.df = rec
|
||||
self.index.add(embed)
|
||||
|
||||
def search(self, filename: str = None, metadata: str = None, embed: np.ndarray = None, k=10, d=1.0): # search by filename/metadata/prompt-embed/image-embed
|
||||
def dct(record: pd.DataFrame, mode: str, distance: float = None):
|
||||
def search(self, filename: str | None = None, metadata: str | None = None, embed: np.ndarray = None, k=10, d=1.0): # search by filename/metadata/prompt-embed/image-embed
|
||||
def dct(record: pd.DataFrame, mode: str, distance: float | None = None):
|
||||
if distance is not None:
|
||||
return {'type': mode, 'filename': record[1]['filename'], 'metadata': record[1]['metadata'], 'distance': round(distance, 2)}
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -1,67 +0,0 @@
|
|||
#!/usr/bin/env node
|
||||
// script used to localize sdnext ui and hints to multiple languages using google gemini ai
|
||||
|
||||
const fs = require('node:fs');
|
||||
|
||||
const { GoogleGenerativeAI } = require('@google/generative-ai');
|
||||
|
||||
const api_key = process.env.GOOGLE_AI_API_KEY;
|
||||
const model = 'gemini-2.5-flash';
|
||||
const prompt = `Translate attached JSON from English to {language} using following rules: fields id, label and reload should be preserved from original, field localized should be a translated version of field label and field hint should be translated in-place.
|
||||
if field is less than 3 characters, do not translate it and keep it as is.
|
||||
Every JSON entry should have id, label, localized, reload and hint fields.
|
||||
Output should be pure JSON without any additional text. To better match translation, context of the text is related to Stable Diffusion and topic of Generative AI.`;
|
||||
const languages = {
|
||||
hr: 'Croatian',
|
||||
de: 'German',
|
||||
es: 'Spanish',
|
||||
fr: 'French',
|
||||
it: 'Italian',
|
||||
pt: 'Portuguese',
|
||||
zh: 'Chinese',
|
||||
ja: 'Japanese',
|
||||
ko: 'Korean',
|
||||
ru: 'Russian',
|
||||
};
|
||||
const chunkLines = 100;
|
||||
|
||||
async function localize() {
|
||||
if (!api_key || api_key.length < 10) {
|
||||
console.error('localize: set GOOGLE_AI_API_KEY env variable with your API key');
|
||||
process.exit();
|
||||
}
|
||||
const genAI = new GoogleGenerativeAI(api_key);
|
||||
const instance = genAI.getGenerativeModel({ model });
|
||||
const raw = fs.readFileSync('html/locale_en.json');
|
||||
const json = JSON.parse(raw);
|
||||
for (const locale of Object.keys(languages)) {
|
||||
const lang = languages[locale];
|
||||
const target = prompt.replace('{language}', lang).trim();
|
||||
const output = {};
|
||||
const fn = `html/locale_${locale}.json`;
|
||||
for (const section of Object.keys(json)) {
|
||||
const data = json[section];
|
||||
output[section] = [];
|
||||
for (let i = 0; i < data.length; i += chunkLines) {
|
||||
let markdown;
|
||||
try {
|
||||
const chunk = data.slice(i, i + chunkLines);
|
||||
const result = await instance.generateContent([target, JSON.stringify(chunk)]);
|
||||
markdown = result.response.text();
|
||||
const text = markdown.replaceAll('```', '').replace(/^.*\n/, '');
|
||||
const parsed = JSON.parse(text);
|
||||
output[section].push(...parsed);
|
||||
console.log(`localize: locale=${locale} lang=${lang} section=${section} chunk=${chunk.length} output=${output[section].length} fn=${fn}`);
|
||||
} catch (err) {
|
||||
console.error('localize:', err);
|
||||
console.error('localize input:', { target, section, i });
|
||||
console.error('localize output:', { markdown });
|
||||
}
|
||||
}
|
||||
const txt = JSON.stringify(output, null, 2);
|
||||
fs.writeFileSync(fn, txt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
localize();
|
||||
|
|
@ -13,9 +13,8 @@ def get_nvidia_smi(output='dict'):
|
|||
if smi is None:
|
||||
log.error("nvidia-smi not found")
|
||||
return None
|
||||
result = subprocess.run(f'"{smi}" -q -x', shell=True, check=False, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
xml = result.stdout.decode(encoding="utf8", errors="ignore")
|
||||
d = xmltodict.parse(xml)
|
||||
result = subprocess.run(f'"{smi}" -q -x', shell=True, check=False, env=os.environ, capture_output=True, text=True)
|
||||
d = xmltodict.parse(result.stdout)
|
||||
if 'nvidia_smi_log' in d:
|
||||
d = d['nvidia_smi_log']
|
||||
if 'gpu' in d and 'supported_clocks' in d['gpu']:
|
||||
|
|
|
|||
|
|
@ -6,13 +6,12 @@ import base64
|
|||
import numpy as np
|
||||
import mediapipe as mp
|
||||
from PIL import Image, ImageOps
|
||||
from pi_heif import register_heif_opener
|
||||
from skimage.metrics import structural_similarity as ssim
|
||||
from scipy.stats import beta
|
||||
|
||||
import util
|
||||
import sdapi
|
||||
import options
|
||||
import process_options as options
|
||||
|
||||
face_model = None
|
||||
body_model = None
|
||||
|
|
@ -22,7 +21,7 @@ all_images_by_type = {}
|
|||
|
||||
|
||||
class Result():
|
||||
def __init__(self, typ: str, fn: str, tag: str = None, requested: list = []):
|
||||
def __init__(self, typ: str, fn: str, tag: str | None = None, requested: list = []):
|
||||
self.type = typ
|
||||
self.input = fn
|
||||
self.output = ''
|
||||
|
|
@ -42,7 +41,7 @@ def detect_blur(image: Image):
|
|||
cx, cy = image.size[0] // 2, image.size[1] // 2
|
||||
fft = np.fft.fft2(bw)
|
||||
fftShift = np.fft.fftshift(fft)
|
||||
fftShift[cy - options.process.blur_samplesize: cy + options.process.blur_samplesize, cx - options.process.blur_samplesize: cx + options.process.blur_samplesize] = 0
|
||||
fftShift[cy - options.process.blur_samplesize: cy + options.process.blur_samplesize, cx - options.process.blur_samplesize: cx + options.process.blur_samplesize] = 0 # pylint: disable=unsupported-assignment-operation
|
||||
fftShift = np.fft.ifftshift(fftShift)
|
||||
recon = np.fft.ifft2(fftShift)
|
||||
magnitude = np.log(np.abs(recon))
|
||||
|
|
@ -126,11 +125,9 @@ def reset():
|
|||
all_images = []
|
||||
|
||||
|
||||
def upscale_restore_image(res: Result, upscale: bool = False, restore: bool = False):
|
||||
def upscale_restore_image(res: Result, upscale: bool = False):
|
||||
kwargs = util.Map({
|
||||
'image': encode(res.image),
|
||||
'codeformer_visibility': 0.0,
|
||||
'codeformer_weight': 0.0,
|
||||
})
|
||||
if res.image.width >= options.process.target_size and res.image.height >= options.process.target_size:
|
||||
upscale = False
|
||||
|
|
@ -138,25 +135,21 @@ def upscale_restore_image(res: Result, upscale: bool = False, restore: bool = Fa
|
|||
kwargs.upscaler_1 = 'SwinIR_4x'
|
||||
kwargs.upscaling_resize = 2
|
||||
res.ops.append('upscale')
|
||||
if restore:
|
||||
kwargs.codeformer_visibility = 1.0
|
||||
kwargs.codeformer_weight = 0.2
|
||||
res.ops.append('restore')
|
||||
if upscale or restore:
|
||||
if upscale:
|
||||
result = sdapi.postsync('/sdapi/v1/extra-single-image', kwargs)
|
||||
if 'image' not in result:
|
||||
res.message = 'failed to upscale/restore image'
|
||||
res.message = 'failed to upscale image'
|
||||
else:
|
||||
res.image = Image.open(io.BytesIO(base64.b64decode(result['image'])))
|
||||
return res
|
||||
|
||||
|
||||
def interrogate_image(res: Result, tag: str = None):
|
||||
def caption_image(res: Result, tag: str | None = None):
|
||||
caption = ''
|
||||
tags = []
|
||||
for model in options.process.interrogate_model:
|
||||
for model in options.process.caption_model:
|
||||
json = util.Map({ 'image': encode(res.image), 'model': model })
|
||||
result = sdapi.postsync('/sdapi/v1/interrogate', json)
|
||||
result = sdapi.postsync('/sdapi/v1/caption', json)
|
||||
if model == 'clip':
|
||||
caption = result.caption if 'caption' in result else ''
|
||||
caption = caption.split(',')[0].replace(' a ', ' ').strip()
|
||||
|
|
@ -176,7 +169,7 @@ def interrogate_image(res: Result, tag: str = None):
|
|||
tags = tags[:options.process.tag_limit]
|
||||
res.caption = caption
|
||||
res.tags = tags
|
||||
res.ops.append('interrogate')
|
||||
res.ops.append('caption')
|
||||
return res
|
||||
|
||||
|
||||
|
|
@ -267,7 +260,6 @@ def file(filename: str, folder: str, tag = None, requested = []):
|
|||
res = Result(fn = filename, typ='unknown', tag=tag, requested = requested)
|
||||
# open image
|
||||
try:
|
||||
register_heif_opener()
|
||||
res.image = Image.open(filename)
|
||||
if res.image.mode == 'RGBA':
|
||||
res.image = res.image.convert('RGB')
|
||||
|
|
@ -309,13 +301,13 @@ def file(filename: str, folder: str, tag = None, requested = []):
|
|||
if res.image is None:
|
||||
return res
|
||||
# post processing steps
|
||||
res = upscale_restore_image(res, 'upscale' in requested, 'restore' in requested)
|
||||
res = upscale_restore_image(res, 'upscale' in requested)
|
||||
if res.image.width < options.process.target_size or res.image.height < options.process.target_size:
|
||||
res.message = f'low resolution: [{res.image.width}, {res.image.height}]'
|
||||
res.image = None
|
||||
return res
|
||||
if 'interrogate' in requested:
|
||||
res = interrogate_image(res, tag)
|
||||
if 'caption' in requested:
|
||||
res = caption_image(res, tag)
|
||||
if 'resize' in requested:
|
||||
res = resize_image(res)
|
||||
if 'square' in requested:
|
||||
|
|
|
|||
|
|
@ -130,9 +130,9 @@ process = Map({
|
|||
'body_pad': 0.2, # pad body image percentage
|
||||
'body_model': 2, # body model to use 0/low 1/medium 2/high
|
||||
# similarity detection settings
|
||||
# interrogate settings
|
||||
'interrogate': False, # interrogate images
|
||||
'interrogate_model': ['clip', 'deepdanbooru'], # interrogate models
|
||||
# caption settings
|
||||
'caption': False, # caption images
|
||||
'caption_model': ['clip', 'deepdanbooru'], # caption models
|
||||
'tag_limit': 5, # number of tags to extract
|
||||
# validations
|
||||
# tbd
|
||||
|
|
@ -93,7 +93,7 @@ def resultsync(req: requests.Response):
|
|||
return res
|
||||
|
||||
|
||||
async def get(endpoint: str, json: dict = None):
|
||||
async def get(endpoint: str, json: dict | None = None):
|
||||
global sess # pylint: disable=global-statement
|
||||
sess = sess if sess is not None else await session()
|
||||
try:
|
||||
|
|
@ -105,7 +105,7 @@ async def get(endpoint: str, json: dict = None):
|
|||
return {}
|
||||
|
||||
|
||||
def getsync(endpoint: str, json: dict = None):
|
||||
def getsync(endpoint: str, json: dict | None = None):
|
||||
try:
|
||||
req = requests.get(f'{sd_url}{endpoint}', json=json, verify=False, auth=authsync()) # pylint: disable=missing-timeout
|
||||
res = resultsync(req)
|
||||
|
|
@ -115,7 +115,7 @@ def getsync(endpoint: str, json: dict = None):
|
|||
return {}
|
||||
|
||||
|
||||
async def post(endpoint: str, json: dict = None):
|
||||
async def post(endpoint: str, json: dict | None = None):
|
||||
global sess # pylint: disable=global-statement
|
||||
# sess = sess if sess is not None else await session()
|
||||
if sess and not sess.closed:
|
||||
|
|
@ -130,7 +130,7 @@ async def post(endpoint: str, json: dict = None):
|
|||
return {}
|
||||
|
||||
|
||||
def postsync(endpoint: str, json: dict = None):
|
||||
def postsync(endpoint: str, json: dict | None = None):
|
||||
req = requests.post(f'{sd_url}{endpoint}', json=json, verify=False, auth=authsync()) # pylint: disable=missing-timeout
|
||||
res = resultsync(req)
|
||||
return res
|
||||
|
|
|
|||
|
|
@ -89,7 +89,7 @@ class Page():
|
|||
return ''
|
||||
|
||||
def __str__(self):
|
||||
return f'Page(title="{self.title.strip()}" fn="{self.fn}" mtime={self.mtime} h1={[h.strip() for h in self.h1]} h2={len(self.h2)} h3={len(self.h3)} lines={len(self.lines)} size={self.size})'
|
||||
return f'Page(title="{self.title.strip()}" file="{self.fn}" mtime={self.mtime} h1={[h.strip() for h in self.h1]} h2={len(self.h2)} h3={len(self.h3)} lines={len(self.lines)} size={self.size})'
|
||||
|
||||
|
||||
class Pages():
|
||||
|
|
@ -129,16 +129,14 @@ if __name__ == "__main__":
|
|||
sys.argv.pop(0)
|
||||
if len(sys.argv) < 1:
|
||||
log.error("Usage: python cli/docs.py <search_term>")
|
||||
text = ' '.join(sys.argv)
|
||||
topk = 10
|
||||
full = True
|
||||
log.info(f'Search: "{text}" topk={topk}, full={full}')
|
||||
term = ' '.join(sys.argv)
|
||||
log.info(f'Search: "{term}" topk=10, full=True')
|
||||
t0 = time.time()
|
||||
results = index.search(text, topk=topk, full=full)
|
||||
results = index.search(term, topk=10, full=True)
|
||||
t1 = time.time()
|
||||
log.info(f'Results: pages={len(results)} size={index.size} time={t1-t0:.3f}')
|
||||
for score, page in results:
|
||||
log.info(f'Score: {score:.2f} {page}')
|
||||
for _score, _page in results:
|
||||
log.info(f'Score: {_score:.2f} {_page}')
|
||||
# if len(results) > 0:
|
||||
# log.info('Top result:')
|
||||
# log.info(results[0][1].get())
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
fastapi==0.124.4
|
||||
numpy==2.1.2
|
||||
Pillow==10.4.0
|
||||
|
|
@ -1,11 +1,18 @@
|
|||
{
|
||||
"Google Gemini 2.5 Flash Nano Banana": {
|
||||
"Google Gemini 2.5 Flash Nano Banana": {
|
||||
"path": "gemini-2.5-flash-image",
|
||||
"desc": "Gemini can generate and process images conversationally. You can prompt Gemini with text, images, or a combination of both allowing you to create, edit, and iterate on visuals with unprecedented control.",
|
||||
"preview": "gemini-2.5-flash-image.jpg",
|
||||
"tags": "cloud",
|
||||
"skip": true
|
||||
},
|
||||
"Google Gemini 3.1 Flash Nano Banana": {
|
||||
"path": "gemini-3.1-flash-image-preview",
|
||||
"desc": "Gemini can generate and process images conversationally. You can prompt Gemini with text, images, or a combination of both allowing you to create, edit, and iterate on visuals with unprecedented control.",
|
||||
"preview": "gemini-3.1-flash-image-preview.jpg",
|
||||
"tags": "cloud",
|
||||
"skip": true
|
||||
},
|
||||
"Google Gemini 3.0 Pro Nano Banana": {
|
||||
"path": "gemini-3-pro-image-preview",
|
||||
"desc": "Built on Gemini 3. Create and edit images with studio-quality levels of precision and control",
|
||||
|
|
@ -106,20 +106,22 @@
|
|||
"desc": "Pony V7 is a versatile character generation model based on AuraFlow architecture. It supports a wide range of styles and species types (humanoid, anthro, feral, and more) and handles character interactions through natural language prompts.",
|
||||
"extras": "",
|
||||
"tags": "community",
|
||||
"date": "October September"
|
||||
"date": "2025 October"
|
||||
},
|
||||
"ShuttleAI Shuttle 3.0 Diffusion": {
|
||||
"path": "shuttleai/shuttle-3-diffusion",
|
||||
"desc": "Shuttle uses Flux.1 Schnell as its base. It can produce images similar to Flux Dev or Pro in just 4 steps, and it is licensed under Apache 2. The model was partially de-distilled during training. When used beyond 10 steps, it enters refiner mode enhancing image details without altering the composition",
|
||||
"preview": "shuttleai--shuttle-3-diffusion.jpg",
|
||||
"tags": "community",
|
||||
"date": "2024 November",
|
||||
"skip": true
|
||||
},
|
||||
"ShuttleAI Shuttle 3.1 Aesthetic": {
|
||||
"path": "shuttleai/shuttle-3.1-aesthetic",
|
||||
"desc": "Shuttle uses Flux.1 Schnell as its base. It can produce images similar to Flux Dev or Pro in just 4 steps, and it is licensed under Apache 2. The model was partially de-distilled during training. When used beyond 10 steps, it enters refiner mode enhancing image details without altering the composition",
|
||||
"preview": "shuttleai--shuttle-3_1-aestetic.jpg",
|
||||
"preview": "shuttleai--shuttle-3.1-aesthetic.jpg",
|
||||
"tags": "community",
|
||||
"date": "2024 November",
|
||||
"skip": true
|
||||
},
|
||||
"ShuttleAI Shuttle Jaguar": {
|
||||
|
|
@ -127,6 +129,55 @@
|
|||
"desc": "Shuttle uses Flux.1 Schnell as its base. It can produce images similar to Flux Dev or Pro in just 4 steps, and it is licensed under Apache 2. The model was partially de-distilled during training. When used beyond 10 steps, it enters refiner mode enhancing image details without altering the composition",
|
||||
"preview": "shuttleai--shuttle-jaguar.jpg",
|
||||
"tags": "community",
|
||||
"date": "2025 January",
|
||||
"skip": true
|
||||
},
|
||||
"Anima Preview 1": {
|
||||
"path": "CalamitousFelicitousness/Anima-sdnext-diffusers",
|
||||
"preview": "CalamitousFelicitousness--Anima-sdnext-diffusers.jpg",
|
||||
"desc": "Modified Cosmos-Predict-2B that replaces the T5-11B text encoder with Qwen3-0.6B. Anima is a 2 billion parameter text-to-image model created via a collaboration between CircleStone Labs and Comfy Org. It is focused mainly on anime concepts, characters, and styles, but is also capable of generating a wide variety of other non-photorealistic content. The model is designed for making illustrations and artistic images, and will not work well at realism.",
|
||||
"tags": "community",
|
||||
"date": "2026 January",
|
||||
"skip": true
|
||||
},
|
||||
"Anima Preview 2": {
|
||||
"path": "CalamitousFelicitousness/Anima-Preview-2-sdnext-diffusers",
|
||||
"preview": "CalamitousFelicitousness--Anima-Preview-2-sdnext-diffusers.jpg",
|
||||
"desc": "Anima Preview V2 with improved hyperparameters, extended medium-resolution training for more character knowledge, and a regularization dataset for better natural language comprehension. A 2B parameter anime-focused text-to-image model based on modified Cosmos-Predict-2B with Qwen3-0.6B text encoder.",
|
||||
"tags": "community",
|
||||
"date": "2026 March",
|
||||
"skip": true
|
||||
},
|
||||
"FireRed Image Edit 1.0": {
|
||||
"path": "FireRedTeam/FireRed-Image-Edit-1.0",
|
||||
"preview": "FireRedTeam--FireRed-Image-Edit-1.0.jpg",
|
||||
"desc": "FireRed-Image-Edit is a general-purpose image editing model that delivers high-fidelity and consistent editing across a wide range of scenarios. FireRed is a fine-tune of Qwen-Image-Edit.",
|
||||
"tags": "community",
|
||||
"date": "2026 February",
|
||||
"skip": true
|
||||
},
|
||||
"FireRed Image Edit 1.1": {
|
||||
"path": "FireRedTeam/FireRed-Image-Edit-1.1",
|
||||
"preview": "FireRedTeam--FireRed-Image-Edit-1.1.jpg",
|
||||
"desc": "FireRed-Image-Edit is a general-purpose image editing model that delivers high-fidelity and consistent editing across a wide range of scenarios. FireRed is a fine-tune of Qwen-Image-Edit.",
|
||||
"tags": "community",
|
||||
"date": "2026 February",
|
||||
"skip": true
|
||||
},
|
||||
"Skywork UniPic3": {
|
||||
"path": "Skywork/Unipic3",
|
||||
"preview": "Skywork--Unipic3.jpg",
|
||||
"desc": "UniPic3 is an image editing and multi-image composition model based. It is a fine-tune of Qwen-Image-Edit.",
|
||||
"tags": "community",
|
||||
"date": "2026 February",
|
||||
"skip": true
|
||||
},
|
||||
"Skywork/Unipic3-DMD": {
|
||||
"path": "Skywork/Unipic3-DMD",
|
||||
"preview": "Skywork--Unipic3-DMD.jpg",
|
||||
"desc": "UniPic3-DMD-Model is a few-step image editing and multi-image composition model trained using Distribution Matching Distillation (DMD) and is a fine-tune of Qwen-Image-Edit.",
|
||||
"tags": "community",
|
||||
"date": "2026 February",
|
||||
"skip": true
|
||||
}
|
||||
}
|
||||
|
|
@ -170,7 +170,7 @@
|
|||
"tags": "distilled",
|
||||
"extras": "sampler: Default, cfg_scale: 1.0, steps: 4",
|
||||
"size": 8.5,
|
||||
"date": "2025 January"
|
||||
"date": "2026 January"
|
||||
},
|
||||
"Black Forest Labs FLUX.2 Klein 9B": {
|
||||
"path": "black-forest-labs/FLUX.2-klein-9B",
|
||||
|
|
@ -180,6 +180,25 @@
|
|||
"tags": "distilled",
|
||||
"extras": "sampler: Default, cfg_scale: 1.0, steps: 4",
|
||||
"size": 18.5,
|
||||
"date": "2025 January"
|
||||
"date": "2026 January"
|
||||
},
|
||||
"Black Forest Labs FLUX.2 Klein 9B KV": {
|
||||
"path": "black-forest-labs/FLUX.2-klein-9b-kv",
|
||||
"preview": "black-forest-labs--FLUX.2-klein-9b-kv.jpg",
|
||||
"desc": "FLUX.2 klein 9B KV is an optimized variant of FLUX.2 klein 9B with KV-cache support for accelerated multi-reference editing. This variant caches key-value pairs from reference images during the first denoising step, eliminating redundant computation in subsequent steps for significantly faster multi-image editing workflows.",
|
||||
"skip": true,
|
||||
"tags": "distilled",
|
||||
"extras": "sampler: Default, cfg_scale: 1.0, steps: 4",
|
||||
"size": 18.5,
|
||||
"date": "2026 March"
|
||||
},
|
||||
"Meituan LongCat Image-Edit Turbo": {
|
||||
"path": "meituan-longcat/LongCat-Image-Edit-Turbo",
|
||||
"preview": "meituan-longcat--LongCat-Image-Edit.jpg",
|
||||
"desc": "LongCat-Image-Edit-Turbo, the distilled version of LongCat-Image-Edit. It achieves high-quality image editing with only 8 NFEs (Number of Function Evaluations) , offering extremely low inference latency.",
|
||||
"skip": true,
|
||||
"extras": "",
|
||||
"size": 27.30,
|
||||
"date": "2026 February"
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,209 @@
|
|||
{
|
||||
"FLUX.1-Dev Nunchaku SVDQuant": {
|
||||
"path": "black-forest-labs/FLUX.1-dev",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "black-forest-labs--FLUX.1-dev.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of FLUX.1-dev transformer with INT4 and SVD rank 32",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model", "TE"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"FLUX.1-Schnell Nunchaku SVDQuant": {
|
||||
"path": "black-forest-labs/FLUX.1-schnell",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "black-forest-labs--FLUX.1-schnell.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of FLUX.1-schnell transformer with INT4 and SVD rank 32",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model", "TE"],
|
||||
"tags": "nunchaku",
|
||||
"extras": "sampler: Default, cfg_scale: 1.0, steps: 4",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"FLUX.1-Kontext Nunchaku SVDQuant": {
|
||||
"path": "black-forest-labs/FLUX.1-Kontext-dev",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "black-forest-labs--FLUX.1-Kontext-dev.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of FLUX.1-Kontext-dev transformer with INT4 and SVD rank 32",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model", "TE"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"FLUX.1-Krea Nunchaku SVDQuant": {
|
||||
"path": "black-forest-labs/FLUX.1-Krea-dev",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "black-forest-labs--FLUX.1-Krea-dev.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of FLUX.1-Krea-dev transformer with INT4 and SVD rank 32",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model", "TE"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"FLUX.1-Fill Nunchaku SVDQuant": {
|
||||
"path": "black-forest-labs/FLUX.1-Fill-dev",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "black-forest-labs--FLUX.1-Fill-dev.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of FLUX.1-Fill-dev transformer for inpainting",
|
||||
"skip": true,
|
||||
"hidden": true,
|
||||
"nunchaku": ["Model", "TE"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"FLUX.1-Depth Nunchaku SVDQuant": {
|
||||
"path": "black-forest-labs/FLUX.1-Depth-dev",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "black-forest-labs--FLUX.1-Depth-dev.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of FLUX.1-Depth-dev transformer for depth-conditioned generation",
|
||||
"skip": true,
|
||||
"hidden": true,
|
||||
"nunchaku": ["Model", "TE"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Shuttle Jaguar Nunchaku SVDQuant": {
|
||||
"path": "shuttleai/shuttle-jaguar",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "shuttleai--shuttle-jaguar.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Shuttle Jaguar transformer",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model", "TE"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Qwen-Image Nunchaku SVDQuant": {
|
||||
"path": "Qwen/Qwen-Image",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "Qwen--Qwen-Image.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Qwen-Image transformer with INT4 and SVD rank 128",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Qwen-Lightning (8-step) Nunchaku SVDQuant": {
|
||||
"path": "vladmandic/Qwen-Lightning",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "vladmandic--Qwen-Lightning.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Qwen-Lightning (8-step distilled) transformer with INT4 and SVD rank 128",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"extras": "steps: 8",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Qwen-Lightning (4-step) Nunchaku SVDQuant": {
|
||||
"path": "vladmandic/Qwen-Lightning",
|
||||
"subfolder": "nunchaku-4step",
|
||||
"preview": "vladmandic--Qwen-Lightning.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Qwen-Lightning (4-step distilled) transformer with INT4 and SVD rank 128",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"extras": "steps: 4",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Qwen-Image-Edit Nunchaku SVDQuant": {
|
||||
"path": "Qwen/Qwen-Image-Edit",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "Qwen--Qwen-Image-Edit.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Qwen-Image-Edit transformer with INT4 and SVD rank 128",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Qwen-Lightning-Edit (8-step) Nunchaku SVDQuant": {
|
||||
"path": "vladmandic/Qwen-Lightning-Edit",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "vladmandic--Qwen-Lightning-Edit.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Qwen-Lightning-Edit (8-step distilled editing) transformer with INT4 and SVD rank 128",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"extras": "steps: 8",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Qwen-Lightning-Edit (4-step) Nunchaku SVDQuant": {
|
||||
"path": "vladmandic/Qwen-Lightning-Edit",
|
||||
"subfolder": "nunchaku-4step",
|
||||
"preview": "vladmandic--Qwen-Lightning-Edit.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Qwen-Lightning-Edit (4-step distilled editing) transformer with INT4 and SVD rank 128",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"extras": "steps: 4",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Qwen-Image-Edit-2509 Nunchaku SVDQuant": {
|
||||
"path": "Qwen/Qwen-Image-Edit-2509",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "Qwen--Qwen-Image-Edit-2509.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Qwen-Image-Edit-2509 transformer with INT4 and SVD rank 128",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 September"
|
||||
},
|
||||
"Sana 1.6B 1k Nunchaku SVDQuant": {
|
||||
"path": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "Efficient-Large-Model--Sana_1600M_1024px_BF16_diffusers.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Sana 1.6B 1024px transformer with INT4 and SVD rank 32",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"Z-Image-Turbo Nunchaku SVDQuant": {
|
||||
"path": "Tongyi-MAI/Z-Image-Turbo",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "Tongyi-MAI--Z-Image-Turbo.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of Z-Image-Turbo transformer with INT4 and SVD rank 128",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"extras": "sampler: Default, cfg_scale: 1.0, steps: 9",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"SDXL Base Nunchaku SVDQuant": {
|
||||
"path": "stabilityai/stable-diffusion-xl-base-1.0",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "stabilityai--stable-diffusion-xl-base-1.0.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of SDXL Base 1.0 UNet with INT4 and SVD rank 32",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
},
|
||||
"SDXL Turbo Nunchaku SVDQuant": {
|
||||
"path": "stabilityai/sdxl-turbo",
|
||||
"subfolder": "nunchaku",
|
||||
"preview": "stabilityai--sdxl-turbo.jpg",
|
||||
"desc": "Nunchaku SVDQuant quantization of SDXL Turbo UNet with INT4 and SVD rank 32",
|
||||
"skip": true,
|
||||
"nunchaku": ["Model"],
|
||||
"tags": "nunchaku",
|
||||
"extras": "sampler: Default, cfg_scale: 1.0, steps: 4",
|
||||
"size": 0,
|
||||
"date": "2025 June"
|
||||
}
|
||||
}
|
||||
|
|
@ -143,6 +143,15 @@
|
|||
"date": "2025 January"
|
||||
},
|
||||
|
||||
"Z-Image": {
|
||||
"path": "Tongyi-MAI/Z-Image",
|
||||
"preview": "Tongyi-MAI--Z-Image.jpg",
|
||||
"desc": "Z-Image, an efficient image generation foundation model built on a Single-Stream Diffusion Transformer architecture. It preserves the complete training signal with full CFG support, enabling aesthetic versatility from hyper-realistic photography to anime, enhanced output diversity, and robust negative prompting for artifact suppression. Ideal base for LoRA training, ControlNet, and semantic conditioning.",
|
||||
"skip": true,
|
||||
"extras": "sampler: Default, cfg_scale: 4.0, steps: 50",
|
||||
"size": 20.3,
|
||||
"date": "2026 January"
|
||||
},
|
||||
"Z-Image-Turbo": {
|
||||
"path": "Tongyi-MAI/Z-Image-Turbo",
|
||||
"preview": "Tongyi-MAI--Z-Image-Turbo.jpg",
|
||||
|
|
@ -855,6 +864,16 @@
|
|||
"extras": "sampler: Default, cfg_scale: 1.5, steps: 50",
|
||||
"size": 15.3,
|
||||
"date": "2025 January"
|
||||
},
|
||||
|
||||
"AiArtLab SDXS-1B": {
|
||||
"path": "AiArtLab/sdxs-1b",
|
||||
"preview": "AiArtLab--sdxs-1b.jpg",
|
||||
"desc": "Simple Diffusion XS (train in progress) combines Qwen3.5-1.8B text encoder with SDXL-style UNET with only 1.6B parameters and custom 32ch VAE",
|
||||
"skip": true,
|
||||
"extras": "sampler: Default",
|
||||
"size": 15.3,
|
||||
"date": "2026 January"
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -25,11 +25,13 @@ const jsConfig = defineConfig([
|
|||
ecmaVersion: 'latest',
|
||||
},
|
||||
globals: { // Set per project
|
||||
...globals.node,
|
||||
...globals.builtin,
|
||||
...globals.browser,
|
||||
...globals.jquery,
|
||||
panzoom: 'readonly',
|
||||
authFetch: 'readonly',
|
||||
initServerInfo: 'readonly',
|
||||
log: 'readonly',
|
||||
debug: 'readonly',
|
||||
error: 'readonly',
|
||||
|
|
@ -53,6 +55,7 @@ const jsConfig = defineConfig([
|
|||
generateForever: 'readonly',
|
||||
showContributors: 'readonly',
|
||||
opts: 'writable',
|
||||
monitorOption: 'readonly',
|
||||
sortUIElements: 'readonly',
|
||||
all_gallery_buttons: 'readonly',
|
||||
selected_gallery_button: 'readonly',
|
||||
|
|
@ -92,12 +95,15 @@ const jsConfig = defineConfig([
|
|||
initGPU: 'readonly',
|
||||
startGPU: 'readonly',
|
||||
disableNVML: 'readonly',
|
||||
hash: 'readonly',
|
||||
idbGet: 'readonly',
|
||||
idbPut: 'readonly',
|
||||
idbDel: 'readonly',
|
||||
idbAdd: 'readonly',
|
||||
idbCount: 'readonly',
|
||||
idbFolderCleanup: 'readonly',
|
||||
idbClearAll: 'readonly',
|
||||
idbIsReady: 'readonly',
|
||||
initChangelog: 'readonly',
|
||||
sendNotification: 'readonly',
|
||||
monitorConnection: 'readonly',
|
||||
|
|
@ -124,6 +130,7 @@ const jsConfig = defineConfig([
|
|||
camelcase: 'off',
|
||||
'default-case': 'off',
|
||||
'max-classes-per-file': 'warn',
|
||||
'guard-for-in': 'off',
|
||||
'no-await-in-loop': 'off',
|
||||
'no-bitwise': 'off',
|
||||
'no-continue': 'off',
|
||||
|
|
@ -141,6 +148,7 @@ const jsConfig = defineConfig([
|
|||
'prefer-rest-params': 'off',
|
||||
'prefer-template': 'warn',
|
||||
'promise/no-nesting': 'off',
|
||||
'@typescript-eslint/no-for-in-array': 'off',
|
||||
radix: 'off',
|
||||
'@stylistic/brace-style': [
|
||||
'error',
|
||||
|
|
@ -241,6 +249,9 @@ const jsonConfig = defineConfig([
|
|||
plugins: { json },
|
||||
language: 'json/json',
|
||||
extends: ['json/recommended'],
|
||||
rules: {
|
||||
'json/no-empty-keys': 'off',
|
||||
},
|
||||
},
|
||||
]);
|
||||
|
||||
|
|
@ -327,6 +338,7 @@ export default defineConfig([
|
|||
'**/exifr.js',
|
||||
'**/jquery.js',
|
||||
'**/sparkline.js',
|
||||
'**/sha256.js',
|
||||
'**/iframeResizer.min.js',
|
||||
]),
|
||||
...jsConfig,
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
Subproject commit 2a7005fbcf8985644b66121365fa7228a65f34b0
|
||||
Subproject commit d4eab2166e4d9b52e42924cc942198f9e22eb916
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit 9942ea11aa7f23e8ad70ab81a4b411aae333081f
|
||||
Subproject commit 006f08f499bbe69c484f0f1cc332bbf0e75526c2
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit 79cae1944646e57cfbfb126a971a04e44e45d776
|
||||
Subproject commit 1e840033b040d8915ddfb5dbf62c80f411bcec0a
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit f8cb233f39e406befe70f2130f626bfa413e641a
|
||||
Subproject commit e3720332f2301fa597c94b40897aa6e983020f1f
|
||||
|
|
@ -1 +0,0 @@
|
|||
Subproject commit 3ec8f9eb796be519a98de985bac03645d0040ede
|
||||
|
|
@ -4,46 +4,6 @@
|
|||
#licenses pre { margin: 1em 0 2em 0;}
|
||||
</style>
|
||||
|
||||
<h2><a href="https://github.com/sczhou/CodeFormer/blob/master/LICENSE">CodeFormer</a></h2>
|
||||
<small>Parts of CodeFormer code had to be copied to be compatible with GFPGAN.</small>
|
||||
<pre>
|
||||
S-Lab License 1.0
|
||||
|
||||
Copyright 2022 S-Lab
|
||||
|
||||
Redistribution and use for non-commercial purpose in source and
|
||||
binary forms, with or without modification, are permitted provided
|
||||
that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
In the event that redistribution and/or use for commercial purpose in
|
||||
source or binary forms, with or without modification is required,
|
||||
please contact the contributor(s) of the work.
|
||||
</pre>
|
||||
|
||||
|
||||
<h2><a href="https://github.com/victorca25/iNNfer/blob/main/LICENSE">ESRGAN</a></h2>
|
||||
<small>Code for architecture and reading models copied.</small>
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
16695
html/locale_de.json
16695
html/locale_de.json
File diff suppressed because it is too large
Load Diff
3033
html/locale_en.json
3033
html/locale_en.json
File diff suppressed because it is too large
Load Diff
16627
html/locale_es.json
16627
html/locale_es.json
File diff suppressed because it is too large
Load Diff
16739
html/locale_fr.json
16739
html/locale_fr.json
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
16614
html/locale_hr.json
16614
html/locale_hr.json
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
15123
html/locale_it.json
15123
html/locale_it.json
File diff suppressed because it is too large
Load Diff
16736
html/locale_ja.json
16736
html/locale_ja.json
File diff suppressed because it is too large
Load Diff
19051
html/locale_ko.json
19051
html/locale_ko.json
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
16713
html/locale_pt.json
16713
html/locale_pt.json
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
16797
html/locale_ru.json
16797
html/locale_ru.json
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
17125
html/locale_zh.json
17125
html/locale_zh.json
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,7 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:svg="http://www.w3.org/2000/svg" viewBox="0 0 512 512">
|
||||
<!-- Generator: Adobe Illustrator 30.2.0, SVG Export Plug-In . SVG Version: 2.1.1 Build 105) -->
|
||||
<path d="M333.62,470.26l-9.4,4.63-39.05-21.52,16.16,32.8-9.4,4.63-24.4-49.52,9.4-4.63,39.09,21.59-16.19-32.87,9.4-4.63,24.4,49.52h-.01ZM442.86,365.07h0ZM334.71,418.37l6.05,12.27h.01l22.32-11.01,4.03,8.18-22.32,11,6.26,12.7,38.07-18.76,3.55-23.51-18.59-10.28-39.38,19.41h0ZM457.12,338.1l-205,101,31.64,65.17,205.12-100.53-31.76-65.64h0ZM423.12,426.16l-21.57-12.47-2.75,24.46-11.34,5.59h0l-41.79,20.59-24.4-49.52,43.06-21.21.19.07-.16-.09h-.01.01l11.54-5.68,19.04,10.95,2.2-21.42,11.34-5.59-4.76,31.42,30.96,17.21-11.55,5.69h-.01ZM433.46,369.7l-14.85,7.32-4.03-8.18,39.11-19.27,4.03,8.18-14.86,7.32,20.37,41.34-9.4,4.63-20.37-41.34h0Z" fill="#231f20"/>
|
||||
<polygon points="248.11 427.73 453.11 326.73 453.11 113.73 248.11 7.73 248.11 117.73 333.11 167.73 333.11 267.73 248.11 317.73 248.11 427.73" fill="#231f20"/>
|
||||
<polygon points="23.11 150.73 23.11 275.73 103.11 319.73 103.11 331.73 23.11 375.73 23.11 500.73 228.11 386.73 228.11 261.73 143.11 221.73 143.11 209.73 228.11 161.73 228.11 36.73 23.11 150.73" fill="#231f20"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 1.3 KiB |
|
|
@ -0,0 +1,7 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:svg="http://www.w3.org/2000/svg" viewBox="0 0 512 512">
|
||||
<!-- Generator: Adobe Illustrator 30.2.0, SVG Export Plug-In . SVG Version: 2.1.1 Build 105) -->
|
||||
<path d="M333.62,470.26l-9.4,4.63-39.05-21.52,16.16,32.8-9.4,4.63-24.4-49.52,9.4-4.63,39.09,21.59-16.19-32.87,9.4-4.63,24.4,49.52h-.01ZM442.86,365.07h0ZM334.71,418.37l6.05,12.27h.01l22.32-11.01,4.03,8.18-22.32,11,6.26,12.7,38.07-18.76,3.55-23.51-18.59-10.28-39.38,19.41h0ZM457.12,338.1l-205,101,31.64,65.17,205.12-100.53-31.76-65.64h0ZM423.12,426.16l-21.57-12.47-2.75,24.46-11.34,5.59h0l-41.79,20.59-24.4-49.52,43.06-21.21.19.07-.16-.09h-.01.01l11.54-5.68,19.04,10.95,2.2-21.42,11.34-5.59-4.76,31.42,30.96,17.21-11.55,5.69h-.01ZM433.46,369.7l-14.85,7.32-4.03-8.18,39.11-19.27,4.03,8.18-14.86,7.32,20.37,41.34-9.4,4.63-20.37-41.34h0Z" fill="#fff"/>
|
||||
<polygon points="248.11 427.73 453.11 326.73 453.11 113.73 248.11 7.73 248.11 117.73 333.11 167.73 333.11 267.73 248.11 317.73 248.11 427.73" fill="#fff"/>
|
||||
<polygon points="23.11 150.73 23.11 275.73 103.11 319.73 103.11 331.73 23.11 375.73 23.11 500.73 228.11 386.73 228.11 261.73 143.11 221.73 143.11 209.73 228.11 161.73 228.11 36.73 23.11 150.73" fill="#fff"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 1.3 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 17 KiB |
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 473 KiB |
BIN
html/logo-wm.png
BIN
html/logo-wm.png
Binary file not shown.
|
Before Width: | Height: | Size: 14 KiB |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue