mirror of
https://gitee.com/wanwujie/sub2api
synced 2026-04-04 07:22:13 +08:00
Compare commits
1026 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0236b97d49 | ||
|
|
26f6b1eeff | ||
|
|
dc447ccebe | ||
|
|
7ec29638f4 | ||
|
|
4c9562af20 | ||
|
|
71942fd322 | ||
|
|
550b979ac5 | ||
|
|
3878a5a46f | ||
|
|
e443a6a1ea | ||
|
|
963494ec6f | ||
|
|
525cdb8830 | ||
|
|
a6764e82f2 | ||
|
|
8027531d07 | ||
|
|
30706355a4 | ||
|
|
dfe99507b8 | ||
|
|
c1717c9a6c | ||
|
|
1fd1a58a7a | ||
|
|
fad07507be | ||
|
|
a20c211162 | ||
|
|
9f6ab6b817 | ||
|
|
bf3d6c0e6e | ||
|
|
241023f3fc | ||
|
|
1292c44b41 | ||
|
|
b4fce47049 | ||
|
|
e7780cd8c8 | ||
|
|
af96c8ea53 | ||
|
|
7d26b81075 | ||
|
|
b8ada63ac3 | ||
|
|
cfaac12af1 | ||
|
|
6028efd26c | ||
|
|
62a566ef2c | ||
|
|
94419f434c | ||
|
|
21f349c032 | ||
|
|
28e36f7925 | ||
|
|
6c02076333 | ||
|
|
7414bdf0e3 | ||
|
|
e6326b2929 | ||
|
|
17cdcebd04 | ||
|
|
a14babdc73 | ||
|
|
aadc6a763a | ||
|
|
f16af8bf88 | ||
|
|
5ceaef4500 | ||
|
|
1ac7219a92 | ||
|
|
d4cc9871c4 | ||
|
|
961c30e7c0 | ||
|
|
13e85b3147 | ||
|
|
50a3c7fa0b | ||
|
|
bd9d2671d7 | ||
|
|
62b40636e0 | ||
|
|
eeff451bc5 | ||
|
|
56fcb20f94 | ||
|
|
7134266acf | ||
|
|
2e4ac88ad9 | ||
|
|
51547fa216 | ||
|
|
2005fc97a8 | ||
|
|
0772d9250e | ||
|
|
aa6047c460 | ||
|
|
045cba78b4 | ||
|
|
8989d0d4b6 | ||
|
|
c521117b99 | ||
|
|
e0f52a8ab8 | ||
|
|
6c23fadf7e | ||
|
|
869952d113 | ||
|
|
07ab051ee4 | ||
|
|
f2d98fc0c7 | ||
|
|
2b41cec840 | ||
|
|
6cf77040e7 | ||
|
|
20b70bc5fd | ||
|
|
4905e7193a | ||
|
|
9c1f4b8e72 | ||
|
|
9857c17631 | ||
|
|
7e34bb946f | ||
|
|
47b748851b | ||
|
|
a6f99cf534 | ||
|
|
a120a6bc32 | ||
|
|
d557d1a190 | ||
|
|
e0286e5085 | ||
|
|
4b41e898a4 | ||
|
|
668e164793 | ||
|
|
fa2e6188d0 | ||
|
|
7fde9ebbc2 | ||
|
|
aef7c3b9bb | ||
|
|
a0b76bd608 | ||
|
|
c1fab7f8d8 | ||
|
|
f42c8f2abe | ||
|
|
aa5846b282 | ||
|
|
594a0ade38 | ||
|
|
d45cc23171 | ||
|
|
d795734352 | ||
|
|
4da9fdd1d5 | ||
|
|
6b218caa21 | ||
|
|
5c138007d0 | ||
|
|
1acfc46f46 | ||
|
|
fbffb08aae | ||
|
|
8640a62319 | ||
|
|
fa782e70a4 | ||
|
|
afd72abc6e | ||
|
|
71f72e167e | ||
|
|
6595c7601e | ||
|
|
67c0506290 | ||
|
|
6447be4534 | ||
|
|
3741617ebd | ||
|
|
ab4e8b2cf0 | ||
|
|
474165d7aa | ||
|
|
94e067a2e2 | ||
|
|
4293c89166 | ||
|
|
ec82c37da5 | ||
|
|
552a4b998a | ||
|
|
0d2061b268 | ||
|
|
8a260defc2 | ||
|
|
e14c87597a | ||
|
|
f3f19d35aa | ||
|
|
ced90e1d84 | ||
|
|
17e4033340 | ||
|
|
044d3a013d | ||
|
|
1fc9dd7b68 | ||
|
|
8147866c09 | ||
|
|
7bd1972f94 | ||
|
|
2c9dcfe27b | ||
|
|
1b79b0f3ff | ||
|
|
c637e6cf31 | ||
|
|
d3a9f5bb88 | ||
|
|
7eb0415a8a | ||
|
|
bdbc8fa08f | ||
|
|
63f3af0f94 | ||
|
|
686f890fbf | ||
|
|
220fbe6544 | ||
|
|
ae44a94325 | ||
|
|
3718d6dcd4 | ||
|
|
90b3838173 | ||
|
|
19d3ecc76f | ||
|
|
6fba4ebb13 | ||
|
|
c31974c913 | ||
|
|
6177fa5dd8 | ||
|
|
cfe72159d0 | ||
|
|
8321e4a647 | ||
|
|
3084330d0c | ||
|
|
b566649e79 | ||
|
|
10a6180e4a | ||
|
|
cbe9e78977 | ||
|
|
74145b1f39 | ||
|
|
359e56751b | ||
|
|
5899784aa4 | ||
|
|
9e8959c56d | ||
|
|
1bff2292a6 | ||
|
|
cf9247754e | ||
|
|
eefab15958 | ||
|
|
0e23732631 | ||
|
|
37c044fb4b | ||
|
|
6da5fa01b9 | ||
|
|
616930f9d3 | ||
|
|
b9c31fa7c4 | ||
|
|
17b339972c | ||
|
|
39f8bd91b9 | ||
|
|
aa4e37d085 | ||
|
|
f59b66b7d4 | ||
|
|
8f0ea7a02d | ||
|
|
a1dc00890e | ||
|
|
dfbcc363d1 | ||
|
|
1047f973d5 | ||
|
|
e32977dd73 | ||
|
|
b5f78ec1e8 | ||
|
|
e0f290fdc8 | ||
|
|
fc00a4e3b2 | ||
|
|
db1f6ded88 | ||
|
|
4644af2ccc | ||
|
|
2e3e8687e1 | ||
|
|
ca42a45802 | ||
|
|
9350ecb62b | ||
|
|
a4a026e8da | ||
|
|
342fd03e72 | ||
|
|
e3f1fd9b63 | ||
|
|
e4a4dfd038 | ||
|
|
a377e99088 | ||
|
|
1d3d7a3033 | ||
|
|
e7086cb3a3 | ||
|
|
4f2a97073e | ||
|
|
7407e3b45d | ||
|
|
01ef7340aa | ||
|
|
1c960d22c1 | ||
|
|
ece0606fed | ||
|
|
2666422b99 | ||
|
|
e6d59216d4 | ||
|
|
4e8615f276 | ||
|
|
91e4d95660 | ||
|
|
45456fa24c | ||
|
|
4588258d80 | ||
|
|
c12e48f966 | ||
|
|
ec8f50a658 | ||
|
|
99c9191784 | ||
|
|
6bb02d141f | ||
|
|
07bb2a5f3f | ||
|
|
417861a48e | ||
|
|
b7e878de64 | ||
|
|
05edb5514b | ||
|
|
e90ec847b6 | ||
|
|
6344fa2a86 | ||
|
|
7e288acc90 | ||
|
|
29b0e4a8a5 | ||
|
|
27ff222cfb | ||
|
|
11f7b83522 | ||
|
|
f7177be3b6 | ||
|
|
875b417fde | ||
|
|
2573107b32 | ||
|
|
5b85005945 | ||
|
|
1ee984478f | ||
|
|
fd693dc526 | ||
|
|
e73531ce9b | ||
|
|
53ad1645cf | ||
|
|
ecea13757b | ||
|
|
af9c4a7dd0 | ||
|
|
80d8d6c3bc | ||
|
|
d648811233 | ||
|
|
34695acb85 | ||
|
|
a63de12182 | ||
|
|
f16910d616 | ||
|
|
64b3f3cec1 | ||
|
|
6a685727d0 | ||
|
|
32d25f76fc | ||
|
|
69cafe8674 | ||
|
|
18ba8d9166 | ||
|
|
e97fd7e81c | ||
|
|
cdb64b0d33 | ||
|
|
8d4d3b03bb | ||
|
|
addefe79e1 | ||
|
|
b764d3b8f6 | ||
|
|
611fd884bd | ||
|
|
826090e099 | ||
|
|
7399de6ecc | ||
|
|
25cb5e7505 | ||
|
|
5c13ec3121 | ||
|
|
d8aff3a7e3 | ||
|
|
f44927b9f8 | ||
|
|
c0110cb5af | ||
|
|
1f8e1142a0 | ||
|
|
1e51de88d6 | ||
|
|
30995b5397 | ||
|
|
eb60f67054 | ||
|
|
78193ceec1 | ||
|
|
f0e08e7687 | ||
|
|
10b8259259 | ||
|
|
6826149a8f | ||
|
|
eb0b77bf4d | ||
|
|
9d81467937 | ||
|
|
fd8ccaf01a | ||
|
|
c9debc50b1 | ||
|
|
2b30e3b6d7 | ||
|
|
6e90ec6111 | ||
|
|
8dd38f4775 | ||
|
|
fbd73f248f | ||
|
|
3fcefe6c32 | ||
|
|
f740d2c291 | ||
|
|
bf6585a40f | ||
|
|
8c2dd7b3f0 | ||
|
|
4167c437a8 | ||
|
|
0ddaef3c9a | ||
|
|
2fc6aaf936 | ||
|
|
1c0519f1c7 | ||
|
|
6bbe7800be | ||
|
|
2694149489 | ||
|
|
a17ac50118 | ||
|
|
656a77d585 | ||
|
|
7455476c60 | ||
|
|
36cda57c81 | ||
|
|
9f1f203b84 | ||
|
|
b41a8ca93f | ||
|
|
e3cf0c0e10 | ||
|
|
de18bce9aa | ||
|
|
3cc407bc0e | ||
|
|
00a0a12138 | ||
|
|
b08767a4f9 | ||
|
|
ac6bde7a98 | ||
|
|
d2d41d68dd | ||
|
|
944b7f7617 | ||
|
|
53825eb073 | ||
|
|
1a7f49513f | ||
|
|
885a2ce7ef | ||
|
|
14ba80a0af | ||
|
|
5fa22fdf82 | ||
|
|
bcaae2eb91 | ||
|
|
767a41e263 | ||
|
|
252d6c5301 | ||
|
|
7a4e65ad4b | ||
|
|
a582aa89a9 | ||
|
|
acefa1da12 | ||
|
|
a88698f3fc | ||
|
|
ebc6755b33 | ||
|
|
c8eff34388 | ||
|
|
f19b03825b | ||
|
|
25178cdbe1 | ||
|
|
a461538d58 | ||
|
|
b43ee62947 | ||
|
|
ebe6f418f3 | ||
|
|
391e79f8ee | ||
|
|
c7fcb7a84b | ||
|
|
87f4ed591e | ||
|
|
440d2e28ed | ||
|
|
6cb8980404 | ||
|
|
fe752bbd35 | ||
|
|
c74d451fa2 | ||
|
|
12d743fb35 | ||
|
|
6acb9f7910 | ||
|
|
eb6f5c6927 | ||
|
|
7ccb4c8ea3 | ||
|
|
4ce986d47d | ||
|
|
91ef085d7d | ||
|
|
97aaa24733 | ||
|
|
faf6441633 | ||
|
|
00c151b463 | ||
|
|
106b20cdbf | ||
|
|
c069b3b1e8 | ||
|
|
a2ae9f1f27 | ||
|
|
4cd6d86426 | ||
|
|
fa72f1947a | ||
|
|
9ee7d3935d | ||
|
|
1071fe0ac7 | ||
|
|
0be003377f | ||
|
|
ca3f497b56 | ||
|
|
034b84b707 | ||
|
|
1624523c4e | ||
|
|
313afe14ce | ||
|
|
01180b316f | ||
|
|
ee7d061001 | ||
|
|
60c5949a74 | ||
|
|
2ebbd4c94d | ||
|
|
785115c62b | ||
|
|
e643fc382c | ||
|
|
34aad82ac3 | ||
|
|
0c29468f90 | ||
|
|
9301dae63e | ||
|
|
2475d4a205 | ||
|
|
be75fc3474 | ||
|
|
785e049af3 | ||
|
|
be4e49e6d7 | ||
|
|
1307d604e7 | ||
|
|
45d57018eb | ||
|
|
03bf348530 | ||
|
|
cab60ef735 | ||
|
|
a3791104f9 | ||
|
|
2b3e40bb2a | ||
|
|
0c1dcad429 | ||
|
|
101ef0cf62 | ||
|
|
0debe0a80c | ||
|
|
d22e62ac8a | ||
|
|
1ee17383f8 | ||
|
|
b59c79c458 | ||
|
|
bcb6444f89 | ||
|
|
c2b14693b4 | ||
|
|
92d35409de | ||
|
|
351a08f813 | ||
|
|
a58dc787a9 | ||
|
|
7079edc2d0 | ||
|
|
da89583ccc | ||
|
|
a42a1f08e9 | ||
|
|
ebd5253e22 | ||
|
|
6411645ffc | ||
|
|
c0c322ba16 | ||
|
|
d35c5cd491 | ||
|
|
7a353028e7 | ||
|
|
2d8d3b7857 | ||
|
|
4190293b07 | ||
|
|
421b4c0aff | ||
|
|
cd69a7cb85 | ||
|
|
0c9ba9e86c | ||
|
|
1b4d2a41c9 | ||
|
|
0787d2b47a | ||
|
|
97bf1d85ab | ||
|
|
207a493fab | ||
|
|
1f3f9e131e | ||
|
|
4ddedfaaf9 | ||
|
|
3ebebef95f | ||
|
|
9f7ad47598 | ||
|
|
3c83cd8be2 | ||
|
|
963b3b768c | ||
|
|
f6709fb5d6 | ||
|
|
921599948b | ||
|
|
5df3cafa99 | ||
|
|
1a2143c1fe | ||
|
|
dd25281305 | ||
|
|
49d0301dde | ||
|
|
d90e56eb45 | ||
|
|
838ada8864 | ||
|
|
65a106792a | ||
|
|
ee4bfcbb81 | ||
|
|
a087f089b8 | ||
|
|
afbe8bf001 | ||
|
|
2a3ef0be06 | ||
|
|
3403909354 | ||
|
|
005d0c5f53 | ||
|
|
8aaaeb29cc | ||
|
|
230f8abd04 | ||
|
|
a18bbb5f2f | ||
|
|
60fce4f1dc | ||
|
|
9af65efcdb | ||
|
|
bc194a7d8c | ||
|
|
c28f691f32 | ||
|
|
ff1f114989 | ||
|
|
cac230206d | ||
|
|
79ae15d5e8 | ||
|
|
0cce0a8877 | ||
|
|
225fd035ae | ||
|
|
fb7d1346b5 | ||
|
|
491a744481 | ||
|
|
f366026435 | ||
|
|
1a0d4ed668 | ||
|
|
63a8c76946 | ||
|
|
f355a68bc9 | ||
|
|
c87e6526c1 | ||
|
|
af3a5076d6 | ||
|
|
18f2e21414 | ||
|
|
8a8cdeebb4 | ||
|
|
12b33f4ea4 | ||
|
|
01b3a09d7d | ||
|
|
0d6c1c7790 | ||
|
|
95e366b6c6 | ||
|
|
77701143bf | ||
|
|
02dea7b09b | ||
|
|
c26f93c4a0 | ||
|
|
c826ac28ef | ||
|
|
1893b0eb30 | ||
|
|
05527b13db | ||
|
|
ae5d9c8bfc | ||
|
|
9117c2a4ec | ||
|
|
bab4bb9904 | ||
|
|
33bae6f49b | ||
|
|
32d619a56b | ||
|
|
642432cf2a | ||
|
|
61e9598b08 | ||
|
|
d4e34c7514 | ||
|
|
bfe7a5e452 | ||
|
|
77d916ffec | ||
|
|
831abf7977 | ||
|
|
817a491087 | ||
|
|
9a8dacc514 | ||
|
|
8adf80d98b | ||
|
|
62686a6213 | ||
|
|
3a089242f8 | ||
|
|
9d70c38504 | ||
|
|
aeb464f3ca | ||
|
|
7076717b20 | ||
|
|
c0a4fcea0a | ||
|
|
aa2b195c86 | ||
|
|
1d0872e7ca | ||
|
|
33988637b5 | ||
|
|
d4f6ad7225 | ||
|
|
078fefed03 | ||
|
|
5b10af85b4 | ||
|
|
4caf95e5dd | ||
|
|
8e1bcf53bb | ||
|
|
064f9be7e4 | ||
|
|
adcfb44cb7 | ||
|
|
3d79773ba2 | ||
|
|
6aa8cbbf20 | ||
|
|
742e73c9c2 | ||
|
|
f8de2bdedc | ||
|
|
59879b7fa7 | ||
|
|
27abae21b8 | ||
|
|
0819c8a51a | ||
|
|
9dcd3cd491 | ||
|
|
49767cccd2 | ||
|
|
29fb447daa | ||
|
|
f6fe5b552d | ||
|
|
bd0801a887 | ||
|
|
05b1c66aa8 | ||
|
|
80ae592c23 | ||
|
|
ba6de4c4d4 | ||
|
|
46ea9170cb | ||
|
|
7d318aeefa | ||
|
|
0aa3cf677a | ||
|
|
72961c5858 | ||
|
|
a05711a37a | ||
|
|
efc9e1d673 | ||
|
|
a11ac188c2 | ||
|
|
60350d298a | ||
|
|
838dad8759 | ||
|
|
a728dfe0c6 | ||
|
|
0c7cbe3566 | ||
|
|
832b0185c7 | ||
|
|
b1719b26d1 | ||
|
|
ccf6a921c7 | ||
|
|
197c570baa | ||
|
|
0fe09f1d40 | ||
|
|
4a91954532 | ||
|
|
b8b5cec35c | ||
|
|
43c203333e | ||
|
|
1c6393b131 | ||
|
|
22f04e72e5 | ||
|
|
5f3debf65b | ||
|
|
fd8ef27535 | ||
|
|
a80ec5d8bb | ||
|
|
530a16291c | ||
|
|
7be8f4dc6e | ||
|
|
9792b17597 | ||
|
|
99f1e3ff35 | ||
|
|
5ba71cd2f1 | ||
|
|
b7df7ce5d5 | ||
|
|
405829dc30 | ||
|
|
451a851118 | ||
|
|
e97c376681 | ||
|
|
7541e243bc | ||
|
|
50a8116ae9 | ||
|
|
bf6fe5e962 | ||
|
|
e4f8799323 | ||
|
|
1f95524996 | ||
|
|
a50d5d351b | ||
|
|
067810fa98 | ||
|
|
a9285b8a94 | ||
|
|
ec6bcfeb83 | ||
|
|
7abec1888f | ||
|
|
fdcbf7aacf | ||
|
|
445bfdf242 | ||
|
|
0fba1901c8 | ||
|
|
fc5b9c8235 | ||
|
|
f490f44501 | ||
|
|
7e02082209 | ||
|
|
d869ac95fa | ||
|
|
5c856460a6 | ||
|
|
3613695f91 | ||
|
|
8fb7d476b8 | ||
|
|
dd8df483cd | ||
|
|
65459a99b6 | ||
|
|
2129584fd6 | ||
|
|
2da9c216c3 | ||
|
|
c6e26c5a16 | ||
|
|
fd57fa4913 | ||
|
|
8c4d22b3f9 | ||
|
|
c221774c51 | ||
|
|
23686b1391 | ||
|
|
0fffba5423 | ||
|
|
0e0eb747b5 | ||
|
|
f6f8695a8e | ||
|
|
b2141a96e2 | ||
|
|
4280aca82c | ||
|
|
c08889b021 | ||
|
|
57ebe382f9 | ||
|
|
73089bbfdf | ||
|
|
3a04552f98 | ||
|
|
b67bf2227e | ||
|
|
dde3b59e7b | ||
|
|
947800b95f | ||
|
|
7aa4c083a9 | ||
|
|
fcc77d1383 | ||
|
|
997cd1e332 | ||
|
|
2e88e23002 | ||
|
|
39ca192c41 | ||
|
|
f7fa71bc28 | ||
|
|
fbfbb26fd2 | ||
|
|
493bd188d5 | ||
|
|
9fd95df5cf | ||
|
|
54de3bf27a | ||
|
|
4587c3e53e | ||
|
|
be18bc6fc3 | ||
|
|
212cbbd3a2 | ||
|
|
6f9e690345 | ||
|
|
115d06edf0 | ||
|
|
e135435ce2 | ||
|
|
cd09adc3cc | ||
|
|
2491e9b5ad | ||
|
|
e63c83955a | ||
|
|
4b72aa33f3 | ||
|
|
ff9683b0fc | ||
|
|
607237571f | ||
|
|
28ca7df297 | ||
|
|
856c955386 | ||
|
|
e1c9016d90 | ||
|
|
953c5036bf | ||
|
|
37fa980565 | ||
|
|
f648b8e026 | ||
|
|
678c3ae132 | ||
|
|
c1c31ed9b2 | ||
|
|
777be05348 | ||
|
|
0bb3e4a98c | ||
|
|
9a91815b94 | ||
|
|
000e621eb6 | ||
|
|
093d7ba858 | ||
|
|
ce006a7a91 | ||
|
|
9d795061af | ||
|
|
1d1fc019dc | ||
|
|
bb664d9bbf | ||
|
|
bfc7b339f7 | ||
|
|
f30f8905ec | ||
|
|
3bae525026 | ||
|
|
df00805a2a | ||
|
|
a88ee96518 | ||
|
|
3cc2f9bd57 | ||
|
|
d1b684b782 | ||
|
|
6460d4ad3a | ||
|
|
19ea392d5d | ||
|
|
fb4d016176 | ||
|
|
afec747d9e | ||
|
|
7388fcce41 | ||
|
|
a6f9f9f968 | ||
|
|
29759721e0 | ||
|
|
1941b20521 | ||
|
|
e6969acb50 | ||
|
|
9489531431 | ||
|
|
32b7c0ca9b | ||
|
|
4ac57b4edf | ||
|
|
685a1e0ba3 | ||
|
|
e350aab1bd | ||
|
|
0dd6986e28 | ||
|
|
6d0102a70c | ||
|
|
f96a2a18c1 | ||
|
|
f955b04a6f | ||
|
|
2fd6ac319b | ||
|
|
82fbf452a8 | ||
|
|
ba69736f55 | ||
|
|
c75c6b6858 | ||
|
|
de61745bb2 | ||
|
|
3fab0fcd4c | ||
|
|
03bcd94ae5 | ||
|
|
0343bc7777 | ||
|
|
565d19acfd | ||
|
|
960acf1982 | ||
|
|
ece911521e | ||
|
|
5d95e59742 | ||
|
|
01d084bbfd | ||
|
|
7918fc2844 | ||
|
|
31b30a6df2 | ||
|
|
d217b59e0b | ||
|
|
169a4b9d32 | ||
|
|
15f3ffb165 | ||
|
|
02db1010dd | ||
|
|
935ea66681 | ||
|
|
26060e702f | ||
|
|
65d4ca2563 | ||
|
|
3c619a8da5 | ||
|
|
ded9b6c14e | ||
|
|
609abbbd7c | ||
|
|
1b4e504fad | ||
|
|
0a3a445828 | ||
|
|
c7e18bd5be | ||
|
|
083d202fe4 | ||
|
|
8365a8328b | ||
|
|
58f21e4b3a | ||
|
|
5bd7408b2f | ||
|
|
c671e8dd1d | ||
|
|
a3aed3c4c3 | ||
|
|
c008649584 | ||
|
|
516f8f287c | ||
|
|
66148690c6 | ||
|
|
cadd7f546f | ||
|
|
a3ff317f1c | ||
|
|
d8d4b0c0c7 | ||
|
|
d616f8c854 | ||
|
|
b6fa8b8eec | ||
|
|
36d2e6999b | ||
|
|
076c00063d | ||
|
|
ea8104c6a2 | ||
|
|
ca3e9336e1 | ||
|
|
f92ab48166 | ||
|
|
c10267ce2b | ||
|
|
9bd6a62ab3 | ||
|
|
0dbea6ca58 | ||
|
|
6523b23221 | ||
|
|
29c406dda0 | ||
|
|
483c8f246d | ||
|
|
645f283108 | ||
|
|
da6fd45000 | ||
|
|
fb3ef5f388 | ||
|
|
86bc76e352 | ||
|
|
644058174e | ||
|
|
4573868c08 | ||
|
|
09166a52f8 | ||
|
|
aaac1aaca9 | ||
|
|
59898c16c6 | ||
|
|
0dacdf480b | ||
|
|
fdf9f68298 | ||
|
|
7be5e1734c | ||
|
|
bfe414670f | ||
|
|
e435a46db5 | ||
|
|
84bd881e68 | ||
|
|
a901117b8c | ||
|
|
6bccb8a8a6 | ||
|
|
3de1e0e485 | ||
|
|
492b852a1f | ||
|
|
8a137405d4 | ||
|
|
f431f5ed72 | ||
|
|
980fc9608f | ||
|
|
07be258dca | ||
|
|
dbdb29594c | ||
|
|
53d55bb92f | ||
|
|
3f3efff065 | ||
|
|
57b078f2c7 | ||
|
|
1fc6ef3d4f | ||
|
|
c2567831d9 | ||
|
|
e8671fd7c2 | ||
|
|
4950ee48a0 | ||
|
|
5fa45f3b8c | ||
|
|
3b6584cc8d | ||
|
|
7be1195281 | ||
|
|
1fae8d086d | ||
|
|
10636d8a1f | ||
|
|
c67f02eaf0 | ||
|
|
0b32f61062 | ||
|
|
2ee6c26676 | ||
|
|
a89477ddf5 | ||
|
|
2f520c8d47 | ||
|
|
33db7a0fb6 | ||
|
|
50b9897182 | ||
|
|
f8ac5538e2 | ||
|
|
1985be26b2 | ||
|
|
fdfc739b72 | ||
|
|
bde9dbc57a | ||
|
|
80510e5f16 | ||
|
|
773f20ed5e | ||
|
|
f323174d07 | ||
|
|
987589eabc | ||
|
|
1004bd86ac | ||
|
|
03f69dd394 | ||
|
|
d14c24bbf3 | ||
|
|
48dc011b2a | ||
|
|
b341810e60 | ||
|
|
46d9aee6dd | ||
|
|
36a1a7998b | ||
|
|
40498aac9d | ||
|
|
440b87094a | ||
|
|
0832dfb32e | ||
|
|
be09188bda | ||
|
|
5d2219d299 | ||
|
|
900cce20a1 | ||
|
|
36bb327024 | ||
|
|
5d9667d27a | ||
|
|
fad04ca995 | ||
|
|
074bd0dfda | ||
|
|
b41fa5e15f | ||
|
|
beceb45d23 | ||
|
|
9450edf462 | ||
|
|
785a7397f8 | ||
|
|
3d1f03c286 | ||
|
|
8ff40f52e0 | ||
|
|
6577f2ef03 | ||
|
|
41d0383fb7 | ||
|
|
1cf51b14f7 | ||
|
|
372e04f69a | ||
|
|
e2107ce45e | ||
|
|
a817cafe3d | ||
|
|
ab14df043a | ||
|
|
5feff6b1e5 | ||
|
|
06b0f62e79 | ||
|
|
40d110efe4 | ||
|
|
f23318fbcf | ||
|
|
cbab49d65f | ||
|
|
b5a3b3db66 | ||
|
|
9cafa46dd3 | ||
|
|
f6bff97d26 | ||
|
|
d04b47b3ca | ||
|
|
862199143e | ||
|
|
57e8abcb63 | ||
|
|
ed31c54961 | ||
|
|
4bfa69bffa | ||
|
|
2857fa2ef7 | ||
|
|
e681431454 | ||
|
|
5b568aa9d4 | ||
|
|
471943269c | ||
|
|
28a5e2f0e6 | ||
|
|
b4c22ce6ce | ||
|
|
5248097f90 | ||
|
|
8e2c22d0bd | ||
|
|
888f2936ad | ||
|
|
4e894bac1f | ||
|
|
f96acf6e27 | ||
|
|
be56a282f2 | ||
|
|
2459eafb71 | ||
|
|
ed681d0830 | ||
|
|
5f4eb9f9d0 | ||
|
|
d1cd5c0a73 | ||
|
|
5429c74c10 | ||
|
|
3734abed4c | ||
|
|
abf5de69fb | ||
|
|
7582dc53d2 | ||
|
|
174d7c774d | ||
|
|
a9518cc5be | ||
|
|
2f190d812a | ||
|
|
d411cf4472 | ||
|
|
1ae49b9ead | ||
|
|
0bf162f64a | ||
|
|
6423636177 | ||
|
|
b6aaee01ce | ||
|
|
3511376c2c | ||
|
|
584cfc3db2 | ||
|
|
eaa7d899f0 | ||
|
|
84cc651b46 | ||
|
|
b7243660c4 | ||
|
|
e722992439 | ||
|
|
fff1d54858 | ||
|
|
a5f29019d9 | ||
|
|
208c5380f4 | ||
|
|
29191af877 | ||
|
|
2d6066f985 | ||
|
|
3ea5e5c33a | ||
|
|
dbd7969a3e | ||
|
|
af3069073a | ||
|
|
65661f24e2 | ||
|
|
ed2eba9028 | ||
|
|
10c1590b1d | ||
|
|
114e172603 | ||
|
|
09c8380b3d | ||
|
|
ba567babf4 | ||
|
|
9403aa9bd1 | ||
|
|
34b8bbcbe4 | ||
|
|
6b36992d34 | ||
|
|
6533a4647d | ||
|
|
9c910c2049 | ||
|
|
43dc23a47d | ||
|
|
61a2bf469a | ||
|
|
fe1d46a8ea | ||
|
|
a88bb8684f | ||
|
|
c7b42148a5 | ||
|
|
bc1abb6a23 | ||
|
|
d307d48def | ||
|
|
1bb40084fc | ||
|
|
8f0efa16ca | ||
|
|
8da5fac69e | ||
|
|
e2cdb6c758 | ||
|
|
ef2c35dbb1 | ||
|
|
04a1a7c2b5 | ||
|
|
d21d70a5cf | ||
|
|
e73b778d2b | ||
|
|
723102766b | ||
|
|
a4a46a8618 | ||
|
|
6ae82e04d5 | ||
|
|
19cca11e00 | ||
|
|
c8f87a9c92 | ||
|
|
f1e884ce2b | ||
|
|
86f3124720 | ||
|
|
ae6fed15cc | ||
|
|
4b309fa8b5 | ||
|
|
378e476e48 | ||
|
|
2a1067c82b | ||
|
|
a54b81cf74 | ||
|
|
2d4236f76e | ||
|
|
166080b29c | ||
|
|
3b0910f664 | ||
|
|
e489996713 | ||
|
|
54fe363257 | ||
|
|
84ced1c497 | ||
|
|
b161312183 | ||
|
|
1dd3158c7e | ||
|
|
1f647b120a | ||
|
|
7d0a30fa8f | ||
|
|
d95e04fd1f | ||
|
|
5dd83d3cf2 | ||
|
|
14e1aac9b5 | ||
|
|
5d1c51a37f | ||
|
|
58912d4ac5 | ||
|
|
6114f69cca | ||
|
|
d6c2921f2b | ||
|
|
29ca1290b3 | ||
|
|
3fcb0cc37c | ||
|
|
61c73287dc | ||
|
|
89905ec43d | ||
|
|
aa4b102108 | ||
|
|
e4bc35151f | ||
|
|
2bfb16291f | ||
|
|
56da498b7e | ||
|
|
1bba1a62b1 | ||
|
|
d367d1cde6 | ||
|
|
4a84ca9a02 | ||
|
|
3c46f7d266 | ||
|
|
16131c3d3f | ||
|
|
a70d37a676 | ||
|
|
6892e84ad2 | ||
|
|
73f455745c | ||
|
|
021abfca18 | ||
|
|
7d66f7ff0d | ||
|
|
470b37be7e | ||
|
|
f6cfab9901 | ||
|
|
51572b5da0 | ||
|
|
91ca28b7e3 | ||
|
|
04cedce9a1 | ||
|
|
5e0d789440 | ||
|
|
d7011163b8 | ||
|
|
149e4267cd | ||
|
|
fc8a39e0f5 | ||
|
|
9a479d1b55 | ||
|
|
fc095bf054 | ||
|
|
1af06aed96 | ||
|
|
9236936a55 | ||
|
|
125152460f | ||
|
|
6d90fb0bc3 | ||
|
|
b889d5017b | ||
|
|
72b08f9cc5 | ||
|
|
681950dadd | ||
|
|
a67d9337b8 | ||
|
|
2f1182e8a9 | ||
|
|
cbb4d854ab | ||
|
|
35598d5648 | ||
|
|
5c76b9e45a | ||
|
|
0b8fea4cb4 | ||
|
|
5fa93ebdc7 | ||
|
|
8aa0aed566 | ||
|
|
2eb32a0ed7 | ||
|
|
bac9e2bfd5 | ||
|
|
e4d74ae11d | ||
|
|
8a0a8558cf | ||
|
|
2185a3b674 | ||
|
|
9e3c306a5b | ||
|
|
b1c30df8e3 | ||
|
|
69816f8691 | ||
|
|
b4ec65785d | ||
|
|
3c93644146 | ||
|
|
fb58560d15 | ||
|
|
9da80e9fda | ||
|
|
bb5a5dd65e | ||
|
|
6ab77f5eb5 | ||
|
|
4f57d7f761 | ||
|
|
1563bd3dda | ||
|
|
df3346387f | ||
|
|
77b66653ed | ||
|
|
53e1c8b268 | ||
|
|
d876686a00 | ||
|
|
7546a56736 | ||
|
|
00caf0bcd8 | ||
|
|
9634494ba9 | ||
|
|
e1ac0db05c | ||
|
|
6f3e77a2df | ||
|
|
4a20a2a8ba | ||
|
|
bc3ca5f068 | ||
|
|
fd43be8d0b | ||
|
|
836ba14b70 | ||
|
|
a14dfb769a | ||
|
|
2588fa6a8f | ||
|
|
3077fd279d | ||
|
|
f3605ddc71 | ||
|
|
6aaa4aee6a | ||
|
|
e3748da860 | ||
|
|
36e6fb5fc8 | ||
|
|
86b503f87f | ||
|
|
50a783ff01 | ||
|
|
f6ca701917 | ||
|
|
a84604dceb | ||
|
|
da9546ba24 | ||
|
|
e75d3e3584 | ||
|
|
1439eb39a9 | ||
|
|
8226a4ce4d | ||
|
|
e1a68497d6 | ||
|
|
c4615a1224 | ||
|
|
65c0d8b51f | ||
|
|
a9e256ce8c | ||
|
|
fa28dcbf32 | ||
|
|
2656320d04 | ||
|
|
7e1674e43a | ||
|
|
fc104dfb56 | ||
|
|
5d4327eb14 | ||
|
|
b4f6c4f9d5 | ||
|
|
0e514ed80b | ||
|
|
14c6c9321a | ||
|
|
386126b1b2 | ||
|
|
de0927289e | ||
|
|
edb0937024 | ||
|
|
43a4840daf | ||
|
|
5e98445b22 | ||
|
|
e617b45ba3 | ||
|
|
20283bb55b | ||
|
|
515dbf2c78 | ||
|
|
2887e280d6 | ||
|
|
8826705e71 | ||
|
|
8917afab2a | ||
|
|
49233ec26a | ||
|
|
1e1cbbee80 | ||
|
|
39a5b17d31 | ||
|
|
782a54a8a1 | ||
|
|
35a55e10aa | ||
|
|
9e80ed0fa8 | ||
|
|
5299f3dcf6 | ||
|
|
7b1564898b | ||
|
|
4e01126ff2 | ||
|
|
55b56328da | ||
|
|
ce764bf2d9 | ||
|
|
d71537d431 | ||
|
|
ae1ba45350 | ||
|
|
c4182f8c33 | ||
|
|
028f8aaa97 | ||
|
|
d3f11fdbd3 | ||
|
|
8672b2f3ec | ||
|
|
de753a149e | ||
|
|
2d4bbbf49d | ||
|
|
76d242e024 | ||
|
|
260c152166 | ||
|
|
9f4c1ef9f9 | ||
|
|
bd7fdb5e6c | ||
|
|
a381910e86 | ||
|
|
d182ef0391 | ||
|
|
7319122e92 | ||
|
|
4809fa4f19 | ||
|
|
792bef615c | ||
|
|
ee01f80dc1 | ||
|
|
98671a73f4 | ||
|
|
f33a950103 | ||
|
|
132bf34b69 | ||
|
|
01b08e1e43 | ||
|
|
000a943cce | ||
|
|
c6a456c7c7 | ||
|
|
cc2329d4fd | ||
|
|
84d0433cc3 | ||
|
|
f82e346f02 | ||
|
|
a113dd4def | ||
|
|
98f793155f | ||
|
|
a38bd413ab | ||
|
|
9e1535e203 | ||
|
|
d8e405511e | ||
|
|
037a409919 | ||
|
|
029994a83b | ||
|
|
37047919ab | ||
|
|
0b45d48e85 | ||
|
|
0c660f8335 | ||
|
|
ce9a247a9d | ||
|
|
b4bd46d067 | ||
|
|
74d35f0860 | ||
|
|
de7ff902de | ||
|
|
317f26f0bf | ||
|
|
dd96ada3c6 | ||
|
|
9b120e68b8 | ||
|
|
377bffe281 | ||
|
|
31fe017888 | ||
|
|
99250ec527 | ||
|
|
dcf5f60237 | ||
|
|
399dd78b2a | ||
|
|
78d0ca3775 | ||
|
|
618a614cbf | ||
|
|
99dc3b59bc | ||
|
|
d9e345f23d | ||
|
|
a505d992ee | ||
|
|
13262a5698 | ||
|
|
bece1b5201 |
22
.gitattributes
vendored
Normal file
22
.gitattributes
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# 确保所有 SQL 迁移文件使用 LF 换行符
|
||||||
|
backend/migrations/*.sql text eol=lf
|
||||||
|
|
||||||
|
# Go 源代码文件
|
||||||
|
*.go text eol=lf
|
||||||
|
|
||||||
|
# 前端 源代码文件
|
||||||
|
*.ts text eol=lf
|
||||||
|
*.tsx text eol=lf
|
||||||
|
*.js text eol=lf
|
||||||
|
*.jsx text eol=lf
|
||||||
|
*.vue text eol=lf
|
||||||
|
|
||||||
|
# Shell 脚本
|
||||||
|
*.sh text eol=lf
|
||||||
|
|
||||||
|
# YAML/YML 配置文件
|
||||||
|
*.yaml text eol=lf
|
||||||
|
*.yml text eol=lf
|
||||||
|
|
||||||
|
# Dockerfile
|
||||||
|
Dockerfile text eol=lf
|
||||||
16
.github/workflows/backend-ci.yml
vendored
16
.github/workflows/backend-ci.yml
vendored
@@ -11,15 +11,15 @@ jobs:
|
|||||||
test:
|
test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version-file: backend/go.mod
|
go-version-file: backend/go.mod
|
||||||
check-latest: false
|
check-latest: false
|
||||||
cache: true
|
cache: true
|
||||||
- name: Verify Go version
|
- name: Verify Go version
|
||||||
run: |
|
run: |
|
||||||
go version | grep -q 'go1.25.6'
|
go version | grep -q 'go1.26.1'
|
||||||
- name: Unit tests
|
- name: Unit tests
|
||||||
working-directory: backend
|
working-directory: backend
|
||||||
run: make test-unit
|
run: make test-unit
|
||||||
@@ -30,18 +30,18 @@ jobs:
|
|||||||
golangci-lint:
|
golangci-lint:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version-file: backend/go.mod
|
go-version-file: backend/go.mod
|
||||||
check-latest: false
|
check-latest: false
|
||||||
cache: true
|
cache: true
|
||||||
- name: Verify Go version
|
- name: Verify Go version
|
||||||
run: |
|
run: |
|
||||||
go version | grep -q 'go1.25.6'
|
go version | grep -q 'go1.26.1'
|
||||||
- name: golangci-lint
|
- name: golangci-lint
|
||||||
uses: golangci/golangci-lint-action@v9
|
uses: golangci/golangci-lint-action@v9
|
||||||
with:
|
with:
|
||||||
version: v2.7
|
version: v2.9
|
||||||
args: --timeout=5m
|
args: --timeout=30m
|
||||||
working-directory: backend
|
working-directory: backend
|
||||||
57
.github/workflows/release.yml
vendored
57
.github/workflows/release.yml
vendored
@@ -31,7 +31,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
- name: Update VERSION file
|
- name: Update VERSION file
|
||||||
run: |
|
run: |
|
||||||
@@ -45,7 +45,7 @@ jobs:
|
|||||||
echo "Updated VERSION file to: $VERSION"
|
echo "Updated VERSION file to: $VERSION"
|
||||||
|
|
||||||
- name: Upload VERSION artifact
|
- name: Upload VERSION artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v7
|
||||||
with:
|
with:
|
||||||
name: version-file
|
name: version-file
|
||||||
path: backend/cmd/server/VERSION
|
path: backend/cmd/server/VERSION
|
||||||
@@ -55,7 +55,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
- name: Setup pnpm
|
- name: Setup pnpm
|
||||||
uses: pnpm/action-setup@v4
|
uses: pnpm/action-setup@v4
|
||||||
@@ -63,7 +63,7 @@ jobs:
|
|||||||
version: 9
|
version: 9
|
||||||
|
|
||||||
- name: Setup Node.js
|
- name: Setup Node.js
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v6
|
||||||
with:
|
with:
|
||||||
node-version: '20'
|
node-version: '20'
|
||||||
cache: 'pnpm'
|
cache: 'pnpm'
|
||||||
@@ -78,7 +78,7 @@ jobs:
|
|||||||
working-directory: frontend
|
working-directory: frontend
|
||||||
|
|
||||||
- name: Upload frontend artifact
|
- name: Upload frontend artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v7
|
||||||
with:
|
with:
|
||||||
name: frontend-dist
|
name: frontend-dist
|
||||||
path: backend/internal/web/dist/
|
path: backend/internal/web/dist/
|
||||||
@@ -89,25 +89,25 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
ref: ${{ github.event.inputs.tag || github.ref }}
|
ref: ${{ github.event.inputs.tag || github.ref }}
|
||||||
|
|
||||||
- name: Download VERSION artifact
|
- name: Download VERSION artifact
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v8
|
||||||
with:
|
with:
|
||||||
name: version-file
|
name: version-file
|
||||||
path: backend/cmd/server/
|
path: backend/cmd/server/
|
||||||
|
|
||||||
- name: Download frontend artifact
|
- name: Download frontend artifact
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v8
|
||||||
with:
|
with:
|
||||||
name: frontend-dist
|
name: frontend-dist
|
||||||
path: backend/internal/web/dist/
|
path: backend/internal/web/dist/
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version-file: backend/go.mod
|
go-version-file: backend/go.mod
|
||||||
check-latest: false
|
check-latest: false
|
||||||
@@ -115,7 +115,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Verify Go version
|
- name: Verify Go version
|
||||||
run: |
|
run: |
|
||||||
go version | grep -q 'go1.25.6'
|
go version | grep -q 'go1.26.1'
|
||||||
|
|
||||||
# Docker setup for GoReleaser
|
# Docker setup for GoReleaser
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
@@ -173,7 +173,7 @@ jobs:
|
|||||||
run: echo "owner=$(echo '${{ github.repository_owner }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
run: echo "owner=$(echo '${{ github.repository_owner }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Run GoReleaser
|
- name: Run GoReleaser
|
||||||
uses: goreleaser/goreleaser-action@v6
|
uses: goreleaser/goreleaser-action@v7
|
||||||
with:
|
with:
|
||||||
version: '~> v2'
|
version: '~> v2'
|
||||||
args: release --clean --skip=validate ${{ env.SIMPLE_RELEASE == 'true' && '--config=.goreleaser.simple.yaml' || '' }}
|
args: release --clean --skip=validate ${{ env.SIMPLE_RELEASE == 'true' && '--config=.goreleaser.simple.yaml' || '' }}
|
||||||
@@ -188,7 +188,7 @@ jobs:
|
|||||||
# Update DockerHub description
|
# Update DockerHub description
|
||||||
- name: Update DockerHub description
|
- name: Update DockerHub description
|
||||||
if: ${{ env.SIMPLE_RELEASE != 'true' && env.DOCKERHUB_USERNAME != '' }}
|
if: ${{ env.SIMPLE_RELEASE != 'true' && env.DOCKERHUB_USERNAME != '' }}
|
||||||
uses: peter-evans/dockerhub-description@v4
|
uses: peter-evans/dockerhub-description@v5
|
||||||
env:
|
env:
|
||||||
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
with:
|
with:
|
||||||
@@ -271,3 +271,36 @@ jobs:
|
|||||||
parse_mode: "Markdown",
|
parse_mode: "Markdown",
|
||||||
disable_web_page_preview: true
|
disable_web_page_preview: true
|
||||||
}')"
|
}')"
|
||||||
|
|
||||||
|
sync-version-file:
|
||||||
|
needs: [release]
|
||||||
|
if: ${{ needs.release.result == 'success' }}
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout default branch
|
||||||
|
uses: actions/checkout@v6
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
|
||||||
|
- name: Sync VERSION file to released tag
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
VERSION=${{ github.event.inputs.tag }}
|
||||||
|
VERSION=${VERSION#v}
|
||||||
|
else
|
||||||
|
VERSION=${GITHUB_REF#refs/tags/v}
|
||||||
|
fi
|
||||||
|
|
||||||
|
CURRENT_VERSION=$(tr -d '\r\n' < backend/cmd/server/VERSION || true)
|
||||||
|
if [ "$CURRENT_VERSION" = "$VERSION" ]; then
|
||||||
|
echo "VERSION file already matches $VERSION"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "$VERSION" > backend/cmd/server/VERSION
|
||||||
|
|
||||||
|
git config user.name "github-actions[bot]"
|
||||||
|
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||||
|
git add backend/cmd/server/VERSION
|
||||||
|
git commit -m "chore: sync VERSION to ${VERSION} [skip ci]"
|
||||||
|
git push origin HEAD:${{ github.event.repository.default_branch }}
|
||||||
|
|||||||
16
.github/workflows/security-scan.yml
vendored
16
.github/workflows/security-scan.yml
vendored
@@ -12,38 +12,34 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
backend-security:
|
backend-security:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
timeout-minutes: 15
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version-file: backend/go.mod
|
go-version-file: backend/go.mod
|
||||||
check-latest: false
|
check-latest: false
|
||||||
cache-dependency-path: backend/go.sum
|
cache-dependency-path: backend/go.sum
|
||||||
- name: Verify Go version
|
- name: Verify Go version
|
||||||
run: |
|
run: |
|
||||||
go version | grep -q 'go1.25.6'
|
go version | grep -q 'go1.26.1'
|
||||||
- name: Run govulncheck
|
- name: Run govulncheck
|
||||||
working-directory: backend
|
working-directory: backend
|
||||||
run: |
|
run: |
|
||||||
go install golang.org/x/vuln/cmd/govulncheck@latest
|
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
govulncheck ./...
|
govulncheck ./...
|
||||||
- name: Run gosec
|
|
||||||
working-directory: backend
|
|
||||||
run: |
|
|
||||||
go install github.com/securego/gosec/v2/cmd/gosec@latest
|
|
||||||
gosec -severity high -confidence high ./...
|
|
||||||
|
|
||||||
frontend-security:
|
frontend-security:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
- name: Set up pnpm
|
- name: Set up pnpm
|
||||||
uses: pnpm/action-setup@v4
|
uses: pnpm/action-setup@v4
|
||||||
with:
|
with:
|
||||||
version: 9
|
version: 9
|
||||||
- name: Set up Node.js
|
- name: Set up Node.js
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v6
|
||||||
with:
|
with:
|
||||||
node-version: '20'
|
node-version: '20'
|
||||||
cache: 'pnpm'
|
cache: 'pnpm'
|
||||||
|
|||||||
11
.gitignore
vendored
11
.gitignore
vendored
@@ -116,17 +116,20 @@ backend/.installed
|
|||||||
# ===================
|
# ===================
|
||||||
tests
|
tests
|
||||||
CLAUDE.md
|
CLAUDE.md
|
||||||
AGENTS.md
|
|
||||||
.claude
|
.claude
|
||||||
scripts
|
scripts
|
||||||
.code-review-state
|
.code-review-state
|
||||||
openspec/
|
#openspec/
|
||||||
docs/
|
|
||||||
code-reviews/
|
code-reviews/
|
||||||
AGENTS.md
|
#AGENTS.md
|
||||||
backend/cmd/server/server
|
backend/cmd/server/server
|
||||||
deploy/docker-compose.override.yml
|
deploy/docker-compose.override.yml
|
||||||
.gocache/
|
.gocache/
|
||||||
vite.config.js
|
vite.config.js
|
||||||
docs/*
|
docs/*
|
||||||
.serena/
|
.serena/
|
||||||
|
.codex/
|
||||||
|
frontend/coverage/
|
||||||
|
aicodex
|
||||||
|
output/
|
||||||
|
|
||||||
|
|||||||
@@ -47,6 +47,8 @@ dockers:
|
|||||||
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest"
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest"
|
||||||
dockerfile: Dockerfile.goreleaser
|
dockerfile: Dockerfile.goreleaser
|
||||||
use: buildx
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
build_flag_templates:
|
build_flag_templates:
|
||||||
- "--platform=linux/amd64"
|
- "--platform=linux/amd64"
|
||||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
|||||||
@@ -63,6 +63,8 @@ dockers:
|
|||||||
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
dockerfile: Dockerfile.goreleaser
|
dockerfile: Dockerfile.goreleaser
|
||||||
use: buildx
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
build_flag_templates:
|
build_flag_templates:
|
||||||
- "--platform=linux/amd64"
|
- "--platform=linux/amd64"
|
||||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
@@ -76,6 +78,8 @@ dockers:
|
|||||||
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
dockerfile: Dockerfile.goreleaser
|
dockerfile: Dockerfile.goreleaser
|
||||||
use: buildx
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
build_flag_templates:
|
build_flag_templates:
|
||||||
- "--platform=linux/arm64"
|
- "--platform=linux/arm64"
|
||||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
@@ -89,6 +93,8 @@ dockers:
|
|||||||
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
dockerfile: Dockerfile.goreleaser
|
dockerfile: Dockerfile.goreleaser
|
||||||
use: buildx
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
build_flag_templates:
|
build_flag_templates:
|
||||||
- "--platform=linux/amd64"
|
- "--platform=linux/amd64"
|
||||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
@@ -102,6 +108,8 @@ dockers:
|
|||||||
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
dockerfile: Dockerfile.goreleaser
|
dockerfile: Dockerfile.goreleaser
|
||||||
use: buildx
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
build_flag_templates:
|
build_flag_templates:
|
||||||
- "--platform=linux/arm64"
|
- "--platform=linux/arm64"
|
||||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
|||||||
346
DEV_GUIDE.md
Normal file
346
DEV_GUIDE.md
Normal file
@@ -0,0 +1,346 @@
|
|||||||
|
# sub2api 项目开发指南
|
||||||
|
|
||||||
|
> 本文档记录项目环境配置、常见坑点和注意事项,供 Claude Code 和团队成员参考。
|
||||||
|
|
||||||
|
## 一、项目基本信息
|
||||||
|
|
||||||
|
| 项目 | 说明 |
|
||||||
|
|------|------|
|
||||||
|
| **上游仓库** | Wei-Shaw/sub2api |
|
||||||
|
| **Fork 仓库** | bayma888/sub2api-bmai |
|
||||||
|
| **技术栈** | Go 后端 (Ent ORM + Gin) + Vue3 前端 (pnpm) |
|
||||||
|
| **数据库** | PostgreSQL 16 + Redis |
|
||||||
|
| **包管理** | 后端: go modules, 前端: **pnpm**(不是 npm) |
|
||||||
|
|
||||||
|
## 二、本地环境配置
|
||||||
|
|
||||||
|
### PostgreSQL 16 (Windows 服务)
|
||||||
|
|
||||||
|
| 配置项 | 值 |
|
||||||
|
|--------|-----|
|
||||||
|
| 端口 | 5432 |
|
||||||
|
| psql 路径 | `C:\Program Files\PostgreSQL\16\bin\psql.exe` |
|
||||||
|
| pg_hba.conf | `C:\Program Files\PostgreSQL\16\data\pg_hba.conf` |
|
||||||
|
| 数据库凭据 | user=`sub2api`, password=`sub2api`, dbname=`sub2api` |
|
||||||
|
| 超级用户 | user=`postgres`, password=`postgres` |
|
||||||
|
|
||||||
|
### Redis
|
||||||
|
|
||||||
|
| 配置项 | 值 |
|
||||||
|
|--------|-----|
|
||||||
|
| 端口 | 6379 |
|
||||||
|
| 密码 | 无 |
|
||||||
|
|
||||||
|
### 开发工具
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# golangci-lint v2.7
|
||||||
|
go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.7
|
||||||
|
|
||||||
|
# pnpm (前端包管理)
|
||||||
|
npm install -g pnpm
|
||||||
|
```
|
||||||
|
|
||||||
|
## 三、CI/CD 流水线
|
||||||
|
|
||||||
|
### GitHub Actions Workflows
|
||||||
|
|
||||||
|
| Workflow | 触发条件 | 检查内容 |
|
||||||
|
|----------|----------|----------|
|
||||||
|
| **backend-ci.yml** | push, pull_request | 单元测试 + 集成测试 + golangci-lint v2.7 |
|
||||||
|
| **security-scan.yml** | push, pull_request, 每周一 | govulncheck + gosec + pnpm audit |
|
||||||
|
| **release.yml** | tag `v*` | 构建发布(PR 不触发) |
|
||||||
|
|
||||||
|
### CI 要求
|
||||||
|
|
||||||
|
- Go 版本必须是 **1.25.7**
|
||||||
|
- 前端使用 `pnpm install --frozen-lockfile`,必须提交 `pnpm-lock.yaml`
|
||||||
|
|
||||||
|
### 本地测试命令
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 后端单元测试
|
||||||
|
cd backend && go test -tags=unit ./...
|
||||||
|
|
||||||
|
# 后端集成测试
|
||||||
|
cd backend && go test -tags=integration ./...
|
||||||
|
|
||||||
|
# 代码质量检查
|
||||||
|
cd backend && golangci-lint run ./...
|
||||||
|
|
||||||
|
# 前端依赖安装(必须用 pnpm)
|
||||||
|
cd frontend && pnpm install
|
||||||
|
```
|
||||||
|
|
||||||
|
## 四、常见坑点 & 解决方案
|
||||||
|
|
||||||
|
### 坑 1:pnpm-lock.yaml 必须同步提交
|
||||||
|
|
||||||
|
**问题**:`package.json` 新增依赖后,CI 的 `pnpm install --frozen-lockfile` 失败。
|
||||||
|
|
||||||
|
**原因**:上游 CI 使用 pnpm,lock 文件不同步会报错。
|
||||||
|
|
||||||
|
**解决**:
|
||||||
|
```bash
|
||||||
|
cd frontend
|
||||||
|
pnpm install # 更新 pnpm-lock.yaml
|
||||||
|
git add pnpm-lock.yaml
|
||||||
|
git commit -m "chore: update pnpm-lock.yaml"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 2:npm 和 pnpm 的 node_modules 冲突
|
||||||
|
|
||||||
|
**问题**:之前用 npm 装过 `node_modules`,pnpm install 报 `EPERM` 错误。
|
||||||
|
|
||||||
|
**解决**:
|
||||||
|
```bash
|
||||||
|
cd frontend
|
||||||
|
rm -rf node_modules # 或 PowerShell: Remove-Item -Recurse -Force node_modules
|
||||||
|
pnpm install
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 3:PowerShell 中 bcrypt hash 的 `$` 被转义
|
||||||
|
|
||||||
|
**问题**:bcrypt hash 格式如 `$2a$10$xxx...`,PowerShell 把 `$2a` 当变量解析,导致数据丢失。
|
||||||
|
|
||||||
|
**解决**:将 SQL 写入文件,用 `psql -f` 执行:
|
||||||
|
```bash
|
||||||
|
# 错误示范(PowerShell 会吃掉 $)
|
||||||
|
psql -c "INSERT INTO users ... VALUES ('$2a$10$...')"
|
||||||
|
|
||||||
|
# 正确做法
|
||||||
|
echo "INSERT INTO users ... VALUES ('\$2a\$10\$...')" > temp.sql
|
||||||
|
psql -U sub2api -h 127.0.0.1 -d sub2api -f temp.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 4:psql 不支持中文路径
|
||||||
|
|
||||||
|
**问题**:`psql -f "D:\中文路径\file.sql"` 报错找不到文件。
|
||||||
|
|
||||||
|
**解决**:复制到纯英文路径再执行:
|
||||||
|
```bash
|
||||||
|
cp "D:\中文路径\file.sql" "C:\temp.sql"
|
||||||
|
psql -f "C:\temp.sql"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 5:PostgreSQL 密码重置流程
|
||||||
|
|
||||||
|
**场景**:忘记 PostgreSQL 密码。
|
||||||
|
|
||||||
|
**步骤**:
|
||||||
|
1. 修改 `C:\Program Files\PostgreSQL\16\data\pg_hba.conf`
|
||||||
|
```
|
||||||
|
# 将 scram-sha-256 改为 trust
|
||||||
|
host all all 127.0.0.1/32 trust
|
||||||
|
```
|
||||||
|
2. 重启 PostgreSQL 服务
|
||||||
|
```powershell
|
||||||
|
Restart-Service postgresql-x64-16
|
||||||
|
```
|
||||||
|
3. 无密码登录并重置
|
||||||
|
```bash
|
||||||
|
psql -U postgres -h 127.0.0.1
|
||||||
|
ALTER USER sub2api WITH PASSWORD 'sub2api';
|
||||||
|
ALTER USER postgres WITH PASSWORD 'postgres';
|
||||||
|
```
|
||||||
|
4. 改回 `scram-sha-256` 并重启
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 6:Go interface 新增方法后 test stub 必须补全
|
||||||
|
|
||||||
|
**问题**:给 interface 新增方法后,编译报错 `does not implement interface (missing method XXX)`。
|
||||||
|
|
||||||
|
**原因**:所有测试文件中实现该 interface 的 stub/mock 都必须补上新方法。
|
||||||
|
|
||||||
|
**解决**:
|
||||||
|
```bash
|
||||||
|
# 搜索所有实现该 interface 的 struct
|
||||||
|
cd backend
|
||||||
|
grep -r "type.*Stub.*struct" internal/
|
||||||
|
grep -r "type.*Mock.*struct" internal/
|
||||||
|
|
||||||
|
# 逐一补全新方法
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 7:Windows 上 psql 连 localhost 的 IPv6 问题
|
||||||
|
|
||||||
|
**问题**:psql 连 `localhost` 先尝试 IPv6 (::1),可能报错后再回退 IPv4。
|
||||||
|
|
||||||
|
**建议**:直接用 `127.0.0.1` 代替 `localhost`。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 8:Windows 没有 make 命令
|
||||||
|
|
||||||
|
**问题**:CI 里用 `make test-unit`,本地 Windows 没有 make。
|
||||||
|
|
||||||
|
**解决**:直接用 Makefile 里的原始命令:
|
||||||
|
```bash
|
||||||
|
# 代替 make test-unit
|
||||||
|
go test -tags=unit ./...
|
||||||
|
|
||||||
|
# 代替 make test-integration
|
||||||
|
go test -tags=integration ./...
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 9:Ent Schema 修改后必须重新生成
|
||||||
|
|
||||||
|
**问题**:修改 `ent/schema/*.go` 后,代码不生效。
|
||||||
|
|
||||||
|
**解决**:
|
||||||
|
```bash
|
||||||
|
cd backend
|
||||||
|
go generate ./ent # 重新生成 ent 代码
|
||||||
|
git add ent/ # 生成的文件也要提交
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 10:前端测试看似正常,但后端调用失败(模型映射被批量误改)
|
||||||
|
|
||||||
|
**典型现象**:
|
||||||
|
- 前端按钮点测看起来正常;
|
||||||
|
- 实际通过 API/客户端调用时返回 `Service temporarily unavailable` 或提示无可用账号;
|
||||||
|
- 常见于 OpenAI 账号(例如 Codex 模型)在批量修改后突然不可用。
|
||||||
|
|
||||||
|
**根因**:
|
||||||
|
- OpenAI 账号编辑页默认不显式展示映射规则,容易让人误以为“没映射也没关系”;
|
||||||
|
- 但在**批量修改同时选中不同平台账号**(OpenAI + Antigravity/Gemini)时,模型白名单/映射可能被跨平台策略覆盖;
|
||||||
|
- 结果是 OpenAI 账号的关键模型映射丢失或被改坏,后端选不到可用账号。
|
||||||
|
|
||||||
|
**修复方案(按优先级)**:
|
||||||
|
1. **快速修复(推荐)**:在批量修改中补回正确的透传映射(例如 `gpt-5.3-codex -> gpt-5.3-codex-spark`)。
|
||||||
|
2. **彻底重建**:删除并重新添加全部相关账号(最稳但成本高)。
|
||||||
|
|
||||||
|
**关键经验**:
|
||||||
|
- 如果某模型已被软件内置默认映射覆盖,通常不需要额外再加透传;
|
||||||
|
- 但当上游模型更新快于本仓库默认映射时,**手动批量添加透传映射**是最简单、最低风险的临时兜底方案;
|
||||||
|
- 批量操作前尽量按平台分组,不要混选不同平台账号。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 11:PR 提交前检查清单
|
||||||
|
|
||||||
|
提交 PR 前务必本地验证:
|
||||||
|
|
||||||
|
- [ ] `go test -tags=unit ./...` 通过
|
||||||
|
- [ ] `go test -tags=integration ./...` 通过
|
||||||
|
- [ ] `golangci-lint run ./...` 无新增问题
|
||||||
|
- [ ] `pnpm-lock.yaml` 已同步(如果改了 package.json)
|
||||||
|
- [ ] 所有 test stub 补全新接口方法(如果改了 interface)
|
||||||
|
- [ ] Ent 生成的代码已提交(如果改了 schema)
|
||||||
|
|
||||||
|
## 五、常用命令速查
|
||||||
|
|
||||||
|
### 数据库操作
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 连接数据库
|
||||||
|
psql -U sub2api -h 127.0.0.1 -d sub2api
|
||||||
|
|
||||||
|
# 查看所有用户
|
||||||
|
psql -U postgres -h 127.0.0.1 -c "\du"
|
||||||
|
|
||||||
|
# 查看所有数据库
|
||||||
|
psql -U postgres -h 127.0.0.1 -c "\l"
|
||||||
|
|
||||||
|
# 执行 SQL 文件
|
||||||
|
psql -U sub2api -h 127.0.0.1 -d sub2api -f migration.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
### Git 操作
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 同步上游
|
||||||
|
git fetch upstream
|
||||||
|
git checkout main
|
||||||
|
git merge upstream/main
|
||||||
|
git push origin main
|
||||||
|
|
||||||
|
# 创建功能分支
|
||||||
|
git checkout -b feature/xxx
|
||||||
|
|
||||||
|
# Rebase 到最新 main
|
||||||
|
git fetch upstream
|
||||||
|
git rebase upstream/main
|
||||||
|
```
|
||||||
|
|
||||||
|
### 前端操作
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 安装依赖(必须用 pnpm)
|
||||||
|
cd frontend
|
||||||
|
pnpm install
|
||||||
|
|
||||||
|
# 开发服务器
|
||||||
|
pnpm dev
|
||||||
|
|
||||||
|
# 构建
|
||||||
|
pnpm build
|
||||||
|
```
|
||||||
|
|
||||||
|
### 后端操作
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 运行服务器
|
||||||
|
cd backend
|
||||||
|
go run ./cmd/server/
|
||||||
|
|
||||||
|
# 生成 Ent 代码
|
||||||
|
go generate ./ent
|
||||||
|
|
||||||
|
# 运行测试
|
||||||
|
go test -tags=unit ./...
|
||||||
|
go test -tags=integration ./...
|
||||||
|
|
||||||
|
# Lint 检查
|
||||||
|
golangci-lint run ./...
|
||||||
|
```
|
||||||
|
|
||||||
|
## 六、项目结构速览
|
||||||
|
|
||||||
|
```
|
||||||
|
sub2api-bmai/
|
||||||
|
├── backend/
|
||||||
|
│ ├── cmd/server/ # 主程序入口
|
||||||
|
│ ├── ent/ # Ent ORM 生成代码
|
||||||
|
│ │ └── schema/ # 数据库 Schema 定义
|
||||||
|
│ ├── internal/
|
||||||
|
│ │ ├── handler/ # HTTP 处理器
|
||||||
|
│ │ ├── service/ # 业务逻辑
|
||||||
|
│ │ ├── repository/ # 数据访问层
|
||||||
|
│ │ └── server/ # 服务器配置
|
||||||
|
│ ├── migrations/ # 数据库迁移脚本
|
||||||
|
│ └── config.yaml # 配置文件
|
||||||
|
├── frontend/
|
||||||
|
│ ├── src/
|
||||||
|
│ │ ├── api/ # API 调用
|
||||||
|
│ │ ├── components/ # Vue 组件
|
||||||
|
│ │ ├── views/ # 页面视图
|
||||||
|
│ │ ├── types/ # TypeScript 类型
|
||||||
|
│ │ └── i18n/ # 国际化
|
||||||
|
│ ├── package.json # 依赖配置
|
||||||
|
│ └── pnpm-lock.yaml # pnpm 锁文件(必须提交)
|
||||||
|
└── .claude/
|
||||||
|
└── CLAUDE.md # 本文档
|
||||||
|
```
|
||||||
|
|
||||||
|
## 七、参考资源
|
||||||
|
|
||||||
|
- [上游仓库](https://github.com/Wei-Shaw/sub2api)
|
||||||
|
- [Ent 文档](https://entgo.io/docs/getting-started)
|
||||||
|
- [Vue3 文档](https://vuejs.org/)
|
||||||
|
- [pnpm 文档](https://pnpm.io/)
|
||||||
56
Dockerfile
56
Dockerfile
@@ -7,8 +7,9 @@
|
|||||||
# =============================================================================
|
# =============================================================================
|
||||||
|
|
||||||
ARG NODE_IMAGE=node:24-alpine
|
ARG NODE_IMAGE=node:24-alpine
|
||||||
ARG GOLANG_IMAGE=golang:1.25.6-alpine
|
ARG GOLANG_IMAGE=golang:1.26.1-alpine
|
||||||
ARG ALPINE_IMAGE=alpine:3.20
|
ARG ALPINE_IMAGE=alpine:3.21
|
||||||
|
ARG POSTGRES_IMAGE=postgres:18-alpine
|
||||||
ARG GOPROXY=https://goproxy.cn,direct
|
ARG GOPROXY=https://goproxy.cn,direct
|
||||||
ARG GOSUMDB=sum.golang.google.cn
|
ARG GOSUMDB=sum.golang.google.cn
|
||||||
|
|
||||||
@@ -36,7 +37,7 @@ RUN pnpm run build
|
|||||||
FROM ${GOLANG_IMAGE} AS backend-builder
|
FROM ${GOLANG_IMAGE} AS backend-builder
|
||||||
|
|
||||||
# Build arguments for version info (set by CI)
|
# Build arguments for version info (set by CI)
|
||||||
ARG VERSION=docker
|
ARG VERSION=
|
||||||
ARG COMMIT=docker
|
ARG COMMIT=docker
|
||||||
ARG DATE
|
ARG DATE
|
||||||
ARG GOPROXY
|
ARG GOPROXY
|
||||||
@@ -61,14 +62,24 @@ COPY backend/ ./
|
|||||||
COPY --from=frontend-builder /app/backend/internal/web/dist ./internal/web/dist
|
COPY --from=frontend-builder /app/backend/internal/web/dist ./internal/web/dist
|
||||||
|
|
||||||
# Build the binary (BuildType=release for CI builds, embed frontend)
|
# Build the binary (BuildType=release for CI builds, embed frontend)
|
||||||
RUN CGO_ENABLED=0 GOOS=linux go build \
|
# Version precedence: build arg VERSION > cmd/server/VERSION
|
||||||
|
RUN VERSION_VALUE="${VERSION}" && \
|
||||||
|
if [ -z "${VERSION_VALUE}" ]; then VERSION_VALUE="$(tr -d '\r\n' < ./cmd/server/VERSION)"; fi && \
|
||||||
|
DATE_VALUE="${DATE:-$(date -u +%Y-%m-%dT%H:%M:%SZ)}" && \
|
||||||
|
CGO_ENABLED=0 GOOS=linux go build \
|
||||||
-tags embed \
|
-tags embed \
|
||||||
-ldflags="-s -w -X main.Commit=${COMMIT} -X main.Date=${DATE:-$(date -u +%Y-%m-%dT%H:%M:%SZ)} -X main.BuildType=release" \
|
-ldflags="-s -w -X main.Version=${VERSION_VALUE} -X main.Commit=${COMMIT} -X main.Date=${DATE_VALUE} -X main.BuildType=release" \
|
||||||
|
-trimpath \
|
||||||
-o /app/sub2api \
|
-o /app/sub2api \
|
||||||
./cmd/server
|
./cmd/server
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
# Stage 3: Final Runtime Image
|
# Stage 3: PostgreSQL Client (version-matched with docker-compose)
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
FROM ${POSTGRES_IMAGE} AS pg-client
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
# Stage 4: Final Runtime Image
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
FROM ${ALPINE_IMAGE}
|
FROM ${ALPINE_IMAGE}
|
||||||
|
|
||||||
@@ -81,9 +92,21 @@ LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
|
|||||||
RUN apk add --no-cache \
|
RUN apk add --no-cache \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
tzdata \
|
tzdata \
|
||||||
curl \
|
su-exec \
|
||||||
|
libpq \
|
||||||
|
zstd-libs \
|
||||||
|
lz4-libs \
|
||||||
|
krb5-libs \
|
||||||
|
libldap \
|
||||||
|
libedit \
|
||||||
&& rm -rf /var/cache/apk/*
|
&& rm -rf /var/cache/apk/*
|
||||||
|
|
||||||
|
# Copy pg_dump and psql from the same postgres image used in docker-compose
|
||||||
|
# This ensures version consistency between backup tools and the database server
|
||||||
|
COPY --from=pg-client /usr/local/bin/pg_dump /usr/local/bin/pg_dump
|
||||||
|
COPY --from=pg-client /usr/local/bin/psql /usr/local/bin/psql
|
||||||
|
COPY --from=pg-client /usr/local/lib/libpq.so.5* /usr/local/lib/
|
||||||
|
|
||||||
# Create non-root user
|
# Create non-root user
|
||||||
RUN addgroup -g 1000 sub2api && \
|
RUN addgroup -g 1000 sub2api && \
|
||||||
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
||||||
@@ -91,21 +114,24 @@ RUN addgroup -g 1000 sub2api && \
|
|||||||
# Set working directory
|
# Set working directory
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
# Copy binary from builder
|
# Copy binary/resources with ownership to avoid extra full-layer chown copy
|
||||||
COPY --from=backend-builder /app/sub2api /app/sub2api
|
COPY --from=backend-builder --chown=sub2api:sub2api /app/sub2api /app/sub2api
|
||||||
|
COPY --from=backend-builder --chown=sub2api:sub2api /app/backend/resources /app/resources
|
||||||
|
|
||||||
# Create data directory
|
# Create data directory
|
||||||
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
RUN mkdir -p /app/data && chown sub2api:sub2api /app/data
|
||||||
|
|
||||||
# Switch to non-root user
|
# Copy entrypoint script (fixes volume permissions then drops to sub2api)
|
||||||
USER sub2api
|
COPY deploy/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||||
|
RUN chmod +x /app/docker-entrypoint.sh
|
||||||
|
|
||||||
# Expose port (can be overridden by SERVER_PORT env var)
|
# Expose port (can be overridden by SERVER_PORT env var)
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
|
|
||||||
# Health check
|
# Health check
|
||||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||||
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
CMD wget -q -T 5 -O /dev/null http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||||
|
|
||||||
# Run the application
|
# Run the application (entrypoint fixes /app/data ownership then execs as sub2api)
|
||||||
ENTRYPOINT ["/app/sub2api"]
|
ENTRYPOINT ["/app/docker-entrypoint.sh"]
|
||||||
|
CMD ["/app/sub2api"]
|
||||||
|
|||||||
@@ -5,7 +5,12 @@
|
|||||||
# It only packages the pre-built binary, no compilation needed.
|
# It only packages the pre-built binary, no compilation needed.
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
|
|
||||||
FROM alpine:3.19
|
ARG ALPINE_IMAGE=alpine:3.21
|
||||||
|
ARG POSTGRES_IMAGE=postgres:18-alpine
|
||||||
|
|
||||||
|
FROM ${POSTGRES_IMAGE} AS pg-client
|
||||||
|
|
||||||
|
FROM ${ALPINE_IMAGE}
|
||||||
|
|
||||||
LABEL maintainer="Wei-Shaw <github.com/Wei-Shaw>"
|
LABEL maintainer="Wei-Shaw <github.com/Wei-Shaw>"
|
||||||
LABEL description="Sub2API - AI API Gateway Platform"
|
LABEL description="Sub2API - AI API Gateway Platform"
|
||||||
@@ -16,8 +21,21 @@ RUN apk add --no-cache \
|
|||||||
ca-certificates \
|
ca-certificates \
|
||||||
tzdata \
|
tzdata \
|
||||||
curl \
|
curl \
|
||||||
|
su-exec \
|
||||||
|
libpq \
|
||||||
|
zstd-libs \
|
||||||
|
lz4-libs \
|
||||||
|
krb5-libs \
|
||||||
|
libldap \
|
||||||
|
libedit \
|
||||||
&& rm -rf /var/cache/apk/*
|
&& rm -rf /var/cache/apk/*
|
||||||
|
|
||||||
|
# Copy pg_dump and psql from a version-matched PostgreSQL image so backup and
|
||||||
|
# restore work in the runtime container without requiring Docker socket access.
|
||||||
|
COPY --from=pg-client /usr/local/bin/pg_dump /usr/local/bin/pg_dump
|
||||||
|
COPY --from=pg-client /usr/local/bin/psql /usr/local/bin/psql
|
||||||
|
COPY --from=pg-client /usr/local/lib/libpq.so.5* /usr/local/lib/
|
||||||
|
|
||||||
# Create non-root user
|
# Create non-root user
|
||||||
RUN addgroup -g 1000 sub2api && \
|
RUN addgroup -g 1000 sub2api && \
|
||||||
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
||||||
@@ -30,11 +48,15 @@ COPY sub2api /app/sub2api
|
|||||||
# Create data directory
|
# Create data directory
|
||||||
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
||||||
|
|
||||||
USER sub2api
|
# Copy entrypoint script (fixes volume permissions then drops to sub2api)
|
||||||
|
COPY deploy/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||||
|
RUN chmod +x /app/docker-entrypoint.sh
|
||||||
|
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
|
|
||||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||||
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||||
|
|
||||||
ENTRYPOINT ["/app/sub2api"]
|
# Run the application (entrypoint fixes /app/data ownership then execs as sub2api)
|
||||||
|
ENTRYPOINT ["/app/docker-entrypoint.sh"]
|
||||||
|
CMD ["/app/sub2api"]
|
||||||
|
|||||||
12
Makefile
12
Makefile
@@ -1,4 +1,4 @@
|
|||||||
.PHONY: build build-backend build-frontend test test-backend test-frontend
|
.PHONY: build build-backend build-frontend build-datamanagementd test test-backend test-frontend test-datamanagementd secret-scan
|
||||||
|
|
||||||
# 一键编译前后端
|
# 一键编译前后端
|
||||||
build: build-backend build-frontend
|
build: build-backend build-frontend
|
||||||
@@ -11,6 +11,10 @@ build-backend:
|
|||||||
build-frontend:
|
build-frontend:
|
||||||
@pnpm --dir frontend run build
|
@pnpm --dir frontend run build
|
||||||
|
|
||||||
|
# 编译 datamanagementd(宿主机数据管理进程)
|
||||||
|
build-datamanagementd:
|
||||||
|
@cd datamanagement && go build -o datamanagementd ./cmd/datamanagementd
|
||||||
|
|
||||||
# 运行测试(后端 + 前端)
|
# 运行测试(后端 + 前端)
|
||||||
test: test-backend test-frontend
|
test: test-backend test-frontend
|
||||||
|
|
||||||
@@ -20,3 +24,9 @@ test-backend:
|
|||||||
test-frontend:
|
test-frontend:
|
||||||
@pnpm --dir frontend run lint:check
|
@pnpm --dir frontend run lint:check
|
||||||
@pnpm --dir frontend run typecheck
|
@pnpm --dir frontend run typecheck
|
||||||
|
|
||||||
|
test-datamanagementd:
|
||||||
|
@cd datamanagement && go test ./...
|
||||||
|
|
||||||
|
secret-scan:
|
||||||
|
@python3 tools/secret_scan.py
|
||||||
|
|||||||
77
README.md
77
README.md
@@ -2,33 +2,37 @@
|
|||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||
[](https://golang.org/)
|
[](https://golang.org/)
|
||||||
[](https://vuejs.org/)
|
[](https://vuejs.org/)
|
||||||
[](https://www.postgresql.org/)
|
[](https://www.postgresql.org/)
|
||||||
[](https://redis.io/)
|
[](https://redis.io/)
|
||||||
[](https://www.docker.com/)
|
[](https://www.docker.com/)
|
||||||
|
|
||||||
|
<a href="https://trendshift.io/repositories/21823" target="_blank"><img src="https://trendshift.io/api/badge/repositories/21823" alt="Wei-Shaw%2Fsub2api | Trendshift" width="250" height="55"/></a>
|
||||||
|
|
||||||
**AI API Gateway Platform for Subscription Quota Distribution**
|
**AI API Gateway Platform for Subscription Quota Distribution**
|
||||||
|
|
||||||
English | [中文](README_CN.md)
|
English | [中文](README_CN.md)
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
> **Sub2API officially uses only the domains `sub2api.org` and `pincc.ai`. Other websites using the Sub2API name may be third-party deployments or services and are not affiliated with this project. Please verify and exercise your own judgment.**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Demo
|
## Demo
|
||||||
|
|
||||||
Try Sub2API online: **https://demo.sub2api.org/**
|
Try Sub2API online: **[https://demo.sub2api.org/](https://demo.sub2api.org/)**
|
||||||
|
|
||||||
Demo credentials (shared demo environment; **not** created automatically for self-hosted installs):
|
Demo credentials (shared demo environment; **not** created automatically for self-hosted installs):
|
||||||
|
|
||||||
| Email | Password |
|
| Email | Password |
|
||||||
|-------|----------|
|
|-------|----------|
|
||||||
| admin@sub2api.com | admin123 |
|
| admin@sub2api.org | admin123 |
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
Sub2API is an AI API gateway platform designed to distribute and manage API quotas from AI product subscriptions (like Claude Code $200/month). Users can access upstream AI services through platform-generated API Keys, while the platform handles authentication, billing, load balancing, and request forwarding.
|
Sub2API is an AI API gateway platform designed to distribute and manage API quotas from AI product subscriptions. Users can access upstream AI services through platform-generated API Keys, while the platform handles authentication, billing, load balancing, and request forwarding.
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
@@ -39,21 +43,46 @@ Sub2API is an AI API gateway platform designed to distribute and manage API quot
|
|||||||
- **Concurrency Control** - Per-user and per-account concurrency limits
|
- **Concurrency Control** - Per-user and per-account concurrency limits
|
||||||
- **Rate Limiting** - Configurable request and token rate limits
|
- **Rate Limiting** - Configurable request and token rate limits
|
||||||
- **Admin Dashboard** - Web interface for monitoring and management
|
- **Admin Dashboard** - Web interface for monitoring and management
|
||||||
|
- **External System Integration** - Embed external systems (e.g. payment, ticketing) via iframe to extend the admin dashboard
|
||||||
|
|
||||||
|
## Don't Want to Self-Host?
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<tr>
|
||||||
|
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="120"></a></td>
|
||||||
|
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> is the official relay service built on Sub2API, offering stable access to Claude Code, Codex, Gemini and other popular models — ready to use, no deployment or maintenance required.</td>
|
||||||
|
</tr>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
## Ecosystem
|
||||||
|
|
||||||
|
Community projects that extend or integrate with Sub2API:
|
||||||
|
|
||||||
|
| Project | Description | Features |
|
||||||
|
|---------|-------------|----------|
|
||||||
|
| [Sub2ApiPay](https://github.com/touwaeriol/sub2apipay) | Self-service payment system | Self-service top-up and subscription purchase; supports YiPay protocol, WeChat Pay, Alipay, Stripe; embeddable via iframe |
|
||||||
|
| [sub2api-mobile](https://github.com/ckken/sub2api-mobile) | Mobile admin console | Cross-platform app (iOS/Android/Web) for user management, account management, monitoring dashboard, and multi-backend switching; built with Expo + React Native |
|
||||||
|
|
||||||
## Tech Stack
|
## Tech Stack
|
||||||
|
|
||||||
| Component | Technology |
|
| Component | Technology |
|
||||||
|-----------|------------|
|
|-----------|------------|
|
||||||
| Backend | Go 1.25.5, Gin, Ent |
|
| Backend | Go 1.25.7, Gin, Ent |
|
||||||
| Frontend | Vue 3.4+, Vite 5+, TailwindCSS |
|
| Frontend | Vue 3.4+, Vite 5+, TailwindCSS |
|
||||||
| Database | PostgreSQL 15+ |
|
| Database | PostgreSQL 15+ |
|
||||||
| Cache/Queue | Redis 7+ |
|
| Cache/Queue | Redis 7+ |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Documentation
|
## Nginx Reverse Proxy Note
|
||||||
|
|
||||||
- Dependency Security: `docs/dependency-security.md`
|
When using Nginx as a reverse proxy for Sub2API (or CRS) with Codex CLI, add the following to the `http` block in your Nginx configuration:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
underscores_in_headers on;
|
||||||
|
```
|
||||||
|
|
||||||
|
Nginx drops headers containing underscores by default (e.g. `session_id`), which breaks sticky session routing in multi-account setups.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -149,14 +178,14 @@ mkdir -p sub2api-deploy && cd sub2api-deploy
|
|||||||
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
||||||
|
|
||||||
# Start services
|
# Start services
|
||||||
docker-compose -f docker-compose.local.yml up -d
|
docker-compose up -d
|
||||||
|
|
||||||
# View logs
|
# View logs
|
||||||
docker-compose -f docker-compose.local.yml logs -f sub2api
|
docker-compose logs -f sub2api
|
||||||
```
|
```
|
||||||
|
|
||||||
**What the script does:**
|
**What the script does:**
|
||||||
- Downloads `docker-compose.local.yml` and `.env.example`
|
- Downloads `docker-compose.local.yml` (saved as `docker-compose.yml`) and `.env.example`
|
||||||
- Generates secure credentials (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD)
|
- Generates secure credentials (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD)
|
||||||
- Creates `.env` file with auto-generated secrets
|
- Creates `.env` file with auto-generated secrets
|
||||||
- Creates data directories (uses local directories for easy backup/migration)
|
- Creates data directories (uses local directories for easy backup/migration)
|
||||||
@@ -363,6 +392,12 @@ default:
|
|||||||
rate_multiplier: 1.0
|
rate_multiplier: 1.0
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Sora Status (Temporarily Unavailable)
|
||||||
|
|
||||||
|
> ⚠️ Sora-related features are temporarily unavailable due to technical issues in upstream integration and media delivery.
|
||||||
|
> Please do not rely on Sora in production at this time.
|
||||||
|
> Existing `gateway.sora_*` configuration keys are reserved and may not take effect until these issues are resolved.
|
||||||
|
|
||||||
Additional security-related options are available in `config.yaml`:
|
Additional security-related options are available in `config.yaml`:
|
||||||
|
|
||||||
- `cors.allowed_origins` for CORS allowlist
|
- `cors.allowed_origins` for CORS allowlist
|
||||||
@@ -515,6 +550,28 @@ sub2api/
|
|||||||
└── install.sh # One-click installation script
|
└── install.sh # One-click installation script
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Disclaimer
|
||||||
|
|
||||||
|
> **Please read carefully before using this project:**
|
||||||
|
>
|
||||||
|
> :rotating_light: **Terms of Service Risk**: Using this project may violate Anthropic's Terms of Service. Please read Anthropic's user agreement carefully before use. All risks arising from the use of this project are borne solely by the user.
|
||||||
|
>
|
||||||
|
> :book: **Disclaimer**: This project is for technical learning and research purposes only. The author assumes no responsibility for account suspension, service interruption, or any other losses caused by the use of this project.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Star History
|
||||||
|
|
||||||
|
<a href="https://star-history.com/#Wei-Shaw/sub2api&Date">
|
||||||
|
<picture>
|
||||||
|
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date&theme=dark" />
|
||||||
|
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
</picture>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
MIT License
|
MIT License
|
||||||
|
|||||||
143
README_CN.md
143
README_CN.md
@@ -2,33 +2,36 @@
|
|||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||
[](https://golang.org/)
|
[](https://golang.org/)
|
||||||
[](https://vuejs.org/)
|
[](https://vuejs.org/)
|
||||||
[](https://www.postgresql.org/)
|
[](https://www.postgresql.org/)
|
||||||
[](https://redis.io/)
|
[](https://redis.io/)
|
||||||
[](https://www.docker.com/)
|
[](https://www.docker.com/)
|
||||||
|
|
||||||
|
<a href="https://trendshift.io/repositories/21823" target="_blank"><img src="https://trendshift.io/api/badge/repositories/21823" alt="Wei-Shaw%2Fsub2api | Trendshift" width="250" height="55"/></a>
|
||||||
|
|
||||||
**AI API 网关平台 - 订阅配额分发管理**
|
**AI API 网关平台 - 订阅配额分发管理**
|
||||||
|
|
||||||
[English](README.md) | 中文
|
[English](README.md) | 中文
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
> **Sub2API 官方仅使用 `sub2api.org` 与 `pincc.ai` 两个域名。其他使用 Sub2API 名义的网站可能为第三方部署或服务,与本项目无关,请自行甄别。**
|
||||||
---
|
---
|
||||||
|
|
||||||
## 在线体验
|
## 在线体验
|
||||||
|
|
||||||
体验地址:**https://v2.pincc.ai/**
|
体验地址:**[https://demo.sub2api.org/](https://demo.sub2api.org/)**
|
||||||
|
|
||||||
演示账号(共享演示环境;自建部署不会自动创建该账号):
|
演示账号(共享演示环境;自建部署不会自动创建该账号):
|
||||||
|
|
||||||
| 邮箱 | 密码 |
|
| 邮箱 | 密码 |
|
||||||
|------|------|
|
|------|------|
|
||||||
| admin@sub2api.com | admin123 |
|
| admin@sub2api.org | admin123 |
|
||||||
|
|
||||||
## 项目概述
|
## 项目概述
|
||||||
|
|
||||||
Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(如 Claude Code $200/月)的 API 配额。用户通过平台生成的 API Key 调用上游 AI 服务,平台负责鉴权、计费、负载均衡和请求转发。
|
Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅的 API 配额。用户通过平台生成的 API Key 调用上游 AI 服务,平台负责鉴权、计费、负载均衡和请求转发。
|
||||||
|
|
||||||
## 核心功能
|
## 核心功能
|
||||||
|
|
||||||
@@ -39,28 +42,46 @@ Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(
|
|||||||
- **并发控制** - 用户级和账号级并发限制
|
- **并发控制** - 用户级和账号级并发限制
|
||||||
- **速率限制** - 可配置的请求和 Token 速率限制
|
- **速率限制** - 可配置的请求和 Token 速率限制
|
||||||
- **管理后台** - Web 界面进行监控和管理
|
- **管理后台** - Web 界面进行监控和管理
|
||||||
|
- **外部系统集成** - 支持通过 iframe 嵌入外部系统(如支付、工单等),扩展管理后台功能
|
||||||
|
|
||||||
|
## 不想自建?试试官方中转
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<tr>
|
||||||
|
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="120"></a></td>
|
||||||
|
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> 是基于 Sub2API 搭建的官方中转服务,提供 Claude Code、Codex、Gemini 等主流模型的稳定中转,开箱即用,免去自建部署与运维烦恼。</td>
|
||||||
|
</tr>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
## 生态项目
|
||||||
|
|
||||||
|
围绕 Sub2API 的社区扩展与集成项目:
|
||||||
|
|
||||||
|
| 项目 | 说明 | 功能 |
|
||||||
|
|------|------|------|
|
||||||
|
| [Sub2ApiPay](https://github.com/touwaeriol/sub2apipay) | 自助支付系统 | 用户自助充值、自助订阅购买;兼容易支付协议、微信官方支付、支付宝官方支付、Stripe;支持 iframe 嵌入管理后台 |
|
||||||
|
| [sub2api-mobile](https://github.com/ckken/sub2api-mobile) | 移动端管理控制台 | 跨平台应用(iOS/Android/Web),支持用户管理、账号管理、监控看板、多后端切换;基于 Expo + React Native 构建 |
|
||||||
|
|
||||||
## 技术栈
|
## 技术栈
|
||||||
|
|
||||||
| 组件 | 技术 |
|
| 组件 | 技术 |
|
||||||
|------|------|
|
|------|------|
|
||||||
| 后端 | Go 1.25.5, Gin, Ent |
|
| 后端 | Go 1.25.7, Gin, Ent |
|
||||||
| 前端 | Vue 3.4+, Vite 5+, TailwindCSS |
|
| 前端 | Vue 3.4+, Vite 5+, TailwindCSS |
|
||||||
| 数据库 | PostgreSQL 15+ |
|
| 数据库 | PostgreSQL 15+ |
|
||||||
| 缓存/队列 | Redis 7+ |
|
| 缓存/队列 | Redis 7+ |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 文档
|
## Nginx 反向代理注意事项
|
||||||
|
|
||||||
- 依赖安全:`docs/dependency-security.md`
|
通过 Nginx 反向代理 Sub2API(或 CRS 服务)并搭配 Codex CLI 使用时,需要在 Nginx 配置的 `http` 块中添加:
|
||||||
|
|
||||||
---
|
```nginx
|
||||||
|
underscores_in_headers on;
|
||||||
|
```
|
||||||
|
|
||||||
## OpenAI Responses 兼容注意事项
|
Nginx 默认会丢弃名称中含下划线的请求头(如 `session_id`),这会导致多账号环境下的粘性会话功能失效。
|
||||||
|
|
||||||
- 当请求包含 `function_call_output` 时,需要携带 `previous_response_id`,或在 `input` 中包含带 `call_id` 的 `tool_call`/`function_call`,或带非空 `id` 且与 `function_call_output.call_id` 匹配的 `item_reference`。
|
|
||||||
- 若依赖上游历史记录,网关会强制 `store=true` 并需要复用 `previous_response_id`,以避免出现 “No tool call found for function call output” 错误。
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -156,14 +177,14 @@ mkdir -p sub2api-deploy && cd sub2api-deploy
|
|||||||
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
||||||
|
|
||||||
# 启动服务
|
# 启动服务
|
||||||
docker-compose -f docker-compose.local.yml up -d
|
docker-compose up -d
|
||||||
|
|
||||||
# 查看日志
|
# 查看日志
|
||||||
docker-compose -f docker-compose.local.yml logs -f sub2api
|
docker-compose logs -f sub2api
|
||||||
```
|
```
|
||||||
|
|
||||||
**脚本功能:**
|
**脚本功能:**
|
||||||
- 下载 `docker-compose.local.yml` 和 `.env.example`
|
- 下载 `docker-compose.local.yml`(本地保存为 `docker-compose.yml`)和 `.env.example`
|
||||||
- 自动生成安全凭证(JWT_SECRET、TOTP_ENCRYPTION_KEY、POSTGRES_PASSWORD)
|
- 自动生成安全凭证(JWT_SECRET、TOTP_ENCRYPTION_KEY、POSTGRES_PASSWORD)
|
||||||
- 创建 `.env` 文件并填充自动生成的密钥
|
- 创建 `.env` 文件并填充自动生成的密钥
|
||||||
- 创建数据目录(使用本地目录,便于备份和迁移)
|
- 创建数据目录(使用本地目录,便于备份和迁移)
|
||||||
@@ -244,6 +265,18 @@ docker-compose -f docker-compose.local.yml logs -f sub2api
|
|||||||
|
|
||||||
**推荐:** 使用 `docker-compose.local.yml`(脚本部署)以便更轻松地管理数据。
|
**推荐:** 使用 `docker-compose.local.yml`(脚本部署)以便更轻松地管理数据。
|
||||||
|
|
||||||
|
#### 启用“数据管理”功能(datamanagementd)
|
||||||
|
|
||||||
|
如需启用管理后台“数据管理”,需要额外部署宿主机数据管理进程 `datamanagementd`。
|
||||||
|
|
||||||
|
关键点:
|
||||||
|
|
||||||
|
- 主进程固定探测:`/tmp/sub2api-datamanagement.sock`
|
||||||
|
- 只有该 Socket 可连通时,数据管理功能才会开启
|
||||||
|
- Docker 场景需将宿主机 Socket 挂载到容器同路径
|
||||||
|
|
||||||
|
详细部署步骤见:`deploy/DATAMANAGEMENTD_CN.md`
|
||||||
|
|
||||||
#### 访问
|
#### 访问
|
||||||
|
|
||||||
在浏览器中打开 `http://你的服务器IP:8080`
|
在浏览器中打开 `http://你的服务器IP:8080`
|
||||||
@@ -370,6 +403,33 @@ default:
|
|||||||
rate_multiplier: 1.0
|
rate_multiplier: 1.0
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Sora 功能状态(暂不可用)
|
||||||
|
|
||||||
|
> ⚠️ 当前 Sora 相关功能因上游接入与媒体链路存在技术问题,暂时不可用。
|
||||||
|
> 现阶段请勿在生产环境依赖 Sora 能力。
|
||||||
|
> 文档中的 `gateway.sora_*` 配置仅作预留,待技术问题修复后再恢复可用。
|
||||||
|
|
||||||
|
### Sora 媒体签名 URL(功能恢复后可选)
|
||||||
|
|
||||||
|
当配置 `gateway.sora_media_signing_key` 且 `gateway.sora_media_signed_url_ttl_seconds > 0` 时,网关会将 Sora 输出的媒体地址改写为临时签名 URL(`/sora/media-signed/...`)。这样无需 API Key 即可在浏览器中直接访问,且具备过期控制与防篡改能力(签名包含 path + query)。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
gateway:
|
||||||
|
# /sora/media 是否强制要求 API Key(默认 false)
|
||||||
|
sora_media_require_api_key: false
|
||||||
|
# 媒体临时签名密钥(为空则禁用签名)
|
||||||
|
sora_media_signing_key: "your-signing-key"
|
||||||
|
# 临时签名 URL 有效期(秒)
|
||||||
|
sora_media_signed_url_ttl_seconds: 900
|
||||||
|
```
|
||||||
|
|
||||||
|
> 若未配置签名密钥,`/sora/media-signed` 将返回 503。
|
||||||
|
> 如需更严格的访问控制,可将 `sora_media_require_api_key` 设为 true,仅允许携带 API Key 的 `/sora/media` 访问。
|
||||||
|
|
||||||
|
访问策略说明:
|
||||||
|
- `/sora/media`:内部调用或客户端携带 API Key 才能下载
|
||||||
|
- `/sora/media-signed`:外部可访问,但有签名 + 过期控制
|
||||||
|
|
||||||
`config.yaml` 还支持以下安全相关配置:
|
`config.yaml` 还支持以下安全相关配置:
|
||||||
|
|
||||||
- `cors.allowed_origins` 配置 CORS 白名单
|
- `cors.allowed_origins` 配置 CORS 白名单
|
||||||
@@ -383,6 +443,14 @@ default:
|
|||||||
- `server.trusted_proxies` 启用可信代理解析 X-Forwarded-For
|
- `server.trusted_proxies` 启用可信代理解析 X-Forwarded-For
|
||||||
- `turnstile.required` 在 release 模式强制启用 Turnstile
|
- `turnstile.required` 在 release 模式强制启用 Turnstile
|
||||||
|
|
||||||
|
**网关防御纵深建议(重点)**
|
||||||
|
|
||||||
|
- `gateway.upstream_response_read_max_bytes`:限制非流式上游响应读取大小(默认 `8MB`),用于防止异常响应导致内存放大。
|
||||||
|
- `gateway.proxy_probe_response_read_max_bytes`:限制代理探测响应读取大小(默认 `1MB`)。
|
||||||
|
- `gateway.gemini_debug_response_headers`:默认 `false`,仅在排障时短时开启,避免高频请求日志开销。
|
||||||
|
- `/auth/register`、`/auth/login`、`/auth/login/2fa`、`/auth/send-verify-code` 已提供服务端兜底限流(Redis 故障时 fail-close)。
|
||||||
|
- 推荐将 WAF/CDN 作为第一层防护,服务端限流与响应读取上限作为第二层兜底;两层同时保留,避免旁路流量与误配置风险。
|
||||||
|
|
||||||
**⚠️ 安全警告:HTTP URL 配置**
|
**⚠️ 安全警告:HTTP URL 配置**
|
||||||
|
|
||||||
当 `security.url_allowlist.enabled=false` 时,系统默认执行最小 URL 校验,**拒绝 HTTP URL**,仅允许 HTTPS。要允许 HTTP URL(例如用于开发或内网测试),必须显式设置:
|
当 `security.url_allowlist.enabled=false` 时,系统默认执行最小 URL 校验,**拒绝 HTTP URL**,仅允许 HTTPS。要允许 HTTP URL(例如用于开发或内网测试),必须显式设置:
|
||||||
@@ -428,6 +496,29 @@ Invalid base URL: invalid url scheme: http
|
|||||||
./sub2api
|
./sub2api
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### HTTP/2 (h2c) 与 HTTP/1.1 回退
|
||||||
|
|
||||||
|
后端明文端口默认支持 h2c,并保留 HTTP/1.1 回退用于 WebSocket 与旧客户端。浏览器通常不支持 h2c,性能收益主要在反向代理或内网链路。
|
||||||
|
|
||||||
|
**反向代理示例(Caddy):**
|
||||||
|
|
||||||
|
```caddyfile
|
||||||
|
transport http {
|
||||||
|
versions h2c h1
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**验证:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# h2c prior knowledge
|
||||||
|
curl --http2-prior-knowledge -I http://localhost:8080/health
|
||||||
|
# HTTP/1.1 回退
|
||||||
|
curl --http1.1 -I http://localhost:8080/health
|
||||||
|
# WebSocket 回退验证(需管理员 token)
|
||||||
|
websocat -H="Sec-WebSocket-Protocol: sub2api-admin, jwt.<ADMIN_TOKEN>" ws://localhost:8080/api/v1/admin/ops/ws/qps
|
||||||
|
```
|
||||||
|
|
||||||
#### 开发模式
|
#### 开发模式
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -520,6 +611,28 @@ sub2api/
|
|||||||
└── install.sh # 一键安装脚本
|
└── install.sh # 一键安装脚本
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## 免责声明
|
||||||
|
|
||||||
|
> **使用本项目前请仔细阅读:**
|
||||||
|
>
|
||||||
|
> :rotating_light: **服务条款风险**: 使用本项目可能违反 Anthropic 的服务条款。请在使用前仔细阅读 Anthropic 的用户协议,使用本项目的一切风险由用户自行承担。
|
||||||
|
>
|
||||||
|
> :book: **免责声明**: 本项目仅供技术学习和研究使用,作者不对因使用本项目导致的账户封禁、服务中断或其他损失承担任何责任。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Star History
|
||||||
|
|
||||||
|
<a href="https://star-history.com/#Wei-Shaw/sub2api&Date">
|
||||||
|
<picture>
|
||||||
|
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date&theme=dark" />
|
||||||
|
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
</picture>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## 许可证
|
## 许可证
|
||||||
|
|
||||||
MIT License
|
MIT License
|
||||||
|
|||||||
BIN
assets/partners/logos/pincc-logo.png
Normal file
BIN
assets/partners/logos/pincc-logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 171 KiB |
@@ -5,6 +5,7 @@ linters:
|
|||||||
enable:
|
enable:
|
||||||
- depguard
|
- depguard
|
||||||
- errcheck
|
- errcheck
|
||||||
|
- gosec
|
||||||
- govet
|
- govet
|
||||||
- ineffassign
|
- ineffassign
|
||||||
- staticcheck
|
- staticcheck
|
||||||
@@ -42,6 +43,22 @@ linters:
|
|||||||
desc: "handler must not import gorm"
|
desc: "handler must not import gorm"
|
||||||
- pkg: github.com/redis/go-redis/v9
|
- pkg: github.com/redis/go-redis/v9
|
||||||
desc: "handler must not import redis"
|
desc: "handler must not import redis"
|
||||||
|
gosec:
|
||||||
|
excludes:
|
||||||
|
- G101
|
||||||
|
- G103
|
||||||
|
- G104
|
||||||
|
- G109
|
||||||
|
- G115
|
||||||
|
- G201
|
||||||
|
- G202
|
||||||
|
- G301
|
||||||
|
- G302
|
||||||
|
- G304
|
||||||
|
- G306
|
||||||
|
- G404
|
||||||
|
severity: high
|
||||||
|
confidence: high
|
||||||
errcheck:
|
errcheck:
|
||||||
# Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
|
# Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
|
||||||
# Such cases aren't reported by default.
|
# Such cases aren't reported by default.
|
||||||
@@ -76,20 +93,13 @@ linters:
|
|||||||
check-escaping-errors: true
|
check-escaping-errors: true
|
||||||
staticcheck:
|
staticcheck:
|
||||||
# https://staticcheck.dev/docs/configuration/options/#dot_import_whitelist
|
# https://staticcheck.dev/docs/configuration/options/#dot_import_whitelist
|
||||||
# Default: ["github.com/mmcloughlin/avo/build", "github.com/mmcloughlin/avo/operand", "github.com/mmcloughlin/avo/reg"]
|
|
||||||
dot-import-whitelist:
|
dot-import-whitelist:
|
||||||
- fmt
|
- fmt
|
||||||
# https://staticcheck.dev/docs/configuration/options/#initialisms
|
# https://staticcheck.dev/docs/configuration/options/#initialisms
|
||||||
# Default: ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS"]
|
|
||||||
initialisms: [ "ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS" ]
|
initialisms: [ "ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS" ]
|
||||||
# https://staticcheck.dev/docs/configuration/options/#http_status_code_whitelist
|
# https://staticcheck.dev/docs/configuration/options/#http_status_code_whitelist
|
||||||
# Default: ["200", "400", "404", "500"]
|
|
||||||
http-status-code-whitelist: [ "200", "400", "404", "500" ]
|
http-status-code-whitelist: [ "200", "400", "404", "500" ]
|
||||||
# SAxxxx checks in https://staticcheck.dev/docs/configuration/options/#checks
|
# "all" enables every SA/ST/S/QF check; only list the ones to disable.
|
||||||
# Example (to disable some checks): [ "all", "-SA1000", "-SA1001"]
|
|
||||||
# Run `GL_DEBUG=staticcheck golangci-lint run --enable=staticcheck` to see all available checks and enabled by config checks.
|
|
||||||
# Default: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"]
|
|
||||||
# Temporarily disable style checks to allow CI to pass
|
|
||||||
checks:
|
checks:
|
||||||
- all
|
- all
|
||||||
- -ST1000 # Package comment format
|
- -ST1000 # Package comment format
|
||||||
@@ -97,489 +107,19 @@ linters:
|
|||||||
- -ST1020 # Comment on exported method format
|
- -ST1020 # Comment on exported method format
|
||||||
- -ST1021 # Comment on exported type format
|
- -ST1021 # Comment on exported type format
|
||||||
- -ST1022 # Comment on exported variable format
|
- -ST1022 # Comment on exported variable format
|
||||||
# Invalid regular expression.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1000
|
|
||||||
- SA1000
|
|
||||||
# Invalid template.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1001
|
|
||||||
- SA1001
|
|
||||||
# Invalid format in 'time.Parse'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1002
|
|
||||||
- SA1002
|
|
||||||
# Unsupported argument to functions in 'encoding/binary'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1003
|
|
||||||
- SA1003
|
|
||||||
# Suspiciously small untyped constant in 'time.Sleep'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1004
|
|
||||||
- SA1004
|
|
||||||
# Invalid first argument to 'exec.Command'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1005
|
|
||||||
- SA1005
|
|
||||||
# 'Printf' with dynamic first argument and no further arguments.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1006
|
|
||||||
- SA1006
|
|
||||||
# Invalid URL in 'net/url.Parse'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1007
|
|
||||||
- SA1007
|
|
||||||
# Non-canonical key in 'http.Header' map.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1008
|
|
||||||
- SA1008
|
|
||||||
# '(*regexp.Regexp).FindAll' called with 'n == 0', which will always return zero results.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1010
|
|
||||||
- SA1010
|
|
||||||
# Various methods in the "strings" package expect valid UTF-8, but invalid input is provided.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1011
|
|
||||||
- SA1011
|
|
||||||
# A nil 'context.Context' is being passed to a function, consider using 'context.TODO' instead.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1012
|
|
||||||
- SA1012
|
|
||||||
# 'io.Seeker.Seek' is being called with the whence constant as the first argument, but it should be the second.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1013
|
|
||||||
- SA1013
|
|
||||||
# Non-pointer value passed to 'Unmarshal' or 'Decode'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1014
|
|
||||||
- SA1014
|
|
||||||
# Using 'time.Tick' in a way that will leak. Consider using 'time.NewTicker', and only use 'time.Tick' in tests, commands and endless functions.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1015
|
|
||||||
- SA1015
|
|
||||||
# Trapping a signal that cannot be trapped.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1016
|
|
||||||
- SA1016
|
|
||||||
# Channels used with 'os/signal.Notify' should be buffered.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1017
|
|
||||||
- SA1017
|
|
||||||
# 'strings.Replace' called with 'n == 0', which does nothing.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1018
|
|
||||||
- SA1018
|
|
||||||
# Using a deprecated function, variable, constant or field.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1019
|
|
||||||
- SA1019
|
|
||||||
# Using an invalid host:port pair with a 'net.Listen'-related function.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1020
|
|
||||||
- SA1020
|
|
||||||
# Using 'bytes.Equal' to compare two 'net.IP'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1021
|
|
||||||
- SA1021
|
|
||||||
# Modifying the buffer in an 'io.Writer' implementation.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1023
|
|
||||||
- SA1023
|
|
||||||
# A string cutset contains duplicate characters.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1024
|
|
||||||
- SA1024
|
|
||||||
# It is not possible to use '(*time.Timer).Reset''s return value correctly.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1025
|
|
||||||
- SA1025
|
|
||||||
# Cannot marshal channels or functions.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1026
|
|
||||||
- SA1026
|
|
||||||
# Atomic access to 64-bit variable must be 64-bit aligned.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1027
|
|
||||||
- SA1027
|
|
||||||
# 'sort.Slice' can only be used on slices.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1028
|
|
||||||
- SA1028
|
|
||||||
# Inappropriate key in call to 'context.WithValue'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1029
|
|
||||||
- SA1029
|
|
||||||
# Invalid argument in call to a 'strconv' function.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1030
|
|
||||||
- SA1030
|
|
||||||
# Overlapping byte slices passed to an encoder.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1031
|
|
||||||
- SA1031
|
|
||||||
# Wrong order of arguments to 'errors.Is'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1032
|
|
||||||
- SA1032
|
|
||||||
# 'sync.WaitGroup.Add' called inside the goroutine, leading to a race condition.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA2000
|
|
||||||
- SA2000
|
|
||||||
# Empty critical section, did you mean to defer the unlock?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA2001
|
|
||||||
- SA2001
|
|
||||||
# Called 'testing.T.FailNow' or 'SkipNow' in a goroutine, which isn't allowed.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA2002
|
|
||||||
- SA2002
|
|
||||||
# Deferred 'Lock' right after locking, likely meant to defer 'Unlock' instead.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA2003
|
|
||||||
- SA2003
|
|
||||||
# 'TestMain' doesn't call 'os.Exit', hiding test failures.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA3000
|
|
||||||
- SA3000
|
|
||||||
# Assigning to 'b.N' in benchmarks distorts the results.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA3001
|
|
||||||
- SA3001
|
|
||||||
# Binary operator has identical expressions on both sides.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4000
|
|
||||||
- SA4000
|
|
||||||
# '&*x' gets simplified to 'x', it does not copy 'x'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4001
|
|
||||||
- SA4001
|
|
||||||
# Comparing unsigned values against negative values is pointless.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4003
|
|
||||||
- SA4003
|
|
||||||
# The loop exits unconditionally after one iteration.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4004
|
|
||||||
- SA4004
|
|
||||||
# Field assignment that will never be observed. Did you mean to use a pointer receiver?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4005
|
|
||||||
- SA4005
|
|
||||||
# A value assigned to a variable is never read before being overwritten. Forgotten error check or dead code?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4006
|
|
||||||
- SA4006
|
|
||||||
# The variable in the loop condition never changes, are you incrementing the wrong variable?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4008
|
|
||||||
- SA4008
|
|
||||||
# A function argument is overwritten before its first use.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4009
|
|
||||||
- SA4009
|
|
||||||
# The result of 'append' will never be observed anywhere.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4010
|
|
||||||
- SA4010
|
|
||||||
# Break statement with no effect. Did you mean to break out of an outer loop?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4011
|
|
||||||
- SA4011
|
|
||||||
# Comparing a value against NaN even though no value is equal to NaN.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4012
|
|
||||||
- SA4012
|
|
||||||
# Negating a boolean twice ('!!b') is the same as writing 'b'. This is either redundant, or a typo.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4013
|
|
||||||
- SA4013
|
|
||||||
# An if/else if chain has repeated conditions and no side-effects; if the condition didn't match the first time, it won't match the second time, either.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4014
|
|
||||||
- SA4014
|
|
||||||
# Calling functions like 'math.Ceil' on floats converted from integers doesn't do anything useful.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4015
|
|
||||||
- SA4015
|
|
||||||
# Certain bitwise operations, such as 'x ^ 0', do not do anything useful.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4016
|
|
||||||
- SA4016
|
|
||||||
# Discarding the return values of a function without side effects, making the call pointless.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4017
|
|
||||||
- SA4017
|
|
||||||
# Self-assignment of variables.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4018
|
|
||||||
- SA4018
|
|
||||||
# Multiple, identical build constraints in the same file.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4019
|
|
||||||
- SA4019
|
|
||||||
# Unreachable case clause in a type switch.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4020
|
|
||||||
- SA4020
|
|
||||||
# "x = append(y)" is equivalent to "x = y".
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4021
|
|
||||||
- SA4021
|
|
||||||
# Comparing the address of a variable against nil.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4022
|
|
||||||
- SA4022
|
|
||||||
# Impossible comparison of interface value with untyped nil.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4023
|
|
||||||
- SA4023
|
|
||||||
# Checking for impossible return value from a builtin function.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4024
|
|
||||||
- SA4024
|
|
||||||
# Integer division of literals that results in zero.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4025
|
|
||||||
- SA4025
|
|
||||||
# Go constants cannot express negative zero.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4026
|
|
||||||
- SA4026
|
|
||||||
# '(*net/url.URL).Query' returns a copy, modifying it doesn't change the URL.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4027
|
|
||||||
- SA4027
|
|
||||||
# 'x % 1' is always zero.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4028
|
|
||||||
- SA4028
|
|
||||||
# Ineffective attempt at sorting slice.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4029
|
|
||||||
- SA4029
|
|
||||||
# Ineffective attempt at generating random number.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4030
|
|
||||||
- SA4030
|
|
||||||
# Checking never-nil value against nil.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4031
|
|
||||||
- SA4031
|
|
||||||
# Comparing 'runtime.GOOS' or 'runtime.GOARCH' against impossible value.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4032
|
|
||||||
- SA4032
|
|
||||||
# Assignment to nil map.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5000
|
|
||||||
- SA5000
|
|
||||||
# Deferring 'Close' before checking for a possible error.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5001
|
|
||||||
- SA5001
|
|
||||||
# The empty for loop ("for {}") spins and can block the scheduler.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5002
|
|
||||||
- SA5002
|
|
||||||
# Defers in infinite loops will never execute.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5003
|
|
||||||
- SA5003
|
|
||||||
# "for { select { ..." with an empty default branch spins.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5004
|
|
||||||
- SA5004
|
|
||||||
# The finalizer references the finalized object, preventing garbage collection.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5005
|
|
||||||
- SA5005
|
|
||||||
# Infinite recursive call.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5007
|
|
||||||
- SA5007
|
|
||||||
# Invalid struct tag.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5008
|
|
||||||
- SA5008
|
|
||||||
# Invalid Printf call.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5009
|
|
||||||
- SA5009
|
|
||||||
# Impossible type assertion.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5010
|
|
||||||
- SA5010
|
|
||||||
# Possible nil pointer dereference.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5011
|
|
||||||
- SA5011
|
|
||||||
# Passing odd-sized slice to function expecting even size.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5012
|
|
||||||
- SA5012
|
|
||||||
# Using 'regexp.Match' or related in a loop, should use 'regexp.Compile'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6000
|
|
||||||
- SA6000
|
|
||||||
# Missing an optimization opportunity when indexing maps by byte slices.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6001
|
|
||||||
- SA6001
|
|
||||||
# Storing non-pointer values in 'sync.Pool' allocates memory.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6002
|
|
||||||
- SA6002
|
|
||||||
# Converting a string to a slice of runes before ranging over it.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6003
|
|
||||||
- SA6003
|
|
||||||
# Inefficient string comparison with 'strings.ToLower' or 'strings.ToUpper'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6005
|
|
||||||
- SA6005
|
|
||||||
# Using io.WriteString to write '[]byte'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6006
|
|
||||||
- SA6006
|
|
||||||
# Defers in range loops may not run when you expect them to.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9001
|
|
||||||
- SA9001
|
|
||||||
# Using a non-octal 'os.FileMode' that looks like it was meant to be in octal.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9002
|
|
||||||
- SA9002
|
|
||||||
# Empty body in an if or else branch.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9003
|
|
||||||
- SA9003
|
|
||||||
# Only the first constant has an explicit type.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9004
|
|
||||||
- SA9004
|
|
||||||
# Trying to marshal a struct with no public fields nor custom marshaling.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9005
|
|
||||||
- SA9005
|
|
||||||
# Dubious bit shifting of a fixed size integer value.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9006
|
|
||||||
- SA9006
|
|
||||||
# Deleting a directory that shouldn't be deleted.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9007
|
|
||||||
- SA9007
|
|
||||||
# 'else' branch of a type assertion is probably not reading the right value.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9008
|
|
||||||
- SA9008
|
|
||||||
# Ineffectual Go compiler directive.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9009
|
|
||||||
- SA9009
|
|
||||||
# NOTE: ST1000, ST1001, ST1003, ST1020, ST1021, ST1022 are disabled above
|
|
||||||
# Incorrectly formatted error string.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1005
|
|
||||||
- ST1005
|
|
||||||
# Poorly chosen receiver name.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1006
|
|
||||||
- ST1006
|
|
||||||
# A function's error value should be its last return value.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1008
|
|
||||||
- ST1008
|
|
||||||
# Poorly chosen name for variable of type 'time.Duration'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1011
|
|
||||||
- ST1011
|
|
||||||
# Poorly chosen name for error variable.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1012
|
|
||||||
- ST1012
|
|
||||||
# Should use constants for HTTP error codes, not magic numbers.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1013
|
|
||||||
- ST1013
|
|
||||||
# A switch's default case should be the first or last case.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1015
|
|
||||||
- ST1015
|
|
||||||
# Use consistent method receiver names.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1016
|
|
||||||
- ST1016
|
|
||||||
# Don't use Yoda conditions.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1017
|
|
||||||
- ST1017
|
|
||||||
# Avoid zero-width and control characters in string literals.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1018
|
|
||||||
- ST1018
|
|
||||||
# Importing the same package multiple times.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1019
|
|
||||||
- ST1019
|
|
||||||
# NOTE: ST1020, ST1021, ST1022 removed (disabled above)
|
|
||||||
# Redundant type in variable declaration.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1023
|
|
||||||
- ST1023
|
|
||||||
# Use plain channel send or receive instead of single-case select.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1000
|
|
||||||
- S1000
|
|
||||||
# Replace for loop with call to copy.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1001
|
|
||||||
- S1001
|
|
||||||
# Omit comparison with boolean constant.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1002
|
|
||||||
- S1002
|
|
||||||
# Replace call to 'strings.Index' with 'strings.Contains'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1003
|
|
||||||
- S1003
|
|
||||||
# Replace call to 'bytes.Compare' with 'bytes.Equal'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1004
|
|
||||||
- S1004
|
|
||||||
# Drop unnecessary use of the blank identifier.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1005
|
|
||||||
- S1005
|
|
||||||
# Use "for { ... }" for infinite loops.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1006
|
|
||||||
- S1006
|
|
||||||
# Simplify regular expression by using raw string literal.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1007
|
|
||||||
- S1007
|
|
||||||
# Simplify returning boolean expression.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1008
|
|
||||||
- S1008
|
|
||||||
# Omit redundant nil check on slices, maps, and channels.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1009
|
|
||||||
- S1009
|
|
||||||
# Omit default slice index.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1010
|
|
||||||
- S1010
|
|
||||||
# Use a single 'append' to concatenate two slices.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1011
|
|
||||||
- S1011
|
|
||||||
# Replace 'time.Now().Sub(x)' with 'time.Since(x)'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1012
|
|
||||||
- S1012
|
|
||||||
# Use a type conversion instead of manually copying struct fields.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1016
|
|
||||||
- S1016
|
|
||||||
# Replace manual trimming with 'strings.TrimPrefix'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1017
|
|
||||||
- S1017
|
|
||||||
# Use "copy" for sliding elements.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1018
|
|
||||||
- S1018
|
|
||||||
# Simplify "make" call by omitting redundant arguments.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1019
|
|
||||||
- S1019
|
|
||||||
# Omit redundant nil check in type assertion.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1020
|
|
||||||
- S1020
|
|
||||||
# Merge variable declaration and assignment.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1021
|
|
||||||
- S1021
|
|
||||||
# Omit redundant control flow.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1023
|
|
||||||
- S1023
|
|
||||||
# Replace 'x.Sub(time.Now())' with 'time.Until(x)'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1024
|
|
||||||
- S1024
|
|
||||||
# Don't use 'fmt.Sprintf("%s", x)' unnecessarily.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1025
|
|
||||||
- S1025
|
|
||||||
# Simplify error construction with 'fmt.Errorf'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1028
|
|
||||||
- S1028
|
|
||||||
# Range over the string directly.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1029
|
|
||||||
- S1029
|
|
||||||
# Use 'bytes.Buffer.String' or 'bytes.Buffer.Bytes'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1030
|
|
||||||
- S1030
|
|
||||||
# Omit redundant nil check around loop.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1031
|
|
||||||
- S1031
|
|
||||||
# Use 'sort.Ints(x)', 'sort.Float64s(x)', and 'sort.Strings(x)'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1032
|
|
||||||
- S1032
|
|
||||||
# Unnecessary guard around call to "delete".
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1033
|
|
||||||
- S1033
|
|
||||||
# Use result of type assertion to simplify cases.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1034
|
|
||||||
- S1034
|
|
||||||
# Redundant call to 'net/http.CanonicalHeaderKey' in method call on 'net/http.Header'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1035
|
|
||||||
- S1035
|
|
||||||
# Unnecessary guard around map access.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1036
|
|
||||||
- S1036
|
|
||||||
# Elaborate way of sleeping.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1037
|
|
||||||
- S1037
|
|
||||||
# Unnecessarily complex way of printing formatted string.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1038
|
|
||||||
- S1038
|
|
||||||
# Unnecessary use of 'fmt.Sprint'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1039
|
|
||||||
- S1039
|
|
||||||
# Type assertion to current type.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1040
|
|
||||||
- S1040
|
|
||||||
# Apply De Morgan's law.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1001
|
|
||||||
- QF1001
|
|
||||||
# Convert untagged switch to tagged switch.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1002
|
|
||||||
- QF1002
|
|
||||||
# Convert if/else-if chain to tagged switch.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1003
|
|
||||||
- QF1003
|
|
||||||
# Use 'strings.ReplaceAll' instead of 'strings.Replace' with 'n == -1'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1004
|
|
||||||
- QF1004
|
|
||||||
# Expand call to 'math.Pow'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1005
|
|
||||||
- QF1005
|
|
||||||
# Lift 'if'+'break' into loop condition.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1006
|
|
||||||
- QF1006
|
|
||||||
# Merge conditional assignment into variable declaration.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1007
|
|
||||||
- QF1007
|
|
||||||
# Omit embedded fields from selector expression.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1008
|
|
||||||
- QF1008
|
|
||||||
# Use 'time.Time.Equal' instead of '==' operator.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1009
|
|
||||||
- QF1009
|
|
||||||
# Convert slice of bytes to string when printing it.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1010
|
|
||||||
- QF1010
|
|
||||||
# Omit redundant type from variable declaration.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1011
|
|
||||||
- QF1011
|
|
||||||
# Use 'fmt.Fprintf(x, ...)' instead of 'x.Write(fmt.Sprintf(...))'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1012
|
|
||||||
- QF1012
|
|
||||||
unused:
|
unused:
|
||||||
# Mark all struct fields that have been written to as used.
|
|
||||||
# Default: true
|
# Default: true
|
||||||
field-writes-are-uses: false
|
field-writes-are-uses: true
|
||||||
# Treat IncDec statement (e.g. `i++` or `i--`) as both read and write operation instead of just write.
|
|
||||||
# Default: false
|
# Default: false
|
||||||
post-statements-are-reads: true
|
post-statements-are-reads: true
|
||||||
# Mark all exported fields as used.
|
|
||||||
# default: true
|
|
||||||
exported-fields-are-used: false
|
|
||||||
# Mark all function parameters as used.
|
|
||||||
# default: true
|
|
||||||
parameters-are-used: true
|
|
||||||
# Mark all local variables as used.
|
|
||||||
# default: true
|
|
||||||
local-variables-are-used: false
|
|
||||||
# Mark all identifiers inside generated files as used.
|
|
||||||
# Default: true
|
# Default: true
|
||||||
generated-is-used: false
|
exported-fields-are-used: true
|
||||||
|
# Default: true
|
||||||
|
parameters-are-used: true
|
||||||
|
# Default: true
|
||||||
|
local-variables-are-used: false
|
||||||
|
# Default: true — must be true, ent generates 130K+ lines of code
|
||||||
|
generated-is-used: true
|
||||||
|
|
||||||
formatters:
|
formatters:
|
||||||
enable:
|
enable:
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM golang:1.25.6-alpine
|
FROM golang:1.25.7-alpine
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,14 @@
|
|||||||
.PHONY: build test test-unit test-integration test-e2e
|
.PHONY: build generate test test-unit test-integration test-e2e
|
||||||
|
|
||||||
|
VERSION ?= $(shell tr -d '\r\n' < ./cmd/server/VERSION)
|
||||||
|
LDFLAGS ?= -s -w -X main.Version=$(VERSION)
|
||||||
|
|
||||||
build:
|
build:
|
||||||
go build -o bin/server ./cmd/server
|
CGO_ENABLED=0 go build -ldflags="$(LDFLAGS)" -trimpath -o bin/server ./cmd/server
|
||||||
|
|
||||||
|
generate:
|
||||||
|
go generate ./ent
|
||||||
|
go generate ./cmd/server
|
||||||
|
|
||||||
test:
|
test:
|
||||||
go test ./...
|
go test ./...
|
||||||
@@ -14,4 +21,7 @@ test-integration:
|
|||||||
go test -tags=integration ./...
|
go test -tags=integration ./...
|
||||||
|
|
||||||
test-e2e:
|
test-e2e:
|
||||||
go test -tags=e2e ./...
|
./scripts/e2e-test.sh
|
||||||
|
|
||||||
|
test-e2e-local:
|
||||||
|
go test -tags=e2e -v -timeout=300s ./internal/integration/...
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ func main() {
|
|||||||
email := flag.String("email", "", "Admin email to issue a JWT for (defaults to first active admin)")
|
email := flag.String("email", "", "Admin email to issue a JWT for (defaults to first active admin)")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
cfg, err := config.Load()
|
cfg, err := config.LoadForBootstrap()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("failed to load config: %v", err)
|
log.Fatalf("failed to load config: %v", err)
|
||||||
}
|
}
|
||||||
@@ -33,7 +33,7 @@ func main() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
userRepo := repository.NewUserRepository(client, sqlDB)
|
userRepo := repository.NewUserRepository(client, sqlDB)
|
||||||
authService := service.NewAuthService(userRepo, nil, nil, cfg, nil, nil, nil, nil, nil)
|
authService := service.NewAuthService(client, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|||||||
@@ -1 +1 @@
|
|||||||
0.1.61
|
0.1.88
|
||||||
@@ -8,7 +8,6 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"flag"
|
"flag"
|
||||||
"log"
|
"log"
|
||||||
"log/slog"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
@@ -19,11 +18,14 @@ import (
|
|||||||
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/handler"
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/setup"
|
"github.com/Wei-Shaw/sub2api/internal/setup"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/web"
|
"github.com/Wei-Shaw/sub2api/internal/web"
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
"golang.org/x/net/http2/h2c"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:embed VERSION
|
//go:embed VERSION
|
||||||
@@ -38,7 +40,12 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// Read version from embedded VERSION file
|
// 如果 Version 已通过 ldflags 注入(例如 -X main.Version=...),则不要覆盖。
|
||||||
|
if strings.TrimSpace(Version) != "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// 默认从 embedded VERSION 文件读取版本号(编译期打包进二进制)。
|
||||||
Version = strings.TrimSpace(embeddedVersion)
|
Version = strings.TrimSpace(embeddedVersion)
|
||||||
if Version == "" {
|
if Version == "" {
|
||||||
Version = "0.0.0-dev"
|
Version = "0.0.0-dev"
|
||||||
@@ -47,22 +54,9 @@ func init() {
|
|||||||
|
|
||||||
// initLogger configures the default slog handler based on gin.Mode().
|
// initLogger configures the default slog handler based on gin.Mode().
|
||||||
// In non-release mode, Debug level logs are enabled.
|
// In non-release mode, Debug level logs are enabled.
|
||||||
func initLogger() {
|
|
||||||
var level slog.Level
|
|
||||||
if gin.Mode() == gin.ReleaseMode {
|
|
||||||
level = slog.LevelInfo
|
|
||||||
} else {
|
|
||||||
level = slog.LevelDebug
|
|
||||||
}
|
|
||||||
handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
|
|
||||||
Level: level,
|
|
||||||
})
|
|
||||||
slog.SetDefault(slog.New(handler))
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
// Initialize slog logger based on gin mode
|
logger.InitBootstrap()
|
||||||
initLogger()
|
defer logger.Sync()
|
||||||
|
|
||||||
// Parse command line flags
|
// Parse command line flags
|
||||||
setupMode := flag.Bool("setup", false, "Run setup wizard in CLI mode")
|
setupMode := flag.Bool("setup", false, "Run setup wizard in CLI mode")
|
||||||
@@ -106,7 +100,7 @@ func runSetupServer() {
|
|||||||
r := gin.New()
|
r := gin.New()
|
||||||
r.Use(middleware.Recovery())
|
r.Use(middleware.Recovery())
|
||||||
r.Use(middleware.CORS(config.CORSConfig{}))
|
r.Use(middleware.CORS(config.CORSConfig{}))
|
||||||
r.Use(middleware.SecurityHeaders(config.CSPConfig{Enabled: true, Policy: config.DefaultCSPPolicy}))
|
r.Use(middleware.SecurityHeaders(config.CSPConfig{Enabled: true, Policy: config.DefaultCSPPolicy}, nil))
|
||||||
|
|
||||||
// Register setup routes
|
// Register setup routes
|
||||||
setup.RegisterRoutes(r)
|
setup.RegisterRoutes(r)
|
||||||
@@ -122,16 +116,26 @@ func runSetupServer() {
|
|||||||
log.Printf("Setup wizard available at http://%s", addr)
|
log.Printf("Setup wizard available at http://%s", addr)
|
||||||
log.Println("Complete the setup wizard to configure Sub2API")
|
log.Println("Complete the setup wizard to configure Sub2API")
|
||||||
|
|
||||||
if err := r.Run(addr); err != nil {
|
server := &http.Server{
|
||||||
|
Addr: addr,
|
||||||
|
Handler: h2c.NewHandler(r, &http2.Server{}),
|
||||||
|
ReadHeaderTimeout: 30 * time.Second,
|
||||||
|
IdleTimeout: 120 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
log.Fatalf("Failed to start setup server: %v", err)
|
log.Fatalf("Failed to start setup server: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func runMainServer() {
|
func runMainServer() {
|
||||||
cfg, err := config.Load()
|
cfg, err := config.LoadForBootstrap()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to load config: %v", err)
|
log.Fatalf("Failed to load config: %v", err)
|
||||||
}
|
}
|
||||||
|
if err := logger.Init(logger.OptionsFromConfig(cfg.Log)); err != nil {
|
||||||
|
log.Fatalf("Failed to initialize logger: %v", err)
|
||||||
|
}
|
||||||
if cfg.RunMode == config.RunModeSimple {
|
if cfg.RunMode == config.RunModeSimple {
|
||||||
log.Println("⚠️ WARNING: Running in SIMPLE mode - billing and quota checks are DISABLED")
|
log.Println("⚠️ WARNING: Running in SIMPLE mode - billing and quota checks are DISABLED")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Wei-Shaw/sub2api/ent"
|
"github.com/Wei-Shaw/sub2api/ent"
|
||||||
@@ -40,6 +41,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
// Server layer ProviderSet
|
// Server layer ProviderSet
|
||||||
server.ProviderSet,
|
server.ProviderSet,
|
||||||
|
|
||||||
|
// Privacy client factory for OpenAI training opt-out
|
||||||
|
providePrivacyClientFactory,
|
||||||
|
|
||||||
// BuildInfo provider
|
// BuildInfo provider
|
||||||
provideServiceBuildInfo,
|
provideServiceBuildInfo,
|
||||||
|
|
||||||
@@ -52,6 +56,10 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func providePrivacyClientFactory() service.PrivacyClientFactory {
|
||||||
|
return repository.CreatePrivacyReqClient
|
||||||
|
}
|
||||||
|
|
||||||
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
||||||
return service.BuildInfo{
|
return service.BuildInfo{
|
||||||
Version: buildInfo.Version,
|
Version: buildInfo.Version,
|
||||||
@@ -67,28 +75,38 @@ func provideCleanup(
|
|||||||
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
||||||
opsCleanup *service.OpsCleanupService,
|
opsCleanup *service.OpsCleanupService,
|
||||||
opsScheduledReport *service.OpsScheduledReportService,
|
opsScheduledReport *service.OpsScheduledReportService,
|
||||||
|
opsSystemLogSink *service.OpsSystemLogSink,
|
||||||
|
soraMediaCleanup *service.SoraMediaCleanupService,
|
||||||
schedulerSnapshot *service.SchedulerSnapshotService,
|
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||||
tokenRefresh *service.TokenRefreshService,
|
tokenRefresh *service.TokenRefreshService,
|
||||||
accountExpiry *service.AccountExpiryService,
|
accountExpiry *service.AccountExpiryService,
|
||||||
subscriptionExpiry *service.SubscriptionExpiryService,
|
subscriptionExpiry *service.SubscriptionExpiryService,
|
||||||
usageCleanup *service.UsageCleanupService,
|
usageCleanup *service.UsageCleanupService,
|
||||||
|
idempotencyCleanup *service.IdempotencyCleanupService,
|
||||||
pricing *service.PricingService,
|
pricing *service.PricingService,
|
||||||
emailQueue *service.EmailQueueService,
|
emailQueue *service.EmailQueueService,
|
||||||
billingCache *service.BillingCacheService,
|
billingCache *service.BillingCacheService,
|
||||||
|
usageRecordWorkerPool *service.UsageRecordWorkerPool,
|
||||||
|
subscriptionService *service.SubscriptionService,
|
||||||
oauth *service.OAuthService,
|
oauth *service.OAuthService,
|
||||||
openaiOAuth *service.OpenAIOAuthService,
|
openaiOAuth *service.OpenAIOAuthService,
|
||||||
geminiOAuth *service.GeminiOAuthService,
|
geminiOAuth *service.GeminiOAuthService,
|
||||||
antigravityOAuth *service.AntigravityOAuthService,
|
antigravityOAuth *service.AntigravityOAuthService,
|
||||||
|
openAIGateway *service.OpenAIGatewayService,
|
||||||
|
scheduledTestRunner *service.ScheduledTestRunnerService,
|
||||||
|
backupSvc *service.BackupService,
|
||||||
) func() {
|
) func() {
|
||||||
return func() {
|
return func() {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
// Cleanup steps in reverse dependency order
|
type cleanupStep struct {
|
||||||
cleanupSteps := []struct {
|
|
||||||
name string
|
name string
|
||||||
fn func() error
|
fn func() error
|
||||||
}{
|
}
|
||||||
|
|
||||||
|
// 应用层清理步骤可并行执行,基础设施资源(Redis/Ent)最后按顺序关闭。
|
||||||
|
parallelSteps := []cleanupStep{
|
||||||
{"OpsScheduledReportService", func() error {
|
{"OpsScheduledReportService", func() error {
|
||||||
if opsScheduledReport != nil {
|
if opsScheduledReport != nil {
|
||||||
opsScheduledReport.Stop()
|
opsScheduledReport.Stop()
|
||||||
@@ -101,6 +119,18 @@ func provideCleanup(
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"OpsSystemLogSink", func() error {
|
||||||
|
if opsSystemLogSink != nil {
|
||||||
|
opsSystemLogSink.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SoraMediaCleanupService", func() error {
|
||||||
|
if soraMediaCleanup != nil {
|
||||||
|
soraMediaCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"OpsAlertEvaluatorService", func() error {
|
{"OpsAlertEvaluatorService", func() error {
|
||||||
if opsAlertEvaluator != nil {
|
if opsAlertEvaluator != nil {
|
||||||
opsAlertEvaluator.Stop()
|
opsAlertEvaluator.Stop()
|
||||||
@@ -131,6 +161,12 @@ func provideCleanup(
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"IdempotencyCleanupService", func() error {
|
||||||
|
if idempotencyCleanup != nil {
|
||||||
|
idempotencyCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"TokenRefreshService", func() error {
|
{"TokenRefreshService", func() error {
|
||||||
tokenRefresh.Stop()
|
tokenRefresh.Stop()
|
||||||
return nil
|
return nil
|
||||||
@@ -143,6 +179,12 @@ func provideCleanup(
|
|||||||
subscriptionExpiry.Stop()
|
subscriptionExpiry.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"SubscriptionService", func() error {
|
||||||
|
if subscriptionService != nil {
|
||||||
|
subscriptionService.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"PricingService", func() error {
|
{"PricingService", func() error {
|
||||||
pricing.Stop()
|
pricing.Stop()
|
||||||
return nil
|
return nil
|
||||||
@@ -155,6 +197,12 @@ func provideCleanup(
|
|||||||
billingCache.Stop()
|
billingCache.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"UsageRecordWorkerPool", func() error {
|
||||||
|
if usageRecordWorkerPool != nil {
|
||||||
|
usageRecordWorkerPool.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"OAuthService", func() error {
|
{"OAuthService", func() error {
|
||||||
oauth.Stop()
|
oauth.Stop()
|
||||||
return nil
|
return nil
|
||||||
@@ -171,23 +219,72 @@ func provideCleanup(
|
|||||||
antigravityOAuth.Stop()
|
antigravityOAuth.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"OpenAIWSPool", func() error {
|
||||||
|
if openAIGateway != nil {
|
||||||
|
openAIGateway.CloseOpenAIWSPool()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"ScheduledTestRunnerService", func() error {
|
||||||
|
if scheduledTestRunner != nil {
|
||||||
|
scheduledTestRunner.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"BackupService", func() error {
|
||||||
|
if backupSvc != nil {
|
||||||
|
backupSvc.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
infraSteps := []cleanupStep{
|
||||||
{"Redis", func() error {
|
{"Redis", func() error {
|
||||||
|
if rdb == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return rdb.Close()
|
return rdb.Close()
|
||||||
}},
|
}},
|
||||||
{"Ent", func() error {
|
{"Ent", func() error {
|
||||||
|
if entClient == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return entClient.Close()
|
return entClient.Close()
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, step := range cleanupSteps {
|
runParallel := func(steps []cleanupStep) {
|
||||||
if err := step.fn(); err != nil {
|
var wg sync.WaitGroup
|
||||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
for i := range steps {
|
||||||
// Continue with remaining cleanup steps even if one fails
|
step := steps[i]
|
||||||
} else {
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
runSequential := func(steps []cleanupStep) {
|
||||||
|
for i := range steps {
|
||||||
|
step := steps[i]
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
log.Printf("[Cleanup] %s succeeded", step.name)
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
runParallel(parallelSteps)
|
||||||
|
runSequential(infraSteps)
|
||||||
|
|
||||||
// Check if context timed out
|
// Check if context timed out
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ import (
|
|||||||
"github.com/redis/go-redis/v9"
|
"github.com/redis/go-redis/v9"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -47,7 +48,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
redisClient := repository.ProvideRedis(configConfig)
|
redisClient := repository.ProvideRedis(configConfig)
|
||||||
refreshTokenCache := repository.NewRefreshTokenCache(redisClient)
|
refreshTokenCache := repository.NewRefreshTokenCache(redisClient)
|
||||||
settingRepository := repository.NewSettingRepository(client)
|
settingRepository := repository.NewSettingRepository(client)
|
||||||
settingService := service.NewSettingService(settingRepository, configConfig)
|
groupRepository := repository.NewGroupRepository(client, db)
|
||||||
|
settingService := service.ProvideSettingService(settingRepository, groupRepository, configConfig)
|
||||||
emailCache := repository.NewEmailCache(redisClient)
|
emailCache := repository.NewEmailCache(redisClient)
|
||||||
emailService := service.NewEmailService(settingRepository, emailCache)
|
emailService := service.NewEmailService(settingRepository, emailCache)
|
||||||
turnstileVerifier := repository.NewTurnstileVerifier()
|
turnstileVerifier := repository.NewTurnstileVerifier()
|
||||||
@@ -56,17 +58,17 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
promoCodeRepository := repository.NewPromoCodeRepository(client)
|
promoCodeRepository := repository.NewPromoCodeRepository(client)
|
||||||
billingCache := repository.NewBillingCache(redisClient)
|
billingCache := repository.NewBillingCache(redisClient)
|
||||||
userSubscriptionRepository := repository.NewUserSubscriptionRepository(client)
|
userSubscriptionRepository := repository.NewUserSubscriptionRepository(client)
|
||||||
billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, configConfig)
|
apiKeyRepository := repository.NewAPIKeyRepository(client, db)
|
||||||
apiKeyRepository := repository.NewAPIKeyRepository(client)
|
billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, apiKeyRepository, configConfig)
|
||||||
groupRepository := repository.NewGroupRepository(client, db)
|
|
||||||
userGroupRateRepository := repository.NewUserGroupRateRepository(db)
|
userGroupRateRepository := repository.NewUserGroupRateRepository(db)
|
||||||
apiKeyCache := repository.NewAPIKeyCache(redisClient)
|
apiKeyCache := repository.NewAPIKeyCache(redisClient)
|
||||||
apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, userGroupRateRepository, apiKeyCache, configConfig)
|
apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, userGroupRateRepository, apiKeyCache, configConfig)
|
||||||
|
apiKeyService.SetRateLimitCacheInvalidator(billingCache)
|
||||||
apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService)
|
apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService)
|
||||||
promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||||
authService := service.NewAuthService(userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService)
|
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService, client, configConfig)
|
||||||
userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator)
|
authService := service.NewAuthService(client, userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService, subscriptionService)
|
||||||
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService)
|
userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator, billingCache)
|
||||||
redeemCache := repository.NewRedeemCache(redisClient)
|
redeemCache := repository.NewRedeemCache(redisClient)
|
||||||
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||||
secretEncryptor, err := repository.NewAESEncryptor(configConfig)
|
secretEncryptor, err := repository.NewAESEncryptor(configConfig)
|
||||||
@@ -79,6 +81,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
userHandler := handler.NewUserHandler(userService)
|
userHandler := handler.NewUserHandler(userService)
|
||||||
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
|
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
|
||||||
usageLogRepository := repository.NewUsageLogRepository(client, db)
|
usageLogRepository := repository.NewUsageLogRepository(client, db)
|
||||||
|
usageBillingRepository := repository.NewUsageBillingRepository(client, db)
|
||||||
usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator)
|
usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator)
|
||||||
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
|
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
|
||||||
redeemHandler := handler.NewRedeemHandler(redeemService)
|
redeemHandler := handler.NewRedeemHandler(redeemService)
|
||||||
@@ -98,24 +101,29 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService)
|
dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService)
|
||||||
schedulerCache := repository.NewSchedulerCache(redisClient)
|
schedulerCache := repository.NewSchedulerCache(redisClient)
|
||||||
accountRepository := repository.NewAccountRepository(client, db, schedulerCache)
|
accountRepository := repository.NewAccountRepository(client, db, schedulerCache)
|
||||||
|
soraAccountRepository := repository.NewSoraAccountRepository(db)
|
||||||
proxyRepository := repository.NewProxyRepository(client, db)
|
proxyRepository := repository.NewProxyRepository(client, db)
|
||||||
proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
|
proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
|
||||||
proxyLatencyCache := repository.NewProxyLatencyCache(redisClient)
|
proxyLatencyCache := repository.NewProxyLatencyCache(redisClient)
|
||||||
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, userGroupRateRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator)
|
privacyClientFactory := providePrivacyClientFactory()
|
||||||
adminUserHandler := admin.NewUserHandler(adminService)
|
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, soraAccountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, userGroupRateRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator, client, settingService, subscriptionService, userSubscriptionRepository, privacyClientFactory)
|
||||||
groupHandler := admin.NewGroupHandler(adminService)
|
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
|
||||||
|
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
|
||||||
|
adminUserHandler := admin.NewUserHandler(adminService, concurrencyService)
|
||||||
claudeOAuthClient := repository.NewClaudeOAuthClient()
|
claudeOAuthClient := repository.NewClaudeOAuthClient()
|
||||||
oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient)
|
oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient)
|
||||||
openAIOAuthClient := repository.NewOpenAIOAuthClient()
|
openAIOAuthClient := repository.NewOpenAIOAuthClient()
|
||||||
openAIOAuthService := service.NewOpenAIOAuthService(proxyRepository, openAIOAuthClient)
|
openAIOAuthService := service.NewOpenAIOAuthService(proxyRepository, openAIOAuthClient)
|
||||||
geminiOAuthClient := repository.NewGeminiOAuthClient(configConfig)
|
geminiOAuthClient := repository.NewGeminiOAuthClient(configConfig)
|
||||||
geminiCliCodeAssistClient := repository.NewGeminiCliCodeAssistClient()
|
geminiCliCodeAssistClient := repository.NewGeminiCliCodeAssistClient()
|
||||||
geminiOAuthService := service.NewGeminiOAuthService(proxyRepository, geminiOAuthClient, geminiCliCodeAssistClient, configConfig)
|
driveClient := repository.NewGeminiDriveClient()
|
||||||
|
geminiOAuthService := service.NewGeminiOAuthService(proxyRepository, geminiOAuthClient, geminiCliCodeAssistClient, driveClient, configConfig)
|
||||||
antigravityOAuthService := service.NewAntigravityOAuthService(proxyRepository)
|
antigravityOAuthService := service.NewAntigravityOAuthService(proxyRepository)
|
||||||
geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository)
|
geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository)
|
||||||
tempUnschedCache := repository.NewTempUnschedCache(redisClient)
|
tempUnschedCache := repository.NewTempUnschedCache(redisClient)
|
||||||
timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient)
|
timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient)
|
||||||
geminiTokenCache := repository.NewGeminiTokenCache(redisClient)
|
geminiTokenCache := repository.NewGeminiTokenCache(redisClient)
|
||||||
|
oauthRefreshAPI := service.NewOAuthRefreshAPI(accountRepository, geminiTokenCache)
|
||||||
compositeTokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache)
|
compositeTokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache)
|
||||||
rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService, compositeTokenCacheInvalidator)
|
rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService, compositeTokenCacheInvalidator)
|
||||||
httpUpstream := repository.NewHTTPUpstream(configConfig)
|
httpUpstream := repository.NewHTTPUpstream(configConfig)
|
||||||
@@ -124,27 +132,34 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
usageCache := service.NewUsageCache()
|
usageCache := service.NewUsageCache()
|
||||||
identityCache := repository.NewIdentityCache(redisClient)
|
identityCache := repository.NewIdentityCache(redisClient)
|
||||||
accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache, identityCache)
|
accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache, identityCache)
|
||||||
geminiTokenProvider := service.NewGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService)
|
geminiTokenProvider := service.ProvideGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService, oauthRefreshAPI)
|
||||||
gatewayCache := repository.NewGatewayCache(redisClient)
|
gatewayCache := repository.NewGatewayCache(redisClient)
|
||||||
antigravityTokenProvider := service.NewAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService)
|
schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db)
|
||||||
antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, antigravityTokenProvider, rateLimitService, httpUpstream, settingService)
|
schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig)
|
||||||
|
antigravityTokenProvider := service.ProvideAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService, oauthRefreshAPI)
|
||||||
|
antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, schedulerSnapshotService, antigravityTokenProvider, rateLimitService, httpUpstream, settingService)
|
||||||
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig)
|
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig)
|
||||||
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
|
|
||||||
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
|
|
||||||
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
|
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
|
||||||
sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig)
|
sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig)
|
||||||
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, compositeTokenCacheInvalidator)
|
rpmCache := repository.NewRPMCache(redisClient)
|
||||||
|
groupCapacityService := service.NewGroupCapacityService(accountRepository, groupRepository, concurrencyService, sessionLimitCache, rpmCache)
|
||||||
|
groupHandler := admin.NewGroupHandler(adminService, dashboardService, groupCapacityService)
|
||||||
|
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, rpmCache, compositeTokenCacheInvalidator)
|
||||||
adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService)
|
adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService)
|
||||||
|
dataManagementService := service.NewDataManagementService()
|
||||||
|
dataManagementHandler := admin.NewDataManagementHandler(dataManagementService)
|
||||||
|
backupObjectStoreFactory := repository.NewS3BackupStoreFactory()
|
||||||
|
dbDumper := repository.NewPgDumper(configConfig)
|
||||||
|
backupService := service.ProvideBackupService(settingRepository, configConfig, secretEncryptor, backupObjectStoreFactory, dbDumper)
|
||||||
|
backupHandler := admin.NewBackupHandler(backupService, userService)
|
||||||
oAuthHandler := admin.NewOAuthHandler(oAuthService)
|
oAuthHandler := admin.NewOAuthHandler(oAuthService)
|
||||||
openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService)
|
openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService)
|
||||||
geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService)
|
geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService)
|
||||||
antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService)
|
antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService)
|
||||||
proxyHandler := admin.NewProxyHandler(adminService)
|
proxyHandler := admin.NewProxyHandler(adminService)
|
||||||
adminRedeemHandler := admin.NewRedeemHandler(adminService)
|
adminRedeemHandler := admin.NewRedeemHandler(adminService, redeemService)
|
||||||
promoHandler := admin.NewPromoHandler(promoService)
|
promoHandler := admin.NewPromoHandler(promoService)
|
||||||
opsRepository := repository.NewOpsRepository(db)
|
opsRepository := repository.NewOpsRepository(db)
|
||||||
schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db)
|
|
||||||
schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig)
|
|
||||||
pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig)
|
pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig)
|
||||||
pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
|
pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -153,19 +168,28 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
billingService := service.NewBillingService(configConfig, pricingService)
|
billingService := service.NewBillingService(configConfig, pricingService)
|
||||||
identityService := service.NewIdentityService(identityCache)
|
identityService := service.NewIdentityService(identityCache)
|
||||||
deferredService := service.ProvideDeferredService(accountRepository, timingWheelService)
|
deferredService := service.ProvideDeferredService(accountRepository, timingWheelService)
|
||||||
claudeTokenProvider := service.NewClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService)
|
claudeTokenProvider := service.ProvideClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService, oauthRefreshAPI)
|
||||||
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache)
|
digestSessionStore := service.NewDigestSessionStore()
|
||||||
openAITokenProvider := service.NewOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService)
|
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache, rpmCache, digestSessionStore, settingService)
|
||||||
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider)
|
openAITokenProvider := service.ProvideOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService, oauthRefreshAPI)
|
||||||
|
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider)
|
||||||
geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig)
|
geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig)
|
||||||
opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService)
|
opsSystemLogSink := service.ProvideOpsSystemLogSink(opsRepository)
|
||||||
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService)
|
opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, userRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService, opsSystemLogSink)
|
||||||
|
soraS3Storage := service.NewSoraS3Storage(settingService)
|
||||||
|
settingService.SetOnS3UpdateCallback(soraS3Storage.RefreshClient)
|
||||||
|
soraGenerationRepository := repository.NewSoraGenerationRepository(db)
|
||||||
|
soraQuotaService := service.NewSoraQuotaService(userRepository, groupRepository, settingService)
|
||||||
|
soraGenerationService := service.NewSoraGenerationService(soraGenerationRepository, soraS3Storage, soraQuotaService)
|
||||||
|
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService, soraS3Storage)
|
||||||
opsHandler := admin.NewOpsHandler(opsService)
|
opsHandler := admin.NewOpsHandler(opsService)
|
||||||
updateCache := repository.NewUpdateCache(redisClient)
|
updateCache := repository.NewUpdateCache(redisClient)
|
||||||
gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig)
|
gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig)
|
||||||
serviceBuildInfo := provideServiceBuildInfo(buildInfo)
|
serviceBuildInfo := provideServiceBuildInfo(buildInfo)
|
||||||
updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo)
|
updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo)
|
||||||
systemHandler := handler.ProvideSystemHandler(updateService)
|
idempotencyRepository := repository.NewIdempotencyRepository(client, db)
|
||||||
|
systemOperationLockService := service.ProvideSystemOperationLockService(idempotencyRepository, configConfig)
|
||||||
|
systemHandler := handler.ProvideSystemHandler(updateService, systemOperationLockService)
|
||||||
adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService)
|
adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService)
|
||||||
usageCleanupRepository := repository.NewUsageCleanupRepository(client, db)
|
usageCleanupRepository := repository.NewUsageCleanupRepository(client, db)
|
||||||
usageCleanupService := service.ProvideUsageCleanupService(usageCleanupRepository, timingWheelService, dashboardAggregationService, configConfig)
|
usageCleanupService := service.ProvideUsageCleanupService(usageCleanupRepository, timingWheelService, dashboardAggregationService, configConfig)
|
||||||
@@ -178,12 +202,27 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
errorPassthroughCache := repository.NewErrorPassthroughCache(redisClient)
|
errorPassthroughCache := repository.NewErrorPassthroughCache(redisClient)
|
||||||
errorPassthroughService := service.NewErrorPassthroughService(errorPassthroughRepository, errorPassthroughCache)
|
errorPassthroughService := service.NewErrorPassthroughService(errorPassthroughRepository, errorPassthroughCache)
|
||||||
errorPassthroughHandler := admin.NewErrorPassthroughHandler(errorPassthroughService)
|
errorPassthroughHandler := admin.NewErrorPassthroughHandler(errorPassthroughService)
|
||||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler)
|
adminAPIKeyHandler := admin.NewAdminAPIKeyHandler(adminService)
|
||||||
gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, apiKeyService, errorPassthroughService, configConfig)
|
scheduledTestPlanRepository := repository.NewScheduledTestPlanRepository(db)
|
||||||
openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, apiKeyService, errorPassthroughService, configConfig)
|
scheduledTestResultRepository := repository.NewScheduledTestResultRepository(db)
|
||||||
|
scheduledTestService := service.ProvideScheduledTestService(scheduledTestPlanRepository, scheduledTestResultRepository)
|
||||||
|
scheduledTestHandler := admin.NewScheduledTestHandler(scheduledTestService)
|
||||||
|
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, adminAPIKeyHandler, scheduledTestHandler)
|
||||||
|
usageRecordWorkerPool := service.NewUsageRecordWorkerPool(configConfig)
|
||||||
|
userMsgQueueCache := repository.NewUserMsgQueueCache(redisClient)
|
||||||
|
userMessageQueueService := service.ProvideUserMessageQueueService(userMsgQueueCache, rpmCache, configConfig)
|
||||||
|
gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, userMessageQueueService, configConfig, settingService)
|
||||||
|
openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, configConfig)
|
||||||
|
soraSDKClient := service.ProvideSoraSDKClient(configConfig, httpUpstream, openAITokenProvider, accountRepository, soraAccountRepository)
|
||||||
|
soraMediaStorage := service.ProvideSoraMediaStorage(configConfig)
|
||||||
|
soraGatewayService := service.NewSoraGatewayService(soraSDKClient, rateLimitService, httpUpstream, configConfig)
|
||||||
|
soraClientHandler := handler.NewSoraClientHandler(soraGenerationService, soraQuotaService, soraS3Storage, soraGatewayService, gatewayService, soraMediaStorage, apiKeyService)
|
||||||
|
soraGatewayHandler := handler.NewSoraGatewayHandler(gatewayService, soraGatewayService, concurrencyService, billingCacheService, usageRecordWorkerPool, configConfig)
|
||||||
handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
|
handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
|
||||||
totpHandler := handler.NewTotpHandler(totpService)
|
totpHandler := handler.NewTotpHandler(totpService)
|
||||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler)
|
idempotencyCoordinator := service.ProvideIdempotencyCoordinator(idempotencyRepository, configConfig)
|
||||||
|
idempotencyCleanupService := service.ProvideIdempotencyCleanupService(idempotencyRepository, configConfig)
|
||||||
|
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, soraGatewayHandler, soraClientHandler, handlerSettingHandler, totpHandler, idempotencyCoordinator, idempotencyCleanupService)
|
||||||
jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
|
jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
|
||||||
adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
|
adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
|
||||||
apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
|
apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
|
||||||
@@ -194,10 +233,12 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
|
opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
|
||||||
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig)
|
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig)
|
||||||
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
|
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
|
||||||
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig)
|
soraMediaCleanupService := service.ProvideSoraMediaCleanupService(soraMediaStorage, configConfig)
|
||||||
|
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, soraAccountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache, privacyClientFactory, proxyRepository, oauthRefreshAPI)
|
||||||
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
|
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
|
||||||
subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository)
|
subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository)
|
||||||
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService)
|
scheduledTestRunnerService := service.ProvideScheduledTestRunnerService(scheduledTestPlanRepository, scheduledTestService, accountTestService, rateLimitService, configConfig)
|
||||||
|
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService, backupService)
|
||||||
application := &Application{
|
application := &Application{
|
||||||
Server: httpServer,
|
Server: httpServer,
|
||||||
Cleanup: v,
|
Cleanup: v,
|
||||||
@@ -212,6 +253,10 @@ type Application struct {
|
|||||||
Cleanup func()
|
Cleanup func()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func providePrivacyClientFactory() service.PrivacyClientFactory {
|
||||||
|
return repository.CreatePrivacyReqClient
|
||||||
|
}
|
||||||
|
|
||||||
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
||||||
return service.BuildInfo{
|
return service.BuildInfo{
|
||||||
Version: buildInfo.Version,
|
Version: buildInfo.Version,
|
||||||
@@ -227,27 +272,37 @@ func provideCleanup(
|
|||||||
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
||||||
opsCleanup *service.OpsCleanupService,
|
opsCleanup *service.OpsCleanupService,
|
||||||
opsScheduledReport *service.OpsScheduledReportService,
|
opsScheduledReport *service.OpsScheduledReportService,
|
||||||
|
opsSystemLogSink *service.OpsSystemLogSink,
|
||||||
|
soraMediaCleanup *service.SoraMediaCleanupService,
|
||||||
schedulerSnapshot *service.SchedulerSnapshotService,
|
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||||
tokenRefresh *service.TokenRefreshService,
|
tokenRefresh *service.TokenRefreshService,
|
||||||
accountExpiry *service.AccountExpiryService,
|
accountExpiry *service.AccountExpiryService,
|
||||||
subscriptionExpiry *service.SubscriptionExpiryService,
|
subscriptionExpiry *service.SubscriptionExpiryService,
|
||||||
usageCleanup *service.UsageCleanupService,
|
usageCleanup *service.UsageCleanupService,
|
||||||
|
idempotencyCleanup *service.IdempotencyCleanupService,
|
||||||
pricing *service.PricingService,
|
pricing *service.PricingService,
|
||||||
emailQueue *service.EmailQueueService,
|
emailQueue *service.EmailQueueService,
|
||||||
billingCache *service.BillingCacheService,
|
billingCache *service.BillingCacheService,
|
||||||
|
usageRecordWorkerPool *service.UsageRecordWorkerPool,
|
||||||
|
subscriptionService *service.SubscriptionService,
|
||||||
oauth *service.OAuthService,
|
oauth *service.OAuthService,
|
||||||
openaiOAuth *service.OpenAIOAuthService,
|
openaiOAuth *service.OpenAIOAuthService,
|
||||||
geminiOAuth *service.GeminiOAuthService,
|
geminiOAuth *service.GeminiOAuthService,
|
||||||
antigravityOAuth *service.AntigravityOAuthService,
|
antigravityOAuth *service.AntigravityOAuthService,
|
||||||
|
openAIGateway *service.OpenAIGatewayService,
|
||||||
|
scheduledTestRunner *service.ScheduledTestRunnerService,
|
||||||
|
backupSvc *service.BackupService,
|
||||||
) func() {
|
) func() {
|
||||||
return func() {
|
return func() {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
cleanupSteps := []struct {
|
type cleanupStep struct {
|
||||||
name string
|
name string
|
||||||
fn func() error
|
fn func() error
|
||||||
}{
|
}
|
||||||
|
|
||||||
|
parallelSteps := []cleanupStep{
|
||||||
{"OpsScheduledReportService", func() error {
|
{"OpsScheduledReportService", func() error {
|
||||||
if opsScheduledReport != nil {
|
if opsScheduledReport != nil {
|
||||||
opsScheduledReport.Stop()
|
opsScheduledReport.Stop()
|
||||||
@@ -260,6 +315,18 @@ func provideCleanup(
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"OpsSystemLogSink", func() error {
|
||||||
|
if opsSystemLogSink != nil {
|
||||||
|
opsSystemLogSink.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SoraMediaCleanupService", func() error {
|
||||||
|
if soraMediaCleanup != nil {
|
||||||
|
soraMediaCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"OpsAlertEvaluatorService", func() error {
|
{"OpsAlertEvaluatorService", func() error {
|
||||||
if opsAlertEvaluator != nil {
|
if opsAlertEvaluator != nil {
|
||||||
opsAlertEvaluator.Stop()
|
opsAlertEvaluator.Stop()
|
||||||
@@ -290,6 +357,12 @@ func provideCleanup(
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"IdempotencyCleanupService", func() error {
|
||||||
|
if idempotencyCleanup != nil {
|
||||||
|
idempotencyCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"TokenRefreshService", func() error {
|
{"TokenRefreshService", func() error {
|
||||||
tokenRefresh.Stop()
|
tokenRefresh.Stop()
|
||||||
return nil
|
return nil
|
||||||
@@ -302,6 +375,12 @@ func provideCleanup(
|
|||||||
subscriptionExpiry.Stop()
|
subscriptionExpiry.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"SubscriptionService", func() error {
|
||||||
|
if subscriptionService != nil {
|
||||||
|
subscriptionService.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"PricingService", func() error {
|
{"PricingService", func() error {
|
||||||
pricing.Stop()
|
pricing.Stop()
|
||||||
return nil
|
return nil
|
||||||
@@ -314,6 +393,12 @@ func provideCleanup(
|
|||||||
billingCache.Stop()
|
billingCache.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"UsageRecordWorkerPool", func() error {
|
||||||
|
if usageRecordWorkerPool != nil {
|
||||||
|
usageRecordWorkerPool.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"OAuthService", func() error {
|
{"OAuthService", func() error {
|
||||||
oauth.Stop()
|
oauth.Stop()
|
||||||
return nil
|
return nil
|
||||||
@@ -330,23 +415,72 @@ func provideCleanup(
|
|||||||
antigravityOAuth.Stop()
|
antigravityOAuth.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
|
{"OpenAIWSPool", func() error {
|
||||||
|
if openAIGateway != nil {
|
||||||
|
openAIGateway.CloseOpenAIWSPool()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"ScheduledTestRunnerService", func() error {
|
||||||
|
if scheduledTestRunner != nil {
|
||||||
|
scheduledTestRunner.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"BackupService", func() error {
|
||||||
|
if backupSvc != nil {
|
||||||
|
backupSvc.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
infraSteps := []cleanupStep{
|
||||||
{"Redis", func() error {
|
{"Redis", func() error {
|
||||||
|
if rdb == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return rdb.Close()
|
return rdb.Close()
|
||||||
}},
|
}},
|
||||||
{"Ent", func() error {
|
{"Ent", func() error {
|
||||||
|
if entClient == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return entClient.Close()
|
return entClient.Close()
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, step := range cleanupSteps {
|
runParallel := func(steps []cleanupStep) {
|
||||||
if err := step.fn(); err != nil {
|
var wg sync.WaitGroup
|
||||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
for i := range steps {
|
||||||
|
step := steps[i]
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
} else {
|
runSequential := func(steps []cleanupStep) {
|
||||||
|
for i := range steps {
|
||||||
|
step := steps[i]
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
log.Printf("[Cleanup] %s succeeded", step.name)
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
runParallel(parallelSteps)
|
||||||
|
runSequential(infraSteps)
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds")
|
log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds")
|
||||||
|
|||||||
84
backend/cmd/server/wire_gen_test.go
Normal file
84
backend/cmd/server/wire_gen_test.go
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProvideServiceBuildInfo(t *testing.T) {
|
||||||
|
in := handler.BuildInfo{
|
||||||
|
Version: "v-test",
|
||||||
|
BuildType: "release",
|
||||||
|
}
|
||||||
|
out := provideServiceBuildInfo(in)
|
||||||
|
require.Equal(t, in.Version, out.Version)
|
||||||
|
require.Equal(t, in.BuildType, out.BuildType)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProvideCleanup_WithMinimalDependencies_NoPanic(t *testing.T) {
|
||||||
|
cfg := &config.Config{}
|
||||||
|
|
||||||
|
oauthSvc := service.NewOAuthService(nil, nil)
|
||||||
|
openAIOAuthSvc := service.NewOpenAIOAuthService(nil, nil)
|
||||||
|
geminiOAuthSvc := service.NewGeminiOAuthService(nil, nil, nil, nil, cfg)
|
||||||
|
antigravityOAuthSvc := service.NewAntigravityOAuthService(nil)
|
||||||
|
|
||||||
|
tokenRefreshSvc := service.NewTokenRefreshService(
|
||||||
|
nil,
|
||||||
|
oauthSvc,
|
||||||
|
openAIOAuthSvc,
|
||||||
|
geminiOAuthSvc,
|
||||||
|
antigravityOAuthSvc,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
cfg,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
accountExpirySvc := service.NewAccountExpiryService(nil, time.Second)
|
||||||
|
subscriptionExpirySvc := service.NewSubscriptionExpiryService(nil, time.Second)
|
||||||
|
pricingSvc := service.NewPricingService(cfg, nil)
|
||||||
|
emailQueueSvc := service.NewEmailQueueService(nil, 1)
|
||||||
|
billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, nil, cfg)
|
||||||
|
idempotencyCleanupSvc := service.NewIdempotencyCleanupService(nil, cfg)
|
||||||
|
schedulerSnapshotSvc := service.NewSchedulerSnapshotService(nil, nil, nil, nil, cfg)
|
||||||
|
opsSystemLogSinkSvc := service.NewOpsSystemLogSink(nil)
|
||||||
|
|
||||||
|
cleanup := provideCleanup(
|
||||||
|
nil, // entClient
|
||||||
|
nil, // redis
|
||||||
|
&service.OpsMetricsCollector{},
|
||||||
|
&service.OpsAggregationService{},
|
||||||
|
&service.OpsAlertEvaluatorService{},
|
||||||
|
&service.OpsCleanupService{},
|
||||||
|
&service.OpsScheduledReportService{},
|
||||||
|
opsSystemLogSinkSvc,
|
||||||
|
&service.SoraMediaCleanupService{},
|
||||||
|
schedulerSnapshotSvc,
|
||||||
|
tokenRefreshSvc,
|
||||||
|
accountExpirySvc,
|
||||||
|
subscriptionExpirySvc,
|
||||||
|
&service.UsageCleanupService{},
|
||||||
|
idempotencyCleanupSvc,
|
||||||
|
pricingSvc,
|
||||||
|
emailQueueSvc,
|
||||||
|
billingCacheSvc,
|
||||||
|
&service.UsageRecordWorkerPool{},
|
||||||
|
&service.SubscriptionService{},
|
||||||
|
oauthSvc,
|
||||||
|
openAIOAuthSvc,
|
||||||
|
geminiOAuthSvc,
|
||||||
|
antigravityOAuthSvc,
|
||||||
|
nil, // openAIGateway
|
||||||
|
nil, // scheduledTestRunner
|
||||||
|
nil, // backupSvc
|
||||||
|
)
|
||||||
|
|
||||||
|
require.NotPanics(t, func() {
|
||||||
|
cleanup()
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -41,6 +41,8 @@ type Account struct {
|
|||||||
ProxyID *int64 `json:"proxy_id,omitempty"`
|
ProxyID *int64 `json:"proxy_id,omitempty"`
|
||||||
// Concurrency holds the value of the "concurrency" field.
|
// Concurrency holds the value of the "concurrency" field.
|
||||||
Concurrency int `json:"concurrency,omitempty"`
|
Concurrency int `json:"concurrency,omitempty"`
|
||||||
|
// LoadFactor holds the value of the "load_factor" field.
|
||||||
|
LoadFactor *int `json:"load_factor,omitempty"`
|
||||||
// Priority holds the value of the "priority" field.
|
// Priority holds the value of the "priority" field.
|
||||||
Priority int `json:"priority,omitempty"`
|
Priority int `json:"priority,omitempty"`
|
||||||
// RateMultiplier holds the value of the "rate_multiplier" field.
|
// RateMultiplier holds the value of the "rate_multiplier" field.
|
||||||
@@ -63,6 +65,10 @@ type Account struct {
|
|||||||
RateLimitResetAt *time.Time `json:"rate_limit_reset_at,omitempty"`
|
RateLimitResetAt *time.Time `json:"rate_limit_reset_at,omitempty"`
|
||||||
// OverloadUntil holds the value of the "overload_until" field.
|
// OverloadUntil holds the value of the "overload_until" field.
|
||||||
OverloadUntil *time.Time `json:"overload_until,omitempty"`
|
OverloadUntil *time.Time `json:"overload_until,omitempty"`
|
||||||
|
// TempUnschedulableUntil holds the value of the "temp_unschedulable_until" field.
|
||||||
|
TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"`
|
||||||
|
// TempUnschedulableReason holds the value of the "temp_unschedulable_reason" field.
|
||||||
|
TempUnschedulableReason *string `json:"temp_unschedulable_reason,omitempty"`
|
||||||
// SessionWindowStart holds the value of the "session_window_start" field.
|
// SessionWindowStart holds the value of the "session_window_start" field.
|
||||||
SessionWindowStart *time.Time `json:"session_window_start,omitempty"`
|
SessionWindowStart *time.Time `json:"session_window_start,omitempty"`
|
||||||
// SessionWindowEnd holds the value of the "session_window_end" field.
|
// SessionWindowEnd holds the value of the "session_window_end" field.
|
||||||
@@ -139,11 +145,11 @@ func (*Account) scanValues(columns []string) ([]any, error) {
|
|||||||
values[i] = new(sql.NullBool)
|
values[i] = new(sql.NullBool)
|
||||||
case account.FieldRateMultiplier:
|
case account.FieldRateMultiplier:
|
||||||
values[i] = new(sql.NullFloat64)
|
values[i] = new(sql.NullFloat64)
|
||||||
case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldPriority:
|
case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldLoadFactor, account.FieldPriority:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldSessionWindowStatus:
|
case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldTempUnschedulableReason, account.FieldSessionWindowStatus:
|
||||||
values[i] = new(sql.NullString)
|
values[i] = new(sql.NullString)
|
||||||
case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd:
|
case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldTempUnschedulableUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd:
|
||||||
values[i] = new(sql.NullTime)
|
values[i] = new(sql.NullTime)
|
||||||
default:
|
default:
|
||||||
values[i] = new(sql.UnknownType)
|
values[i] = new(sql.UnknownType)
|
||||||
@@ -239,6 +245,13 @@ func (_m *Account) assignValues(columns []string, values []any) error {
|
|||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Concurrency = int(value.Int64)
|
_m.Concurrency = int(value.Int64)
|
||||||
}
|
}
|
||||||
|
case account.FieldLoadFactor:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field load_factor", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.LoadFactor = new(int)
|
||||||
|
*_m.LoadFactor = int(value.Int64)
|
||||||
|
}
|
||||||
case account.FieldPriority:
|
case account.FieldPriority:
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field priority", values[i])
|
return fmt.Errorf("unexpected type %T for field priority", values[i])
|
||||||
@@ -311,6 +324,20 @@ func (_m *Account) assignValues(columns []string, values []any) error {
|
|||||||
_m.OverloadUntil = new(time.Time)
|
_m.OverloadUntil = new(time.Time)
|
||||||
*_m.OverloadUntil = value.Time
|
*_m.OverloadUntil = value.Time
|
||||||
}
|
}
|
||||||
|
case account.FieldTempUnschedulableUntil:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field temp_unschedulable_until", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.TempUnschedulableUntil = new(time.Time)
|
||||||
|
*_m.TempUnschedulableUntil = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldTempUnschedulableReason:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field temp_unschedulable_reason", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.TempUnschedulableReason = new(string)
|
||||||
|
*_m.TempUnschedulableReason = value.String
|
||||||
|
}
|
||||||
case account.FieldSessionWindowStart:
|
case account.FieldSessionWindowStart:
|
||||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field session_window_start", values[i])
|
return fmt.Errorf("unexpected type %T for field session_window_start", values[i])
|
||||||
@@ -427,6 +454,11 @@ func (_m *Account) String() string {
|
|||||||
builder.WriteString("concurrency=")
|
builder.WriteString("concurrency=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.Concurrency))
|
builder.WriteString(fmt.Sprintf("%v", _m.Concurrency))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
if v := _m.LoadFactor; v != nil {
|
||||||
|
builder.WriteString("load_factor=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
builder.WriteString("priority=")
|
builder.WriteString("priority=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.Priority))
|
builder.WriteString(fmt.Sprintf("%v", _m.Priority))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
@@ -472,6 +504,16 @@ func (_m *Account) String() string {
|
|||||||
builder.WriteString(v.Format(time.ANSIC))
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
if v := _m.TempUnschedulableUntil; v != nil {
|
||||||
|
builder.WriteString("temp_unschedulable_until=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.TempUnschedulableReason; v != nil {
|
||||||
|
builder.WriteString("temp_unschedulable_reason=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
if v := _m.SessionWindowStart; v != nil {
|
if v := _m.SessionWindowStart; v != nil {
|
||||||
builder.WriteString("session_window_start=")
|
builder.WriteString("session_window_start=")
|
||||||
builder.WriteString(v.Format(time.ANSIC))
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
|||||||
@@ -37,6 +37,8 @@ const (
|
|||||||
FieldProxyID = "proxy_id"
|
FieldProxyID = "proxy_id"
|
||||||
// FieldConcurrency holds the string denoting the concurrency field in the database.
|
// FieldConcurrency holds the string denoting the concurrency field in the database.
|
||||||
FieldConcurrency = "concurrency"
|
FieldConcurrency = "concurrency"
|
||||||
|
// FieldLoadFactor holds the string denoting the load_factor field in the database.
|
||||||
|
FieldLoadFactor = "load_factor"
|
||||||
// FieldPriority holds the string denoting the priority field in the database.
|
// FieldPriority holds the string denoting the priority field in the database.
|
||||||
FieldPriority = "priority"
|
FieldPriority = "priority"
|
||||||
// FieldRateMultiplier holds the string denoting the rate_multiplier field in the database.
|
// FieldRateMultiplier holds the string denoting the rate_multiplier field in the database.
|
||||||
@@ -59,6 +61,10 @@ const (
|
|||||||
FieldRateLimitResetAt = "rate_limit_reset_at"
|
FieldRateLimitResetAt = "rate_limit_reset_at"
|
||||||
// FieldOverloadUntil holds the string denoting the overload_until field in the database.
|
// FieldOverloadUntil holds the string denoting the overload_until field in the database.
|
||||||
FieldOverloadUntil = "overload_until"
|
FieldOverloadUntil = "overload_until"
|
||||||
|
// FieldTempUnschedulableUntil holds the string denoting the temp_unschedulable_until field in the database.
|
||||||
|
FieldTempUnschedulableUntil = "temp_unschedulable_until"
|
||||||
|
// FieldTempUnschedulableReason holds the string denoting the temp_unschedulable_reason field in the database.
|
||||||
|
FieldTempUnschedulableReason = "temp_unschedulable_reason"
|
||||||
// FieldSessionWindowStart holds the string denoting the session_window_start field in the database.
|
// FieldSessionWindowStart holds the string denoting the session_window_start field in the database.
|
||||||
FieldSessionWindowStart = "session_window_start"
|
FieldSessionWindowStart = "session_window_start"
|
||||||
// FieldSessionWindowEnd holds the string denoting the session_window_end field in the database.
|
// FieldSessionWindowEnd holds the string denoting the session_window_end field in the database.
|
||||||
@@ -117,6 +123,7 @@ var Columns = []string{
|
|||||||
FieldExtra,
|
FieldExtra,
|
||||||
FieldProxyID,
|
FieldProxyID,
|
||||||
FieldConcurrency,
|
FieldConcurrency,
|
||||||
|
FieldLoadFactor,
|
||||||
FieldPriority,
|
FieldPriority,
|
||||||
FieldRateMultiplier,
|
FieldRateMultiplier,
|
||||||
FieldStatus,
|
FieldStatus,
|
||||||
@@ -128,6 +135,8 @@ var Columns = []string{
|
|||||||
FieldRateLimitedAt,
|
FieldRateLimitedAt,
|
||||||
FieldRateLimitResetAt,
|
FieldRateLimitResetAt,
|
||||||
FieldOverloadUntil,
|
FieldOverloadUntil,
|
||||||
|
FieldTempUnschedulableUntil,
|
||||||
|
FieldTempUnschedulableReason,
|
||||||
FieldSessionWindowStart,
|
FieldSessionWindowStart,
|
||||||
FieldSessionWindowEnd,
|
FieldSessionWindowEnd,
|
||||||
FieldSessionWindowStatus,
|
FieldSessionWindowStatus,
|
||||||
@@ -244,6 +253,11 @@ func ByConcurrency(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldConcurrency, opts...).ToFunc()
|
return sql.OrderByField(FieldConcurrency, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByLoadFactor orders the results by the load_factor field.
|
||||||
|
func ByLoadFactor(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldLoadFactor, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByPriority orders the results by the priority field.
|
// ByPriority orders the results by the priority field.
|
||||||
func ByPriority(opts ...sql.OrderTermOption) OrderOption {
|
func ByPriority(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldPriority, opts...).ToFunc()
|
return sql.OrderByField(FieldPriority, opts...).ToFunc()
|
||||||
@@ -299,6 +313,16 @@ func ByOverloadUntil(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldOverloadUntil, opts...).ToFunc()
|
return sql.OrderByField(FieldOverloadUntil, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByTempUnschedulableUntil orders the results by the temp_unschedulable_until field.
|
||||||
|
func ByTempUnschedulableUntil(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldTempUnschedulableUntil, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByTempUnschedulableReason orders the results by the temp_unschedulable_reason field.
|
||||||
|
func ByTempUnschedulableReason(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldTempUnschedulableReason, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// BySessionWindowStart orders the results by the session_window_start field.
|
// BySessionWindowStart orders the results by the session_window_start field.
|
||||||
func BySessionWindowStart(opts ...sql.OrderTermOption) OrderOption {
|
func BySessionWindowStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldSessionWindowStart, opts...).ToFunc()
|
return sql.OrderByField(FieldSessionWindowStart, opts...).ToFunc()
|
||||||
|
|||||||
@@ -100,6 +100,11 @@ func Concurrency(v int) predicate.Account {
|
|||||||
return predicate.Account(sql.FieldEQ(FieldConcurrency, v))
|
return predicate.Account(sql.FieldEQ(FieldConcurrency, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LoadFactor applies equality check predicate on the "load_factor" field. It's identical to LoadFactorEQ.
|
||||||
|
func LoadFactor(v int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldEQ(FieldLoadFactor, v))
|
||||||
|
}
|
||||||
|
|
||||||
// Priority applies equality check predicate on the "priority" field. It's identical to PriorityEQ.
|
// Priority applies equality check predicate on the "priority" field. It's identical to PriorityEQ.
|
||||||
func Priority(v int) predicate.Account {
|
func Priority(v int) predicate.Account {
|
||||||
return predicate.Account(sql.FieldEQ(FieldPriority, v))
|
return predicate.Account(sql.FieldEQ(FieldPriority, v))
|
||||||
@@ -155,6 +160,16 @@ func OverloadUntil(v time.Time) predicate.Account {
|
|||||||
return predicate.Account(sql.FieldEQ(FieldOverloadUntil, v))
|
return predicate.Account(sql.FieldEQ(FieldOverloadUntil, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntil applies equality check predicate on the "temp_unschedulable_until" field. It's identical to TempUnschedulableUntilEQ.
|
||||||
|
func TempUnschedulableUntil(v time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldEQ(FieldTempUnschedulableUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReason applies equality check predicate on the "temp_unschedulable_reason" field. It's identical to TempUnschedulableReasonEQ.
|
||||||
|
func TempUnschedulableReason(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldEQ(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
// SessionWindowStart applies equality check predicate on the "session_window_start" field. It's identical to SessionWindowStartEQ.
|
// SessionWindowStart applies equality check predicate on the "session_window_start" field. It's identical to SessionWindowStartEQ.
|
||||||
func SessionWindowStart(v time.Time) predicate.Account {
|
func SessionWindowStart(v time.Time) predicate.Account {
|
||||||
return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v))
|
return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v))
|
||||||
@@ -640,6 +655,56 @@ func ConcurrencyLTE(v int) predicate.Account {
|
|||||||
return predicate.Account(sql.FieldLTE(FieldConcurrency, v))
|
return predicate.Account(sql.FieldLTE(FieldConcurrency, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LoadFactorEQ applies the EQ predicate on the "load_factor" field.
|
||||||
|
func LoadFactorEQ(v int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldEQ(FieldLoadFactor, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorNEQ applies the NEQ predicate on the "load_factor" field.
|
||||||
|
func LoadFactorNEQ(v int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNEQ(FieldLoadFactor, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorIn applies the In predicate on the "load_factor" field.
|
||||||
|
func LoadFactorIn(vs ...int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldIn(FieldLoadFactor, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorNotIn applies the NotIn predicate on the "load_factor" field.
|
||||||
|
func LoadFactorNotIn(vs ...int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNotIn(FieldLoadFactor, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorGT applies the GT predicate on the "load_factor" field.
|
||||||
|
func LoadFactorGT(v int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldGT(FieldLoadFactor, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorGTE applies the GTE predicate on the "load_factor" field.
|
||||||
|
func LoadFactorGTE(v int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldGTE(FieldLoadFactor, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorLT applies the LT predicate on the "load_factor" field.
|
||||||
|
func LoadFactorLT(v int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldLT(FieldLoadFactor, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorLTE applies the LTE predicate on the "load_factor" field.
|
||||||
|
func LoadFactorLTE(v int) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldLTE(FieldLoadFactor, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorIsNil applies the IsNil predicate on the "load_factor" field.
|
||||||
|
func LoadFactorIsNil() predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldIsNull(FieldLoadFactor))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFactorNotNil applies the NotNil predicate on the "load_factor" field.
|
||||||
|
func LoadFactorNotNil() predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNotNull(FieldLoadFactor))
|
||||||
|
}
|
||||||
|
|
||||||
// PriorityEQ applies the EQ predicate on the "priority" field.
|
// PriorityEQ applies the EQ predicate on the "priority" field.
|
||||||
func PriorityEQ(v int) predicate.Account {
|
func PriorityEQ(v int) predicate.Account {
|
||||||
return predicate.Account(sql.FieldEQ(FieldPriority, v))
|
return predicate.Account(sql.FieldEQ(FieldPriority, v))
|
||||||
@@ -1130,6 +1195,131 @@ func OverloadUntilNotNil() predicate.Account {
|
|||||||
return predicate.Account(sql.FieldNotNull(FieldOverloadUntil))
|
return predicate.Account(sql.FieldNotNull(FieldOverloadUntil))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilEQ applies the EQ predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilEQ(v time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldEQ(FieldTempUnschedulableUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilNEQ applies the NEQ predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilNEQ(v time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNEQ(FieldTempUnschedulableUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilIn applies the In predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilIn(vs ...time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldIn(FieldTempUnschedulableUntil, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilNotIn applies the NotIn predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilNotIn(vs ...time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNotIn(FieldTempUnschedulableUntil, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilGT applies the GT predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilGT(v time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldGT(FieldTempUnschedulableUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilGTE applies the GTE predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilGTE(v time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldGTE(FieldTempUnschedulableUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilLT applies the LT predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilLT(v time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldLT(FieldTempUnschedulableUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilLTE applies the LTE predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilLTE(v time.Time) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldLTE(FieldTempUnschedulableUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilIsNil applies the IsNil predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilIsNil() predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldIsNull(FieldTempUnschedulableUntil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableUntilNotNil applies the NotNil predicate on the "temp_unschedulable_until" field.
|
||||||
|
func TempUnschedulableUntilNotNil() predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNotNull(FieldTempUnschedulableUntil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonEQ applies the EQ predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonEQ(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldEQ(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonNEQ applies the NEQ predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonNEQ(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNEQ(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonIn applies the In predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonIn(vs ...string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldIn(FieldTempUnschedulableReason, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonNotIn applies the NotIn predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonNotIn(vs ...string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNotIn(FieldTempUnschedulableReason, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonGT applies the GT predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonGT(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldGT(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonGTE applies the GTE predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonGTE(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldGTE(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonLT applies the LT predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonLT(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldLT(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonLTE applies the LTE predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonLTE(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldLTE(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonContains applies the Contains predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonContains(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldContains(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonHasPrefix applies the HasPrefix predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonHasPrefix(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldHasPrefix(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonHasSuffix applies the HasSuffix predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonHasSuffix(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldHasSuffix(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonIsNil applies the IsNil predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonIsNil() predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldIsNull(FieldTempUnschedulableReason))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonNotNil applies the NotNil predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonNotNil() predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldNotNull(FieldTempUnschedulableReason))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonEqualFold applies the EqualFold predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonEqualFold(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldEqualFold(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempUnschedulableReasonContainsFold applies the ContainsFold predicate on the "temp_unschedulable_reason" field.
|
||||||
|
func TempUnschedulableReasonContainsFold(v string) predicate.Account {
|
||||||
|
return predicate.Account(sql.FieldContainsFold(FieldTempUnschedulableReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
// SessionWindowStartEQ applies the EQ predicate on the "session_window_start" field.
|
// SessionWindowStartEQ applies the EQ predicate on the "session_window_start" field.
|
||||||
func SessionWindowStartEQ(v time.Time) predicate.Account {
|
func SessionWindowStartEQ(v time.Time) predicate.Account {
|
||||||
return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v))
|
return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v))
|
||||||
|
|||||||
@@ -139,6 +139,20 @@ func (_c *AccountCreate) SetNillableConcurrency(v *int) *AccountCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLoadFactor sets the "load_factor" field.
|
||||||
|
func (_c *AccountCreate) SetLoadFactor(v int) *AccountCreate {
|
||||||
|
_c.mutation.SetLoadFactor(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableLoadFactor sets the "load_factor" field if the given value is not nil.
|
||||||
|
func (_c *AccountCreate) SetNillableLoadFactor(v *int) *AccountCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetLoadFactor(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetPriority sets the "priority" field.
|
// SetPriority sets the "priority" field.
|
||||||
func (_c *AccountCreate) SetPriority(v int) *AccountCreate {
|
func (_c *AccountCreate) SetPriority(v int) *AccountCreate {
|
||||||
_c.mutation.SetPriority(v)
|
_c.mutation.SetPriority(v)
|
||||||
@@ -293,6 +307,34 @@ func (_c *AccountCreate) SetNillableOverloadUntil(v *time.Time) *AccountCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||||
|
func (_c *AccountCreate) SetTempUnschedulableUntil(v time.Time) *AccountCreate {
|
||||||
|
_c.mutation.SetTempUnschedulableUntil(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil.
|
||||||
|
func (_c *AccountCreate) SetNillableTempUnschedulableUntil(v *time.Time) *AccountCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetTempUnschedulableUntil(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||||
|
func (_c *AccountCreate) SetTempUnschedulableReason(v string) *AccountCreate {
|
||||||
|
_c.mutation.SetTempUnschedulableReason(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil.
|
||||||
|
func (_c *AccountCreate) SetNillableTempUnschedulableReason(v *string) *AccountCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetTempUnschedulableReason(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetSessionWindowStart sets the "session_window_start" field.
|
// SetSessionWindowStart sets the "session_window_start" field.
|
||||||
func (_c *AccountCreate) SetSessionWindowStart(v time.Time) *AccountCreate {
|
func (_c *AccountCreate) SetSessionWindowStart(v time.Time) *AccountCreate {
|
||||||
_c.mutation.SetSessionWindowStart(v)
|
_c.mutation.SetSessionWindowStart(v)
|
||||||
@@ -595,6 +637,10 @@ func (_c *AccountCreate) createSpec() (*Account, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(account.FieldConcurrency, field.TypeInt, value)
|
_spec.SetField(account.FieldConcurrency, field.TypeInt, value)
|
||||||
_node.Concurrency = value
|
_node.Concurrency = value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.LoadFactor(); ok {
|
||||||
|
_spec.SetField(account.FieldLoadFactor, field.TypeInt, value)
|
||||||
|
_node.LoadFactor = &value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.Priority(); ok {
|
if value, ok := _c.mutation.Priority(); ok {
|
||||||
_spec.SetField(account.FieldPriority, field.TypeInt, value)
|
_spec.SetField(account.FieldPriority, field.TypeInt, value)
|
||||||
_node.Priority = value
|
_node.Priority = value
|
||||||
@@ -639,6 +685,14 @@ func (_c *AccountCreate) createSpec() (*Account, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(account.FieldOverloadUntil, field.TypeTime, value)
|
_spec.SetField(account.FieldOverloadUntil, field.TypeTime, value)
|
||||||
_node.OverloadUntil = &value
|
_node.OverloadUntil = &value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.TempUnschedulableUntil(); ok {
|
||||||
|
_spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value)
|
||||||
|
_node.TempUnschedulableUntil = &value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.TempUnschedulableReason(); ok {
|
||||||
|
_spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value)
|
||||||
|
_node.TempUnschedulableReason = &value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.SessionWindowStart(); ok {
|
if value, ok := _c.mutation.SessionWindowStart(); ok {
|
||||||
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
||||||
_node.SessionWindowStart = &value
|
_node.SessionWindowStart = &value
|
||||||
@@ -900,6 +954,30 @@ func (u *AccountUpsert) AddConcurrency(v int) *AccountUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLoadFactor sets the "load_factor" field.
|
||||||
|
func (u *AccountUpsert) SetLoadFactor(v int) *AccountUpsert {
|
||||||
|
u.Set(account.FieldLoadFactor, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateLoadFactor sets the "load_factor" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsert) UpdateLoadFactor() *AccountUpsert {
|
||||||
|
u.SetExcluded(account.FieldLoadFactor)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddLoadFactor adds v to the "load_factor" field.
|
||||||
|
func (u *AccountUpsert) AddLoadFactor(v int) *AccountUpsert {
|
||||||
|
u.Add(account.FieldLoadFactor, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLoadFactor clears the value of the "load_factor" field.
|
||||||
|
func (u *AccountUpsert) ClearLoadFactor() *AccountUpsert {
|
||||||
|
u.SetNull(account.FieldLoadFactor)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// SetPriority sets the "priority" field.
|
// SetPriority sets the "priority" field.
|
||||||
func (u *AccountUpsert) SetPriority(v int) *AccountUpsert {
|
func (u *AccountUpsert) SetPriority(v int) *AccountUpsert {
|
||||||
u.Set(account.FieldPriority, v)
|
u.Set(account.FieldPriority, v)
|
||||||
@@ -1080,6 +1158,42 @@ func (u *AccountUpsert) ClearOverloadUntil() *AccountUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||||
|
func (u *AccountUpsert) SetTempUnschedulableUntil(v time.Time) *AccountUpsert {
|
||||||
|
u.Set(account.FieldTempUnschedulableUntil, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsert) UpdateTempUnschedulableUntil() *AccountUpsert {
|
||||||
|
u.SetExcluded(account.FieldTempUnschedulableUntil)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||||
|
func (u *AccountUpsert) ClearTempUnschedulableUntil() *AccountUpsert {
|
||||||
|
u.SetNull(account.FieldTempUnschedulableUntil)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||||
|
func (u *AccountUpsert) SetTempUnschedulableReason(v string) *AccountUpsert {
|
||||||
|
u.Set(account.FieldTempUnschedulableReason, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsert) UpdateTempUnschedulableReason() *AccountUpsert {
|
||||||
|
u.SetExcluded(account.FieldTempUnschedulableReason)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||||
|
func (u *AccountUpsert) ClearTempUnschedulableReason() *AccountUpsert {
|
||||||
|
u.SetNull(account.FieldTempUnschedulableReason)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// SetSessionWindowStart sets the "session_window_start" field.
|
// SetSessionWindowStart sets the "session_window_start" field.
|
||||||
func (u *AccountUpsert) SetSessionWindowStart(v time.Time) *AccountUpsert {
|
func (u *AccountUpsert) SetSessionWindowStart(v time.Time) *AccountUpsert {
|
||||||
u.Set(account.FieldSessionWindowStart, v)
|
u.Set(account.FieldSessionWindowStart, v)
|
||||||
@@ -1347,6 +1461,34 @@ func (u *AccountUpsertOne) UpdateConcurrency() *AccountUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLoadFactor sets the "load_factor" field.
|
||||||
|
func (u *AccountUpsertOne) SetLoadFactor(v int) *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.SetLoadFactor(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddLoadFactor adds v to the "load_factor" field.
|
||||||
|
func (u *AccountUpsertOne) AddLoadFactor(v int) *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.AddLoadFactor(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateLoadFactor sets the "load_factor" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsertOne) UpdateLoadFactor() *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.UpdateLoadFactor()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLoadFactor clears the value of the "load_factor" field.
|
||||||
|
func (u *AccountUpsertOne) ClearLoadFactor() *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.ClearLoadFactor()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetPriority sets the "priority" field.
|
// SetPriority sets the "priority" field.
|
||||||
func (u *AccountUpsertOne) SetPriority(v int) *AccountUpsertOne {
|
func (u *AccountUpsertOne) SetPriority(v int) *AccountUpsertOne {
|
||||||
return u.Update(func(s *AccountUpsert) {
|
return u.Update(func(s *AccountUpsert) {
|
||||||
@@ -1557,6 +1699,48 @@ func (u *AccountUpsertOne) ClearOverloadUntil() *AccountUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||||
|
func (u *AccountUpsertOne) SetTempUnschedulableUntil(v time.Time) *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.SetTempUnschedulableUntil(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsertOne) UpdateTempUnschedulableUntil() *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.UpdateTempUnschedulableUntil()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||||
|
func (u *AccountUpsertOne) ClearTempUnschedulableUntil() *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.ClearTempUnschedulableUntil()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||||
|
func (u *AccountUpsertOne) SetTempUnschedulableReason(v string) *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.SetTempUnschedulableReason(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsertOne) UpdateTempUnschedulableReason() *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.UpdateTempUnschedulableReason()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||||
|
func (u *AccountUpsertOne) ClearTempUnschedulableReason() *AccountUpsertOne {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.ClearTempUnschedulableReason()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetSessionWindowStart sets the "session_window_start" field.
|
// SetSessionWindowStart sets the "session_window_start" field.
|
||||||
func (u *AccountUpsertOne) SetSessionWindowStart(v time.Time) *AccountUpsertOne {
|
func (u *AccountUpsertOne) SetSessionWindowStart(v time.Time) *AccountUpsertOne {
|
||||||
return u.Update(func(s *AccountUpsert) {
|
return u.Update(func(s *AccountUpsert) {
|
||||||
@@ -1999,6 +2183,34 @@ func (u *AccountUpsertBulk) UpdateConcurrency() *AccountUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLoadFactor sets the "load_factor" field.
|
||||||
|
func (u *AccountUpsertBulk) SetLoadFactor(v int) *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.SetLoadFactor(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddLoadFactor adds v to the "load_factor" field.
|
||||||
|
func (u *AccountUpsertBulk) AddLoadFactor(v int) *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.AddLoadFactor(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateLoadFactor sets the "load_factor" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsertBulk) UpdateLoadFactor() *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.UpdateLoadFactor()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLoadFactor clears the value of the "load_factor" field.
|
||||||
|
func (u *AccountUpsertBulk) ClearLoadFactor() *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.ClearLoadFactor()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetPriority sets the "priority" field.
|
// SetPriority sets the "priority" field.
|
||||||
func (u *AccountUpsertBulk) SetPriority(v int) *AccountUpsertBulk {
|
func (u *AccountUpsertBulk) SetPriority(v int) *AccountUpsertBulk {
|
||||||
return u.Update(func(s *AccountUpsert) {
|
return u.Update(func(s *AccountUpsert) {
|
||||||
@@ -2209,6 +2421,48 @@ func (u *AccountUpsertBulk) ClearOverloadUntil() *AccountUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||||
|
func (u *AccountUpsertBulk) SetTempUnschedulableUntil(v time.Time) *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.SetTempUnschedulableUntil(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsertBulk) UpdateTempUnschedulableUntil() *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.UpdateTempUnschedulableUntil()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||||
|
func (u *AccountUpsertBulk) ClearTempUnschedulableUntil() *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.ClearTempUnschedulableUntil()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||||
|
func (u *AccountUpsertBulk) SetTempUnschedulableReason(v string) *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.SetTempUnschedulableReason(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create.
|
||||||
|
func (u *AccountUpsertBulk) UpdateTempUnschedulableReason() *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.UpdateTempUnschedulableReason()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||||
|
func (u *AccountUpsertBulk) ClearTempUnschedulableReason() *AccountUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
s.ClearTempUnschedulableReason()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetSessionWindowStart sets the "session_window_start" field.
|
// SetSessionWindowStart sets the "session_window_start" field.
|
||||||
func (u *AccountUpsertBulk) SetSessionWindowStart(v time.Time) *AccountUpsertBulk {
|
func (u *AccountUpsertBulk) SetSessionWindowStart(v time.Time) *AccountUpsertBulk {
|
||||||
return u.Update(func(s *AccountUpsert) {
|
return u.Update(func(s *AccountUpsert) {
|
||||||
|
|||||||
@@ -172,6 +172,33 @@ func (_u *AccountUpdate) AddConcurrency(v int) *AccountUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLoadFactor sets the "load_factor" field.
|
||||||
|
func (_u *AccountUpdate) SetLoadFactor(v int) *AccountUpdate {
|
||||||
|
_u.mutation.ResetLoadFactor()
|
||||||
|
_u.mutation.SetLoadFactor(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableLoadFactor sets the "load_factor" field if the given value is not nil.
|
||||||
|
func (_u *AccountUpdate) SetNillableLoadFactor(v *int) *AccountUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetLoadFactor(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddLoadFactor adds value to the "load_factor" field.
|
||||||
|
func (_u *AccountUpdate) AddLoadFactor(v int) *AccountUpdate {
|
||||||
|
_u.mutation.AddLoadFactor(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLoadFactor clears the value of the "load_factor" field.
|
||||||
|
func (_u *AccountUpdate) ClearLoadFactor() *AccountUpdate {
|
||||||
|
_u.mutation.ClearLoadFactor()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetPriority sets the "priority" field.
|
// SetPriority sets the "priority" field.
|
||||||
func (_u *AccountUpdate) SetPriority(v int) *AccountUpdate {
|
func (_u *AccountUpdate) SetPriority(v int) *AccountUpdate {
|
||||||
_u.mutation.ResetPriority()
|
_u.mutation.ResetPriority()
|
||||||
@@ -376,6 +403,46 @@ func (_u *AccountUpdate) ClearOverloadUntil() *AccountUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||||
|
func (_u *AccountUpdate) SetTempUnschedulableUntil(v time.Time) *AccountUpdate {
|
||||||
|
_u.mutation.SetTempUnschedulableUntil(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil.
|
||||||
|
func (_u *AccountUpdate) SetNillableTempUnschedulableUntil(v *time.Time) *AccountUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetTempUnschedulableUntil(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||||
|
func (_u *AccountUpdate) ClearTempUnschedulableUntil() *AccountUpdate {
|
||||||
|
_u.mutation.ClearTempUnschedulableUntil()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||||
|
func (_u *AccountUpdate) SetTempUnschedulableReason(v string) *AccountUpdate {
|
||||||
|
_u.mutation.SetTempUnschedulableReason(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil.
|
||||||
|
func (_u *AccountUpdate) SetNillableTempUnschedulableReason(v *string) *AccountUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetTempUnschedulableReason(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||||
|
func (_u *AccountUpdate) ClearTempUnschedulableReason() *AccountUpdate {
|
||||||
|
_u.mutation.ClearTempUnschedulableReason()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetSessionWindowStart sets the "session_window_start" field.
|
// SetSessionWindowStart sets the "session_window_start" field.
|
||||||
func (_u *AccountUpdate) SetSessionWindowStart(v time.Time) *AccountUpdate {
|
func (_u *AccountUpdate) SetSessionWindowStart(v time.Time) *AccountUpdate {
|
||||||
_u.mutation.SetSessionWindowStart(v)
|
_u.mutation.SetSessionWindowStart(v)
|
||||||
@@ -644,6 +711,15 @@ func (_u *AccountUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if value, ok := _u.mutation.AddedConcurrency(); ok {
|
if value, ok := _u.mutation.AddedConcurrency(); ok {
|
||||||
_spec.AddField(account.FieldConcurrency, field.TypeInt, value)
|
_spec.AddField(account.FieldConcurrency, field.TypeInt, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.LoadFactor(); ok {
|
||||||
|
_spec.SetField(account.FieldLoadFactor, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedLoadFactor(); ok {
|
||||||
|
_spec.AddField(account.FieldLoadFactor, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.LoadFactorCleared() {
|
||||||
|
_spec.ClearField(account.FieldLoadFactor, field.TypeInt)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.Priority(); ok {
|
if value, ok := _u.mutation.Priority(); ok {
|
||||||
_spec.SetField(account.FieldPriority, field.TypeInt, value)
|
_spec.SetField(account.FieldPriority, field.TypeInt, value)
|
||||||
}
|
}
|
||||||
@@ -701,6 +777,18 @@ func (_u *AccountUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if _u.mutation.OverloadUntilCleared() {
|
if _u.mutation.OverloadUntilCleared() {
|
||||||
_spec.ClearField(account.FieldOverloadUntil, field.TypeTime)
|
_spec.ClearField(account.FieldOverloadUntil, field.TypeTime)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.TempUnschedulableUntil(); ok {
|
||||||
|
_spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.TempUnschedulableUntilCleared() {
|
||||||
|
_spec.ClearField(account.FieldTempUnschedulableUntil, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.TempUnschedulableReason(); ok {
|
||||||
|
_spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.TempUnschedulableReasonCleared() {
|
||||||
|
_spec.ClearField(account.FieldTempUnschedulableReason, field.TypeString)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.SessionWindowStart(); ok {
|
if value, ok := _u.mutation.SessionWindowStart(); ok {
|
||||||
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
||||||
}
|
}
|
||||||
@@ -1011,6 +1099,33 @@ func (_u *AccountUpdateOne) AddConcurrency(v int) *AccountUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLoadFactor sets the "load_factor" field.
|
||||||
|
func (_u *AccountUpdateOne) SetLoadFactor(v int) *AccountUpdateOne {
|
||||||
|
_u.mutation.ResetLoadFactor()
|
||||||
|
_u.mutation.SetLoadFactor(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableLoadFactor sets the "load_factor" field if the given value is not nil.
|
||||||
|
func (_u *AccountUpdateOne) SetNillableLoadFactor(v *int) *AccountUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetLoadFactor(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddLoadFactor adds value to the "load_factor" field.
|
||||||
|
func (_u *AccountUpdateOne) AddLoadFactor(v int) *AccountUpdateOne {
|
||||||
|
_u.mutation.AddLoadFactor(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLoadFactor clears the value of the "load_factor" field.
|
||||||
|
func (_u *AccountUpdateOne) ClearLoadFactor() *AccountUpdateOne {
|
||||||
|
_u.mutation.ClearLoadFactor()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetPriority sets the "priority" field.
|
// SetPriority sets the "priority" field.
|
||||||
func (_u *AccountUpdateOne) SetPriority(v int) *AccountUpdateOne {
|
func (_u *AccountUpdateOne) SetPriority(v int) *AccountUpdateOne {
|
||||||
_u.mutation.ResetPriority()
|
_u.mutation.ResetPriority()
|
||||||
@@ -1215,6 +1330,46 @@ func (_u *AccountUpdateOne) ClearOverloadUntil() *AccountUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||||
|
func (_u *AccountUpdateOne) SetTempUnschedulableUntil(v time.Time) *AccountUpdateOne {
|
||||||
|
_u.mutation.SetTempUnschedulableUntil(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil.
|
||||||
|
func (_u *AccountUpdateOne) SetNillableTempUnschedulableUntil(v *time.Time) *AccountUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetTempUnschedulableUntil(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||||
|
func (_u *AccountUpdateOne) ClearTempUnschedulableUntil() *AccountUpdateOne {
|
||||||
|
_u.mutation.ClearTempUnschedulableUntil()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||||
|
func (_u *AccountUpdateOne) SetTempUnschedulableReason(v string) *AccountUpdateOne {
|
||||||
|
_u.mutation.SetTempUnschedulableReason(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil.
|
||||||
|
func (_u *AccountUpdateOne) SetNillableTempUnschedulableReason(v *string) *AccountUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetTempUnschedulableReason(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||||
|
func (_u *AccountUpdateOne) ClearTempUnschedulableReason() *AccountUpdateOne {
|
||||||
|
_u.mutation.ClearTempUnschedulableReason()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetSessionWindowStart sets the "session_window_start" field.
|
// SetSessionWindowStart sets the "session_window_start" field.
|
||||||
func (_u *AccountUpdateOne) SetSessionWindowStart(v time.Time) *AccountUpdateOne {
|
func (_u *AccountUpdateOne) SetSessionWindowStart(v time.Time) *AccountUpdateOne {
|
||||||
_u.mutation.SetSessionWindowStart(v)
|
_u.mutation.SetSessionWindowStart(v)
|
||||||
@@ -1513,6 +1668,15 @@ func (_u *AccountUpdateOne) sqlSave(ctx context.Context) (_node *Account, err er
|
|||||||
if value, ok := _u.mutation.AddedConcurrency(); ok {
|
if value, ok := _u.mutation.AddedConcurrency(); ok {
|
||||||
_spec.AddField(account.FieldConcurrency, field.TypeInt, value)
|
_spec.AddField(account.FieldConcurrency, field.TypeInt, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.LoadFactor(); ok {
|
||||||
|
_spec.SetField(account.FieldLoadFactor, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedLoadFactor(); ok {
|
||||||
|
_spec.AddField(account.FieldLoadFactor, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.LoadFactorCleared() {
|
||||||
|
_spec.ClearField(account.FieldLoadFactor, field.TypeInt)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.Priority(); ok {
|
if value, ok := _u.mutation.Priority(); ok {
|
||||||
_spec.SetField(account.FieldPriority, field.TypeInt, value)
|
_spec.SetField(account.FieldPriority, field.TypeInt, value)
|
||||||
}
|
}
|
||||||
@@ -1570,6 +1734,18 @@ func (_u *AccountUpdateOne) sqlSave(ctx context.Context) (_node *Account, err er
|
|||||||
if _u.mutation.OverloadUntilCleared() {
|
if _u.mutation.OverloadUntilCleared() {
|
||||||
_spec.ClearField(account.FieldOverloadUntil, field.TypeTime)
|
_spec.ClearField(account.FieldOverloadUntil, field.TypeTime)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.TempUnschedulableUntil(); ok {
|
||||||
|
_spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.TempUnschedulableUntilCleared() {
|
||||||
|
_spec.ClearField(account.FieldTempUnschedulableUntil, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.TempUnschedulableReason(); ok {
|
||||||
|
_spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.TempUnschedulableReasonCleared() {
|
||||||
|
_spec.ClearField(account.FieldTempUnschedulableReason, field.TypeString)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.SessionWindowStart(); ok {
|
if value, ok := _u.mutation.SessionWindowStart(); ok {
|
||||||
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -25,6 +25,8 @@ type Announcement struct {
|
|||||||
Content string `json:"content,omitempty"`
|
Content string `json:"content,omitempty"`
|
||||||
// 状态: draft, active, archived
|
// 状态: draft, active, archived
|
||||||
Status string `json:"status,omitempty"`
|
Status string `json:"status,omitempty"`
|
||||||
|
// 通知模式: silent(仅铃铛), popup(弹窗提醒)
|
||||||
|
NotifyMode string `json:"notify_mode,omitempty"`
|
||||||
// 展示条件(JSON 规则)
|
// 展示条件(JSON 规则)
|
||||||
Targeting domain.AnnouncementTargeting `json:"targeting,omitempty"`
|
Targeting domain.AnnouncementTargeting `json:"targeting,omitempty"`
|
||||||
// 开始展示时间(为空表示立即生效)
|
// 开始展示时间(为空表示立即生效)
|
||||||
@@ -72,7 +74,7 @@ func (*Announcement) scanValues(columns []string) ([]any, error) {
|
|||||||
values[i] = new([]byte)
|
values[i] = new([]byte)
|
||||||
case announcement.FieldID, announcement.FieldCreatedBy, announcement.FieldUpdatedBy:
|
case announcement.FieldID, announcement.FieldCreatedBy, announcement.FieldUpdatedBy:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
case announcement.FieldTitle, announcement.FieldContent, announcement.FieldStatus:
|
case announcement.FieldTitle, announcement.FieldContent, announcement.FieldStatus, announcement.FieldNotifyMode:
|
||||||
values[i] = new(sql.NullString)
|
values[i] = new(sql.NullString)
|
||||||
case announcement.FieldStartsAt, announcement.FieldEndsAt, announcement.FieldCreatedAt, announcement.FieldUpdatedAt:
|
case announcement.FieldStartsAt, announcement.FieldEndsAt, announcement.FieldCreatedAt, announcement.FieldUpdatedAt:
|
||||||
values[i] = new(sql.NullTime)
|
values[i] = new(sql.NullTime)
|
||||||
@@ -115,6 +117,12 @@ func (_m *Announcement) assignValues(columns []string, values []any) error {
|
|||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Status = value.String
|
_m.Status = value.String
|
||||||
}
|
}
|
||||||
|
case announcement.FieldNotifyMode:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field notify_mode", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.NotifyMode = value.String
|
||||||
|
}
|
||||||
case announcement.FieldTargeting:
|
case announcement.FieldTargeting:
|
||||||
if value, ok := values[i].(*[]byte); !ok {
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field targeting", values[i])
|
return fmt.Errorf("unexpected type %T for field targeting", values[i])
|
||||||
@@ -213,6 +221,9 @@ func (_m *Announcement) String() string {
|
|||||||
builder.WriteString("status=")
|
builder.WriteString("status=")
|
||||||
builder.WriteString(_m.Status)
|
builder.WriteString(_m.Status)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("notify_mode=")
|
||||||
|
builder.WriteString(_m.NotifyMode)
|
||||||
|
builder.WriteString(", ")
|
||||||
builder.WriteString("targeting=")
|
builder.WriteString("targeting=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.Targeting))
|
builder.WriteString(fmt.Sprintf("%v", _m.Targeting))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
|||||||
@@ -20,6 +20,8 @@ const (
|
|||||||
FieldContent = "content"
|
FieldContent = "content"
|
||||||
// FieldStatus holds the string denoting the status field in the database.
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
FieldStatus = "status"
|
FieldStatus = "status"
|
||||||
|
// FieldNotifyMode holds the string denoting the notify_mode field in the database.
|
||||||
|
FieldNotifyMode = "notify_mode"
|
||||||
// FieldTargeting holds the string denoting the targeting field in the database.
|
// FieldTargeting holds the string denoting the targeting field in the database.
|
||||||
FieldTargeting = "targeting"
|
FieldTargeting = "targeting"
|
||||||
// FieldStartsAt holds the string denoting the starts_at field in the database.
|
// FieldStartsAt holds the string denoting the starts_at field in the database.
|
||||||
@@ -53,6 +55,7 @@ var Columns = []string{
|
|||||||
FieldTitle,
|
FieldTitle,
|
||||||
FieldContent,
|
FieldContent,
|
||||||
FieldStatus,
|
FieldStatus,
|
||||||
|
FieldNotifyMode,
|
||||||
FieldTargeting,
|
FieldTargeting,
|
||||||
FieldStartsAt,
|
FieldStartsAt,
|
||||||
FieldEndsAt,
|
FieldEndsAt,
|
||||||
@@ -81,6 +84,10 @@ var (
|
|||||||
DefaultStatus string
|
DefaultStatus string
|
||||||
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
StatusValidator func(string) error
|
StatusValidator func(string) error
|
||||||
|
// DefaultNotifyMode holds the default value on creation for the "notify_mode" field.
|
||||||
|
DefaultNotifyMode string
|
||||||
|
// NotifyModeValidator is a validator for the "notify_mode" field. It is called by the builders before save.
|
||||||
|
NotifyModeValidator func(string) error
|
||||||
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
DefaultCreatedAt func() time.Time
|
DefaultCreatedAt func() time.Time
|
||||||
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||||
@@ -112,6 +119,11 @@ func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByNotifyMode orders the results by the notify_mode field.
|
||||||
|
func ByNotifyMode(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldNotifyMode, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByStartsAt orders the results by the starts_at field.
|
// ByStartsAt orders the results by the starts_at field.
|
||||||
func ByStartsAt(opts ...sql.OrderTermOption) OrderOption {
|
func ByStartsAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldStartsAt, opts...).ToFunc()
|
return sql.OrderByField(FieldStartsAt, opts...).ToFunc()
|
||||||
|
|||||||
@@ -70,6 +70,11 @@ func Status(v string) predicate.Announcement {
|
|||||||
return predicate.Announcement(sql.FieldEQ(FieldStatus, v))
|
return predicate.Announcement(sql.FieldEQ(FieldStatus, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NotifyMode applies equality check predicate on the "notify_mode" field. It's identical to NotifyModeEQ.
|
||||||
|
func NotifyMode(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
// StartsAt applies equality check predicate on the "starts_at" field. It's identical to StartsAtEQ.
|
// StartsAt applies equality check predicate on the "starts_at" field. It's identical to StartsAtEQ.
|
||||||
func StartsAt(v time.Time) predicate.Announcement {
|
func StartsAt(v time.Time) predicate.Announcement {
|
||||||
return predicate.Announcement(sql.FieldEQ(FieldStartsAt, v))
|
return predicate.Announcement(sql.FieldEQ(FieldStartsAt, v))
|
||||||
@@ -295,6 +300,71 @@ func StatusContainsFold(v string) predicate.Announcement {
|
|||||||
return predicate.Announcement(sql.FieldContainsFold(FieldStatus, v))
|
return predicate.Announcement(sql.FieldContainsFold(FieldStatus, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NotifyModeEQ applies the EQ predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeEQ(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeNEQ applies the NEQ predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeNEQ(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeIn applies the In predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeIn(vs ...string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldNotifyMode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeNotIn applies the NotIn predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeNotIn(vs ...string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldNotifyMode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeGT applies the GT predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeGT(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeGTE applies the GTE predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeGTE(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeLT applies the LT predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeLT(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeLTE applies the LTE predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeLTE(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeContains applies the Contains predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeContains(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldContains(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeHasPrefix applies the HasPrefix predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeHasPrefix(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldHasPrefix(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeHasSuffix applies the HasSuffix predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeHasSuffix(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldHasSuffix(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeEqualFold applies the EqualFold predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeEqualFold(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEqualFold(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeContainsFold applies the ContainsFold predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeContainsFold(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldContainsFold(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
// TargetingIsNil applies the IsNil predicate on the "targeting" field.
|
// TargetingIsNil applies the IsNil predicate on the "targeting" field.
|
||||||
func TargetingIsNil() predicate.Announcement {
|
func TargetingIsNil() predicate.Announcement {
|
||||||
return predicate.Announcement(sql.FieldIsNull(FieldTargeting))
|
return predicate.Announcement(sql.FieldIsNull(FieldTargeting))
|
||||||
|
|||||||
@@ -50,6 +50,20 @@ func (_c *AnnouncementCreate) SetNillableStatus(v *string) *AnnouncementCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetNotifyMode sets the "notify_mode" field.
|
||||||
|
func (_c *AnnouncementCreate) SetNotifyMode(v string) *AnnouncementCreate {
|
||||||
|
_c.mutation.SetNotifyMode(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotifyMode sets the "notify_mode" field if the given value is not nil.
|
||||||
|
func (_c *AnnouncementCreate) SetNillableNotifyMode(v *string) *AnnouncementCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetNotifyMode(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetTargeting sets the "targeting" field.
|
// SetTargeting sets the "targeting" field.
|
||||||
func (_c *AnnouncementCreate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementCreate {
|
func (_c *AnnouncementCreate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementCreate {
|
||||||
_c.mutation.SetTargeting(v)
|
_c.mutation.SetTargeting(v)
|
||||||
@@ -202,6 +216,10 @@ func (_c *AnnouncementCreate) defaults() {
|
|||||||
v := announcement.DefaultStatus
|
v := announcement.DefaultStatus
|
||||||
_c.mutation.SetStatus(v)
|
_c.mutation.SetStatus(v)
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.NotifyMode(); !ok {
|
||||||
|
v := announcement.DefaultNotifyMode
|
||||||
|
_c.mutation.SetNotifyMode(v)
|
||||||
|
}
|
||||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
v := announcement.DefaultCreatedAt()
|
v := announcement.DefaultCreatedAt()
|
||||||
_c.mutation.SetCreatedAt(v)
|
_c.mutation.SetCreatedAt(v)
|
||||||
@@ -238,6 +256,14 @@ func (_c *AnnouncementCreate) check() error {
|
|||||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)}
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.NotifyMode(); !ok {
|
||||||
|
return &ValidationError{Name: "notify_mode", err: errors.New(`ent: missing required field "Announcement.notify_mode"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.NotifyMode(); ok {
|
||||||
|
if err := announcement.NotifyModeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "notify_mode", err: fmt.Errorf(`ent: validator failed for field "Announcement.notify_mode": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Announcement.created_at"`)}
|
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Announcement.created_at"`)}
|
||||||
}
|
}
|
||||||
@@ -283,6 +309,10 @@ func (_c *AnnouncementCreate) createSpec() (*Announcement, *sqlgraph.CreateSpec)
|
|||||||
_spec.SetField(announcement.FieldStatus, field.TypeString, value)
|
_spec.SetField(announcement.FieldStatus, field.TypeString, value)
|
||||||
_node.Status = value
|
_node.Status = value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.NotifyMode(); ok {
|
||||||
|
_spec.SetField(announcement.FieldNotifyMode, field.TypeString, value)
|
||||||
|
_node.NotifyMode = value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.Targeting(); ok {
|
if value, ok := _c.mutation.Targeting(); ok {
|
||||||
_spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
|
_spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
|
||||||
_node.Targeting = value
|
_node.Targeting = value
|
||||||
@@ -415,6 +445,18 @@ func (u *AnnouncementUpsert) UpdateStatus() *AnnouncementUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetNotifyMode sets the "notify_mode" field.
|
||||||
|
func (u *AnnouncementUpsert) SetNotifyMode(v string) *AnnouncementUpsert {
|
||||||
|
u.Set(announcement.FieldNotifyMode, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNotifyMode sets the "notify_mode" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementUpsert) UpdateNotifyMode() *AnnouncementUpsert {
|
||||||
|
u.SetExcluded(announcement.FieldNotifyMode)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// SetTargeting sets the "targeting" field.
|
// SetTargeting sets the "targeting" field.
|
||||||
func (u *AnnouncementUpsert) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsert {
|
func (u *AnnouncementUpsert) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsert {
|
||||||
u.Set(announcement.FieldTargeting, v)
|
u.Set(announcement.FieldTargeting, v)
|
||||||
@@ -616,6 +658,20 @@ func (u *AnnouncementUpsertOne) UpdateStatus() *AnnouncementUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetNotifyMode sets the "notify_mode" field.
|
||||||
|
func (u *AnnouncementUpsertOne) SetNotifyMode(v string) *AnnouncementUpsertOne {
|
||||||
|
return u.Update(func(s *AnnouncementUpsert) {
|
||||||
|
s.SetNotifyMode(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNotifyMode sets the "notify_mode" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementUpsertOne) UpdateNotifyMode() *AnnouncementUpsertOne {
|
||||||
|
return u.Update(func(s *AnnouncementUpsert) {
|
||||||
|
s.UpdateNotifyMode()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetTargeting sets the "targeting" field.
|
// SetTargeting sets the "targeting" field.
|
||||||
func (u *AnnouncementUpsertOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsertOne {
|
func (u *AnnouncementUpsertOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsertOne {
|
||||||
return u.Update(func(s *AnnouncementUpsert) {
|
return u.Update(func(s *AnnouncementUpsert) {
|
||||||
@@ -1002,6 +1058,20 @@ func (u *AnnouncementUpsertBulk) UpdateStatus() *AnnouncementUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetNotifyMode sets the "notify_mode" field.
|
||||||
|
func (u *AnnouncementUpsertBulk) SetNotifyMode(v string) *AnnouncementUpsertBulk {
|
||||||
|
return u.Update(func(s *AnnouncementUpsert) {
|
||||||
|
s.SetNotifyMode(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNotifyMode sets the "notify_mode" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementUpsertBulk) UpdateNotifyMode() *AnnouncementUpsertBulk {
|
||||||
|
return u.Update(func(s *AnnouncementUpsert) {
|
||||||
|
s.UpdateNotifyMode()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetTargeting sets the "targeting" field.
|
// SetTargeting sets the "targeting" field.
|
||||||
func (u *AnnouncementUpsertBulk) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsertBulk {
|
func (u *AnnouncementUpsertBulk) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsertBulk {
|
||||||
return u.Update(func(s *AnnouncementUpsert) {
|
return u.Update(func(s *AnnouncementUpsert) {
|
||||||
|
|||||||
@@ -72,6 +72,20 @@ func (_u *AnnouncementUpdate) SetNillableStatus(v *string) *AnnouncementUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetNotifyMode sets the "notify_mode" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetNotifyMode(v string) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetNotifyMode(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotifyMode sets the "notify_mode" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableNotifyMode(v *string) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetNotifyMode(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetTargeting sets the "targeting" field.
|
// SetTargeting sets the "targeting" field.
|
||||||
func (_u *AnnouncementUpdate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdate {
|
func (_u *AnnouncementUpdate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdate {
|
||||||
_u.mutation.SetTargeting(v)
|
_u.mutation.SetTargeting(v)
|
||||||
@@ -286,6 +300,11 @@ func (_u *AnnouncementUpdate) check() error {
|
|||||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)}
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.NotifyMode(); ok {
|
||||||
|
if err := announcement.NotifyModeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "notify_mode", err: fmt.Errorf(`ent: validator failed for field "Announcement.notify_mode": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -310,6 +329,9 @@ func (_u *AnnouncementUpdate) sqlSave(ctx context.Context) (_node int, err error
|
|||||||
if value, ok := _u.mutation.Status(); ok {
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
_spec.SetField(announcement.FieldStatus, field.TypeString, value)
|
_spec.SetField(announcement.FieldStatus, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.NotifyMode(); ok {
|
||||||
|
_spec.SetField(announcement.FieldNotifyMode, field.TypeString, value)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.Targeting(); ok {
|
if value, ok := _u.mutation.Targeting(); ok {
|
||||||
_spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
|
_spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
|
||||||
}
|
}
|
||||||
@@ -456,6 +478,20 @@ func (_u *AnnouncementUpdateOne) SetNillableStatus(v *string) *AnnouncementUpdat
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetNotifyMode sets the "notify_mode" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNotifyMode(v string) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetNotifyMode(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotifyMode sets the "notify_mode" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableNotifyMode(v *string) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetNotifyMode(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetTargeting sets the "targeting" field.
|
// SetTargeting sets the "targeting" field.
|
||||||
func (_u *AnnouncementUpdateOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdateOne {
|
func (_u *AnnouncementUpdateOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdateOne {
|
||||||
_u.mutation.SetTargeting(v)
|
_u.mutation.SetTargeting(v)
|
||||||
@@ -683,6 +719,11 @@ func (_u *AnnouncementUpdateOne) check() error {
|
|||||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)}
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.NotifyMode(); ok {
|
||||||
|
if err := announcement.NotifyModeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "notify_mode", err: fmt.Errorf(`ent: validator failed for field "Announcement.notify_mode": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -724,6 +765,9 @@ func (_u *AnnouncementUpdateOne) sqlSave(ctx context.Context) (_node *Announceme
|
|||||||
if value, ok := _u.mutation.Status(); ok {
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
_spec.SetField(announcement.FieldStatus, field.TypeString, value)
|
_spec.SetField(announcement.FieldStatus, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.NotifyMode(); ok {
|
||||||
|
_spec.SetField(announcement.FieldNotifyMode, field.TypeString, value)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.Targeting(); ok {
|
if value, ok := _u.mutation.Targeting(); ok {
|
||||||
_spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
|
_spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -36,6 +36,8 @@ type APIKey struct {
|
|||||||
GroupID *int64 `json:"group_id,omitempty"`
|
GroupID *int64 `json:"group_id,omitempty"`
|
||||||
// Status holds the value of the "status" field.
|
// Status holds the value of the "status" field.
|
||||||
Status string `json:"status,omitempty"`
|
Status string `json:"status,omitempty"`
|
||||||
|
// Last usage time of this API key
|
||||||
|
LastUsedAt *time.Time `json:"last_used_at,omitempty"`
|
||||||
// Allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"]
|
// Allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"]
|
||||||
IPWhitelist []string `json:"ip_whitelist,omitempty"`
|
IPWhitelist []string `json:"ip_whitelist,omitempty"`
|
||||||
// Blocked IPs/CIDRs
|
// Blocked IPs/CIDRs
|
||||||
@@ -46,6 +48,24 @@ type APIKey struct {
|
|||||||
QuotaUsed float64 `json:"quota_used,omitempty"`
|
QuotaUsed float64 `json:"quota_used,omitempty"`
|
||||||
// Expiration time for this API key (null = never expires)
|
// Expiration time for this API key (null = never expires)
|
||||||
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
||||||
|
// Rate limit in USD per 5 hours (0 = unlimited)
|
||||||
|
RateLimit5h float64 `json:"rate_limit_5h,omitempty"`
|
||||||
|
// Rate limit in USD per day (0 = unlimited)
|
||||||
|
RateLimit1d float64 `json:"rate_limit_1d,omitempty"`
|
||||||
|
// Rate limit in USD per 7 days (0 = unlimited)
|
||||||
|
RateLimit7d float64 `json:"rate_limit_7d,omitempty"`
|
||||||
|
// Used amount in USD for the current 5h window
|
||||||
|
Usage5h float64 `json:"usage_5h,omitempty"`
|
||||||
|
// Used amount in USD for the current 1d window
|
||||||
|
Usage1d float64 `json:"usage_1d,omitempty"`
|
||||||
|
// Used amount in USD for the current 7d window
|
||||||
|
Usage7d float64 `json:"usage_7d,omitempty"`
|
||||||
|
// Start time of the current 5h rate limit window
|
||||||
|
Window5hStart *time.Time `json:"window_5h_start,omitempty"`
|
||||||
|
// Start time of the current 1d rate limit window
|
||||||
|
Window1dStart *time.Time `json:"window_1d_start,omitempty"`
|
||||||
|
// Start time of the current 7d rate limit window
|
||||||
|
Window7dStart *time.Time `json:"window_7d_start,omitempty"`
|
||||||
// Edges holds the relations/edges for other nodes in the graph.
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
// The values are being populated by the APIKeyQuery when eager-loading is set.
|
// The values are being populated by the APIKeyQuery when eager-loading is set.
|
||||||
Edges APIKeyEdges `json:"edges"`
|
Edges APIKeyEdges `json:"edges"`
|
||||||
@@ -103,13 +123,13 @@ func (*APIKey) scanValues(columns []string) ([]any, error) {
|
|||||||
switch columns[i] {
|
switch columns[i] {
|
||||||
case apikey.FieldIPWhitelist, apikey.FieldIPBlacklist:
|
case apikey.FieldIPWhitelist, apikey.FieldIPBlacklist:
|
||||||
values[i] = new([]byte)
|
values[i] = new([]byte)
|
||||||
case apikey.FieldQuota, apikey.FieldQuotaUsed:
|
case apikey.FieldQuota, apikey.FieldQuotaUsed, apikey.FieldRateLimit5h, apikey.FieldRateLimit1d, apikey.FieldRateLimit7d, apikey.FieldUsage5h, apikey.FieldUsage1d, apikey.FieldUsage7d:
|
||||||
values[i] = new(sql.NullFloat64)
|
values[i] = new(sql.NullFloat64)
|
||||||
case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID:
|
case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus:
|
case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus:
|
||||||
values[i] = new(sql.NullString)
|
values[i] = new(sql.NullString)
|
||||||
case apikey.FieldCreatedAt, apikey.FieldUpdatedAt, apikey.FieldDeletedAt, apikey.FieldExpiresAt:
|
case apikey.FieldCreatedAt, apikey.FieldUpdatedAt, apikey.FieldDeletedAt, apikey.FieldLastUsedAt, apikey.FieldExpiresAt, apikey.FieldWindow5hStart, apikey.FieldWindow1dStart, apikey.FieldWindow7dStart:
|
||||||
values[i] = new(sql.NullTime)
|
values[i] = new(sql.NullTime)
|
||||||
default:
|
default:
|
||||||
values[i] = new(sql.UnknownType)
|
values[i] = new(sql.UnknownType)
|
||||||
@@ -182,6 +202,13 @@ func (_m *APIKey) assignValues(columns []string, values []any) error {
|
|||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Status = value.String
|
_m.Status = value.String
|
||||||
}
|
}
|
||||||
|
case apikey.FieldLastUsedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field last_used_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.LastUsedAt = new(time.Time)
|
||||||
|
*_m.LastUsedAt = value.Time
|
||||||
|
}
|
||||||
case apikey.FieldIPWhitelist:
|
case apikey.FieldIPWhitelist:
|
||||||
if value, ok := values[i].(*[]byte); !ok {
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field ip_whitelist", values[i])
|
return fmt.Errorf("unexpected type %T for field ip_whitelist", values[i])
|
||||||
@@ -217,6 +244,63 @@ func (_m *APIKey) assignValues(columns []string, values []any) error {
|
|||||||
_m.ExpiresAt = new(time.Time)
|
_m.ExpiresAt = new(time.Time)
|
||||||
*_m.ExpiresAt = value.Time
|
*_m.ExpiresAt = value.Time
|
||||||
}
|
}
|
||||||
|
case apikey.FieldRateLimit5h:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limit_5h", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimit5h = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldRateLimit1d:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limit_1d", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimit1d = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldRateLimit7d:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limit_7d", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimit7d = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldUsage5h:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field usage_5h", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Usage5h = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldUsage1d:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field usage_1d", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Usage1d = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldUsage7d:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field usage_7d", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Usage7d = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldWindow5hStart:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field window_5h_start", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Window5hStart = new(time.Time)
|
||||||
|
*_m.Window5hStart = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldWindow1dStart:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field window_1d_start", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Window1dStart = new(time.Time)
|
||||||
|
*_m.Window1dStart = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldWindow7dStart:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field window_7d_start", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Window7dStart = new(time.Time)
|
||||||
|
*_m.Window7dStart = value.Time
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
_m.selectValues.Set(columns[i], values[i])
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
}
|
}
|
||||||
@@ -296,6 +380,11 @@ func (_m *APIKey) String() string {
|
|||||||
builder.WriteString("status=")
|
builder.WriteString("status=")
|
||||||
builder.WriteString(_m.Status)
|
builder.WriteString(_m.Status)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
if v := _m.LastUsedAt; v != nil {
|
||||||
|
builder.WriteString("last_used_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
builder.WriteString("ip_whitelist=")
|
builder.WriteString("ip_whitelist=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.IPWhitelist))
|
builder.WriteString(fmt.Sprintf("%v", _m.IPWhitelist))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
@@ -312,6 +401,39 @@ func (_m *APIKey) String() string {
|
|||||||
builder.WriteString("expires_at=")
|
builder.WriteString("expires_at=")
|
||||||
builder.WriteString(v.Format(time.ANSIC))
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
}
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("rate_limit_5h=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit5h))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("rate_limit_1d=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit1d))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("rate_limit_7d=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit7d))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("usage_5h=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Usage5h))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("usage_1d=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Usage1d))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("usage_7d=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Usage7d))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Window5hStart; v != nil {
|
||||||
|
builder.WriteString("window_5h_start=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Window1dStart; v != nil {
|
||||||
|
builder.WriteString("window_1d_start=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Window7dStart; v != nil {
|
||||||
|
builder.WriteString("window_7d_start=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
builder.WriteByte(')')
|
builder.WriteByte(')')
|
||||||
return builder.String()
|
return builder.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -31,6 +31,8 @@ const (
|
|||||||
FieldGroupID = "group_id"
|
FieldGroupID = "group_id"
|
||||||
// FieldStatus holds the string denoting the status field in the database.
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
FieldStatus = "status"
|
FieldStatus = "status"
|
||||||
|
// FieldLastUsedAt holds the string denoting the last_used_at field in the database.
|
||||||
|
FieldLastUsedAt = "last_used_at"
|
||||||
// FieldIPWhitelist holds the string denoting the ip_whitelist field in the database.
|
// FieldIPWhitelist holds the string denoting the ip_whitelist field in the database.
|
||||||
FieldIPWhitelist = "ip_whitelist"
|
FieldIPWhitelist = "ip_whitelist"
|
||||||
// FieldIPBlacklist holds the string denoting the ip_blacklist field in the database.
|
// FieldIPBlacklist holds the string denoting the ip_blacklist field in the database.
|
||||||
@@ -41,6 +43,24 @@ const (
|
|||||||
FieldQuotaUsed = "quota_used"
|
FieldQuotaUsed = "quota_used"
|
||||||
// FieldExpiresAt holds the string denoting the expires_at field in the database.
|
// FieldExpiresAt holds the string denoting the expires_at field in the database.
|
||||||
FieldExpiresAt = "expires_at"
|
FieldExpiresAt = "expires_at"
|
||||||
|
// FieldRateLimit5h holds the string denoting the rate_limit_5h field in the database.
|
||||||
|
FieldRateLimit5h = "rate_limit_5h"
|
||||||
|
// FieldRateLimit1d holds the string denoting the rate_limit_1d field in the database.
|
||||||
|
FieldRateLimit1d = "rate_limit_1d"
|
||||||
|
// FieldRateLimit7d holds the string denoting the rate_limit_7d field in the database.
|
||||||
|
FieldRateLimit7d = "rate_limit_7d"
|
||||||
|
// FieldUsage5h holds the string denoting the usage_5h field in the database.
|
||||||
|
FieldUsage5h = "usage_5h"
|
||||||
|
// FieldUsage1d holds the string denoting the usage_1d field in the database.
|
||||||
|
FieldUsage1d = "usage_1d"
|
||||||
|
// FieldUsage7d holds the string denoting the usage_7d field in the database.
|
||||||
|
FieldUsage7d = "usage_7d"
|
||||||
|
// FieldWindow5hStart holds the string denoting the window_5h_start field in the database.
|
||||||
|
FieldWindow5hStart = "window_5h_start"
|
||||||
|
// FieldWindow1dStart holds the string denoting the window_1d_start field in the database.
|
||||||
|
FieldWindow1dStart = "window_1d_start"
|
||||||
|
// FieldWindow7dStart holds the string denoting the window_7d_start field in the database.
|
||||||
|
FieldWindow7dStart = "window_7d_start"
|
||||||
// EdgeUser holds the string denoting the user edge name in mutations.
|
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||||
EdgeUser = "user"
|
EdgeUser = "user"
|
||||||
// EdgeGroup holds the string denoting the group edge name in mutations.
|
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||||
@@ -83,11 +103,21 @@ var Columns = []string{
|
|||||||
FieldName,
|
FieldName,
|
||||||
FieldGroupID,
|
FieldGroupID,
|
||||||
FieldStatus,
|
FieldStatus,
|
||||||
|
FieldLastUsedAt,
|
||||||
FieldIPWhitelist,
|
FieldIPWhitelist,
|
||||||
FieldIPBlacklist,
|
FieldIPBlacklist,
|
||||||
FieldQuota,
|
FieldQuota,
|
||||||
FieldQuotaUsed,
|
FieldQuotaUsed,
|
||||||
FieldExpiresAt,
|
FieldExpiresAt,
|
||||||
|
FieldRateLimit5h,
|
||||||
|
FieldRateLimit1d,
|
||||||
|
FieldRateLimit7d,
|
||||||
|
FieldUsage5h,
|
||||||
|
FieldUsage1d,
|
||||||
|
FieldUsage7d,
|
||||||
|
FieldWindow5hStart,
|
||||||
|
FieldWindow1dStart,
|
||||||
|
FieldWindow7dStart,
|
||||||
}
|
}
|
||||||
|
|
||||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
@@ -126,6 +156,18 @@ var (
|
|||||||
DefaultQuota float64
|
DefaultQuota float64
|
||||||
// DefaultQuotaUsed holds the default value on creation for the "quota_used" field.
|
// DefaultQuotaUsed holds the default value on creation for the "quota_used" field.
|
||||||
DefaultQuotaUsed float64
|
DefaultQuotaUsed float64
|
||||||
|
// DefaultRateLimit5h holds the default value on creation for the "rate_limit_5h" field.
|
||||||
|
DefaultRateLimit5h float64
|
||||||
|
// DefaultRateLimit1d holds the default value on creation for the "rate_limit_1d" field.
|
||||||
|
DefaultRateLimit1d float64
|
||||||
|
// DefaultRateLimit7d holds the default value on creation for the "rate_limit_7d" field.
|
||||||
|
DefaultRateLimit7d float64
|
||||||
|
// DefaultUsage5h holds the default value on creation for the "usage_5h" field.
|
||||||
|
DefaultUsage5h float64
|
||||||
|
// DefaultUsage1d holds the default value on creation for the "usage_1d" field.
|
||||||
|
DefaultUsage1d float64
|
||||||
|
// DefaultUsage7d holds the default value on creation for the "usage_7d" field.
|
||||||
|
DefaultUsage7d float64
|
||||||
)
|
)
|
||||||
|
|
||||||
// OrderOption defines the ordering options for the APIKey queries.
|
// OrderOption defines the ordering options for the APIKey queries.
|
||||||
@@ -176,6 +218,11 @@ func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByLastUsedAt orders the results by the last_used_at field.
|
||||||
|
func ByLastUsedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldLastUsedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByQuota orders the results by the quota field.
|
// ByQuota orders the results by the quota field.
|
||||||
func ByQuota(opts ...sql.OrderTermOption) OrderOption {
|
func ByQuota(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldQuota, opts...).ToFunc()
|
return sql.OrderByField(FieldQuota, opts...).ToFunc()
|
||||||
@@ -191,6 +238,51 @@ func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByRateLimit5h orders the results by the rate_limit_5h field.
|
||||||
|
func ByRateLimit5h(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimit5h, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimit1d orders the results by the rate_limit_1d field.
|
||||||
|
func ByRateLimit1d(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimit1d, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimit7d orders the results by the rate_limit_7d field.
|
||||||
|
func ByRateLimit7d(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimit7d, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsage5h orders the results by the usage_5h field.
|
||||||
|
func ByUsage5h(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsage5h, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsage1d orders the results by the usage_1d field.
|
||||||
|
func ByUsage1d(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsage1d, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsage7d orders the results by the usage_7d field.
|
||||||
|
func ByUsage7d(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsage7d, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByWindow5hStart orders the results by the window_5h_start field.
|
||||||
|
func ByWindow5hStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldWindow5hStart, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByWindow1dStart orders the results by the window_1d_start field.
|
||||||
|
func ByWindow1dStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldWindow1dStart, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByWindow7dStart orders the results by the window_7d_start field.
|
||||||
|
func ByWindow7dStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldWindow7dStart, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByUserField orders the results by user field.
|
// ByUserField orders the results by user field.
|
||||||
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
return func(s *sql.Selector) {
|
return func(s *sql.Selector) {
|
||||||
|
|||||||
@@ -95,6 +95,11 @@ func Status(v string) predicate.APIKey {
|
|||||||
return predicate.APIKey(sql.FieldEQ(FieldStatus, v))
|
return predicate.APIKey(sql.FieldEQ(FieldStatus, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LastUsedAt applies equality check predicate on the "last_used_at" field. It's identical to LastUsedAtEQ.
|
||||||
|
func LastUsedAt(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldLastUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
// Quota applies equality check predicate on the "quota" field. It's identical to QuotaEQ.
|
// Quota applies equality check predicate on the "quota" field. It's identical to QuotaEQ.
|
||||||
func Quota(v float64) predicate.APIKey {
|
func Quota(v float64) predicate.APIKey {
|
||||||
return predicate.APIKey(sql.FieldEQ(FieldQuota, v))
|
return predicate.APIKey(sql.FieldEQ(FieldQuota, v))
|
||||||
@@ -110,6 +115,51 @@ func ExpiresAt(v time.Time) predicate.APIKey {
|
|||||||
return predicate.APIKey(sql.FieldEQ(FieldExpiresAt, v))
|
return predicate.APIKey(sql.FieldEQ(FieldExpiresAt, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RateLimit5h applies equality check predicate on the "rate_limit_5h" field. It's identical to RateLimit5hEQ.
|
||||||
|
func RateLimit5h(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldRateLimit5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1d applies equality check predicate on the "rate_limit_1d" field. It's identical to RateLimit1dEQ.
|
||||||
|
func RateLimit1d(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldRateLimit1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7d applies equality check predicate on the "rate_limit_7d" field. It's identical to RateLimit7dEQ.
|
||||||
|
func RateLimit7d(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldRateLimit7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5h applies equality check predicate on the "usage_5h" field. It's identical to Usage5hEQ.
|
||||||
|
func Usage5h(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUsage5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1d applies equality check predicate on the "usage_1d" field. It's identical to Usage1dEQ.
|
||||||
|
func Usage1d(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUsage1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7d applies equality check predicate on the "usage_7d" field. It's identical to Usage7dEQ.
|
||||||
|
func Usage7d(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUsage7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStart applies equality check predicate on the "window_5h_start" field. It's identical to Window5hStartEQ.
|
||||||
|
func Window5hStart(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldWindow5hStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStart applies equality check predicate on the "window_1d_start" field. It's identical to Window1dStartEQ.
|
||||||
|
func Window1dStart(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldWindow1dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStart applies equality check predicate on the "window_7d_start" field. It's identical to Window7dStartEQ.
|
||||||
|
func Window7dStart(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldWindow7dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
func CreatedAtEQ(v time.Time) predicate.APIKey {
|
func CreatedAtEQ(v time.Time) predicate.APIKey {
|
||||||
return predicate.APIKey(sql.FieldEQ(FieldCreatedAt, v))
|
return predicate.APIKey(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
@@ -485,6 +535,56 @@ func StatusContainsFold(v string) predicate.APIKey {
|
|||||||
return predicate.APIKey(sql.FieldContainsFold(FieldStatus, v))
|
return predicate.APIKey(sql.FieldContainsFold(FieldStatus, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LastUsedAtEQ applies the EQ predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldLastUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtNEQ applies the NEQ predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtNEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldLastUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtIn applies the In predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldLastUsedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtNotIn applies the NotIn predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtNotIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldLastUsedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtGT applies the GT predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtGT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldLastUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtGTE applies the GTE predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtGTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldLastUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtLT applies the LT predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtLT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldLastUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtLTE applies the LTE predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtLTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldLastUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtIsNil applies the IsNil predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldLastUsedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastUsedAtNotNil applies the NotNil predicate on the "last_used_at" field.
|
||||||
|
func LastUsedAtNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldLastUsedAt))
|
||||||
|
}
|
||||||
|
|
||||||
// IPWhitelistIsNil applies the IsNil predicate on the "ip_whitelist" field.
|
// IPWhitelistIsNil applies the IsNil predicate on the "ip_whitelist" field.
|
||||||
func IPWhitelistIsNil() predicate.APIKey {
|
func IPWhitelistIsNil() predicate.APIKey {
|
||||||
return predicate.APIKey(sql.FieldIsNull(FieldIPWhitelist))
|
return predicate.APIKey(sql.FieldIsNull(FieldIPWhitelist))
|
||||||
@@ -635,6 +735,396 @@ func ExpiresAtNotNil() predicate.APIKey {
|
|||||||
return predicate.APIKey(sql.FieldNotNull(FieldExpiresAt))
|
return predicate.APIKey(sql.FieldNotNull(FieldExpiresAt))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RateLimit5hEQ applies the EQ predicate on the "rate_limit_5h" field.
|
||||||
|
func RateLimit5hEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldRateLimit5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit5hNEQ applies the NEQ predicate on the "rate_limit_5h" field.
|
||||||
|
func RateLimit5hNEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldRateLimit5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit5hIn applies the In predicate on the "rate_limit_5h" field.
|
||||||
|
func RateLimit5hIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldRateLimit5h, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit5hNotIn applies the NotIn predicate on the "rate_limit_5h" field.
|
||||||
|
func RateLimit5hNotIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldRateLimit5h, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit5hGT applies the GT predicate on the "rate_limit_5h" field.
|
||||||
|
func RateLimit5hGT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldRateLimit5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit5hGTE applies the GTE predicate on the "rate_limit_5h" field.
|
||||||
|
func RateLimit5hGTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldRateLimit5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit5hLT applies the LT predicate on the "rate_limit_5h" field.
|
||||||
|
func RateLimit5hLT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldRateLimit5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit5hLTE applies the LTE predicate on the "rate_limit_5h" field.
|
||||||
|
func RateLimit5hLTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldRateLimit5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1dEQ applies the EQ predicate on the "rate_limit_1d" field.
|
||||||
|
func RateLimit1dEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldRateLimit1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1dNEQ applies the NEQ predicate on the "rate_limit_1d" field.
|
||||||
|
func RateLimit1dNEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldRateLimit1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1dIn applies the In predicate on the "rate_limit_1d" field.
|
||||||
|
func RateLimit1dIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldRateLimit1d, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1dNotIn applies the NotIn predicate on the "rate_limit_1d" field.
|
||||||
|
func RateLimit1dNotIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldRateLimit1d, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1dGT applies the GT predicate on the "rate_limit_1d" field.
|
||||||
|
func RateLimit1dGT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldRateLimit1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1dGTE applies the GTE predicate on the "rate_limit_1d" field.
|
||||||
|
func RateLimit1dGTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldRateLimit1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1dLT applies the LT predicate on the "rate_limit_1d" field.
|
||||||
|
func RateLimit1dLT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldRateLimit1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit1dLTE applies the LTE predicate on the "rate_limit_1d" field.
|
||||||
|
func RateLimit1dLTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldRateLimit1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7dEQ applies the EQ predicate on the "rate_limit_7d" field.
|
||||||
|
func RateLimit7dEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldRateLimit7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7dNEQ applies the NEQ predicate on the "rate_limit_7d" field.
|
||||||
|
func RateLimit7dNEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldRateLimit7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7dIn applies the In predicate on the "rate_limit_7d" field.
|
||||||
|
func RateLimit7dIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldRateLimit7d, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7dNotIn applies the NotIn predicate on the "rate_limit_7d" field.
|
||||||
|
func RateLimit7dNotIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldRateLimit7d, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7dGT applies the GT predicate on the "rate_limit_7d" field.
|
||||||
|
func RateLimit7dGT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldRateLimit7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7dGTE applies the GTE predicate on the "rate_limit_7d" field.
|
||||||
|
func RateLimit7dGTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldRateLimit7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7dLT applies the LT predicate on the "rate_limit_7d" field.
|
||||||
|
func RateLimit7dLT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldRateLimit7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateLimit7dLTE applies the LTE predicate on the "rate_limit_7d" field.
|
||||||
|
func RateLimit7dLTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldRateLimit7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5hEQ applies the EQ predicate on the "usage_5h" field.
|
||||||
|
func Usage5hEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUsage5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5hNEQ applies the NEQ predicate on the "usage_5h" field.
|
||||||
|
func Usage5hNEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldUsage5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5hIn applies the In predicate on the "usage_5h" field.
|
||||||
|
func Usage5hIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldUsage5h, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5hNotIn applies the NotIn predicate on the "usage_5h" field.
|
||||||
|
func Usage5hNotIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldUsage5h, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5hGT applies the GT predicate on the "usage_5h" field.
|
||||||
|
func Usage5hGT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldUsage5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5hGTE applies the GTE predicate on the "usage_5h" field.
|
||||||
|
func Usage5hGTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldUsage5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5hLT applies the LT predicate on the "usage_5h" field.
|
||||||
|
func Usage5hLT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldUsage5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage5hLTE applies the LTE predicate on the "usage_5h" field.
|
||||||
|
func Usage5hLTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldUsage5h, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1dEQ applies the EQ predicate on the "usage_1d" field.
|
||||||
|
func Usage1dEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUsage1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1dNEQ applies the NEQ predicate on the "usage_1d" field.
|
||||||
|
func Usage1dNEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldUsage1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1dIn applies the In predicate on the "usage_1d" field.
|
||||||
|
func Usage1dIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldUsage1d, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1dNotIn applies the NotIn predicate on the "usage_1d" field.
|
||||||
|
func Usage1dNotIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldUsage1d, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1dGT applies the GT predicate on the "usage_1d" field.
|
||||||
|
func Usage1dGT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldUsage1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1dGTE applies the GTE predicate on the "usage_1d" field.
|
||||||
|
func Usage1dGTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldUsage1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1dLT applies the LT predicate on the "usage_1d" field.
|
||||||
|
func Usage1dLT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldUsage1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage1dLTE applies the LTE predicate on the "usage_1d" field.
|
||||||
|
func Usage1dLTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldUsage1d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7dEQ applies the EQ predicate on the "usage_7d" field.
|
||||||
|
func Usage7dEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUsage7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7dNEQ applies the NEQ predicate on the "usage_7d" field.
|
||||||
|
func Usage7dNEQ(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldUsage7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7dIn applies the In predicate on the "usage_7d" field.
|
||||||
|
func Usage7dIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldUsage7d, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7dNotIn applies the NotIn predicate on the "usage_7d" field.
|
||||||
|
func Usage7dNotIn(vs ...float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldUsage7d, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7dGT applies the GT predicate on the "usage_7d" field.
|
||||||
|
func Usage7dGT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldUsage7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7dGTE applies the GTE predicate on the "usage_7d" field.
|
||||||
|
func Usage7dGTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldUsage7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7dLT applies the LT predicate on the "usage_7d" field.
|
||||||
|
func Usage7dLT(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldUsage7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage7dLTE applies the LTE predicate on the "usage_7d" field.
|
||||||
|
func Usage7dLTE(v float64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldUsage7d, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartEQ applies the EQ predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldWindow5hStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartNEQ applies the NEQ predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartNEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldWindow5hStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartIn applies the In predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldWindow5hStart, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartNotIn applies the NotIn predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartNotIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldWindow5hStart, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartGT applies the GT predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartGT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldWindow5hStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartGTE applies the GTE predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartGTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldWindow5hStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartLT applies the LT predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartLT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldWindow5hStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartLTE applies the LTE predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartLTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldWindow5hStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartIsNil applies the IsNil predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldWindow5hStart))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window5hStartNotNil applies the NotNil predicate on the "window_5h_start" field.
|
||||||
|
func Window5hStartNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldWindow5hStart))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartEQ applies the EQ predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldWindow1dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartNEQ applies the NEQ predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartNEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldWindow1dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartIn applies the In predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldWindow1dStart, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartNotIn applies the NotIn predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartNotIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldWindow1dStart, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartGT applies the GT predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartGT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldWindow1dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartGTE applies the GTE predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartGTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldWindow1dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartLT applies the LT predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartLT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldWindow1dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartLTE applies the LTE predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartLTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldWindow1dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartIsNil applies the IsNil predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldWindow1dStart))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window1dStartNotNil applies the NotNil predicate on the "window_1d_start" field.
|
||||||
|
func Window1dStartNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldWindow1dStart))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartEQ applies the EQ predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldWindow7dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartNEQ applies the NEQ predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartNEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldWindow7dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartIn applies the In predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldWindow7dStart, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartNotIn applies the NotIn predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartNotIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldWindow7dStart, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartGT applies the GT predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartGT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldWindow7dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartGTE applies the GTE predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartGTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldWindow7dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartLT applies the LT predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartLT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldWindow7dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartLTE applies the LTE predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartLTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldWindow7dStart, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartIsNil applies the IsNil predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldWindow7dStart))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Window7dStartNotNil applies the NotNil predicate on the "window_7d_start" field.
|
||||||
|
func Window7dStartNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldWindow7dStart))
|
||||||
|
}
|
||||||
|
|
||||||
// HasUser applies the HasEdge predicate on the "user" edge.
|
// HasUser applies the HasEdge predicate on the "user" edge.
|
||||||
func HasUser() predicate.APIKey {
|
func HasUser() predicate.APIKey {
|
||||||
return predicate.APIKey(func(s *sql.Selector) {
|
return predicate.APIKey(func(s *sql.Selector) {
|
||||||
|
|||||||
@@ -113,6 +113,20 @@ func (_c *APIKeyCreate) SetNillableStatus(v *string) *APIKeyCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLastUsedAt sets the "last_used_at" field.
|
||||||
|
func (_c *APIKeyCreate) SetLastUsedAt(v time.Time) *APIKeyCreate {
|
||||||
|
_c.mutation.SetLastUsedAt(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableLastUsedAt(v *time.Time) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetLastUsedAt(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetIPWhitelist sets the "ip_whitelist" field.
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
func (_c *APIKeyCreate) SetIPWhitelist(v []string) *APIKeyCreate {
|
func (_c *APIKeyCreate) SetIPWhitelist(v []string) *APIKeyCreate {
|
||||||
_c.mutation.SetIPWhitelist(v)
|
_c.mutation.SetIPWhitelist(v)
|
||||||
@@ -167,6 +181,132 @@ func (_c *APIKeyCreate) SetNillableExpiresAt(v *time.Time) *APIKeyCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||||
|
func (_c *APIKeyCreate) SetRateLimit5h(v float64) *APIKeyCreate {
|
||||||
|
_c.mutation.SetRateLimit5h(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit5h sets the "rate_limit_5h" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableRateLimit5h(v *float64) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetRateLimit5h(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||||
|
func (_c *APIKeyCreate) SetRateLimit1d(v float64) *APIKeyCreate {
|
||||||
|
_c.mutation.SetRateLimit1d(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit1d sets the "rate_limit_1d" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableRateLimit1d(v *float64) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetRateLimit1d(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||||
|
func (_c *APIKeyCreate) SetRateLimit7d(v float64) *APIKeyCreate {
|
||||||
|
_c.mutation.SetRateLimit7d(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit7d sets the "rate_limit_7d" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableRateLimit7d(v *float64) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetRateLimit7d(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage5h sets the "usage_5h" field.
|
||||||
|
func (_c *APIKeyCreate) SetUsage5h(v float64) *APIKeyCreate {
|
||||||
|
_c.mutation.SetUsage5h(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage5h sets the "usage_5h" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableUsage5h(v *float64) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetUsage5h(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage1d sets the "usage_1d" field.
|
||||||
|
func (_c *APIKeyCreate) SetUsage1d(v float64) *APIKeyCreate {
|
||||||
|
_c.mutation.SetUsage1d(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage1d sets the "usage_1d" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableUsage1d(v *float64) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetUsage1d(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage7d sets the "usage_7d" field.
|
||||||
|
func (_c *APIKeyCreate) SetUsage7d(v float64) *APIKeyCreate {
|
||||||
|
_c.mutation.SetUsage7d(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage7d sets the "usage_7d" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableUsage7d(v *float64) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetUsage7d(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow5hStart sets the "window_5h_start" field.
|
||||||
|
func (_c *APIKeyCreate) SetWindow5hStart(v time.Time) *APIKeyCreate {
|
||||||
|
_c.mutation.SetWindow5hStart(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow5hStart sets the "window_5h_start" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableWindow5hStart(v *time.Time) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetWindow5hStart(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow1dStart sets the "window_1d_start" field.
|
||||||
|
func (_c *APIKeyCreate) SetWindow1dStart(v time.Time) *APIKeyCreate {
|
||||||
|
_c.mutation.SetWindow1dStart(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow1dStart sets the "window_1d_start" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableWindow1dStart(v *time.Time) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetWindow1dStart(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow7dStart sets the "window_7d_start" field.
|
||||||
|
func (_c *APIKeyCreate) SetWindow7dStart(v time.Time) *APIKeyCreate {
|
||||||
|
_c.mutation.SetWindow7dStart(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow7dStart sets the "window_7d_start" field if the given value is not nil.
|
||||||
|
func (_c *APIKeyCreate) SetNillableWindow7dStart(v *time.Time) *APIKeyCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetWindow7dStart(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetUser sets the "user" edge to the User entity.
|
// SetUser sets the "user" edge to the User entity.
|
||||||
func (_c *APIKeyCreate) SetUser(v *User) *APIKeyCreate {
|
func (_c *APIKeyCreate) SetUser(v *User) *APIKeyCreate {
|
||||||
return _c.SetUserID(v.ID)
|
return _c.SetUserID(v.ID)
|
||||||
@@ -255,6 +395,30 @@ func (_c *APIKeyCreate) defaults() error {
|
|||||||
v := apikey.DefaultQuotaUsed
|
v := apikey.DefaultQuotaUsed
|
||||||
_c.mutation.SetQuotaUsed(v)
|
_c.mutation.SetQuotaUsed(v)
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.RateLimit5h(); !ok {
|
||||||
|
v := apikey.DefaultRateLimit5h
|
||||||
|
_c.mutation.SetRateLimit5h(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.RateLimit1d(); !ok {
|
||||||
|
v := apikey.DefaultRateLimit1d
|
||||||
|
_c.mutation.SetRateLimit1d(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.RateLimit7d(); !ok {
|
||||||
|
v := apikey.DefaultRateLimit7d
|
||||||
|
_c.mutation.SetRateLimit7d(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Usage5h(); !ok {
|
||||||
|
v := apikey.DefaultUsage5h
|
||||||
|
_c.mutation.SetUsage5h(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Usage1d(); !ok {
|
||||||
|
v := apikey.DefaultUsage1d
|
||||||
|
_c.mutation.SetUsage1d(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Usage7d(); !ok {
|
||||||
|
v := apikey.DefaultUsage7d
|
||||||
|
_c.mutation.SetUsage7d(v)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -299,6 +463,24 @@ func (_c *APIKeyCreate) check() error {
|
|||||||
if _, ok := _c.mutation.QuotaUsed(); !ok {
|
if _, ok := _c.mutation.QuotaUsed(); !ok {
|
||||||
return &ValidationError{Name: "quota_used", err: errors.New(`ent: missing required field "APIKey.quota_used"`)}
|
return &ValidationError{Name: "quota_used", err: errors.New(`ent: missing required field "APIKey.quota_used"`)}
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.RateLimit5h(); !ok {
|
||||||
|
return &ValidationError{Name: "rate_limit_5h", err: errors.New(`ent: missing required field "APIKey.rate_limit_5h"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.RateLimit1d(); !ok {
|
||||||
|
return &ValidationError{Name: "rate_limit_1d", err: errors.New(`ent: missing required field "APIKey.rate_limit_1d"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.RateLimit7d(); !ok {
|
||||||
|
return &ValidationError{Name: "rate_limit_7d", err: errors.New(`ent: missing required field "APIKey.rate_limit_7d"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Usage5h(); !ok {
|
||||||
|
return &ValidationError{Name: "usage_5h", err: errors.New(`ent: missing required field "APIKey.usage_5h"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Usage1d(); !ok {
|
||||||
|
return &ValidationError{Name: "usage_1d", err: errors.New(`ent: missing required field "APIKey.usage_1d"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Usage7d(); !ok {
|
||||||
|
return &ValidationError{Name: "usage_7d", err: errors.New(`ent: missing required field "APIKey.usage_7d"`)}
|
||||||
|
}
|
||||||
if len(_c.mutation.UserIDs()) == 0 {
|
if len(_c.mutation.UserIDs()) == 0 {
|
||||||
return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "APIKey.user"`)}
|
return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "APIKey.user"`)}
|
||||||
}
|
}
|
||||||
@@ -353,6 +535,10 @@ func (_c *APIKeyCreate) createSpec() (*APIKey, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
||||||
_node.Status = value
|
_node.Status = value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.LastUsedAt(); ok {
|
||||||
|
_spec.SetField(apikey.FieldLastUsedAt, field.TypeTime, value)
|
||||||
|
_node.LastUsedAt = &value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.IPWhitelist(); ok {
|
if value, ok := _c.mutation.IPWhitelist(); ok {
|
||||||
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
||||||
_node.IPWhitelist = value
|
_node.IPWhitelist = value
|
||||||
@@ -373,6 +559,42 @@ func (_c *APIKeyCreate) createSpec() (*APIKey, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(apikey.FieldExpiresAt, field.TypeTime, value)
|
_spec.SetField(apikey.FieldExpiresAt, field.TypeTime, value)
|
||||||
_node.ExpiresAt = &value
|
_node.ExpiresAt = &value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.RateLimit5h(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||||
|
_node.RateLimit5h = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.RateLimit1d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||||
|
_node.RateLimit1d = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.RateLimit7d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||||
|
_node.RateLimit7d = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Usage5h(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||||
|
_node.Usage5h = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Usage1d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||||
|
_node.Usage1d = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Usage7d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||||
|
_node.Usage7d = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Window5hStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow5hStart, field.TypeTime, value)
|
||||||
|
_node.Window5hStart = &value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Window1dStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow1dStart, field.TypeTime, value)
|
||||||
|
_node.Window1dStart = &value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Window7dStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow7dStart, field.TypeTime, value)
|
||||||
|
_node.Window7dStart = &value
|
||||||
|
}
|
||||||
if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
|
if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.M2O,
|
Rel: sqlgraph.M2O,
|
||||||
@@ -571,6 +793,24 @@ func (u *APIKeyUpsert) UpdateStatus() *APIKeyUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLastUsedAt sets the "last_used_at" field.
|
||||||
|
func (u *APIKeyUpsert) SetLastUsedAt(v time.Time) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldLastUsedAt, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateLastUsedAt() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldLastUsedAt)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLastUsedAt clears the value of the "last_used_at" field.
|
||||||
|
func (u *APIKeyUpsert) ClearLastUsedAt() *APIKeyUpsert {
|
||||||
|
u.SetNull(apikey.FieldLastUsedAt)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// SetIPWhitelist sets the "ip_whitelist" field.
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
func (u *APIKeyUpsert) SetIPWhitelist(v []string) *APIKeyUpsert {
|
func (u *APIKeyUpsert) SetIPWhitelist(v []string) *APIKeyUpsert {
|
||||||
u.Set(apikey.FieldIPWhitelist, v)
|
u.Set(apikey.FieldIPWhitelist, v)
|
||||||
@@ -661,6 +901,168 @@ func (u *APIKeyUpsert) ClearExpiresAt() *APIKeyUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||||
|
func (u *APIKeyUpsert) SetRateLimit5h(v float64) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldRateLimit5h, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit5h sets the "rate_limit_5h" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateRateLimit5h() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldRateLimit5h)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit5h adds v to the "rate_limit_5h" field.
|
||||||
|
func (u *APIKeyUpsert) AddRateLimit5h(v float64) *APIKeyUpsert {
|
||||||
|
u.Add(apikey.FieldRateLimit5h, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||||
|
func (u *APIKeyUpsert) SetRateLimit1d(v float64) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldRateLimit1d, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit1d sets the "rate_limit_1d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateRateLimit1d() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldRateLimit1d)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit1d adds v to the "rate_limit_1d" field.
|
||||||
|
func (u *APIKeyUpsert) AddRateLimit1d(v float64) *APIKeyUpsert {
|
||||||
|
u.Add(apikey.FieldRateLimit1d, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||||
|
func (u *APIKeyUpsert) SetRateLimit7d(v float64) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldRateLimit7d, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit7d sets the "rate_limit_7d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateRateLimit7d() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldRateLimit7d)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit7d adds v to the "rate_limit_7d" field.
|
||||||
|
func (u *APIKeyUpsert) AddRateLimit7d(v float64) *APIKeyUpsert {
|
||||||
|
u.Add(apikey.FieldRateLimit7d, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage5h sets the "usage_5h" field.
|
||||||
|
func (u *APIKeyUpsert) SetUsage5h(v float64) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldUsage5h, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage5h sets the "usage_5h" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateUsage5h() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldUsage5h)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage5h adds v to the "usage_5h" field.
|
||||||
|
func (u *APIKeyUpsert) AddUsage5h(v float64) *APIKeyUpsert {
|
||||||
|
u.Add(apikey.FieldUsage5h, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage1d sets the "usage_1d" field.
|
||||||
|
func (u *APIKeyUpsert) SetUsage1d(v float64) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldUsage1d, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage1d sets the "usage_1d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateUsage1d() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldUsage1d)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage1d adds v to the "usage_1d" field.
|
||||||
|
func (u *APIKeyUpsert) AddUsage1d(v float64) *APIKeyUpsert {
|
||||||
|
u.Add(apikey.FieldUsage1d, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage7d sets the "usage_7d" field.
|
||||||
|
func (u *APIKeyUpsert) SetUsage7d(v float64) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldUsage7d, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage7d sets the "usage_7d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateUsage7d() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldUsage7d)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage7d adds v to the "usage_7d" field.
|
||||||
|
func (u *APIKeyUpsert) AddUsage7d(v float64) *APIKeyUpsert {
|
||||||
|
u.Add(apikey.FieldUsage7d, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow5hStart sets the "window_5h_start" field.
|
||||||
|
func (u *APIKeyUpsert) SetWindow5hStart(v time.Time) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldWindow5hStart, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow5hStart sets the "window_5h_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateWindow5hStart() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldWindow5hStart)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||||
|
func (u *APIKeyUpsert) ClearWindow5hStart() *APIKeyUpsert {
|
||||||
|
u.SetNull(apikey.FieldWindow5hStart)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow1dStart sets the "window_1d_start" field.
|
||||||
|
func (u *APIKeyUpsert) SetWindow1dStart(v time.Time) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldWindow1dStart, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow1dStart sets the "window_1d_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateWindow1dStart() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldWindow1dStart)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||||
|
func (u *APIKeyUpsert) ClearWindow1dStart() *APIKeyUpsert {
|
||||||
|
u.SetNull(apikey.FieldWindow1dStart)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow7dStart sets the "window_7d_start" field.
|
||||||
|
func (u *APIKeyUpsert) SetWindow7dStart(v time.Time) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldWindow7dStart, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow7dStart sets the "window_7d_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateWindow7dStart() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldWindow7dStart)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||||
|
func (u *APIKeyUpsert) ClearWindow7dStart() *APIKeyUpsert {
|
||||||
|
u.SetNull(apikey.FieldWindow7dStart)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
// Using this option is equivalent to using:
|
// Using this option is equivalent to using:
|
||||||
//
|
//
|
||||||
@@ -818,6 +1220,27 @@ func (u *APIKeyUpsertOne) UpdateStatus() *APIKeyUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLastUsedAt sets the "last_used_at" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetLastUsedAt(v time.Time) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetLastUsedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateLastUsedAt() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateLastUsedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLastUsedAt clears the value of the "last_used_at" field.
|
||||||
|
func (u *APIKeyUpsertOne) ClearLastUsedAt() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearLastUsedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetIPWhitelist sets the "ip_whitelist" field.
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
func (u *APIKeyUpsertOne) SetIPWhitelist(v []string) *APIKeyUpsertOne {
|
func (u *APIKeyUpsertOne) SetIPWhitelist(v []string) *APIKeyUpsertOne {
|
||||||
return u.Update(func(s *APIKeyUpsert) {
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
@@ -923,6 +1346,195 @@ func (u *APIKeyUpsertOne) ClearExpiresAt() *APIKeyUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetRateLimit5h(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetRateLimit5h(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit5h adds v to the "rate_limit_5h" field.
|
||||||
|
func (u *APIKeyUpsertOne) AddRateLimit5h(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddRateLimit5h(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit5h sets the "rate_limit_5h" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateRateLimit5h() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateRateLimit5h()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetRateLimit1d(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetRateLimit1d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit1d adds v to the "rate_limit_1d" field.
|
||||||
|
func (u *APIKeyUpsertOne) AddRateLimit1d(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddRateLimit1d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit1d sets the "rate_limit_1d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateRateLimit1d() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateRateLimit1d()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetRateLimit7d(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetRateLimit7d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit7d adds v to the "rate_limit_7d" field.
|
||||||
|
func (u *APIKeyUpsertOne) AddRateLimit7d(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddRateLimit7d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit7d sets the "rate_limit_7d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateRateLimit7d() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateRateLimit7d()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage5h sets the "usage_5h" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetUsage5h(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetUsage5h(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage5h adds v to the "usage_5h" field.
|
||||||
|
func (u *APIKeyUpsertOne) AddUsage5h(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddUsage5h(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage5h sets the "usage_5h" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateUsage5h() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateUsage5h()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage1d sets the "usage_1d" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetUsage1d(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetUsage1d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage1d adds v to the "usage_1d" field.
|
||||||
|
func (u *APIKeyUpsertOne) AddUsage1d(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddUsage1d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage1d sets the "usage_1d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateUsage1d() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateUsage1d()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage7d sets the "usage_7d" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetUsage7d(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetUsage7d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage7d adds v to the "usage_7d" field.
|
||||||
|
func (u *APIKeyUpsertOne) AddUsage7d(v float64) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddUsage7d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage7d sets the "usage_7d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateUsage7d() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateUsage7d()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow5hStart sets the "window_5h_start" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetWindow5hStart(v time.Time) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetWindow5hStart(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow5hStart sets the "window_5h_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateWindow5hStart() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateWindow5hStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||||
|
func (u *APIKeyUpsertOne) ClearWindow5hStart() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearWindow5hStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow1dStart sets the "window_1d_start" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetWindow1dStart(v time.Time) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetWindow1dStart(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow1dStart sets the "window_1d_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateWindow1dStart() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateWindow1dStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||||
|
func (u *APIKeyUpsertOne) ClearWindow1dStart() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearWindow1dStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow7dStart sets the "window_7d_start" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetWindow7dStart(v time.Time) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetWindow7dStart(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow7dStart sets the "window_7d_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateWindow7dStart() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateWindow7dStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||||
|
func (u *APIKeyUpsertOne) ClearWindow7dStart() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearWindow7dStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *APIKeyUpsertOne) Exec(ctx context.Context) error {
|
func (u *APIKeyUpsertOne) Exec(ctx context.Context) error {
|
||||||
if len(u.create.conflict) == 0 {
|
if len(u.create.conflict) == 0 {
|
||||||
@@ -1246,6 +1858,27 @@ func (u *APIKeyUpsertBulk) UpdateStatus() *APIKeyUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLastUsedAt sets the "last_used_at" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetLastUsedAt(v time.Time) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetLastUsedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateLastUsedAt() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateLastUsedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLastUsedAt clears the value of the "last_used_at" field.
|
||||||
|
func (u *APIKeyUpsertBulk) ClearLastUsedAt() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearLastUsedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetIPWhitelist sets the "ip_whitelist" field.
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
func (u *APIKeyUpsertBulk) SetIPWhitelist(v []string) *APIKeyUpsertBulk {
|
func (u *APIKeyUpsertBulk) SetIPWhitelist(v []string) *APIKeyUpsertBulk {
|
||||||
return u.Update(func(s *APIKeyUpsert) {
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
@@ -1351,6 +1984,195 @@ func (u *APIKeyUpsertBulk) ClearExpiresAt() *APIKeyUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetRateLimit5h(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetRateLimit5h(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit5h adds v to the "rate_limit_5h" field.
|
||||||
|
func (u *APIKeyUpsertBulk) AddRateLimit5h(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddRateLimit5h(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit5h sets the "rate_limit_5h" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateRateLimit5h() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateRateLimit5h()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetRateLimit1d(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetRateLimit1d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit1d adds v to the "rate_limit_1d" field.
|
||||||
|
func (u *APIKeyUpsertBulk) AddRateLimit1d(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddRateLimit1d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit1d sets the "rate_limit_1d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateRateLimit1d() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateRateLimit1d()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetRateLimit7d(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetRateLimit7d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit7d adds v to the "rate_limit_7d" field.
|
||||||
|
func (u *APIKeyUpsertBulk) AddRateLimit7d(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddRateLimit7d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRateLimit7d sets the "rate_limit_7d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateRateLimit7d() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateRateLimit7d()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage5h sets the "usage_5h" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetUsage5h(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetUsage5h(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage5h adds v to the "usage_5h" field.
|
||||||
|
func (u *APIKeyUpsertBulk) AddUsage5h(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddUsage5h(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage5h sets the "usage_5h" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateUsage5h() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateUsage5h()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage1d sets the "usage_1d" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetUsage1d(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetUsage1d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage1d adds v to the "usage_1d" field.
|
||||||
|
func (u *APIKeyUpsertBulk) AddUsage1d(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddUsage1d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage1d sets the "usage_1d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateUsage1d() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateUsage1d()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage7d sets the "usage_7d" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetUsage7d(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetUsage7d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage7d adds v to the "usage_7d" field.
|
||||||
|
func (u *APIKeyUpsertBulk) AddUsage7d(v float64) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.AddUsage7d(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsage7d sets the "usage_7d" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateUsage7d() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateUsage7d()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow5hStart sets the "window_5h_start" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetWindow5hStart(v time.Time) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetWindow5hStart(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow5hStart sets the "window_5h_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateWindow5hStart() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateWindow5hStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||||
|
func (u *APIKeyUpsertBulk) ClearWindow5hStart() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearWindow5hStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow1dStart sets the "window_1d_start" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetWindow1dStart(v time.Time) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetWindow1dStart(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow1dStart sets the "window_1d_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateWindow1dStart() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateWindow1dStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||||
|
func (u *APIKeyUpsertBulk) ClearWindow1dStart() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearWindow1dStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow7dStart sets the "window_7d_start" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetWindow7dStart(v time.Time) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetWindow7dStart(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateWindow7dStart sets the "window_7d_start" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateWindow7dStart() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateWindow7dStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||||
|
func (u *APIKeyUpsertBulk) ClearWindow7dStart() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearWindow7dStart()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *APIKeyUpsertBulk) Exec(ctx context.Context) error {
|
func (u *APIKeyUpsertBulk) Exec(ctx context.Context) error {
|
||||||
if u.create.err != nil {
|
if u.create.err != nil {
|
||||||
|
|||||||
@@ -134,6 +134,26 @@ func (_u *APIKeyUpdate) SetNillableStatus(v *string) *APIKeyUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLastUsedAt sets the "last_used_at" field.
|
||||||
|
func (_u *APIKeyUpdate) SetLastUsedAt(v time.Time) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetLastUsedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableLastUsedAt(v *time.Time) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetLastUsedAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLastUsedAt clears the value of the "last_used_at" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearLastUsedAt() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearLastUsedAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetIPWhitelist sets the "ip_whitelist" field.
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
func (_u *APIKeyUpdate) SetIPWhitelist(v []string) *APIKeyUpdate {
|
func (_u *APIKeyUpdate) SetIPWhitelist(v []string) *APIKeyUpdate {
|
||||||
_u.mutation.SetIPWhitelist(v)
|
_u.mutation.SetIPWhitelist(v)
|
||||||
@@ -232,6 +252,192 @@ func (_u *APIKeyUpdate) ClearExpiresAt() *APIKeyUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||||
|
func (_u *APIKeyUpdate) SetRateLimit5h(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.ResetRateLimit5h()
|
||||||
|
_u.mutation.SetRateLimit5h(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit5h sets the "rate_limit_5h" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableRateLimit5h(v *float64) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetRateLimit5h(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit5h adds value to the "rate_limit_5h" field.
|
||||||
|
func (_u *APIKeyUpdate) AddRateLimit5h(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.AddRateLimit5h(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||||
|
func (_u *APIKeyUpdate) SetRateLimit1d(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.ResetRateLimit1d()
|
||||||
|
_u.mutation.SetRateLimit1d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit1d sets the "rate_limit_1d" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableRateLimit1d(v *float64) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetRateLimit1d(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit1d adds value to the "rate_limit_1d" field.
|
||||||
|
func (_u *APIKeyUpdate) AddRateLimit1d(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.AddRateLimit1d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||||
|
func (_u *APIKeyUpdate) SetRateLimit7d(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.ResetRateLimit7d()
|
||||||
|
_u.mutation.SetRateLimit7d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit7d sets the "rate_limit_7d" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableRateLimit7d(v *float64) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetRateLimit7d(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit7d adds value to the "rate_limit_7d" field.
|
||||||
|
func (_u *APIKeyUpdate) AddRateLimit7d(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.AddRateLimit7d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage5h sets the "usage_5h" field.
|
||||||
|
func (_u *APIKeyUpdate) SetUsage5h(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.ResetUsage5h()
|
||||||
|
_u.mutation.SetUsage5h(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage5h sets the "usage_5h" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableUsage5h(v *float64) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsage5h(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage5h adds value to the "usage_5h" field.
|
||||||
|
func (_u *APIKeyUpdate) AddUsage5h(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.AddUsage5h(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage1d sets the "usage_1d" field.
|
||||||
|
func (_u *APIKeyUpdate) SetUsage1d(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.ResetUsage1d()
|
||||||
|
_u.mutation.SetUsage1d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage1d sets the "usage_1d" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableUsage1d(v *float64) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsage1d(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage1d adds value to the "usage_1d" field.
|
||||||
|
func (_u *APIKeyUpdate) AddUsage1d(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.AddUsage1d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage7d sets the "usage_7d" field.
|
||||||
|
func (_u *APIKeyUpdate) SetUsage7d(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.ResetUsage7d()
|
||||||
|
_u.mutation.SetUsage7d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage7d sets the "usage_7d" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableUsage7d(v *float64) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsage7d(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage7d adds value to the "usage_7d" field.
|
||||||
|
func (_u *APIKeyUpdate) AddUsage7d(v float64) *APIKeyUpdate {
|
||||||
|
_u.mutation.AddUsage7d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow5hStart sets the "window_5h_start" field.
|
||||||
|
func (_u *APIKeyUpdate) SetWindow5hStart(v time.Time) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetWindow5hStart(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow5hStart sets the "window_5h_start" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableWindow5hStart(v *time.Time) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetWindow5hStart(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearWindow5hStart() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearWindow5hStart()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow1dStart sets the "window_1d_start" field.
|
||||||
|
func (_u *APIKeyUpdate) SetWindow1dStart(v time.Time) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetWindow1dStart(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow1dStart sets the "window_1d_start" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableWindow1dStart(v *time.Time) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetWindow1dStart(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearWindow1dStart() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearWindow1dStart()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow7dStart sets the "window_7d_start" field.
|
||||||
|
func (_u *APIKeyUpdate) SetWindow7dStart(v time.Time) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetWindow7dStart(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow7dStart sets the "window_7d_start" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableWindow7dStart(v *time.Time) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetWindow7dStart(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearWindow7dStart() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearWindow7dStart()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetUser sets the "user" edge to the User entity.
|
// SetUser sets the "user" edge to the User entity.
|
||||||
func (_u *APIKeyUpdate) SetUser(v *User) *APIKeyUpdate {
|
func (_u *APIKeyUpdate) SetUser(v *User) *APIKeyUpdate {
|
||||||
return _u.SetUserID(v.ID)
|
return _u.SetUserID(v.ID)
|
||||||
@@ -390,6 +596,12 @@ func (_u *APIKeyUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if value, ok := _u.mutation.Status(); ok {
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.LastUsedAt(); ok {
|
||||||
|
_spec.SetField(apikey.FieldLastUsedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.LastUsedAtCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldLastUsedAt, field.TypeTime)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.IPWhitelist(); ok {
|
if value, ok := _u.mutation.IPWhitelist(); ok {
|
||||||
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
||||||
}
|
}
|
||||||
@@ -430,6 +642,60 @@ func (_u *APIKeyUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if _u.mutation.ExpiresAtCleared() {
|
if _u.mutation.ExpiresAtCleared() {
|
||||||
_spec.ClearField(apikey.FieldExpiresAt, field.TypeTime)
|
_spec.ClearField(apikey.FieldExpiresAt, field.TypeTime)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.RateLimit5h(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedRateLimit5h(); ok {
|
||||||
|
_spec.AddField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.RateLimit1d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedRateLimit1d(); ok {
|
||||||
|
_spec.AddField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.RateLimit7d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedRateLimit7d(); ok {
|
||||||
|
_spec.AddField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Usage5h(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedUsage5h(); ok {
|
||||||
|
_spec.AddField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Usage1d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedUsage1d(); ok {
|
||||||
|
_spec.AddField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Usage7d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedUsage7d(); ok {
|
||||||
|
_spec.AddField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Window5hStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow5hStart, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.Window5hStartCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldWindow5hStart, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Window1dStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow1dStart, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.Window1dStartCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldWindow1dStart, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Window7dStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow7dStart, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.Window7dStartCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldWindow7dStart, field.TypeTime)
|
||||||
|
}
|
||||||
if _u.mutation.UserCleared() {
|
if _u.mutation.UserCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.M2O,
|
Rel: sqlgraph.M2O,
|
||||||
@@ -655,6 +921,26 @@ func (_u *APIKeyUpdateOne) SetNillableStatus(v *string) *APIKeyUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetLastUsedAt sets the "last_used_at" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetLastUsedAt(v time.Time) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetLastUsedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableLastUsedAt(v *time.Time) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetLastUsedAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLastUsedAt clears the value of the "last_used_at" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearLastUsedAt() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearLastUsedAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetIPWhitelist sets the "ip_whitelist" field.
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
func (_u *APIKeyUpdateOne) SetIPWhitelist(v []string) *APIKeyUpdateOne {
|
func (_u *APIKeyUpdateOne) SetIPWhitelist(v []string) *APIKeyUpdateOne {
|
||||||
_u.mutation.SetIPWhitelist(v)
|
_u.mutation.SetIPWhitelist(v)
|
||||||
@@ -753,6 +1039,192 @@ func (_u *APIKeyUpdateOne) ClearExpiresAt() *APIKeyUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetRateLimit5h(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ResetRateLimit5h()
|
||||||
|
_u.mutation.SetRateLimit5h(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit5h sets the "rate_limit_5h" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableRateLimit5h(v *float64) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetRateLimit5h(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit5h adds value to the "rate_limit_5h" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AddRateLimit5h(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AddRateLimit5h(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetRateLimit1d(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ResetRateLimit1d()
|
||||||
|
_u.mutation.SetRateLimit1d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit1d sets the "rate_limit_1d" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableRateLimit1d(v *float64) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetRateLimit1d(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit1d adds value to the "rate_limit_1d" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AddRateLimit1d(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AddRateLimit1d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetRateLimit7d(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ResetRateLimit7d()
|
||||||
|
_u.mutation.SetRateLimit7d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRateLimit7d sets the "rate_limit_7d" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableRateLimit7d(v *float64) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetRateLimit7d(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRateLimit7d adds value to the "rate_limit_7d" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AddRateLimit7d(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AddRateLimit7d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage5h sets the "usage_5h" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetUsage5h(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ResetUsage5h()
|
||||||
|
_u.mutation.SetUsage5h(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage5h sets the "usage_5h" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableUsage5h(v *float64) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsage5h(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage5h adds value to the "usage_5h" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AddUsage5h(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AddUsage5h(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage1d sets the "usage_1d" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetUsage1d(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ResetUsage1d()
|
||||||
|
_u.mutation.SetUsage1d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage1d sets the "usage_1d" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableUsage1d(v *float64) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsage1d(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage1d adds value to the "usage_1d" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AddUsage1d(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AddUsage1d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsage7d sets the "usage_7d" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetUsage7d(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ResetUsage7d()
|
||||||
|
_u.mutation.SetUsage7d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsage7d sets the "usage_7d" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableUsage7d(v *float64) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsage7d(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsage7d adds value to the "usage_7d" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AddUsage7d(v float64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AddUsage7d(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow5hStart sets the "window_5h_start" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetWindow5hStart(v time.Time) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetWindow5hStart(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow5hStart sets the "window_5h_start" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableWindow5hStart(v *time.Time) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetWindow5hStart(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearWindow5hStart() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearWindow5hStart()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow1dStart sets the "window_1d_start" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetWindow1dStart(v time.Time) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetWindow1dStart(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow1dStart sets the "window_1d_start" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableWindow1dStart(v *time.Time) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetWindow1dStart(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearWindow1dStart() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearWindow1dStart()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWindow7dStart sets the "window_7d_start" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetWindow7dStart(v time.Time) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetWindow7dStart(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableWindow7dStart sets the "window_7d_start" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableWindow7dStart(v *time.Time) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetWindow7dStart(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearWindow7dStart() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearWindow7dStart()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetUser sets the "user" edge to the User entity.
|
// SetUser sets the "user" edge to the User entity.
|
||||||
func (_u *APIKeyUpdateOne) SetUser(v *User) *APIKeyUpdateOne {
|
func (_u *APIKeyUpdateOne) SetUser(v *User) *APIKeyUpdateOne {
|
||||||
return _u.SetUserID(v.ID)
|
return _u.SetUserID(v.ID)
|
||||||
@@ -941,6 +1413,12 @@ func (_u *APIKeyUpdateOne) sqlSave(ctx context.Context) (_node *APIKey, err erro
|
|||||||
if value, ok := _u.mutation.Status(); ok {
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.LastUsedAt(); ok {
|
||||||
|
_spec.SetField(apikey.FieldLastUsedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.LastUsedAtCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldLastUsedAt, field.TypeTime)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.IPWhitelist(); ok {
|
if value, ok := _u.mutation.IPWhitelist(); ok {
|
||||||
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
||||||
}
|
}
|
||||||
@@ -981,6 +1459,60 @@ func (_u *APIKeyUpdateOne) sqlSave(ctx context.Context) (_node *APIKey, err erro
|
|||||||
if _u.mutation.ExpiresAtCleared() {
|
if _u.mutation.ExpiresAtCleared() {
|
||||||
_spec.ClearField(apikey.FieldExpiresAt, field.TypeTime)
|
_spec.ClearField(apikey.FieldExpiresAt, field.TypeTime)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.RateLimit5h(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedRateLimit5h(); ok {
|
||||||
|
_spec.AddField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.RateLimit1d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedRateLimit1d(); ok {
|
||||||
|
_spec.AddField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.RateLimit7d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedRateLimit7d(); ok {
|
||||||
|
_spec.AddField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Usage5h(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedUsage5h(); ok {
|
||||||
|
_spec.AddField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Usage1d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedUsage1d(); ok {
|
||||||
|
_spec.AddField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Usage7d(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedUsage7d(); ok {
|
||||||
|
_spec.AddField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Window5hStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow5hStart, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.Window5hStartCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldWindow5hStart, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Window1dStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow1dStart, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.Window1dStartCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldWindow1dStart, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Window7dStart(); ok {
|
||||||
|
_spec.SetField(apikey.FieldWindow7dStart, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.Window7dStartCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldWindow7dStart, field.TypeTime)
|
||||||
|
}
|
||||||
if _u.mutation.UserCleared() {
|
if _u.mutation.UserCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.M2O,
|
Rel: sqlgraph.M2O,
|
||||||
|
|||||||
@@ -22,10 +22,12 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
@@ -57,6 +59,8 @@ type Client struct {
|
|||||||
ErrorPassthroughRule *ErrorPassthroughRuleClient
|
ErrorPassthroughRule *ErrorPassthroughRuleClient
|
||||||
// Group is the client for interacting with the Group builders.
|
// Group is the client for interacting with the Group builders.
|
||||||
Group *GroupClient
|
Group *GroupClient
|
||||||
|
// IdempotencyRecord is the client for interacting with the IdempotencyRecord builders.
|
||||||
|
IdempotencyRecord *IdempotencyRecordClient
|
||||||
// PromoCode is the client for interacting with the PromoCode builders.
|
// PromoCode is the client for interacting with the PromoCode builders.
|
||||||
PromoCode *PromoCodeClient
|
PromoCode *PromoCodeClient
|
||||||
// PromoCodeUsage is the client for interacting with the PromoCodeUsage builders.
|
// PromoCodeUsage is the client for interacting with the PromoCodeUsage builders.
|
||||||
@@ -65,6 +69,8 @@ type Client struct {
|
|||||||
Proxy *ProxyClient
|
Proxy *ProxyClient
|
||||||
// RedeemCode is the client for interacting with the RedeemCode builders.
|
// RedeemCode is the client for interacting with the RedeemCode builders.
|
||||||
RedeemCode *RedeemCodeClient
|
RedeemCode *RedeemCodeClient
|
||||||
|
// SecuritySecret is the client for interacting with the SecuritySecret builders.
|
||||||
|
SecuritySecret *SecuritySecretClient
|
||||||
// Setting is the client for interacting with the Setting builders.
|
// Setting is the client for interacting with the Setting builders.
|
||||||
Setting *SettingClient
|
Setting *SettingClient
|
||||||
// UsageCleanupTask is the client for interacting with the UsageCleanupTask builders.
|
// UsageCleanupTask is the client for interacting with the UsageCleanupTask builders.
|
||||||
@@ -99,10 +105,12 @@ func (c *Client) init() {
|
|||||||
c.AnnouncementRead = NewAnnouncementReadClient(c.config)
|
c.AnnouncementRead = NewAnnouncementReadClient(c.config)
|
||||||
c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config)
|
c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config)
|
||||||
c.Group = NewGroupClient(c.config)
|
c.Group = NewGroupClient(c.config)
|
||||||
|
c.IdempotencyRecord = NewIdempotencyRecordClient(c.config)
|
||||||
c.PromoCode = NewPromoCodeClient(c.config)
|
c.PromoCode = NewPromoCodeClient(c.config)
|
||||||
c.PromoCodeUsage = NewPromoCodeUsageClient(c.config)
|
c.PromoCodeUsage = NewPromoCodeUsageClient(c.config)
|
||||||
c.Proxy = NewProxyClient(c.config)
|
c.Proxy = NewProxyClient(c.config)
|
||||||
c.RedeemCode = NewRedeemCodeClient(c.config)
|
c.RedeemCode = NewRedeemCodeClient(c.config)
|
||||||
|
c.SecuritySecret = NewSecuritySecretClient(c.config)
|
||||||
c.Setting = NewSettingClient(c.config)
|
c.Setting = NewSettingClient(c.config)
|
||||||
c.UsageCleanupTask = NewUsageCleanupTaskClient(c.config)
|
c.UsageCleanupTask = NewUsageCleanupTaskClient(c.config)
|
||||||
c.UsageLog = NewUsageLogClient(c.config)
|
c.UsageLog = NewUsageLogClient(c.config)
|
||||||
@@ -210,10 +218,12 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
|||||||
AnnouncementRead: NewAnnouncementReadClient(cfg),
|
AnnouncementRead: NewAnnouncementReadClient(cfg),
|
||||||
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
|
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
|
||||||
Group: NewGroupClient(cfg),
|
Group: NewGroupClient(cfg),
|
||||||
|
IdempotencyRecord: NewIdempotencyRecordClient(cfg),
|
||||||
PromoCode: NewPromoCodeClient(cfg),
|
PromoCode: NewPromoCodeClient(cfg),
|
||||||
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
||||||
Proxy: NewProxyClient(cfg),
|
Proxy: NewProxyClient(cfg),
|
||||||
RedeemCode: NewRedeemCodeClient(cfg),
|
RedeemCode: NewRedeemCodeClient(cfg),
|
||||||
|
SecuritySecret: NewSecuritySecretClient(cfg),
|
||||||
Setting: NewSettingClient(cfg),
|
Setting: NewSettingClient(cfg),
|
||||||
UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
|
UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
|
||||||
UsageLog: NewUsageLogClient(cfg),
|
UsageLog: NewUsageLogClient(cfg),
|
||||||
@@ -248,10 +258,12 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
|
|||||||
AnnouncementRead: NewAnnouncementReadClient(cfg),
|
AnnouncementRead: NewAnnouncementReadClient(cfg),
|
||||||
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
|
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
|
||||||
Group: NewGroupClient(cfg),
|
Group: NewGroupClient(cfg),
|
||||||
|
IdempotencyRecord: NewIdempotencyRecordClient(cfg),
|
||||||
PromoCode: NewPromoCodeClient(cfg),
|
PromoCode: NewPromoCodeClient(cfg),
|
||||||
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
||||||
Proxy: NewProxyClient(cfg),
|
Proxy: NewProxyClient(cfg),
|
||||||
RedeemCode: NewRedeemCodeClient(cfg),
|
RedeemCode: NewRedeemCodeClient(cfg),
|
||||||
|
SecuritySecret: NewSecuritySecretClient(cfg),
|
||||||
Setting: NewSettingClient(cfg),
|
Setting: NewSettingClient(cfg),
|
||||||
UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
|
UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
|
||||||
UsageLog: NewUsageLogClient(cfg),
|
UsageLog: NewUsageLogClient(cfg),
|
||||||
@@ -290,10 +302,10 @@ func (c *Client) Close() error {
|
|||||||
func (c *Client) Use(hooks ...Hook) {
|
func (c *Client) Use(hooks ...Hook) {
|
||||||
for _, n := range []interface{ Use(...Hook) }{
|
for _, n := range []interface{ Use(...Hook) }{
|
||||||
c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead,
|
c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead,
|
||||||
c.ErrorPassthroughRule, c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy,
|
c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, c.PromoCode,
|
||||||
c.RedeemCode, c.Setting, c.UsageCleanupTask, c.UsageLog, c.User,
|
c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting,
|
||||||
c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue,
|
c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup,
|
||||||
c.UserSubscription,
|
c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription,
|
||||||
} {
|
} {
|
||||||
n.Use(hooks...)
|
n.Use(hooks...)
|
||||||
}
|
}
|
||||||
@@ -304,10 +316,10 @@ func (c *Client) Use(hooks ...Hook) {
|
|||||||
func (c *Client) Intercept(interceptors ...Interceptor) {
|
func (c *Client) Intercept(interceptors ...Interceptor) {
|
||||||
for _, n := range []interface{ Intercept(...Interceptor) }{
|
for _, n := range []interface{ Intercept(...Interceptor) }{
|
||||||
c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead,
|
c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead,
|
||||||
c.ErrorPassthroughRule, c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy,
|
c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, c.PromoCode,
|
||||||
c.RedeemCode, c.Setting, c.UsageCleanupTask, c.UsageLog, c.User,
|
c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting,
|
||||||
c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue,
|
c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup,
|
||||||
c.UserSubscription,
|
c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription,
|
||||||
} {
|
} {
|
||||||
n.Intercept(interceptors...)
|
n.Intercept(interceptors...)
|
||||||
}
|
}
|
||||||
@@ -330,6 +342,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
|
|||||||
return c.ErrorPassthroughRule.mutate(ctx, m)
|
return c.ErrorPassthroughRule.mutate(ctx, m)
|
||||||
case *GroupMutation:
|
case *GroupMutation:
|
||||||
return c.Group.mutate(ctx, m)
|
return c.Group.mutate(ctx, m)
|
||||||
|
case *IdempotencyRecordMutation:
|
||||||
|
return c.IdempotencyRecord.mutate(ctx, m)
|
||||||
case *PromoCodeMutation:
|
case *PromoCodeMutation:
|
||||||
return c.PromoCode.mutate(ctx, m)
|
return c.PromoCode.mutate(ctx, m)
|
||||||
case *PromoCodeUsageMutation:
|
case *PromoCodeUsageMutation:
|
||||||
@@ -338,6 +352,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
|
|||||||
return c.Proxy.mutate(ctx, m)
|
return c.Proxy.mutate(ctx, m)
|
||||||
case *RedeemCodeMutation:
|
case *RedeemCodeMutation:
|
||||||
return c.RedeemCode.mutate(ctx, m)
|
return c.RedeemCode.mutate(ctx, m)
|
||||||
|
case *SecuritySecretMutation:
|
||||||
|
return c.SecuritySecret.mutate(ctx, m)
|
||||||
case *SettingMutation:
|
case *SettingMutation:
|
||||||
return c.Setting.mutate(ctx, m)
|
return c.Setting.mutate(ctx, m)
|
||||||
case *UsageCleanupTaskMutation:
|
case *UsageCleanupTaskMutation:
|
||||||
@@ -1567,6 +1583,139 @@ func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, erro
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IdempotencyRecordClient is a client for the IdempotencyRecord schema.
|
||||||
|
type IdempotencyRecordClient struct {
|
||||||
|
config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIdempotencyRecordClient returns a client for the IdempotencyRecord from the given config.
|
||||||
|
func NewIdempotencyRecordClient(c config) *IdempotencyRecordClient {
|
||||||
|
return &IdempotencyRecordClient{config: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use adds a list of mutation hooks to the hooks stack.
|
||||||
|
// A call to `Use(f, g, h)` equals to `idempotencyrecord.Hooks(f(g(h())))`.
|
||||||
|
func (c *IdempotencyRecordClient) Use(hooks ...Hook) {
|
||||||
|
c.hooks.IdempotencyRecord = append(c.hooks.IdempotencyRecord, hooks...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||||
|
// A call to `Intercept(f, g, h)` equals to `idempotencyrecord.Intercept(f(g(h())))`.
|
||||||
|
func (c *IdempotencyRecordClient) Intercept(interceptors ...Interceptor) {
|
||||||
|
c.inters.IdempotencyRecord = append(c.inters.IdempotencyRecord, interceptors...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create returns a builder for creating a IdempotencyRecord entity.
|
||||||
|
func (c *IdempotencyRecordClient) Create() *IdempotencyRecordCreate {
|
||||||
|
mutation := newIdempotencyRecordMutation(c.config, OpCreate)
|
||||||
|
return &IdempotencyRecordCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBulk returns a builder for creating a bulk of IdempotencyRecord entities.
|
||||||
|
func (c *IdempotencyRecordClient) CreateBulk(builders ...*IdempotencyRecordCreate) *IdempotencyRecordCreateBulk {
|
||||||
|
return &IdempotencyRecordCreateBulk{config: c.config, builders: builders}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||||
|
// a builder and applies setFunc on it.
|
||||||
|
func (c *IdempotencyRecordClient) MapCreateBulk(slice any, setFunc func(*IdempotencyRecordCreate, int)) *IdempotencyRecordCreateBulk {
|
||||||
|
rv := reflect.ValueOf(slice)
|
||||||
|
if rv.Kind() != reflect.Slice {
|
||||||
|
return &IdempotencyRecordCreateBulk{err: fmt.Errorf("calling to IdempotencyRecordClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||||
|
}
|
||||||
|
builders := make([]*IdempotencyRecordCreate, rv.Len())
|
||||||
|
for i := 0; i < rv.Len(); i++ {
|
||||||
|
builders[i] = c.Create()
|
||||||
|
setFunc(builders[i], i)
|
||||||
|
}
|
||||||
|
return &IdempotencyRecordCreateBulk{config: c.config, builders: builders}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns an update builder for IdempotencyRecord.
|
||||||
|
func (c *IdempotencyRecordClient) Update() *IdempotencyRecordUpdate {
|
||||||
|
mutation := newIdempotencyRecordMutation(c.config, OpUpdate)
|
||||||
|
return &IdempotencyRecordUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOne returns an update builder for the given entity.
|
||||||
|
func (c *IdempotencyRecordClient) UpdateOne(_m *IdempotencyRecord) *IdempotencyRecordUpdateOne {
|
||||||
|
mutation := newIdempotencyRecordMutation(c.config, OpUpdateOne, withIdempotencyRecord(_m))
|
||||||
|
return &IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOneID returns an update builder for the given id.
|
||||||
|
func (c *IdempotencyRecordClient) UpdateOneID(id int64) *IdempotencyRecordUpdateOne {
|
||||||
|
mutation := newIdempotencyRecordMutation(c.config, OpUpdateOne, withIdempotencyRecordID(id))
|
||||||
|
return &IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete returns a delete builder for IdempotencyRecord.
|
||||||
|
func (c *IdempotencyRecordClient) Delete() *IdempotencyRecordDelete {
|
||||||
|
mutation := newIdempotencyRecordMutation(c.config, OpDelete)
|
||||||
|
return &IdempotencyRecordDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOne returns a builder for deleting the given entity.
|
||||||
|
func (c *IdempotencyRecordClient) DeleteOne(_m *IdempotencyRecord) *IdempotencyRecordDeleteOne {
|
||||||
|
return c.DeleteOneID(_m.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||||
|
func (c *IdempotencyRecordClient) DeleteOneID(id int64) *IdempotencyRecordDeleteOne {
|
||||||
|
builder := c.Delete().Where(idempotencyrecord.ID(id))
|
||||||
|
builder.mutation.id = &id
|
||||||
|
builder.mutation.op = OpDeleteOne
|
||||||
|
return &IdempotencyRecordDeleteOne{builder}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query returns a query builder for IdempotencyRecord.
|
||||||
|
func (c *IdempotencyRecordClient) Query() *IdempotencyRecordQuery {
|
||||||
|
return &IdempotencyRecordQuery{
|
||||||
|
config: c.config,
|
||||||
|
ctx: &QueryContext{Type: TypeIdempotencyRecord},
|
||||||
|
inters: c.Interceptors(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a IdempotencyRecord entity by its id.
|
||||||
|
func (c *IdempotencyRecordClient) Get(ctx context.Context, id int64) (*IdempotencyRecord, error) {
|
||||||
|
return c.Query().Where(idempotencyrecord.ID(id)).Only(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetX is like Get, but panics if an error occurs.
|
||||||
|
func (c *IdempotencyRecordClient) GetX(ctx context.Context, id int64) *IdempotencyRecord {
|
||||||
|
obj, err := c.Get(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hooks returns the client hooks.
|
||||||
|
func (c *IdempotencyRecordClient) Hooks() []Hook {
|
||||||
|
return c.hooks.IdempotencyRecord
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interceptors returns the client interceptors.
|
||||||
|
func (c *IdempotencyRecordClient) Interceptors() []Interceptor {
|
||||||
|
return c.inters.IdempotencyRecord
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *IdempotencyRecordClient) mutate(ctx context.Context, m *IdempotencyRecordMutation) (Value, error) {
|
||||||
|
switch m.Op() {
|
||||||
|
case OpCreate:
|
||||||
|
return (&IdempotencyRecordCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpUpdate:
|
||||||
|
return (&IdempotencyRecordUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpUpdateOne:
|
||||||
|
return (&IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpDelete, OpDeleteOne:
|
||||||
|
return (&IdempotencyRecordDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("ent: unknown IdempotencyRecord mutation op: %q", m.Op())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// PromoCodeClient is a client for the PromoCode schema.
|
// PromoCodeClient is a client for the PromoCode schema.
|
||||||
type PromoCodeClient struct {
|
type PromoCodeClient struct {
|
||||||
config
|
config
|
||||||
@@ -2197,6 +2346,139 @@ func (c *RedeemCodeClient) mutate(ctx context.Context, m *RedeemCodeMutation) (V
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SecuritySecretClient is a client for the SecuritySecret schema.
|
||||||
|
type SecuritySecretClient struct {
|
||||||
|
config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSecuritySecretClient returns a client for the SecuritySecret from the given config.
|
||||||
|
func NewSecuritySecretClient(c config) *SecuritySecretClient {
|
||||||
|
return &SecuritySecretClient{config: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use adds a list of mutation hooks to the hooks stack.
|
||||||
|
// A call to `Use(f, g, h)` equals to `securitysecret.Hooks(f(g(h())))`.
|
||||||
|
func (c *SecuritySecretClient) Use(hooks ...Hook) {
|
||||||
|
c.hooks.SecuritySecret = append(c.hooks.SecuritySecret, hooks...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||||
|
// A call to `Intercept(f, g, h)` equals to `securitysecret.Intercept(f(g(h())))`.
|
||||||
|
func (c *SecuritySecretClient) Intercept(interceptors ...Interceptor) {
|
||||||
|
c.inters.SecuritySecret = append(c.inters.SecuritySecret, interceptors...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create returns a builder for creating a SecuritySecret entity.
|
||||||
|
func (c *SecuritySecretClient) Create() *SecuritySecretCreate {
|
||||||
|
mutation := newSecuritySecretMutation(c.config, OpCreate)
|
||||||
|
return &SecuritySecretCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBulk returns a builder for creating a bulk of SecuritySecret entities.
|
||||||
|
func (c *SecuritySecretClient) CreateBulk(builders ...*SecuritySecretCreate) *SecuritySecretCreateBulk {
|
||||||
|
return &SecuritySecretCreateBulk{config: c.config, builders: builders}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||||
|
// a builder and applies setFunc on it.
|
||||||
|
func (c *SecuritySecretClient) MapCreateBulk(slice any, setFunc func(*SecuritySecretCreate, int)) *SecuritySecretCreateBulk {
|
||||||
|
rv := reflect.ValueOf(slice)
|
||||||
|
if rv.Kind() != reflect.Slice {
|
||||||
|
return &SecuritySecretCreateBulk{err: fmt.Errorf("calling to SecuritySecretClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||||
|
}
|
||||||
|
builders := make([]*SecuritySecretCreate, rv.Len())
|
||||||
|
for i := 0; i < rv.Len(); i++ {
|
||||||
|
builders[i] = c.Create()
|
||||||
|
setFunc(builders[i], i)
|
||||||
|
}
|
||||||
|
return &SecuritySecretCreateBulk{config: c.config, builders: builders}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns an update builder for SecuritySecret.
|
||||||
|
func (c *SecuritySecretClient) Update() *SecuritySecretUpdate {
|
||||||
|
mutation := newSecuritySecretMutation(c.config, OpUpdate)
|
||||||
|
return &SecuritySecretUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOne returns an update builder for the given entity.
|
||||||
|
func (c *SecuritySecretClient) UpdateOne(_m *SecuritySecret) *SecuritySecretUpdateOne {
|
||||||
|
mutation := newSecuritySecretMutation(c.config, OpUpdateOne, withSecuritySecret(_m))
|
||||||
|
return &SecuritySecretUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOneID returns an update builder for the given id.
|
||||||
|
func (c *SecuritySecretClient) UpdateOneID(id int64) *SecuritySecretUpdateOne {
|
||||||
|
mutation := newSecuritySecretMutation(c.config, OpUpdateOne, withSecuritySecretID(id))
|
||||||
|
return &SecuritySecretUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete returns a delete builder for SecuritySecret.
|
||||||
|
func (c *SecuritySecretClient) Delete() *SecuritySecretDelete {
|
||||||
|
mutation := newSecuritySecretMutation(c.config, OpDelete)
|
||||||
|
return &SecuritySecretDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOne returns a builder for deleting the given entity.
|
||||||
|
func (c *SecuritySecretClient) DeleteOne(_m *SecuritySecret) *SecuritySecretDeleteOne {
|
||||||
|
return c.DeleteOneID(_m.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||||
|
func (c *SecuritySecretClient) DeleteOneID(id int64) *SecuritySecretDeleteOne {
|
||||||
|
builder := c.Delete().Where(securitysecret.ID(id))
|
||||||
|
builder.mutation.id = &id
|
||||||
|
builder.mutation.op = OpDeleteOne
|
||||||
|
return &SecuritySecretDeleteOne{builder}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query returns a query builder for SecuritySecret.
|
||||||
|
func (c *SecuritySecretClient) Query() *SecuritySecretQuery {
|
||||||
|
return &SecuritySecretQuery{
|
||||||
|
config: c.config,
|
||||||
|
ctx: &QueryContext{Type: TypeSecuritySecret},
|
||||||
|
inters: c.Interceptors(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a SecuritySecret entity by its id.
|
||||||
|
func (c *SecuritySecretClient) Get(ctx context.Context, id int64) (*SecuritySecret, error) {
|
||||||
|
return c.Query().Where(securitysecret.ID(id)).Only(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetX is like Get, but panics if an error occurs.
|
||||||
|
func (c *SecuritySecretClient) GetX(ctx context.Context, id int64) *SecuritySecret {
|
||||||
|
obj, err := c.Get(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hooks returns the client hooks.
|
||||||
|
func (c *SecuritySecretClient) Hooks() []Hook {
|
||||||
|
return c.hooks.SecuritySecret
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interceptors returns the client interceptors.
|
||||||
|
func (c *SecuritySecretClient) Interceptors() []Interceptor {
|
||||||
|
return c.inters.SecuritySecret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *SecuritySecretClient) mutate(ctx context.Context, m *SecuritySecretMutation) (Value, error) {
|
||||||
|
switch m.Op() {
|
||||||
|
case OpCreate:
|
||||||
|
return (&SecuritySecretCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpUpdate:
|
||||||
|
return (&SecuritySecretUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpUpdateOne:
|
||||||
|
return (&SecuritySecretUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpDelete, OpDeleteOne:
|
||||||
|
return (&SecuritySecretDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("ent: unknown SecuritySecret mutation op: %q", m.Op())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// SettingClient is a client for the Setting schema.
|
// SettingClient is a client for the Setting schema.
|
||||||
type SettingClient struct {
|
type SettingClient struct {
|
||||||
config
|
config
|
||||||
@@ -3606,15 +3888,17 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription
|
|||||||
type (
|
type (
|
||||||
hooks struct {
|
hooks struct {
|
||||||
APIKey, Account, AccountGroup, Announcement, AnnouncementRead,
|
APIKey, Account, AccountGroup, Announcement, AnnouncementRead,
|
||||||
ErrorPassthroughRule, Group, PromoCode, PromoCodeUsage, Proxy, RedeemCode,
|
ErrorPassthroughRule, Group, IdempotencyRecord, PromoCode, PromoCodeUsage,
|
||||||
Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup,
|
Proxy, RedeemCode, SecuritySecret, Setting, UsageCleanupTask, UsageLog, User,
|
||||||
UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Hook
|
UserAllowedGroup, UserAttributeDefinition, UserAttributeValue,
|
||||||
|
UserSubscription []ent.Hook
|
||||||
}
|
}
|
||||||
inters struct {
|
inters struct {
|
||||||
APIKey, Account, AccountGroup, Announcement, AnnouncementRead,
|
APIKey, Account, AccountGroup, Announcement, AnnouncementRead,
|
||||||
ErrorPassthroughRule, Group, PromoCode, PromoCodeUsage, Proxy, RedeemCode,
|
ErrorPassthroughRule, Group, IdempotencyRecord, PromoCode, PromoCodeUsage,
|
||||||
Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup,
|
Proxy, RedeemCode, SecuritySecret, Setting, UsageCleanupTask, UsageLog, User,
|
||||||
UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Interceptor
|
UserAllowedGroup, UserAttributeDefinition, UserAttributeValue,
|
||||||
|
UserSubscription []ent.Interceptor
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -19,10 +19,12 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
@@ -98,10 +100,12 @@ func checkColumn(t, c string) error {
|
|||||||
announcementread.Table: announcementread.ValidColumn,
|
announcementread.Table: announcementread.ValidColumn,
|
||||||
errorpassthroughrule.Table: errorpassthroughrule.ValidColumn,
|
errorpassthroughrule.Table: errorpassthroughrule.ValidColumn,
|
||||||
group.Table: group.ValidColumn,
|
group.Table: group.ValidColumn,
|
||||||
|
idempotencyrecord.Table: idempotencyrecord.ValidColumn,
|
||||||
promocode.Table: promocode.ValidColumn,
|
promocode.Table: promocode.ValidColumn,
|
||||||
promocodeusage.Table: promocodeusage.ValidColumn,
|
promocodeusage.Table: promocodeusage.ValidColumn,
|
||||||
proxy.Table: proxy.ValidColumn,
|
proxy.Table: proxy.ValidColumn,
|
||||||
redeemcode.Table: redeemcode.ValidColumn,
|
redeemcode.Table: redeemcode.ValidColumn,
|
||||||
|
securitysecret.Table: securitysecret.ValidColumn,
|
||||||
setting.Table: setting.ValidColumn,
|
setting.Table: setting.ValidColumn,
|
||||||
usagecleanuptask.Table: usagecleanuptask.ValidColumn,
|
usagecleanuptask.Table: usagecleanuptask.ValidColumn,
|
||||||
usagelog.Table: usagelog.ValidColumn,
|
usagelog.Table: usagelog.ValidColumn,
|
||||||
|
|||||||
@@ -44,6 +44,8 @@ type ErrorPassthroughRule struct {
|
|||||||
PassthroughBody bool `json:"passthrough_body,omitempty"`
|
PassthroughBody bool `json:"passthrough_body,omitempty"`
|
||||||
// CustomMessage holds the value of the "custom_message" field.
|
// CustomMessage holds the value of the "custom_message" field.
|
||||||
CustomMessage *string `json:"custom_message,omitempty"`
|
CustomMessage *string `json:"custom_message,omitempty"`
|
||||||
|
// SkipMonitoring holds the value of the "skip_monitoring" field.
|
||||||
|
SkipMonitoring bool `json:"skip_monitoring,omitempty"`
|
||||||
// Description holds the value of the "description" field.
|
// Description holds the value of the "description" field.
|
||||||
Description *string `json:"description,omitempty"`
|
Description *string `json:"description,omitempty"`
|
||||||
selectValues sql.SelectValues
|
selectValues sql.SelectValues
|
||||||
@@ -56,7 +58,7 @@ func (*ErrorPassthroughRule) scanValues(columns []string) ([]any, error) {
|
|||||||
switch columns[i] {
|
switch columns[i] {
|
||||||
case errorpassthroughrule.FieldErrorCodes, errorpassthroughrule.FieldKeywords, errorpassthroughrule.FieldPlatforms:
|
case errorpassthroughrule.FieldErrorCodes, errorpassthroughrule.FieldKeywords, errorpassthroughrule.FieldPlatforms:
|
||||||
values[i] = new([]byte)
|
values[i] = new([]byte)
|
||||||
case errorpassthroughrule.FieldEnabled, errorpassthroughrule.FieldPassthroughCode, errorpassthroughrule.FieldPassthroughBody:
|
case errorpassthroughrule.FieldEnabled, errorpassthroughrule.FieldPassthroughCode, errorpassthroughrule.FieldPassthroughBody, errorpassthroughrule.FieldSkipMonitoring:
|
||||||
values[i] = new(sql.NullBool)
|
values[i] = new(sql.NullBool)
|
||||||
case errorpassthroughrule.FieldID, errorpassthroughrule.FieldPriority, errorpassthroughrule.FieldResponseCode:
|
case errorpassthroughrule.FieldID, errorpassthroughrule.FieldPriority, errorpassthroughrule.FieldResponseCode:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
@@ -171,6 +173,12 @@ func (_m *ErrorPassthroughRule) assignValues(columns []string, values []any) err
|
|||||||
_m.CustomMessage = new(string)
|
_m.CustomMessage = new(string)
|
||||||
*_m.CustomMessage = value.String
|
*_m.CustomMessage = value.String
|
||||||
}
|
}
|
||||||
|
case errorpassthroughrule.FieldSkipMonitoring:
|
||||||
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field skip_monitoring", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SkipMonitoring = value.Bool
|
||||||
|
}
|
||||||
case errorpassthroughrule.FieldDescription:
|
case errorpassthroughrule.FieldDescription:
|
||||||
if value, ok := values[i].(*sql.NullString); !ok {
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field description", values[i])
|
return fmt.Errorf("unexpected type %T for field description", values[i])
|
||||||
@@ -257,6 +265,9 @@ func (_m *ErrorPassthroughRule) String() string {
|
|||||||
builder.WriteString(*v)
|
builder.WriteString(*v)
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("skip_monitoring=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.SkipMonitoring))
|
||||||
|
builder.WriteString(", ")
|
||||||
if v := _m.Description; v != nil {
|
if v := _m.Description; v != nil {
|
||||||
builder.WriteString("description=")
|
builder.WriteString("description=")
|
||||||
builder.WriteString(*v)
|
builder.WriteString(*v)
|
||||||
|
|||||||
@@ -39,6 +39,8 @@ const (
|
|||||||
FieldPassthroughBody = "passthrough_body"
|
FieldPassthroughBody = "passthrough_body"
|
||||||
// FieldCustomMessage holds the string denoting the custom_message field in the database.
|
// FieldCustomMessage holds the string denoting the custom_message field in the database.
|
||||||
FieldCustomMessage = "custom_message"
|
FieldCustomMessage = "custom_message"
|
||||||
|
// FieldSkipMonitoring holds the string denoting the skip_monitoring field in the database.
|
||||||
|
FieldSkipMonitoring = "skip_monitoring"
|
||||||
// FieldDescription holds the string denoting the description field in the database.
|
// FieldDescription holds the string denoting the description field in the database.
|
||||||
FieldDescription = "description"
|
FieldDescription = "description"
|
||||||
// Table holds the table name of the errorpassthroughrule in the database.
|
// Table holds the table name of the errorpassthroughrule in the database.
|
||||||
@@ -61,6 +63,7 @@ var Columns = []string{
|
|||||||
FieldResponseCode,
|
FieldResponseCode,
|
||||||
FieldPassthroughBody,
|
FieldPassthroughBody,
|
||||||
FieldCustomMessage,
|
FieldCustomMessage,
|
||||||
|
FieldSkipMonitoring,
|
||||||
FieldDescription,
|
FieldDescription,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -95,6 +98,8 @@ var (
|
|||||||
DefaultPassthroughCode bool
|
DefaultPassthroughCode bool
|
||||||
// DefaultPassthroughBody holds the default value on creation for the "passthrough_body" field.
|
// DefaultPassthroughBody holds the default value on creation for the "passthrough_body" field.
|
||||||
DefaultPassthroughBody bool
|
DefaultPassthroughBody bool
|
||||||
|
// DefaultSkipMonitoring holds the default value on creation for the "skip_monitoring" field.
|
||||||
|
DefaultSkipMonitoring bool
|
||||||
)
|
)
|
||||||
|
|
||||||
// OrderOption defines the ordering options for the ErrorPassthroughRule queries.
|
// OrderOption defines the ordering options for the ErrorPassthroughRule queries.
|
||||||
@@ -155,6 +160,11 @@ func ByCustomMessage(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldCustomMessage, opts...).ToFunc()
|
return sql.OrderByField(FieldCustomMessage, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BySkipMonitoring orders the results by the skip_monitoring field.
|
||||||
|
func BySkipMonitoring(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSkipMonitoring, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByDescription orders the results by the description field.
|
// ByDescription orders the results by the description field.
|
||||||
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
|
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldDescription, opts...).ToFunc()
|
return sql.OrderByField(FieldDescription, opts...).ToFunc()
|
||||||
|
|||||||
@@ -104,6 +104,11 @@ func CustomMessage(v string) predicate.ErrorPassthroughRule {
|
|||||||
return predicate.ErrorPassthroughRule(sql.FieldEQ(FieldCustomMessage, v))
|
return predicate.ErrorPassthroughRule(sql.FieldEQ(FieldCustomMessage, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SkipMonitoring applies equality check predicate on the "skip_monitoring" field. It's identical to SkipMonitoringEQ.
|
||||||
|
func SkipMonitoring(v bool) predicate.ErrorPassthroughRule {
|
||||||
|
return predicate.ErrorPassthroughRule(sql.FieldEQ(FieldSkipMonitoring, v))
|
||||||
|
}
|
||||||
|
|
||||||
// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
|
// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
|
||||||
func Description(v string) predicate.ErrorPassthroughRule {
|
func Description(v string) predicate.ErrorPassthroughRule {
|
||||||
return predicate.ErrorPassthroughRule(sql.FieldEQ(FieldDescription, v))
|
return predicate.ErrorPassthroughRule(sql.FieldEQ(FieldDescription, v))
|
||||||
@@ -544,6 +549,16 @@ func CustomMessageContainsFold(v string) predicate.ErrorPassthroughRule {
|
|||||||
return predicate.ErrorPassthroughRule(sql.FieldContainsFold(FieldCustomMessage, v))
|
return predicate.ErrorPassthroughRule(sql.FieldContainsFold(FieldCustomMessage, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SkipMonitoringEQ applies the EQ predicate on the "skip_monitoring" field.
|
||||||
|
func SkipMonitoringEQ(v bool) predicate.ErrorPassthroughRule {
|
||||||
|
return predicate.ErrorPassthroughRule(sql.FieldEQ(FieldSkipMonitoring, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SkipMonitoringNEQ applies the NEQ predicate on the "skip_monitoring" field.
|
||||||
|
func SkipMonitoringNEQ(v bool) predicate.ErrorPassthroughRule {
|
||||||
|
return predicate.ErrorPassthroughRule(sql.FieldNEQ(FieldSkipMonitoring, v))
|
||||||
|
}
|
||||||
|
|
||||||
// DescriptionEQ applies the EQ predicate on the "description" field.
|
// DescriptionEQ applies the EQ predicate on the "description" field.
|
||||||
func DescriptionEQ(v string) predicate.ErrorPassthroughRule {
|
func DescriptionEQ(v string) predicate.ErrorPassthroughRule {
|
||||||
return predicate.ErrorPassthroughRule(sql.FieldEQ(FieldDescription, v))
|
return predicate.ErrorPassthroughRule(sql.FieldEQ(FieldDescription, v))
|
||||||
|
|||||||
@@ -172,6 +172,20 @@ func (_c *ErrorPassthroughRuleCreate) SetNillableCustomMessage(v *string) *Error
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSkipMonitoring sets the "skip_monitoring" field.
|
||||||
|
func (_c *ErrorPassthroughRuleCreate) SetSkipMonitoring(v bool) *ErrorPassthroughRuleCreate {
|
||||||
|
_c.mutation.SetSkipMonitoring(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSkipMonitoring sets the "skip_monitoring" field if the given value is not nil.
|
||||||
|
func (_c *ErrorPassthroughRuleCreate) SetNillableSkipMonitoring(v *bool) *ErrorPassthroughRuleCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSkipMonitoring(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetDescription sets the "description" field.
|
// SetDescription sets the "description" field.
|
||||||
func (_c *ErrorPassthroughRuleCreate) SetDescription(v string) *ErrorPassthroughRuleCreate {
|
func (_c *ErrorPassthroughRuleCreate) SetDescription(v string) *ErrorPassthroughRuleCreate {
|
||||||
_c.mutation.SetDescription(v)
|
_c.mutation.SetDescription(v)
|
||||||
@@ -249,6 +263,10 @@ func (_c *ErrorPassthroughRuleCreate) defaults() {
|
|||||||
v := errorpassthroughrule.DefaultPassthroughBody
|
v := errorpassthroughrule.DefaultPassthroughBody
|
||||||
_c.mutation.SetPassthroughBody(v)
|
_c.mutation.SetPassthroughBody(v)
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.SkipMonitoring(); !ok {
|
||||||
|
v := errorpassthroughrule.DefaultSkipMonitoring
|
||||||
|
_c.mutation.SetSkipMonitoring(v)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// check runs all checks and user-defined validators on the builder.
|
// check runs all checks and user-defined validators on the builder.
|
||||||
@@ -287,6 +305,9 @@ func (_c *ErrorPassthroughRuleCreate) check() error {
|
|||||||
if _, ok := _c.mutation.PassthroughBody(); !ok {
|
if _, ok := _c.mutation.PassthroughBody(); !ok {
|
||||||
return &ValidationError{Name: "passthrough_body", err: errors.New(`ent: missing required field "ErrorPassthroughRule.passthrough_body"`)}
|
return &ValidationError{Name: "passthrough_body", err: errors.New(`ent: missing required field "ErrorPassthroughRule.passthrough_body"`)}
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.SkipMonitoring(); !ok {
|
||||||
|
return &ValidationError{Name: "skip_monitoring", err: errors.New(`ent: missing required field "ErrorPassthroughRule.skip_monitoring"`)}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -366,6 +387,10 @@ func (_c *ErrorPassthroughRuleCreate) createSpec() (*ErrorPassthroughRule, *sqlg
|
|||||||
_spec.SetField(errorpassthroughrule.FieldCustomMessage, field.TypeString, value)
|
_spec.SetField(errorpassthroughrule.FieldCustomMessage, field.TypeString, value)
|
||||||
_node.CustomMessage = &value
|
_node.CustomMessage = &value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.SkipMonitoring(); ok {
|
||||||
|
_spec.SetField(errorpassthroughrule.FieldSkipMonitoring, field.TypeBool, value)
|
||||||
|
_node.SkipMonitoring = value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.Description(); ok {
|
if value, ok := _c.mutation.Description(); ok {
|
||||||
_spec.SetField(errorpassthroughrule.FieldDescription, field.TypeString, value)
|
_spec.SetField(errorpassthroughrule.FieldDescription, field.TypeString, value)
|
||||||
_node.Description = &value
|
_node.Description = &value
|
||||||
@@ -608,6 +633,18 @@ func (u *ErrorPassthroughRuleUpsert) ClearCustomMessage() *ErrorPassthroughRuleU
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSkipMonitoring sets the "skip_monitoring" field.
|
||||||
|
func (u *ErrorPassthroughRuleUpsert) SetSkipMonitoring(v bool) *ErrorPassthroughRuleUpsert {
|
||||||
|
u.Set(errorpassthroughrule.FieldSkipMonitoring, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSkipMonitoring sets the "skip_monitoring" field to the value that was provided on create.
|
||||||
|
func (u *ErrorPassthroughRuleUpsert) UpdateSkipMonitoring() *ErrorPassthroughRuleUpsert {
|
||||||
|
u.SetExcluded(errorpassthroughrule.FieldSkipMonitoring)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// SetDescription sets the "description" field.
|
// SetDescription sets the "description" field.
|
||||||
func (u *ErrorPassthroughRuleUpsert) SetDescription(v string) *ErrorPassthroughRuleUpsert {
|
func (u *ErrorPassthroughRuleUpsert) SetDescription(v string) *ErrorPassthroughRuleUpsert {
|
||||||
u.Set(errorpassthroughrule.FieldDescription, v)
|
u.Set(errorpassthroughrule.FieldDescription, v)
|
||||||
@@ -888,6 +925,20 @@ func (u *ErrorPassthroughRuleUpsertOne) ClearCustomMessage() *ErrorPassthroughRu
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSkipMonitoring sets the "skip_monitoring" field.
|
||||||
|
func (u *ErrorPassthroughRuleUpsertOne) SetSkipMonitoring(v bool) *ErrorPassthroughRuleUpsertOne {
|
||||||
|
return u.Update(func(s *ErrorPassthroughRuleUpsert) {
|
||||||
|
s.SetSkipMonitoring(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSkipMonitoring sets the "skip_monitoring" field to the value that was provided on create.
|
||||||
|
func (u *ErrorPassthroughRuleUpsertOne) UpdateSkipMonitoring() *ErrorPassthroughRuleUpsertOne {
|
||||||
|
return u.Update(func(s *ErrorPassthroughRuleUpsert) {
|
||||||
|
s.UpdateSkipMonitoring()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetDescription sets the "description" field.
|
// SetDescription sets the "description" field.
|
||||||
func (u *ErrorPassthroughRuleUpsertOne) SetDescription(v string) *ErrorPassthroughRuleUpsertOne {
|
func (u *ErrorPassthroughRuleUpsertOne) SetDescription(v string) *ErrorPassthroughRuleUpsertOne {
|
||||||
return u.Update(func(s *ErrorPassthroughRuleUpsert) {
|
return u.Update(func(s *ErrorPassthroughRuleUpsert) {
|
||||||
@@ -1337,6 +1388,20 @@ func (u *ErrorPassthroughRuleUpsertBulk) ClearCustomMessage() *ErrorPassthroughR
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSkipMonitoring sets the "skip_monitoring" field.
|
||||||
|
func (u *ErrorPassthroughRuleUpsertBulk) SetSkipMonitoring(v bool) *ErrorPassthroughRuleUpsertBulk {
|
||||||
|
return u.Update(func(s *ErrorPassthroughRuleUpsert) {
|
||||||
|
s.SetSkipMonitoring(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSkipMonitoring sets the "skip_monitoring" field to the value that was provided on create.
|
||||||
|
func (u *ErrorPassthroughRuleUpsertBulk) UpdateSkipMonitoring() *ErrorPassthroughRuleUpsertBulk {
|
||||||
|
return u.Update(func(s *ErrorPassthroughRuleUpsert) {
|
||||||
|
s.UpdateSkipMonitoring()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetDescription sets the "description" field.
|
// SetDescription sets the "description" field.
|
||||||
func (u *ErrorPassthroughRuleUpsertBulk) SetDescription(v string) *ErrorPassthroughRuleUpsertBulk {
|
func (u *ErrorPassthroughRuleUpsertBulk) SetDescription(v string) *ErrorPassthroughRuleUpsertBulk {
|
||||||
return u.Update(func(s *ErrorPassthroughRuleUpsert) {
|
return u.Update(func(s *ErrorPassthroughRuleUpsert) {
|
||||||
|
|||||||
@@ -227,6 +227,20 @@ func (_u *ErrorPassthroughRuleUpdate) ClearCustomMessage() *ErrorPassthroughRule
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSkipMonitoring sets the "skip_monitoring" field.
|
||||||
|
func (_u *ErrorPassthroughRuleUpdate) SetSkipMonitoring(v bool) *ErrorPassthroughRuleUpdate {
|
||||||
|
_u.mutation.SetSkipMonitoring(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSkipMonitoring sets the "skip_monitoring" field if the given value is not nil.
|
||||||
|
func (_u *ErrorPassthroughRuleUpdate) SetNillableSkipMonitoring(v *bool) *ErrorPassthroughRuleUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSkipMonitoring(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetDescription sets the "description" field.
|
// SetDescription sets the "description" field.
|
||||||
func (_u *ErrorPassthroughRuleUpdate) SetDescription(v string) *ErrorPassthroughRuleUpdate {
|
func (_u *ErrorPassthroughRuleUpdate) SetDescription(v string) *ErrorPassthroughRuleUpdate {
|
||||||
_u.mutation.SetDescription(v)
|
_u.mutation.SetDescription(v)
|
||||||
@@ -387,6 +401,9 @@ func (_u *ErrorPassthroughRuleUpdate) sqlSave(ctx context.Context) (_node int, e
|
|||||||
if _u.mutation.CustomMessageCleared() {
|
if _u.mutation.CustomMessageCleared() {
|
||||||
_spec.ClearField(errorpassthroughrule.FieldCustomMessage, field.TypeString)
|
_spec.ClearField(errorpassthroughrule.FieldCustomMessage, field.TypeString)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.SkipMonitoring(); ok {
|
||||||
|
_spec.SetField(errorpassthroughrule.FieldSkipMonitoring, field.TypeBool, value)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.Description(); ok {
|
if value, ok := _u.mutation.Description(); ok {
|
||||||
_spec.SetField(errorpassthroughrule.FieldDescription, field.TypeString, value)
|
_spec.SetField(errorpassthroughrule.FieldDescription, field.TypeString, value)
|
||||||
}
|
}
|
||||||
@@ -611,6 +628,20 @@ func (_u *ErrorPassthroughRuleUpdateOne) ClearCustomMessage() *ErrorPassthroughR
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSkipMonitoring sets the "skip_monitoring" field.
|
||||||
|
func (_u *ErrorPassthroughRuleUpdateOne) SetSkipMonitoring(v bool) *ErrorPassthroughRuleUpdateOne {
|
||||||
|
_u.mutation.SetSkipMonitoring(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSkipMonitoring sets the "skip_monitoring" field if the given value is not nil.
|
||||||
|
func (_u *ErrorPassthroughRuleUpdateOne) SetNillableSkipMonitoring(v *bool) *ErrorPassthroughRuleUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSkipMonitoring(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetDescription sets the "description" field.
|
// SetDescription sets the "description" field.
|
||||||
func (_u *ErrorPassthroughRuleUpdateOne) SetDescription(v string) *ErrorPassthroughRuleUpdateOne {
|
func (_u *ErrorPassthroughRuleUpdateOne) SetDescription(v string) *ErrorPassthroughRuleUpdateOne {
|
||||||
_u.mutation.SetDescription(v)
|
_u.mutation.SetDescription(v)
|
||||||
@@ -801,6 +832,9 @@ func (_u *ErrorPassthroughRuleUpdateOne) sqlSave(ctx context.Context) (_node *Er
|
|||||||
if _u.mutation.CustomMessageCleared() {
|
if _u.mutation.CustomMessageCleared() {
|
||||||
_spec.ClearField(errorpassthroughrule.FieldCustomMessage, field.TypeString)
|
_spec.ClearField(errorpassthroughrule.FieldCustomMessage, field.TypeString)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.SkipMonitoring(); ok {
|
||||||
|
_spec.SetField(errorpassthroughrule.FieldSkipMonitoring, field.TypeBool, value)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.Description(); ok {
|
if value, ok := _u.mutation.Description(); ok {
|
||||||
_spec.SetField(errorpassthroughrule.FieldDescription, field.TypeString, value)
|
_spec.SetField(errorpassthroughrule.FieldDescription, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -52,6 +52,16 @@ type Group struct {
|
|||||||
ImagePrice2k *float64 `json:"image_price_2k,omitempty"`
|
ImagePrice2k *float64 `json:"image_price_2k,omitempty"`
|
||||||
// ImagePrice4k holds the value of the "image_price_4k" field.
|
// ImagePrice4k holds the value of the "image_price_4k" field.
|
||||||
ImagePrice4k *float64 `json:"image_price_4k,omitempty"`
|
ImagePrice4k *float64 `json:"image_price_4k,omitempty"`
|
||||||
|
// SoraImagePrice360 holds the value of the "sora_image_price_360" field.
|
||||||
|
SoraImagePrice360 *float64 `json:"sora_image_price_360,omitempty"`
|
||||||
|
// SoraImagePrice540 holds the value of the "sora_image_price_540" field.
|
||||||
|
SoraImagePrice540 *float64 `json:"sora_image_price_540,omitempty"`
|
||||||
|
// SoraVideoPricePerRequest holds the value of the "sora_video_price_per_request" field.
|
||||||
|
SoraVideoPricePerRequest *float64 `json:"sora_video_price_per_request,omitempty"`
|
||||||
|
// SoraVideoPricePerRequestHd holds the value of the "sora_video_price_per_request_hd" field.
|
||||||
|
SoraVideoPricePerRequestHd *float64 `json:"sora_video_price_per_request_hd,omitempty"`
|
||||||
|
// SoraStorageQuotaBytes holds the value of the "sora_storage_quota_bytes" field.
|
||||||
|
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes,omitempty"`
|
||||||
// 是否仅允许 Claude Code 客户端
|
// 是否仅允许 Claude Code 客户端
|
||||||
ClaudeCodeOnly bool `json:"claude_code_only,omitempty"`
|
ClaudeCodeOnly bool `json:"claude_code_only,omitempty"`
|
||||||
// 非 Claude Code 请求降级使用的分组 ID
|
// 非 Claude Code 请求降级使用的分组 ID
|
||||||
@@ -66,6 +76,12 @@ type Group struct {
|
|||||||
McpXMLInject bool `json:"mcp_xml_inject,omitempty"`
|
McpXMLInject bool `json:"mcp_xml_inject,omitempty"`
|
||||||
// 支持的模型系列:claude, gemini_text, gemini_image
|
// 支持的模型系列:claude, gemini_text, gemini_image
|
||||||
SupportedModelScopes []string `json:"supported_model_scopes,omitempty"`
|
SupportedModelScopes []string `json:"supported_model_scopes,omitempty"`
|
||||||
|
// 分组显示排序,数值越小越靠前
|
||||||
|
SortOrder int `json:"sort_order,omitempty"`
|
||||||
|
// 是否允许 /v1/messages 调度到此 OpenAI 分组
|
||||||
|
AllowMessagesDispatch bool `json:"allow_messages_dispatch,omitempty"`
|
||||||
|
// 默认映射模型 ID,当账号级映射找不到时使用此值
|
||||||
|
DefaultMappedModel string `json:"default_mapped_model,omitempty"`
|
||||||
// Edges holds the relations/edges for other nodes in the graph.
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
// The values are being populated by the GroupQuery when eager-loading is set.
|
// The values are being populated by the GroupQuery when eager-loading is set.
|
||||||
Edges GroupEdges `json:"edges"`
|
Edges GroupEdges `json:"edges"`
|
||||||
@@ -174,13 +190,13 @@ func (*Group) scanValues(columns []string) ([]any, error) {
|
|||||||
switch columns[i] {
|
switch columns[i] {
|
||||||
case group.FieldModelRouting, group.FieldSupportedModelScopes:
|
case group.FieldModelRouting, group.FieldSupportedModelScopes:
|
||||||
values[i] = new([]byte)
|
values[i] = new([]byte)
|
||||||
case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled, group.FieldMcpXMLInject:
|
case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled, group.FieldMcpXMLInject, group.FieldAllowMessagesDispatch:
|
||||||
values[i] = new(sql.NullBool)
|
values[i] = new(sql.NullBool)
|
||||||
case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k:
|
case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k, group.FieldSoraImagePrice360, group.FieldSoraImagePrice540, group.FieldSoraVideoPricePerRequest, group.FieldSoraVideoPricePerRequestHd:
|
||||||
values[i] = new(sql.NullFloat64)
|
values[i] = new(sql.NullFloat64)
|
||||||
case group.FieldID, group.FieldDefaultValidityDays, group.FieldFallbackGroupID, group.FieldFallbackGroupIDOnInvalidRequest:
|
case group.FieldID, group.FieldDefaultValidityDays, group.FieldSoraStorageQuotaBytes, group.FieldFallbackGroupID, group.FieldFallbackGroupIDOnInvalidRequest, group.FieldSortOrder:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
case group.FieldName, group.FieldDescription, group.FieldStatus, group.FieldPlatform, group.FieldSubscriptionType:
|
case group.FieldName, group.FieldDescription, group.FieldStatus, group.FieldPlatform, group.FieldSubscriptionType, group.FieldDefaultMappedModel:
|
||||||
values[i] = new(sql.NullString)
|
values[i] = new(sql.NullString)
|
||||||
case group.FieldCreatedAt, group.FieldUpdatedAt, group.FieldDeletedAt:
|
case group.FieldCreatedAt, group.FieldUpdatedAt, group.FieldDeletedAt:
|
||||||
values[i] = new(sql.NullTime)
|
values[i] = new(sql.NullTime)
|
||||||
@@ -315,6 +331,40 @@ func (_m *Group) assignValues(columns []string, values []any) error {
|
|||||||
_m.ImagePrice4k = new(float64)
|
_m.ImagePrice4k = new(float64)
|
||||||
*_m.ImagePrice4k = value.Float64
|
*_m.ImagePrice4k = value.Float64
|
||||||
}
|
}
|
||||||
|
case group.FieldSoraImagePrice360:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field sora_image_price_360", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SoraImagePrice360 = new(float64)
|
||||||
|
*_m.SoraImagePrice360 = value.Float64
|
||||||
|
}
|
||||||
|
case group.FieldSoraImagePrice540:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field sora_image_price_540", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SoraImagePrice540 = new(float64)
|
||||||
|
*_m.SoraImagePrice540 = value.Float64
|
||||||
|
}
|
||||||
|
case group.FieldSoraVideoPricePerRequest:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field sora_video_price_per_request", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SoraVideoPricePerRequest = new(float64)
|
||||||
|
*_m.SoraVideoPricePerRequest = value.Float64
|
||||||
|
}
|
||||||
|
case group.FieldSoraVideoPricePerRequestHd:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field sora_video_price_per_request_hd", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SoraVideoPricePerRequestHd = new(float64)
|
||||||
|
*_m.SoraVideoPricePerRequestHd = value.Float64
|
||||||
|
}
|
||||||
|
case group.FieldSoraStorageQuotaBytes:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field sora_storage_quota_bytes", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SoraStorageQuotaBytes = value.Int64
|
||||||
|
}
|
||||||
case group.FieldClaudeCodeOnly:
|
case group.FieldClaudeCodeOnly:
|
||||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field claude_code_only", values[i])
|
return fmt.Errorf("unexpected type %T for field claude_code_only", values[i])
|
||||||
@@ -363,6 +413,24 @@ func (_m *Group) assignValues(columns []string, values []any) error {
|
|||||||
return fmt.Errorf("unmarshal field supported_model_scopes: %w", err)
|
return fmt.Errorf("unmarshal field supported_model_scopes: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
case group.FieldSortOrder:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field sort_order", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SortOrder = int(value.Int64)
|
||||||
|
}
|
||||||
|
case group.FieldAllowMessagesDispatch:
|
||||||
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field allow_messages_dispatch", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.AllowMessagesDispatch = value.Bool
|
||||||
|
}
|
||||||
|
case group.FieldDefaultMappedModel:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field default_mapped_model", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.DefaultMappedModel = value.String
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
_m.selectValues.Set(columns[i], values[i])
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
}
|
}
|
||||||
@@ -506,6 +574,29 @@ func (_m *Group) String() string {
|
|||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SoraImagePrice360; v != nil {
|
||||||
|
builder.WriteString("sora_image_price_360=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SoraImagePrice540; v != nil {
|
||||||
|
builder.WriteString("sora_image_price_540=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SoraVideoPricePerRequest; v != nil {
|
||||||
|
builder.WriteString("sora_video_price_per_request=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SoraVideoPricePerRequestHd; v != nil {
|
||||||
|
builder.WriteString("sora_video_price_per_request_hd=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("sora_storage_quota_bytes=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.SoraStorageQuotaBytes))
|
||||||
|
builder.WriteString(", ")
|
||||||
builder.WriteString("claude_code_only=")
|
builder.WriteString("claude_code_only=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.ClaudeCodeOnly))
|
builder.WriteString(fmt.Sprintf("%v", _m.ClaudeCodeOnly))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
@@ -530,6 +621,15 @@ func (_m *Group) String() string {
|
|||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("supported_model_scopes=")
|
builder.WriteString("supported_model_scopes=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.SupportedModelScopes))
|
builder.WriteString(fmt.Sprintf("%v", _m.SupportedModelScopes))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("sort_order=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.SortOrder))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("allow_messages_dispatch=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.AllowMessagesDispatch))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("default_mapped_model=")
|
||||||
|
builder.WriteString(_m.DefaultMappedModel)
|
||||||
builder.WriteByte(')')
|
builder.WriteByte(')')
|
||||||
return builder.String()
|
return builder.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -49,6 +49,16 @@ const (
|
|||||||
FieldImagePrice2k = "image_price_2k"
|
FieldImagePrice2k = "image_price_2k"
|
||||||
// FieldImagePrice4k holds the string denoting the image_price_4k field in the database.
|
// FieldImagePrice4k holds the string denoting the image_price_4k field in the database.
|
||||||
FieldImagePrice4k = "image_price_4k"
|
FieldImagePrice4k = "image_price_4k"
|
||||||
|
// FieldSoraImagePrice360 holds the string denoting the sora_image_price_360 field in the database.
|
||||||
|
FieldSoraImagePrice360 = "sora_image_price_360"
|
||||||
|
// FieldSoraImagePrice540 holds the string denoting the sora_image_price_540 field in the database.
|
||||||
|
FieldSoraImagePrice540 = "sora_image_price_540"
|
||||||
|
// FieldSoraVideoPricePerRequest holds the string denoting the sora_video_price_per_request field in the database.
|
||||||
|
FieldSoraVideoPricePerRequest = "sora_video_price_per_request"
|
||||||
|
// FieldSoraVideoPricePerRequestHd holds the string denoting the sora_video_price_per_request_hd field in the database.
|
||||||
|
FieldSoraVideoPricePerRequestHd = "sora_video_price_per_request_hd"
|
||||||
|
// FieldSoraStorageQuotaBytes holds the string denoting the sora_storage_quota_bytes field in the database.
|
||||||
|
FieldSoraStorageQuotaBytes = "sora_storage_quota_bytes"
|
||||||
// FieldClaudeCodeOnly holds the string denoting the claude_code_only field in the database.
|
// FieldClaudeCodeOnly holds the string denoting the claude_code_only field in the database.
|
||||||
FieldClaudeCodeOnly = "claude_code_only"
|
FieldClaudeCodeOnly = "claude_code_only"
|
||||||
// FieldFallbackGroupID holds the string denoting the fallback_group_id field in the database.
|
// FieldFallbackGroupID holds the string denoting the fallback_group_id field in the database.
|
||||||
@@ -63,6 +73,12 @@ const (
|
|||||||
FieldMcpXMLInject = "mcp_xml_inject"
|
FieldMcpXMLInject = "mcp_xml_inject"
|
||||||
// FieldSupportedModelScopes holds the string denoting the supported_model_scopes field in the database.
|
// FieldSupportedModelScopes holds the string denoting the supported_model_scopes field in the database.
|
||||||
FieldSupportedModelScopes = "supported_model_scopes"
|
FieldSupportedModelScopes = "supported_model_scopes"
|
||||||
|
// FieldSortOrder holds the string denoting the sort_order field in the database.
|
||||||
|
FieldSortOrder = "sort_order"
|
||||||
|
// FieldAllowMessagesDispatch holds the string denoting the allow_messages_dispatch field in the database.
|
||||||
|
FieldAllowMessagesDispatch = "allow_messages_dispatch"
|
||||||
|
// FieldDefaultMappedModel holds the string denoting the default_mapped_model field in the database.
|
||||||
|
FieldDefaultMappedModel = "default_mapped_model"
|
||||||
// EdgeAPIKeys holds the string denoting the api_keys edge name in mutations.
|
// EdgeAPIKeys holds the string denoting the api_keys edge name in mutations.
|
||||||
EdgeAPIKeys = "api_keys"
|
EdgeAPIKeys = "api_keys"
|
||||||
// EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations.
|
// EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations.
|
||||||
@@ -155,6 +171,11 @@ var Columns = []string{
|
|||||||
FieldImagePrice1k,
|
FieldImagePrice1k,
|
||||||
FieldImagePrice2k,
|
FieldImagePrice2k,
|
||||||
FieldImagePrice4k,
|
FieldImagePrice4k,
|
||||||
|
FieldSoraImagePrice360,
|
||||||
|
FieldSoraImagePrice540,
|
||||||
|
FieldSoraVideoPricePerRequest,
|
||||||
|
FieldSoraVideoPricePerRequestHd,
|
||||||
|
FieldSoraStorageQuotaBytes,
|
||||||
FieldClaudeCodeOnly,
|
FieldClaudeCodeOnly,
|
||||||
FieldFallbackGroupID,
|
FieldFallbackGroupID,
|
||||||
FieldFallbackGroupIDOnInvalidRequest,
|
FieldFallbackGroupIDOnInvalidRequest,
|
||||||
@@ -162,6 +183,9 @@ var Columns = []string{
|
|||||||
FieldModelRoutingEnabled,
|
FieldModelRoutingEnabled,
|
||||||
FieldMcpXMLInject,
|
FieldMcpXMLInject,
|
||||||
FieldSupportedModelScopes,
|
FieldSupportedModelScopes,
|
||||||
|
FieldSortOrder,
|
||||||
|
FieldAllowMessagesDispatch,
|
||||||
|
FieldDefaultMappedModel,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -217,6 +241,8 @@ var (
|
|||||||
SubscriptionTypeValidator func(string) error
|
SubscriptionTypeValidator func(string) error
|
||||||
// DefaultDefaultValidityDays holds the default value on creation for the "default_validity_days" field.
|
// DefaultDefaultValidityDays holds the default value on creation for the "default_validity_days" field.
|
||||||
DefaultDefaultValidityDays int
|
DefaultDefaultValidityDays int
|
||||||
|
// DefaultSoraStorageQuotaBytes holds the default value on creation for the "sora_storage_quota_bytes" field.
|
||||||
|
DefaultSoraStorageQuotaBytes int64
|
||||||
// DefaultClaudeCodeOnly holds the default value on creation for the "claude_code_only" field.
|
// DefaultClaudeCodeOnly holds the default value on creation for the "claude_code_only" field.
|
||||||
DefaultClaudeCodeOnly bool
|
DefaultClaudeCodeOnly bool
|
||||||
// DefaultModelRoutingEnabled holds the default value on creation for the "model_routing_enabled" field.
|
// DefaultModelRoutingEnabled holds the default value on creation for the "model_routing_enabled" field.
|
||||||
@@ -225,6 +251,14 @@ var (
|
|||||||
DefaultMcpXMLInject bool
|
DefaultMcpXMLInject bool
|
||||||
// DefaultSupportedModelScopes holds the default value on creation for the "supported_model_scopes" field.
|
// DefaultSupportedModelScopes holds the default value on creation for the "supported_model_scopes" field.
|
||||||
DefaultSupportedModelScopes []string
|
DefaultSupportedModelScopes []string
|
||||||
|
// DefaultSortOrder holds the default value on creation for the "sort_order" field.
|
||||||
|
DefaultSortOrder int
|
||||||
|
// DefaultAllowMessagesDispatch holds the default value on creation for the "allow_messages_dispatch" field.
|
||||||
|
DefaultAllowMessagesDispatch bool
|
||||||
|
// DefaultDefaultMappedModel holds the default value on creation for the "default_mapped_model" field.
|
||||||
|
DefaultDefaultMappedModel string
|
||||||
|
// DefaultMappedModelValidator is a validator for the "default_mapped_model" field. It is called by the builders before save.
|
||||||
|
DefaultMappedModelValidator func(string) error
|
||||||
)
|
)
|
||||||
|
|
||||||
// OrderOption defines the ordering options for the Group queries.
|
// OrderOption defines the ordering options for the Group queries.
|
||||||
@@ -320,6 +354,31 @@ func ByImagePrice4k(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldImagePrice4k, opts...).ToFunc()
|
return sql.OrderByField(FieldImagePrice4k, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BySoraImagePrice360 orders the results by the sora_image_price_360 field.
|
||||||
|
func BySoraImagePrice360(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSoraImagePrice360, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySoraImagePrice540 orders the results by the sora_image_price_540 field.
|
||||||
|
func BySoraImagePrice540(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSoraImagePrice540, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySoraVideoPricePerRequest orders the results by the sora_video_price_per_request field.
|
||||||
|
func BySoraVideoPricePerRequest(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSoraVideoPricePerRequest, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySoraVideoPricePerRequestHd orders the results by the sora_video_price_per_request_hd field.
|
||||||
|
func BySoraVideoPricePerRequestHd(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSoraVideoPricePerRequestHd, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySoraStorageQuotaBytes orders the results by the sora_storage_quota_bytes field.
|
||||||
|
func BySoraStorageQuotaBytes(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSoraStorageQuotaBytes, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByClaudeCodeOnly orders the results by the claude_code_only field.
|
// ByClaudeCodeOnly orders the results by the claude_code_only field.
|
||||||
func ByClaudeCodeOnly(opts ...sql.OrderTermOption) OrderOption {
|
func ByClaudeCodeOnly(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldClaudeCodeOnly, opts...).ToFunc()
|
return sql.OrderByField(FieldClaudeCodeOnly, opts...).ToFunc()
|
||||||
@@ -345,6 +404,21 @@ func ByMcpXMLInject(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldMcpXMLInject, opts...).ToFunc()
|
return sql.OrderByField(FieldMcpXMLInject, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BySortOrder orders the results by the sort_order field.
|
||||||
|
func BySortOrder(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSortOrder, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAllowMessagesDispatch orders the results by the allow_messages_dispatch field.
|
||||||
|
func ByAllowMessagesDispatch(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldAllowMessagesDispatch, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByDefaultMappedModel orders the results by the default_mapped_model field.
|
||||||
|
func ByDefaultMappedModel(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldDefaultMappedModel, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByAPIKeysCount orders the results by api_keys count.
|
// ByAPIKeysCount orders the results by api_keys count.
|
||||||
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption {
|
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return func(s *sql.Selector) {
|
return func(s *sql.Selector) {
|
||||||
|
|||||||
@@ -140,6 +140,31 @@ func ImagePrice4k(v float64) predicate.Group {
|
|||||||
return predicate.Group(sql.FieldEQ(FieldImagePrice4k, v))
|
return predicate.Group(sql.FieldEQ(FieldImagePrice4k, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360 applies equality check predicate on the "sora_image_price_360" field. It's identical to SoraImagePrice360EQ.
|
||||||
|
func SoraImagePrice360(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraImagePrice360, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540 applies equality check predicate on the "sora_image_price_540" field. It's identical to SoraImagePrice540EQ.
|
||||||
|
func SoraImagePrice540(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraImagePrice540, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequest applies equality check predicate on the "sora_video_price_per_request" field. It's identical to SoraVideoPricePerRequestEQ.
|
||||||
|
func SoraVideoPricePerRequest(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequest, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHd applies equality check predicate on the "sora_video_price_per_request_hd" field. It's identical to SoraVideoPricePerRequestHdEQ.
|
||||||
|
func SoraVideoPricePerRequestHd(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequestHd, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytes applies equality check predicate on the "sora_storage_quota_bytes" field. It's identical to SoraStorageQuotaBytesEQ.
|
||||||
|
func SoraStorageQuotaBytes(v int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
// ClaudeCodeOnly applies equality check predicate on the "claude_code_only" field. It's identical to ClaudeCodeOnlyEQ.
|
// ClaudeCodeOnly applies equality check predicate on the "claude_code_only" field. It's identical to ClaudeCodeOnlyEQ.
|
||||||
func ClaudeCodeOnly(v bool) predicate.Group {
|
func ClaudeCodeOnly(v bool) predicate.Group {
|
||||||
return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v))
|
return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v))
|
||||||
@@ -165,6 +190,21 @@ func McpXMLInject(v bool) predicate.Group {
|
|||||||
return predicate.Group(sql.FieldEQ(FieldMcpXMLInject, v))
|
return predicate.Group(sql.FieldEQ(FieldMcpXMLInject, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SortOrder applies equality check predicate on the "sort_order" field. It's identical to SortOrderEQ.
|
||||||
|
func SortOrder(v int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSortOrder, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowMessagesDispatch applies equality check predicate on the "allow_messages_dispatch" field. It's identical to AllowMessagesDispatchEQ.
|
||||||
|
func AllowMessagesDispatch(v bool) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldAllowMessagesDispatch, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModel applies equality check predicate on the "default_mapped_model" field. It's identical to DefaultMappedModelEQ.
|
||||||
|
func DefaultMappedModel(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
func CreatedAtEQ(v time.Time) predicate.Group {
|
func CreatedAtEQ(v time.Time) predicate.Group {
|
||||||
return predicate.Group(sql.FieldEQ(FieldCreatedAt, v))
|
return predicate.Group(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
@@ -1020,6 +1060,246 @@ func ImagePrice4kNotNil() predicate.Group {
|
|||||||
return predicate.Group(sql.FieldNotNull(FieldImagePrice4k))
|
return predicate.Group(sql.FieldNotNull(FieldImagePrice4k))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360EQ applies the EQ predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360EQ(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraImagePrice360, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360NEQ applies the NEQ predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360NEQ(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNEQ(FieldSoraImagePrice360, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360In applies the In predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360In(vs ...float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIn(FieldSoraImagePrice360, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360NotIn applies the NotIn predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360NotIn(vs ...float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotIn(FieldSoraImagePrice360, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360GT applies the GT predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360GT(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGT(FieldSoraImagePrice360, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360GTE applies the GTE predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360GTE(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGTE(FieldSoraImagePrice360, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360LT applies the LT predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360LT(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLT(FieldSoraImagePrice360, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360LTE applies the LTE predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360LTE(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLTE(FieldSoraImagePrice360, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360IsNil applies the IsNil predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360IsNil() predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIsNull(FieldSoraImagePrice360))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice360NotNil applies the NotNil predicate on the "sora_image_price_360" field.
|
||||||
|
func SoraImagePrice360NotNil() predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotNull(FieldSoraImagePrice360))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540EQ applies the EQ predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540EQ(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraImagePrice540, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540NEQ applies the NEQ predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540NEQ(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNEQ(FieldSoraImagePrice540, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540In applies the In predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540In(vs ...float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIn(FieldSoraImagePrice540, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540NotIn applies the NotIn predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540NotIn(vs ...float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotIn(FieldSoraImagePrice540, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540GT applies the GT predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540GT(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGT(FieldSoraImagePrice540, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540GTE applies the GTE predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540GTE(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGTE(FieldSoraImagePrice540, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540LT applies the LT predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540LT(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLT(FieldSoraImagePrice540, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540LTE applies the LTE predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540LTE(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLTE(FieldSoraImagePrice540, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540IsNil applies the IsNil predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540IsNil() predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIsNull(FieldSoraImagePrice540))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraImagePrice540NotNil applies the NotNil predicate on the "sora_image_price_540" field.
|
||||||
|
func SoraImagePrice540NotNil() predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotNull(FieldSoraImagePrice540))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestEQ applies the EQ predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestEQ(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequest, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestNEQ applies the NEQ predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestNEQ(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNEQ(FieldSoraVideoPricePerRequest, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestIn applies the In predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestIn(vs ...float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIn(FieldSoraVideoPricePerRequest, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestNotIn applies the NotIn predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestNotIn(vs ...float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotIn(FieldSoraVideoPricePerRequest, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestGT applies the GT predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestGT(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGT(FieldSoraVideoPricePerRequest, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestGTE applies the GTE predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestGTE(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGTE(FieldSoraVideoPricePerRequest, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestLT applies the LT predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestLT(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLT(FieldSoraVideoPricePerRequest, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestLTE applies the LTE predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestLTE(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLTE(FieldSoraVideoPricePerRequest, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestIsNil applies the IsNil predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestIsNil() predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIsNull(FieldSoraVideoPricePerRequest))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestNotNil applies the NotNil predicate on the "sora_video_price_per_request" field.
|
||||||
|
func SoraVideoPricePerRequestNotNil() predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotNull(FieldSoraVideoPricePerRequest))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdEQ applies the EQ predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdEQ(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequestHd, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdNEQ applies the NEQ predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdNEQ(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNEQ(FieldSoraVideoPricePerRequestHd, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdIn applies the In predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdIn(vs ...float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIn(FieldSoraVideoPricePerRequestHd, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdNotIn applies the NotIn predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdNotIn(vs ...float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotIn(FieldSoraVideoPricePerRequestHd, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdGT applies the GT predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdGT(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGT(FieldSoraVideoPricePerRequestHd, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdGTE applies the GTE predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdGTE(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGTE(FieldSoraVideoPricePerRequestHd, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdLT applies the LT predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdLT(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLT(FieldSoraVideoPricePerRequestHd, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdLTE applies the LTE predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdLTE(v float64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLTE(FieldSoraVideoPricePerRequestHd, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdIsNil applies the IsNil predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdIsNil() predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIsNull(FieldSoraVideoPricePerRequestHd))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraVideoPricePerRequestHdNotNil applies the NotNil predicate on the "sora_video_price_per_request_hd" field.
|
||||||
|
func SoraVideoPricePerRequestHdNotNil() predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotNull(FieldSoraVideoPricePerRequestHd))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesEQ applies the EQ predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesEQ(v int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesNEQ applies the NEQ predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesNEQ(v int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNEQ(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesIn applies the In predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesIn(vs ...int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIn(FieldSoraStorageQuotaBytes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesNotIn applies the NotIn predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesNotIn(vs ...int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotIn(FieldSoraStorageQuotaBytes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesGT applies the GT predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesGT(v int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGT(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesGTE applies the GTE predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesGTE(v int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGTE(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesLT applies the LT predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesLT(v int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLT(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesLTE applies the LTE predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesLTE(v int64) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLTE(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
// ClaudeCodeOnlyEQ applies the EQ predicate on the "claude_code_only" field.
|
// ClaudeCodeOnlyEQ applies the EQ predicate on the "claude_code_only" field.
|
||||||
func ClaudeCodeOnlyEQ(v bool) predicate.Group {
|
func ClaudeCodeOnlyEQ(v bool) predicate.Group {
|
||||||
return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v))
|
return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v))
|
||||||
@@ -1160,6 +1440,121 @@ func McpXMLInjectNEQ(v bool) predicate.Group {
|
|||||||
return predicate.Group(sql.FieldNEQ(FieldMcpXMLInject, v))
|
return predicate.Group(sql.FieldNEQ(FieldMcpXMLInject, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SortOrderEQ applies the EQ predicate on the "sort_order" field.
|
||||||
|
func SortOrderEQ(v int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldSortOrder, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortOrderNEQ applies the NEQ predicate on the "sort_order" field.
|
||||||
|
func SortOrderNEQ(v int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNEQ(FieldSortOrder, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortOrderIn applies the In predicate on the "sort_order" field.
|
||||||
|
func SortOrderIn(vs ...int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIn(FieldSortOrder, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortOrderNotIn applies the NotIn predicate on the "sort_order" field.
|
||||||
|
func SortOrderNotIn(vs ...int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotIn(FieldSortOrder, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortOrderGT applies the GT predicate on the "sort_order" field.
|
||||||
|
func SortOrderGT(v int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGT(FieldSortOrder, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortOrderGTE applies the GTE predicate on the "sort_order" field.
|
||||||
|
func SortOrderGTE(v int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGTE(FieldSortOrder, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortOrderLT applies the LT predicate on the "sort_order" field.
|
||||||
|
func SortOrderLT(v int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLT(FieldSortOrder, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortOrderLTE applies the LTE predicate on the "sort_order" field.
|
||||||
|
func SortOrderLTE(v int) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLTE(FieldSortOrder, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowMessagesDispatchEQ applies the EQ predicate on the "allow_messages_dispatch" field.
|
||||||
|
func AllowMessagesDispatchEQ(v bool) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldAllowMessagesDispatch, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowMessagesDispatchNEQ applies the NEQ predicate on the "allow_messages_dispatch" field.
|
||||||
|
func AllowMessagesDispatchNEQ(v bool) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNEQ(FieldAllowMessagesDispatch, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelEQ applies the EQ predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelEQ(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEQ(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelNEQ applies the NEQ predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelNEQ(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNEQ(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelIn applies the In predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelIn(vs ...string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldIn(FieldDefaultMappedModel, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelNotIn applies the NotIn predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelNotIn(vs ...string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldNotIn(FieldDefaultMappedModel, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelGT applies the GT predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelGT(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGT(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelGTE applies the GTE predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelGTE(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldGTE(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelLT applies the LT predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelLT(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLT(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelLTE applies the LTE predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelLTE(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldLTE(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelContains applies the Contains predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelContains(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldContains(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelHasPrefix applies the HasPrefix predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelHasPrefix(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldHasPrefix(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelHasSuffix applies the HasSuffix predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelHasSuffix(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldHasSuffix(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelEqualFold applies the EqualFold predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelEqualFold(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldEqualFold(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMappedModelContainsFold applies the ContainsFold predicate on the "default_mapped_model" field.
|
||||||
|
func DefaultMappedModelContainsFold(v string) predicate.Group {
|
||||||
|
return predicate.Group(sql.FieldContainsFold(FieldDefaultMappedModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge.
|
// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge.
|
||||||
func HasAPIKeys() predicate.Group {
|
func HasAPIKeys() predicate.Group {
|
||||||
return predicate.Group(func(s *sql.Selector) {
|
return predicate.Group(func(s *sql.Selector) {
|
||||||
|
|||||||
@@ -258,6 +258,76 @@ func (_c *GroupCreate) SetNillableImagePrice4k(v *float64) *GroupCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||||
|
func (_c *GroupCreate) SetSoraImagePrice360(v float64) *GroupCreate {
|
||||||
|
_c.mutation.SetSoraImagePrice360(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraImagePrice360 sets the "sora_image_price_360" field if the given value is not nil.
|
||||||
|
func (_c *GroupCreate) SetNillableSoraImagePrice360(v *float64) *GroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSoraImagePrice360(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||||
|
func (_c *GroupCreate) SetSoraImagePrice540(v float64) *GroupCreate {
|
||||||
|
_c.mutation.SetSoraImagePrice540(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraImagePrice540 sets the "sora_image_price_540" field if the given value is not nil.
|
||||||
|
func (_c *GroupCreate) SetNillableSoraImagePrice540(v *float64) *GroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSoraImagePrice540(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||||
|
func (_c *GroupCreate) SetSoraVideoPricePerRequest(v float64) *GroupCreate {
|
||||||
|
_c.mutation.SetSoraVideoPricePerRequest(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraVideoPricePerRequest sets the "sora_video_price_per_request" field if the given value is not nil.
|
||||||
|
func (_c *GroupCreate) SetNillableSoraVideoPricePerRequest(v *float64) *GroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSoraVideoPricePerRequest(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||||
|
func (_c *GroupCreate) SetSoraVideoPricePerRequestHd(v float64) *GroupCreate {
|
||||||
|
_c.mutation.SetSoraVideoPricePerRequestHd(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field if the given value is not nil.
|
||||||
|
func (_c *GroupCreate) SetNillableSoraVideoPricePerRequestHd(v *float64) *GroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSoraVideoPricePerRequestHd(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (_c *GroupCreate) SetSoraStorageQuotaBytes(v int64) *GroupCreate {
|
||||||
|
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||||
|
func (_c *GroupCreate) SetNillableSoraStorageQuotaBytes(v *int64) *GroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSoraStorageQuotaBytes(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||||
func (_c *GroupCreate) SetClaudeCodeOnly(v bool) *GroupCreate {
|
func (_c *GroupCreate) SetClaudeCodeOnly(v bool) *GroupCreate {
|
||||||
_c.mutation.SetClaudeCodeOnly(v)
|
_c.mutation.SetClaudeCodeOnly(v)
|
||||||
@@ -340,6 +410,48 @@ func (_c *GroupCreate) SetSupportedModelScopes(v []string) *GroupCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSortOrder sets the "sort_order" field.
|
||||||
|
func (_c *GroupCreate) SetSortOrder(v int) *GroupCreate {
|
||||||
|
_c.mutation.SetSortOrder(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSortOrder sets the "sort_order" field if the given value is not nil.
|
||||||
|
func (_c *GroupCreate) SetNillableSortOrder(v *int) *GroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSortOrder(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAllowMessagesDispatch sets the "allow_messages_dispatch" field.
|
||||||
|
func (_c *GroupCreate) SetAllowMessagesDispatch(v bool) *GroupCreate {
|
||||||
|
_c.mutation.SetAllowMessagesDispatch(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableAllowMessagesDispatch sets the "allow_messages_dispatch" field if the given value is not nil.
|
||||||
|
func (_c *GroupCreate) SetNillableAllowMessagesDispatch(v *bool) *GroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetAllowMessagesDispatch(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||||
|
func (_c *GroupCreate) SetDefaultMappedModel(v string) *GroupCreate {
|
||||||
|
_c.mutation.SetDefaultMappedModel(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableDefaultMappedModel sets the "default_mapped_model" field if the given value is not nil.
|
||||||
|
func (_c *GroupCreate) SetNillableDefaultMappedModel(v *string) *GroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetDefaultMappedModel(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||||
func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate {
|
func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate {
|
||||||
_c.mutation.AddAPIKeyIDs(ids...)
|
_c.mutation.AddAPIKeyIDs(ids...)
|
||||||
@@ -505,6 +617,10 @@ func (_c *GroupCreate) defaults() error {
|
|||||||
v := group.DefaultDefaultValidityDays
|
v := group.DefaultDefaultValidityDays
|
||||||
_c.mutation.SetDefaultValidityDays(v)
|
_c.mutation.SetDefaultValidityDays(v)
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||||
|
v := group.DefaultSoraStorageQuotaBytes
|
||||||
|
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||||
|
}
|
||||||
if _, ok := _c.mutation.ClaudeCodeOnly(); !ok {
|
if _, ok := _c.mutation.ClaudeCodeOnly(); !ok {
|
||||||
v := group.DefaultClaudeCodeOnly
|
v := group.DefaultClaudeCodeOnly
|
||||||
_c.mutation.SetClaudeCodeOnly(v)
|
_c.mutation.SetClaudeCodeOnly(v)
|
||||||
@@ -521,6 +637,18 @@ func (_c *GroupCreate) defaults() error {
|
|||||||
v := group.DefaultSupportedModelScopes
|
v := group.DefaultSupportedModelScopes
|
||||||
_c.mutation.SetSupportedModelScopes(v)
|
_c.mutation.SetSupportedModelScopes(v)
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.SortOrder(); !ok {
|
||||||
|
v := group.DefaultSortOrder
|
||||||
|
_c.mutation.SetSortOrder(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.AllowMessagesDispatch(); !ok {
|
||||||
|
v := group.DefaultAllowMessagesDispatch
|
||||||
|
_c.mutation.SetAllowMessagesDispatch(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.DefaultMappedModel(); !ok {
|
||||||
|
v := group.DefaultDefaultMappedModel
|
||||||
|
_c.mutation.SetDefaultMappedModel(v)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -573,6 +701,9 @@ func (_c *GroupCreate) check() error {
|
|||||||
if _, ok := _c.mutation.DefaultValidityDays(); !ok {
|
if _, ok := _c.mutation.DefaultValidityDays(); !ok {
|
||||||
return &ValidationError{Name: "default_validity_days", err: errors.New(`ent: missing required field "Group.default_validity_days"`)}
|
return &ValidationError{Name: "default_validity_days", err: errors.New(`ent: missing required field "Group.default_validity_days"`)}
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||||
|
return &ValidationError{Name: "sora_storage_quota_bytes", err: errors.New(`ent: missing required field "Group.sora_storage_quota_bytes"`)}
|
||||||
|
}
|
||||||
if _, ok := _c.mutation.ClaudeCodeOnly(); !ok {
|
if _, ok := _c.mutation.ClaudeCodeOnly(); !ok {
|
||||||
return &ValidationError{Name: "claude_code_only", err: errors.New(`ent: missing required field "Group.claude_code_only"`)}
|
return &ValidationError{Name: "claude_code_only", err: errors.New(`ent: missing required field "Group.claude_code_only"`)}
|
||||||
}
|
}
|
||||||
@@ -585,6 +716,20 @@ func (_c *GroupCreate) check() error {
|
|||||||
if _, ok := _c.mutation.SupportedModelScopes(); !ok {
|
if _, ok := _c.mutation.SupportedModelScopes(); !ok {
|
||||||
return &ValidationError{Name: "supported_model_scopes", err: errors.New(`ent: missing required field "Group.supported_model_scopes"`)}
|
return &ValidationError{Name: "supported_model_scopes", err: errors.New(`ent: missing required field "Group.supported_model_scopes"`)}
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.SortOrder(); !ok {
|
||||||
|
return &ValidationError{Name: "sort_order", err: errors.New(`ent: missing required field "Group.sort_order"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.AllowMessagesDispatch(); !ok {
|
||||||
|
return &ValidationError{Name: "allow_messages_dispatch", err: errors.New(`ent: missing required field "Group.allow_messages_dispatch"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.DefaultMappedModel(); !ok {
|
||||||
|
return &ValidationError{Name: "default_mapped_model", err: errors.New(`ent: missing required field "Group.default_mapped_model"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.DefaultMappedModel(); ok {
|
||||||
|
if err := group.DefaultMappedModelValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "default_mapped_model", err: fmt.Errorf(`ent: validator failed for field "Group.default_mapped_model": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -680,6 +825,26 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(group.FieldImagePrice4k, field.TypeFloat64, value)
|
_spec.SetField(group.FieldImagePrice4k, field.TypeFloat64, value)
|
||||||
_node.ImagePrice4k = &value
|
_node.ImagePrice4k = &value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.SoraImagePrice360(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||||
|
_node.SoraImagePrice360 = &value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.SoraImagePrice540(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||||
|
_node.SoraImagePrice540 = &value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.SoraVideoPricePerRequest(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||||
|
_node.SoraVideoPricePerRequest = &value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.SoraVideoPricePerRequestHd(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||||
|
_node.SoraVideoPricePerRequestHd = &value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.SoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
_node.SoraStorageQuotaBytes = value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.ClaudeCodeOnly(); ok {
|
if value, ok := _c.mutation.ClaudeCodeOnly(); ok {
|
||||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||||
_node.ClaudeCodeOnly = value
|
_node.ClaudeCodeOnly = value
|
||||||
@@ -708,6 +873,18 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(group.FieldSupportedModelScopes, field.TypeJSON, value)
|
_spec.SetField(group.FieldSupportedModelScopes, field.TypeJSON, value)
|
||||||
_node.SupportedModelScopes = value
|
_node.SupportedModelScopes = value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.SortOrder(); ok {
|
||||||
|
_spec.SetField(group.FieldSortOrder, field.TypeInt, value)
|
||||||
|
_node.SortOrder = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.AllowMessagesDispatch(); ok {
|
||||||
|
_spec.SetField(group.FieldAllowMessagesDispatch, field.TypeBool, value)
|
||||||
|
_node.AllowMessagesDispatch = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.DefaultMappedModel(); ok {
|
||||||
|
_spec.SetField(group.FieldDefaultMappedModel, field.TypeString, value)
|
||||||
|
_node.DefaultMappedModel = value
|
||||||
|
}
|
||||||
if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 {
|
if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.O2M,
|
Rel: sqlgraph.O2M,
|
||||||
@@ -1152,6 +1329,120 @@ func (u *GroupUpsert) ClearImagePrice4k() *GroupUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsert) SetSoraImagePrice360(v float64) *GroupUpsert {
|
||||||
|
u.Set(group.FieldSoraImagePrice360, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraImagePrice360 sets the "sora_image_price_360" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsert) UpdateSoraImagePrice360() *GroupUpsert {
|
||||||
|
u.SetExcluded(group.FieldSoraImagePrice360)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice360 adds v to the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsert) AddSoraImagePrice360(v float64) *GroupUpsert {
|
||||||
|
u.Add(group.FieldSoraImagePrice360, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsert) ClearSoraImagePrice360() *GroupUpsert {
|
||||||
|
u.SetNull(group.FieldSoraImagePrice360)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsert) SetSoraImagePrice540(v float64) *GroupUpsert {
|
||||||
|
u.Set(group.FieldSoraImagePrice540, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraImagePrice540 sets the "sora_image_price_540" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsert) UpdateSoraImagePrice540() *GroupUpsert {
|
||||||
|
u.SetExcluded(group.FieldSoraImagePrice540)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice540 adds v to the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsert) AddSoraImagePrice540(v float64) *GroupUpsert {
|
||||||
|
u.Add(group.FieldSoraImagePrice540, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsert) ClearSoraImagePrice540() *GroupUpsert {
|
||||||
|
u.SetNull(group.FieldSoraImagePrice540)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsert) SetSoraVideoPricePerRequest(v float64) *GroupUpsert {
|
||||||
|
u.Set(group.FieldSoraVideoPricePerRequest, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraVideoPricePerRequest sets the "sora_video_price_per_request" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsert) UpdateSoraVideoPricePerRequest() *GroupUpsert {
|
||||||
|
u.SetExcluded(group.FieldSoraVideoPricePerRequest)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequest adds v to the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsert) AddSoraVideoPricePerRequest(v float64) *GroupUpsert {
|
||||||
|
u.Add(group.FieldSoraVideoPricePerRequest, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsert) ClearSoraVideoPricePerRequest() *GroupUpsert {
|
||||||
|
u.SetNull(group.FieldSoraVideoPricePerRequest)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsert) SetSoraVideoPricePerRequestHd(v float64) *GroupUpsert {
|
||||||
|
u.Set(group.FieldSoraVideoPricePerRequestHd, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsert) UpdateSoraVideoPricePerRequestHd() *GroupUpsert {
|
||||||
|
u.SetExcluded(group.FieldSoraVideoPricePerRequestHd)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequestHd adds v to the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsert) AddSoraVideoPricePerRequestHd(v float64) *GroupUpsert {
|
||||||
|
u.Add(group.FieldSoraVideoPricePerRequestHd, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsert) ClearSoraVideoPricePerRequestHd() *GroupUpsert {
|
||||||
|
u.SetNull(group.FieldSoraVideoPricePerRequestHd)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *GroupUpsert) SetSoraStorageQuotaBytes(v int64) *GroupUpsert {
|
||||||
|
u.Set(group.FieldSoraStorageQuotaBytes, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsert) UpdateSoraStorageQuotaBytes() *GroupUpsert {
|
||||||
|
u.SetExcluded(group.FieldSoraStorageQuotaBytes)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *GroupUpsert) AddSoraStorageQuotaBytes(v int64) *GroupUpsert {
|
||||||
|
u.Add(group.FieldSoraStorageQuotaBytes, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||||
func (u *GroupUpsert) SetClaudeCodeOnly(v bool) *GroupUpsert {
|
func (u *GroupUpsert) SetClaudeCodeOnly(v bool) *GroupUpsert {
|
||||||
u.Set(group.FieldClaudeCodeOnly, v)
|
u.Set(group.FieldClaudeCodeOnly, v)
|
||||||
@@ -1266,6 +1557,48 @@ func (u *GroupUpsert) UpdateSupportedModelScopes() *GroupUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSortOrder sets the "sort_order" field.
|
||||||
|
func (u *GroupUpsert) SetSortOrder(v int) *GroupUpsert {
|
||||||
|
u.Set(group.FieldSortOrder, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSortOrder sets the "sort_order" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsert) UpdateSortOrder() *GroupUpsert {
|
||||||
|
u.SetExcluded(group.FieldSortOrder)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSortOrder adds v to the "sort_order" field.
|
||||||
|
func (u *GroupUpsert) AddSortOrder(v int) *GroupUpsert {
|
||||||
|
u.Add(group.FieldSortOrder, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAllowMessagesDispatch sets the "allow_messages_dispatch" field.
|
||||||
|
func (u *GroupUpsert) SetAllowMessagesDispatch(v bool) *GroupUpsert {
|
||||||
|
u.Set(group.FieldAllowMessagesDispatch, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAllowMessagesDispatch sets the "allow_messages_dispatch" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsert) UpdateAllowMessagesDispatch() *GroupUpsert {
|
||||||
|
u.SetExcluded(group.FieldAllowMessagesDispatch)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||||
|
func (u *GroupUpsert) SetDefaultMappedModel(v string) *GroupUpsert {
|
||||||
|
u.Set(group.FieldDefaultMappedModel, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateDefaultMappedModel sets the "default_mapped_model" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsert) UpdateDefaultMappedModel() *GroupUpsert {
|
||||||
|
u.SetExcluded(group.FieldDefaultMappedModel)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
// Using this option is equivalent to using:
|
// Using this option is equivalent to using:
|
||||||
//
|
//
|
||||||
@@ -1647,6 +1980,139 @@ func (u *GroupUpsertOne) ClearImagePrice4k() *GroupUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsertOne) SetSoraImagePrice360(v float64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraImagePrice360(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice360 adds v to the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsertOne) AddSoraImagePrice360(v float64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraImagePrice360(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraImagePrice360 sets the "sora_image_price_360" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertOne) UpdateSoraImagePrice360() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraImagePrice360()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsertOne) ClearSoraImagePrice360() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.ClearSoraImagePrice360()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsertOne) SetSoraImagePrice540(v float64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraImagePrice540(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice540 adds v to the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsertOne) AddSoraImagePrice540(v float64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraImagePrice540(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraImagePrice540 sets the "sora_image_price_540" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertOne) UpdateSoraImagePrice540() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraImagePrice540()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsertOne) ClearSoraImagePrice540() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.ClearSoraImagePrice540()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsertOne) SetSoraVideoPricePerRequest(v float64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraVideoPricePerRequest(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequest adds v to the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsertOne) AddSoraVideoPricePerRequest(v float64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraVideoPricePerRequest(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraVideoPricePerRequest sets the "sora_video_price_per_request" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertOne) UpdateSoraVideoPricePerRequest() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraVideoPricePerRequest()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsertOne) ClearSoraVideoPricePerRequest() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.ClearSoraVideoPricePerRequest()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsertOne) SetSoraVideoPricePerRequestHd(v float64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraVideoPricePerRequestHd(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequestHd adds v to the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsertOne) AddSoraVideoPricePerRequestHd(v float64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraVideoPricePerRequestHd(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertOne) UpdateSoraVideoPricePerRequestHd() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraVideoPricePerRequestHd()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsertOne) ClearSoraVideoPricePerRequestHd() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.ClearSoraVideoPricePerRequestHd()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *GroupUpsertOne) SetSoraStorageQuotaBytes(v int64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraStorageQuotaBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *GroupUpsertOne) AddSoraStorageQuotaBytes(v int64) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraStorageQuotaBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertOne) UpdateSoraStorageQuotaBytes() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraStorageQuotaBytes()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||||
func (u *GroupUpsertOne) SetClaudeCodeOnly(v bool) *GroupUpsertOne {
|
func (u *GroupUpsertOne) SetClaudeCodeOnly(v bool) *GroupUpsertOne {
|
||||||
return u.Update(func(s *GroupUpsert) {
|
return u.Update(func(s *GroupUpsert) {
|
||||||
@@ -1780,6 +2246,55 @@ func (u *GroupUpsertOne) UpdateSupportedModelScopes() *GroupUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSortOrder sets the "sort_order" field.
|
||||||
|
func (u *GroupUpsertOne) SetSortOrder(v int) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSortOrder(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSortOrder adds v to the "sort_order" field.
|
||||||
|
func (u *GroupUpsertOne) AddSortOrder(v int) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSortOrder(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSortOrder sets the "sort_order" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertOne) UpdateSortOrder() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSortOrder()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAllowMessagesDispatch sets the "allow_messages_dispatch" field.
|
||||||
|
func (u *GroupUpsertOne) SetAllowMessagesDispatch(v bool) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetAllowMessagesDispatch(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAllowMessagesDispatch sets the "allow_messages_dispatch" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertOne) UpdateAllowMessagesDispatch() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateAllowMessagesDispatch()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||||
|
func (u *GroupUpsertOne) SetDefaultMappedModel(v string) *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetDefaultMappedModel(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateDefaultMappedModel sets the "default_mapped_model" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertOne) UpdateDefaultMappedModel() *GroupUpsertOne {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateDefaultMappedModel()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *GroupUpsertOne) Exec(ctx context.Context) error {
|
func (u *GroupUpsertOne) Exec(ctx context.Context) error {
|
||||||
if len(u.create.conflict) == 0 {
|
if len(u.create.conflict) == 0 {
|
||||||
@@ -2327,6 +2842,139 @@ func (u *GroupUpsertBulk) ClearImagePrice4k() *GroupUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsertBulk) SetSoraImagePrice360(v float64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraImagePrice360(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice360 adds v to the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsertBulk) AddSoraImagePrice360(v float64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraImagePrice360(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraImagePrice360 sets the "sora_image_price_360" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertBulk) UpdateSoraImagePrice360() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraImagePrice360()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||||
|
func (u *GroupUpsertBulk) ClearSoraImagePrice360() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.ClearSoraImagePrice360()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsertBulk) SetSoraImagePrice540(v float64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraImagePrice540(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice540 adds v to the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsertBulk) AddSoraImagePrice540(v float64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraImagePrice540(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraImagePrice540 sets the "sora_image_price_540" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertBulk) UpdateSoraImagePrice540() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraImagePrice540()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||||
|
func (u *GroupUpsertBulk) ClearSoraImagePrice540() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.ClearSoraImagePrice540()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsertBulk) SetSoraVideoPricePerRequest(v float64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraVideoPricePerRequest(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequest adds v to the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsertBulk) AddSoraVideoPricePerRequest(v float64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraVideoPricePerRequest(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraVideoPricePerRequest sets the "sora_video_price_per_request" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertBulk) UpdateSoraVideoPricePerRequest() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraVideoPricePerRequest()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||||
|
func (u *GroupUpsertBulk) ClearSoraVideoPricePerRequest() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.ClearSoraVideoPricePerRequest()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsertBulk) SetSoraVideoPricePerRequestHd(v float64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraVideoPricePerRequestHd(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequestHd adds v to the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsertBulk) AddSoraVideoPricePerRequestHd(v float64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraVideoPricePerRequestHd(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertBulk) UpdateSoraVideoPricePerRequestHd() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraVideoPricePerRequestHd()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||||
|
func (u *GroupUpsertBulk) ClearSoraVideoPricePerRequestHd() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.ClearSoraVideoPricePerRequestHd()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *GroupUpsertBulk) SetSoraStorageQuotaBytes(v int64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSoraStorageQuotaBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *GroupUpsertBulk) AddSoraStorageQuotaBytes(v int64) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSoraStorageQuotaBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertBulk) UpdateSoraStorageQuotaBytes() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSoraStorageQuotaBytes()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||||
func (u *GroupUpsertBulk) SetClaudeCodeOnly(v bool) *GroupUpsertBulk {
|
func (u *GroupUpsertBulk) SetClaudeCodeOnly(v bool) *GroupUpsertBulk {
|
||||||
return u.Update(func(s *GroupUpsert) {
|
return u.Update(func(s *GroupUpsert) {
|
||||||
@@ -2460,6 +3108,55 @@ func (u *GroupUpsertBulk) UpdateSupportedModelScopes() *GroupUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSortOrder sets the "sort_order" field.
|
||||||
|
func (u *GroupUpsertBulk) SetSortOrder(v int) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetSortOrder(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSortOrder adds v to the "sort_order" field.
|
||||||
|
func (u *GroupUpsertBulk) AddSortOrder(v int) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.AddSortOrder(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSortOrder sets the "sort_order" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertBulk) UpdateSortOrder() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateSortOrder()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAllowMessagesDispatch sets the "allow_messages_dispatch" field.
|
||||||
|
func (u *GroupUpsertBulk) SetAllowMessagesDispatch(v bool) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetAllowMessagesDispatch(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAllowMessagesDispatch sets the "allow_messages_dispatch" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertBulk) UpdateAllowMessagesDispatch() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateAllowMessagesDispatch()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||||
|
func (u *GroupUpsertBulk) SetDefaultMappedModel(v string) *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.SetDefaultMappedModel(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateDefaultMappedModel sets the "default_mapped_model" field to the value that was provided on create.
|
||||||
|
func (u *GroupUpsertBulk) UpdateDefaultMappedModel() *GroupUpsertBulk {
|
||||||
|
return u.Update(func(s *GroupUpsert) {
|
||||||
|
s.UpdateDefaultMappedModel()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *GroupUpsertBulk) Exec(ctx context.Context) error {
|
func (u *GroupUpsertBulk) Exec(ctx context.Context) error {
|
||||||
if u.create.err != nil {
|
if u.create.err != nil {
|
||||||
|
|||||||
@@ -355,6 +355,135 @@ func (_u *GroupUpdate) ClearImagePrice4k() *GroupUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||||
|
func (_u *GroupUpdate) SetSoraImagePrice360(v float64) *GroupUpdate {
|
||||||
|
_u.mutation.ResetSoraImagePrice360()
|
||||||
|
_u.mutation.SetSoraImagePrice360(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraImagePrice360 sets the "sora_image_price_360" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdate) SetNillableSoraImagePrice360(v *float64) *GroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraImagePrice360(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice360 adds value to the "sora_image_price_360" field.
|
||||||
|
func (_u *GroupUpdate) AddSoraImagePrice360(v float64) *GroupUpdate {
|
||||||
|
_u.mutation.AddSoraImagePrice360(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||||
|
func (_u *GroupUpdate) ClearSoraImagePrice360() *GroupUpdate {
|
||||||
|
_u.mutation.ClearSoraImagePrice360()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||||
|
func (_u *GroupUpdate) SetSoraImagePrice540(v float64) *GroupUpdate {
|
||||||
|
_u.mutation.ResetSoraImagePrice540()
|
||||||
|
_u.mutation.SetSoraImagePrice540(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraImagePrice540 sets the "sora_image_price_540" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdate) SetNillableSoraImagePrice540(v *float64) *GroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraImagePrice540(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice540 adds value to the "sora_image_price_540" field.
|
||||||
|
func (_u *GroupUpdate) AddSoraImagePrice540(v float64) *GroupUpdate {
|
||||||
|
_u.mutation.AddSoraImagePrice540(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||||
|
func (_u *GroupUpdate) ClearSoraImagePrice540() *GroupUpdate {
|
||||||
|
_u.mutation.ClearSoraImagePrice540()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||||
|
func (_u *GroupUpdate) SetSoraVideoPricePerRequest(v float64) *GroupUpdate {
|
||||||
|
_u.mutation.ResetSoraVideoPricePerRequest()
|
||||||
|
_u.mutation.SetSoraVideoPricePerRequest(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraVideoPricePerRequest sets the "sora_video_price_per_request" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdate) SetNillableSoraVideoPricePerRequest(v *float64) *GroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraVideoPricePerRequest(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequest adds value to the "sora_video_price_per_request" field.
|
||||||
|
func (_u *GroupUpdate) AddSoraVideoPricePerRequest(v float64) *GroupUpdate {
|
||||||
|
_u.mutation.AddSoraVideoPricePerRequest(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||||
|
func (_u *GroupUpdate) ClearSoraVideoPricePerRequest() *GroupUpdate {
|
||||||
|
_u.mutation.ClearSoraVideoPricePerRequest()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||||
|
func (_u *GroupUpdate) SetSoraVideoPricePerRequestHd(v float64) *GroupUpdate {
|
||||||
|
_u.mutation.ResetSoraVideoPricePerRequestHd()
|
||||||
|
_u.mutation.SetSoraVideoPricePerRequestHd(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdate) SetNillableSoraVideoPricePerRequestHd(v *float64) *GroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraVideoPricePerRequestHd(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequestHd adds value to the "sora_video_price_per_request_hd" field.
|
||||||
|
func (_u *GroupUpdate) AddSoraVideoPricePerRequestHd(v float64) *GroupUpdate {
|
||||||
|
_u.mutation.AddSoraVideoPricePerRequestHd(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||||
|
func (_u *GroupUpdate) ClearSoraVideoPricePerRequestHd() *GroupUpdate {
|
||||||
|
_u.mutation.ClearSoraVideoPricePerRequestHd()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (_u *GroupUpdate) SetSoraStorageQuotaBytes(v int64) *GroupUpdate {
|
||||||
|
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||||
|
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdate) SetNillableSoraStorageQuotaBytes(v *int64) *GroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraStorageQuotaBytes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||||
|
func (_u *GroupUpdate) AddSoraStorageQuotaBytes(v int64) *GroupUpdate {
|
||||||
|
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||||
func (_u *GroupUpdate) SetClaudeCodeOnly(v bool) *GroupUpdate {
|
func (_u *GroupUpdate) SetClaudeCodeOnly(v bool) *GroupUpdate {
|
||||||
_u.mutation.SetClaudeCodeOnly(v)
|
_u.mutation.SetClaudeCodeOnly(v)
|
||||||
@@ -475,6 +604,55 @@ func (_u *GroupUpdate) AppendSupportedModelScopes(v []string) *GroupUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSortOrder sets the "sort_order" field.
|
||||||
|
func (_u *GroupUpdate) SetSortOrder(v int) *GroupUpdate {
|
||||||
|
_u.mutation.ResetSortOrder()
|
||||||
|
_u.mutation.SetSortOrder(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSortOrder sets the "sort_order" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdate) SetNillableSortOrder(v *int) *GroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSortOrder(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSortOrder adds value to the "sort_order" field.
|
||||||
|
func (_u *GroupUpdate) AddSortOrder(v int) *GroupUpdate {
|
||||||
|
_u.mutation.AddSortOrder(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAllowMessagesDispatch sets the "allow_messages_dispatch" field.
|
||||||
|
func (_u *GroupUpdate) SetAllowMessagesDispatch(v bool) *GroupUpdate {
|
||||||
|
_u.mutation.SetAllowMessagesDispatch(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableAllowMessagesDispatch sets the "allow_messages_dispatch" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdate) SetNillableAllowMessagesDispatch(v *bool) *GroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetAllowMessagesDispatch(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||||
|
func (_u *GroupUpdate) SetDefaultMappedModel(v string) *GroupUpdate {
|
||||||
|
_u.mutation.SetDefaultMappedModel(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableDefaultMappedModel sets the "default_mapped_model" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdate) SetNillableDefaultMappedModel(v *string) *GroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetDefaultMappedModel(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||||
func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate {
|
func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate {
|
||||||
_u.mutation.AddAPIKeyIDs(ids...)
|
_u.mutation.AddAPIKeyIDs(ids...)
|
||||||
@@ -760,6 +938,11 @@ func (_u *GroupUpdate) check() error {
|
|||||||
return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)}
|
return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.DefaultMappedModel(); ok {
|
||||||
|
if err := group.DefaultMappedModelValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "default_mapped_model", err: fmt.Errorf(`ent: validator failed for field "Group.default_mapped_model": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -871,6 +1054,48 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if _u.mutation.ImagePrice4kCleared() {
|
if _u.mutation.ImagePrice4kCleared() {
|
||||||
_spec.ClearField(group.FieldImagePrice4k, field.TypeFloat64)
|
_spec.ClearField(group.FieldImagePrice4k, field.TypeFloat64)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.SoraImagePrice360(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraImagePrice360(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.SoraImagePrice360Cleared() {
|
||||||
|
_spec.ClearField(group.FieldSoraImagePrice360, field.TypeFloat64)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraImagePrice540(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraImagePrice540(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.SoraImagePrice540Cleared() {
|
||||||
|
_spec.ClearField(group.FieldSoraImagePrice540, field.TypeFloat64)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraVideoPricePerRequest(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraVideoPricePerRequest(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.SoraVideoPricePerRequestCleared() {
|
||||||
|
_spec.ClearField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraVideoPricePerRequestHd(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraVideoPricePerRequestHd(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.SoraVideoPricePerRequestHdCleared() {
|
||||||
|
_spec.ClearField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.ClaudeCodeOnly(); ok {
|
if value, ok := _u.mutation.ClaudeCodeOnly(); ok {
|
||||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||||
}
|
}
|
||||||
@@ -912,6 +1137,18 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
sqljson.Append(u, group.FieldSupportedModelScopes, value)
|
sqljson.Append(u, group.FieldSupportedModelScopes, value)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.SortOrder(); ok {
|
||||||
|
_spec.SetField(group.FieldSortOrder, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSortOrder(); ok {
|
||||||
|
_spec.AddField(group.FieldSortOrder, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AllowMessagesDispatch(); ok {
|
||||||
|
_spec.SetField(group.FieldAllowMessagesDispatch, field.TypeBool, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.DefaultMappedModel(); ok {
|
||||||
|
_spec.SetField(group.FieldDefaultMappedModel, field.TypeString, value)
|
||||||
|
}
|
||||||
if _u.mutation.APIKeysCleared() {
|
if _u.mutation.APIKeysCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.O2M,
|
Rel: sqlgraph.O2M,
|
||||||
@@ -1546,6 +1783,135 @@ func (_u *GroupUpdateOne) ClearImagePrice4k() *GroupUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||||
|
func (_u *GroupUpdateOne) SetSoraImagePrice360(v float64) *GroupUpdateOne {
|
||||||
|
_u.mutation.ResetSoraImagePrice360()
|
||||||
|
_u.mutation.SetSoraImagePrice360(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraImagePrice360 sets the "sora_image_price_360" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdateOne) SetNillableSoraImagePrice360(v *float64) *GroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraImagePrice360(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice360 adds value to the "sora_image_price_360" field.
|
||||||
|
func (_u *GroupUpdateOne) AddSoraImagePrice360(v float64) *GroupUpdateOne {
|
||||||
|
_u.mutation.AddSoraImagePrice360(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||||
|
func (_u *GroupUpdateOne) ClearSoraImagePrice360() *GroupUpdateOne {
|
||||||
|
_u.mutation.ClearSoraImagePrice360()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||||
|
func (_u *GroupUpdateOne) SetSoraImagePrice540(v float64) *GroupUpdateOne {
|
||||||
|
_u.mutation.ResetSoraImagePrice540()
|
||||||
|
_u.mutation.SetSoraImagePrice540(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraImagePrice540 sets the "sora_image_price_540" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdateOne) SetNillableSoraImagePrice540(v *float64) *GroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraImagePrice540(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraImagePrice540 adds value to the "sora_image_price_540" field.
|
||||||
|
func (_u *GroupUpdateOne) AddSoraImagePrice540(v float64) *GroupUpdateOne {
|
||||||
|
_u.mutation.AddSoraImagePrice540(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||||
|
func (_u *GroupUpdateOne) ClearSoraImagePrice540() *GroupUpdateOne {
|
||||||
|
_u.mutation.ClearSoraImagePrice540()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||||
|
func (_u *GroupUpdateOne) SetSoraVideoPricePerRequest(v float64) *GroupUpdateOne {
|
||||||
|
_u.mutation.ResetSoraVideoPricePerRequest()
|
||||||
|
_u.mutation.SetSoraVideoPricePerRequest(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraVideoPricePerRequest sets the "sora_video_price_per_request" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdateOne) SetNillableSoraVideoPricePerRequest(v *float64) *GroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraVideoPricePerRequest(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequest adds value to the "sora_video_price_per_request" field.
|
||||||
|
func (_u *GroupUpdateOne) AddSoraVideoPricePerRequest(v float64) *GroupUpdateOne {
|
||||||
|
_u.mutation.AddSoraVideoPricePerRequest(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||||
|
func (_u *GroupUpdateOne) ClearSoraVideoPricePerRequest() *GroupUpdateOne {
|
||||||
|
_u.mutation.ClearSoraVideoPricePerRequest()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||||
|
func (_u *GroupUpdateOne) SetSoraVideoPricePerRequestHd(v float64) *GroupUpdateOne {
|
||||||
|
_u.mutation.ResetSoraVideoPricePerRequestHd()
|
||||||
|
_u.mutation.SetSoraVideoPricePerRequestHd(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdateOne) SetNillableSoraVideoPricePerRequestHd(v *float64) *GroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraVideoPricePerRequestHd(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraVideoPricePerRequestHd adds value to the "sora_video_price_per_request_hd" field.
|
||||||
|
func (_u *GroupUpdateOne) AddSoraVideoPricePerRequestHd(v float64) *GroupUpdateOne {
|
||||||
|
_u.mutation.AddSoraVideoPricePerRequestHd(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||||
|
func (_u *GroupUpdateOne) ClearSoraVideoPricePerRequestHd() *GroupUpdateOne {
|
||||||
|
_u.mutation.ClearSoraVideoPricePerRequestHd()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (_u *GroupUpdateOne) SetSoraStorageQuotaBytes(v int64) *GroupUpdateOne {
|
||||||
|
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||||
|
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdateOne) SetNillableSoraStorageQuotaBytes(v *int64) *GroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraStorageQuotaBytes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||||
|
func (_u *GroupUpdateOne) AddSoraStorageQuotaBytes(v int64) *GroupUpdateOne {
|
||||||
|
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||||
func (_u *GroupUpdateOne) SetClaudeCodeOnly(v bool) *GroupUpdateOne {
|
func (_u *GroupUpdateOne) SetClaudeCodeOnly(v bool) *GroupUpdateOne {
|
||||||
_u.mutation.SetClaudeCodeOnly(v)
|
_u.mutation.SetClaudeCodeOnly(v)
|
||||||
@@ -1666,6 +2032,55 @@ func (_u *GroupUpdateOne) AppendSupportedModelScopes(v []string) *GroupUpdateOne
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSortOrder sets the "sort_order" field.
|
||||||
|
func (_u *GroupUpdateOne) SetSortOrder(v int) *GroupUpdateOne {
|
||||||
|
_u.mutation.ResetSortOrder()
|
||||||
|
_u.mutation.SetSortOrder(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSortOrder sets the "sort_order" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdateOne) SetNillableSortOrder(v *int) *GroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSortOrder(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSortOrder adds value to the "sort_order" field.
|
||||||
|
func (_u *GroupUpdateOne) AddSortOrder(v int) *GroupUpdateOne {
|
||||||
|
_u.mutation.AddSortOrder(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAllowMessagesDispatch sets the "allow_messages_dispatch" field.
|
||||||
|
func (_u *GroupUpdateOne) SetAllowMessagesDispatch(v bool) *GroupUpdateOne {
|
||||||
|
_u.mutation.SetAllowMessagesDispatch(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableAllowMessagesDispatch sets the "allow_messages_dispatch" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdateOne) SetNillableAllowMessagesDispatch(v *bool) *GroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetAllowMessagesDispatch(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||||
|
func (_u *GroupUpdateOne) SetDefaultMappedModel(v string) *GroupUpdateOne {
|
||||||
|
_u.mutation.SetDefaultMappedModel(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableDefaultMappedModel sets the "default_mapped_model" field if the given value is not nil.
|
||||||
|
func (_u *GroupUpdateOne) SetNillableDefaultMappedModel(v *string) *GroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetDefaultMappedModel(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||||
func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne {
|
func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne {
|
||||||
_u.mutation.AddAPIKeyIDs(ids...)
|
_u.mutation.AddAPIKeyIDs(ids...)
|
||||||
@@ -1964,6 +2379,11 @@ func (_u *GroupUpdateOne) check() error {
|
|||||||
return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)}
|
return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.DefaultMappedModel(); ok {
|
||||||
|
if err := group.DefaultMappedModelValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "default_mapped_model", err: fmt.Errorf(`ent: validator failed for field "Group.default_mapped_model": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2092,6 +2512,48 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error)
|
|||||||
if _u.mutation.ImagePrice4kCleared() {
|
if _u.mutation.ImagePrice4kCleared() {
|
||||||
_spec.ClearField(group.FieldImagePrice4k, field.TypeFloat64)
|
_spec.ClearField(group.FieldImagePrice4k, field.TypeFloat64)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.SoraImagePrice360(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraImagePrice360(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.SoraImagePrice360Cleared() {
|
||||||
|
_spec.ClearField(group.FieldSoraImagePrice360, field.TypeFloat64)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraImagePrice540(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraImagePrice540(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.SoraImagePrice540Cleared() {
|
||||||
|
_spec.ClearField(group.FieldSoraImagePrice540, field.TypeFloat64)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraVideoPricePerRequest(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraVideoPricePerRequest(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.SoraVideoPricePerRequestCleared() {
|
||||||
|
_spec.ClearField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraVideoPricePerRequestHd(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraVideoPricePerRequestHd(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.SoraVideoPricePerRequestHdCleared() {
|
||||||
|
_spec.ClearField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.AddField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.ClaudeCodeOnly(); ok {
|
if value, ok := _u.mutation.ClaudeCodeOnly(); ok {
|
||||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||||
}
|
}
|
||||||
@@ -2133,6 +2595,18 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error)
|
|||||||
sqljson.Append(u, group.FieldSupportedModelScopes, value)
|
sqljson.Append(u, group.FieldSupportedModelScopes, value)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.SortOrder(); ok {
|
||||||
|
_spec.SetField(group.FieldSortOrder, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSortOrder(); ok {
|
||||||
|
_spec.AddField(group.FieldSortOrder, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AllowMessagesDispatch(); ok {
|
||||||
|
_spec.SetField(group.FieldAllowMessagesDispatch, field.TypeBool, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.DefaultMappedModel(); ok {
|
||||||
|
_spec.SetField(group.FieldDefaultMappedModel, field.TypeString, value)
|
||||||
|
}
|
||||||
if _u.mutation.APIKeysCleared() {
|
if _u.mutation.APIKeysCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.O2M,
|
Rel: sqlgraph.O2M,
|
||||||
|
|||||||
@@ -93,6 +93,18 @@ func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error
|
|||||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
|
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The IdempotencyRecordFunc type is an adapter to allow the use of ordinary
|
||||||
|
// function as IdempotencyRecord mutator.
|
||||||
|
type IdempotencyRecordFunc func(context.Context, *ent.IdempotencyRecordMutation) (ent.Value, error)
|
||||||
|
|
||||||
|
// Mutate calls f(ctx, m).
|
||||||
|
func (f IdempotencyRecordFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||||
|
if mv, ok := m.(*ent.IdempotencyRecordMutation); ok {
|
||||||
|
return f(ctx, mv)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.IdempotencyRecordMutation", m)
|
||||||
|
}
|
||||||
|
|
||||||
// The PromoCodeFunc type is an adapter to allow the use of ordinary
|
// The PromoCodeFunc type is an adapter to allow the use of ordinary
|
||||||
// function as PromoCode mutator.
|
// function as PromoCode mutator.
|
||||||
type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error)
|
type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error)
|
||||||
@@ -141,6 +153,18 @@ func (f RedeemCodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value,
|
|||||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RedeemCodeMutation", m)
|
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RedeemCodeMutation", m)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The SecuritySecretFunc type is an adapter to allow the use of ordinary
|
||||||
|
// function as SecuritySecret mutator.
|
||||||
|
type SecuritySecretFunc func(context.Context, *ent.SecuritySecretMutation) (ent.Value, error)
|
||||||
|
|
||||||
|
// Mutate calls f(ctx, m).
|
||||||
|
func (f SecuritySecretFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||||
|
if mv, ok := m.(*ent.SecuritySecretMutation); ok {
|
||||||
|
return f(ctx, mv)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SecuritySecretMutation", m)
|
||||||
|
}
|
||||||
|
|
||||||
// The SettingFunc type is an adapter to allow the use of ordinary
|
// The SettingFunc type is an adapter to allow the use of ordinary
|
||||||
// function as Setting mutator.
|
// function as Setting mutator.
|
||||||
type SettingFunc func(context.Context, *ent.SettingMutation) (ent.Value, error)
|
type SettingFunc func(context.Context, *ent.SettingMutation) (ent.Value, error)
|
||||||
|
|||||||
228
backend/ent/idempotencyrecord.go
Normal file
228
backend/ent/idempotencyrecord.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IdempotencyRecord is the model entity for the IdempotencyRecord schema.
|
||||||
|
type IdempotencyRecord struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// Scope holds the value of the "scope" field.
|
||||||
|
Scope string `json:"scope,omitempty"`
|
||||||
|
// IdempotencyKeyHash holds the value of the "idempotency_key_hash" field.
|
||||||
|
IdempotencyKeyHash string `json:"idempotency_key_hash,omitempty"`
|
||||||
|
// RequestFingerprint holds the value of the "request_fingerprint" field.
|
||||||
|
RequestFingerprint string `json:"request_fingerprint,omitempty"`
|
||||||
|
// Status holds the value of the "status" field.
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
// ResponseStatus holds the value of the "response_status" field.
|
||||||
|
ResponseStatus *int `json:"response_status,omitempty"`
|
||||||
|
// ResponseBody holds the value of the "response_body" field.
|
||||||
|
ResponseBody *string `json:"response_body,omitempty"`
|
||||||
|
// ErrorReason holds the value of the "error_reason" field.
|
||||||
|
ErrorReason *string `json:"error_reason,omitempty"`
|
||||||
|
// LockedUntil holds the value of the "locked_until" field.
|
||||||
|
LockedUntil *time.Time `json:"locked_until,omitempty"`
|
||||||
|
// ExpiresAt holds the value of the "expires_at" field.
|
||||||
|
ExpiresAt time.Time `json:"expires_at,omitempty"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*IdempotencyRecord) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case idempotencyrecord.FieldID, idempotencyrecord.FieldResponseStatus:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case idempotencyrecord.FieldScope, idempotencyrecord.FieldIdempotencyKeyHash, idempotencyrecord.FieldRequestFingerprint, idempotencyrecord.FieldStatus, idempotencyrecord.FieldResponseBody, idempotencyrecord.FieldErrorReason:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case idempotencyrecord.FieldCreatedAt, idempotencyrecord.FieldUpdatedAt, idempotencyrecord.FieldLockedUntil, idempotencyrecord.FieldExpiresAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the IdempotencyRecord fields.
|
||||||
|
func (_m *IdempotencyRecord) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case idempotencyrecord.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case idempotencyrecord.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldScope:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field scope", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Scope = value.String
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldIdempotencyKeyHash:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field idempotency_key_hash", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.IdempotencyKeyHash = value.String
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldRequestFingerprint:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field request_fingerprint", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RequestFingerprint = value.String
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldResponseStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field response_status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ResponseStatus = new(int)
|
||||||
|
*_m.ResponseStatus = int(value.Int64)
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldResponseBody:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field response_body", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ResponseBody = new(string)
|
||||||
|
*_m.ResponseBody = value.String
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldErrorReason:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field error_reason", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ErrorReason = new(string)
|
||||||
|
*_m.ErrorReason = value.String
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldLockedUntil:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field locked_until", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.LockedUntil = new(time.Time)
|
||||||
|
*_m.LockedUntil = value.Time
|
||||||
|
}
|
||||||
|
case idempotencyrecord.FieldExpiresAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field expires_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ExpiresAt = value.Time
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the IdempotencyRecord.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *IdempotencyRecord) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this IdempotencyRecord.
|
||||||
|
// Note that you need to call IdempotencyRecord.Unwrap() before calling this method if this IdempotencyRecord
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *IdempotencyRecord) Update() *IdempotencyRecordUpdateOne {
|
||||||
|
return NewIdempotencyRecordClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the IdempotencyRecord entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *IdempotencyRecord) Unwrap() *IdempotencyRecord {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: IdempotencyRecord is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *IdempotencyRecord) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("IdempotencyRecord(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("scope=")
|
||||||
|
builder.WriteString(_m.Scope)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("idempotency_key_hash=")
|
||||||
|
builder.WriteString(_m.IdempotencyKeyHash)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("request_fingerprint=")
|
||||||
|
builder.WriteString(_m.RequestFingerprint)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ResponseStatus; v != nil {
|
||||||
|
builder.WriteString("response_status=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ResponseBody; v != nil {
|
||||||
|
builder.WriteString("response_body=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ErrorReason; v != nil {
|
||||||
|
builder.WriteString("error_reason=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.LockedUntil; v != nil {
|
||||||
|
builder.WriteString("locked_until=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("expires_at=")
|
||||||
|
builder.WriteString(_m.ExpiresAt.Format(time.ANSIC))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyRecords is a parsable slice of IdempotencyRecord.
|
||||||
|
type IdempotencyRecords []*IdempotencyRecord
|
||||||
148
backend/ent/idempotencyrecord/idempotencyrecord.go
Normal file
148
backend/ent/idempotencyrecord/idempotencyrecord.go
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package idempotencyrecord
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the idempotencyrecord type in the database.
|
||||||
|
Label = "idempotency_record"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||||
|
FieldUpdatedAt = "updated_at"
|
||||||
|
// FieldScope holds the string denoting the scope field in the database.
|
||||||
|
FieldScope = "scope"
|
||||||
|
// FieldIdempotencyKeyHash holds the string denoting the idempotency_key_hash field in the database.
|
||||||
|
FieldIdempotencyKeyHash = "idempotency_key_hash"
|
||||||
|
// FieldRequestFingerprint holds the string denoting the request_fingerprint field in the database.
|
||||||
|
FieldRequestFingerprint = "request_fingerprint"
|
||||||
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
|
FieldStatus = "status"
|
||||||
|
// FieldResponseStatus holds the string denoting the response_status field in the database.
|
||||||
|
FieldResponseStatus = "response_status"
|
||||||
|
// FieldResponseBody holds the string denoting the response_body field in the database.
|
||||||
|
FieldResponseBody = "response_body"
|
||||||
|
// FieldErrorReason holds the string denoting the error_reason field in the database.
|
||||||
|
FieldErrorReason = "error_reason"
|
||||||
|
// FieldLockedUntil holds the string denoting the locked_until field in the database.
|
||||||
|
FieldLockedUntil = "locked_until"
|
||||||
|
// FieldExpiresAt holds the string denoting the expires_at field in the database.
|
||||||
|
FieldExpiresAt = "expires_at"
|
||||||
|
// Table holds the table name of the idempotencyrecord in the database.
|
||||||
|
Table = "idempotency_records"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for idempotencyrecord fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldUpdatedAt,
|
||||||
|
FieldScope,
|
||||||
|
FieldIdempotencyKeyHash,
|
||||||
|
FieldRequestFingerprint,
|
||||||
|
FieldStatus,
|
||||||
|
FieldResponseStatus,
|
||||||
|
FieldResponseBody,
|
||||||
|
FieldErrorReason,
|
||||||
|
FieldLockedUntil,
|
||||||
|
FieldExpiresAt,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
|
DefaultCreatedAt func() time.Time
|
||||||
|
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||||
|
DefaultUpdatedAt func() time.Time
|
||||||
|
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||||
|
UpdateDefaultUpdatedAt func() time.Time
|
||||||
|
// ScopeValidator is a validator for the "scope" field. It is called by the builders before save.
|
||||||
|
ScopeValidator func(string) error
|
||||||
|
// IdempotencyKeyHashValidator is a validator for the "idempotency_key_hash" field. It is called by the builders before save.
|
||||||
|
IdempotencyKeyHashValidator func(string) error
|
||||||
|
// RequestFingerprintValidator is a validator for the "request_fingerprint" field. It is called by the builders before save.
|
||||||
|
RequestFingerprintValidator func(string) error
|
||||||
|
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
StatusValidator func(string) error
|
||||||
|
// ErrorReasonValidator is a validator for the "error_reason" field. It is called by the builders before save.
|
||||||
|
ErrorReasonValidator func(string) error
|
||||||
|
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the IdempotencyRecord queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByScope orders the results by the scope field.
|
||||||
|
func ByScope(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldScope, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByIdempotencyKeyHash orders the results by the idempotency_key_hash field.
|
||||||
|
func ByIdempotencyKeyHash(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldIdempotencyKeyHash, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRequestFingerprint orders the results by the request_fingerprint field.
|
||||||
|
func ByRequestFingerprint(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRequestFingerprint, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByResponseStatus orders the results by the response_status field.
|
||||||
|
func ByResponseStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldResponseStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByResponseBody orders the results by the response_body field.
|
||||||
|
func ByResponseBody(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldResponseBody, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByErrorReason orders the results by the error_reason field.
|
||||||
|
func ByErrorReason(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldErrorReason, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByLockedUntil orders the results by the locked_until field.
|
||||||
|
func ByLockedUntil(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldLockedUntil, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByExpiresAt orders the results by the expires_at field.
|
||||||
|
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
755
backend/ent/idempotencyrecord/where.go
Normal file
755
backend/ent/idempotencyrecord/where.go
Normal file
@@ -0,0 +1,755 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package idempotencyrecord
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||||
|
func UpdatedAt(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scope applies equality check predicate on the "scope" field. It's identical to ScopeEQ.
|
||||||
|
func Scope(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHash applies equality check predicate on the "idempotency_key_hash" field. It's identical to IdempotencyKeyHashEQ.
|
||||||
|
func IdempotencyKeyHash(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprint applies equality check predicate on the "request_fingerprint" field. It's identical to RequestFingerprintEQ.
|
||||||
|
func RequestFingerprint(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
|
||||||
|
func Status(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatus applies equality check predicate on the "response_status" field. It's identical to ResponseStatusEQ.
|
||||||
|
func ResponseStatus(v int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBody applies equality check predicate on the "response_body" field. It's identical to ResponseBodyEQ.
|
||||||
|
func ResponseBody(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReason applies equality check predicate on the "error_reason" field. It's identical to ErrorReasonEQ.
|
||||||
|
func ErrorReason(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntil applies equality check predicate on the "locked_until" field. It's identical to LockedUntilEQ.
|
||||||
|
func LockedUntil(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldLockedUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
|
||||||
|
func ExpiresAt(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeEQ applies the EQ predicate on the "scope" field.
|
||||||
|
func ScopeEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeNEQ applies the NEQ predicate on the "scope" field.
|
||||||
|
func ScopeNEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeIn applies the In predicate on the "scope" field.
|
||||||
|
func ScopeIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldScope, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeNotIn applies the NotIn predicate on the "scope" field.
|
||||||
|
func ScopeNotIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldScope, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeGT applies the GT predicate on the "scope" field.
|
||||||
|
func ScopeGT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeGTE applies the GTE predicate on the "scope" field.
|
||||||
|
func ScopeGTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeLT applies the LT predicate on the "scope" field.
|
||||||
|
func ScopeLT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeLTE applies the LTE predicate on the "scope" field.
|
||||||
|
func ScopeLTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeContains applies the Contains predicate on the "scope" field.
|
||||||
|
func ScopeContains(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContains(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeHasPrefix applies the HasPrefix predicate on the "scope" field.
|
||||||
|
func ScopeHasPrefix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeHasSuffix applies the HasSuffix predicate on the "scope" field.
|
||||||
|
func ScopeHasSuffix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeEqualFold applies the EqualFold predicate on the "scope" field.
|
||||||
|
func ScopeEqualFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeContainsFold applies the ContainsFold predicate on the "scope" field.
|
||||||
|
func ScopeContainsFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldScope, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashEQ applies the EQ predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashNEQ applies the NEQ predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashNEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashIn applies the In predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldIdempotencyKeyHash, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashNotIn applies the NotIn predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashNotIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldIdempotencyKeyHash, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashGT applies the GT predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashGT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashGTE applies the GTE predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashGTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashLT applies the LT predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashLT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashLTE applies the LTE predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashLTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashContains applies the Contains predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashContains(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContains(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashHasPrefix applies the HasPrefix predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashHasPrefix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashHasSuffix applies the HasSuffix predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashHasSuffix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashEqualFold applies the EqualFold predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashEqualFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyKeyHashContainsFold applies the ContainsFold predicate on the "idempotency_key_hash" field.
|
||||||
|
func IdempotencyKeyHashContainsFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldIdempotencyKeyHash, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintEQ applies the EQ predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintNEQ applies the NEQ predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintNEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintIn applies the In predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldRequestFingerprint, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintNotIn applies the NotIn predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintNotIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldRequestFingerprint, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintGT applies the GT predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintGT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintGTE applies the GTE predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintGTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintLT applies the LT predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintLT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintLTE applies the LTE predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintLTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintContains applies the Contains predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintContains(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContains(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintHasPrefix applies the HasPrefix predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintHasPrefix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintHasSuffix applies the HasSuffix predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintHasSuffix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintEqualFold applies the EqualFold predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintEqualFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFingerprintContainsFold applies the ContainsFold predicate on the "request_fingerprint" field.
|
||||||
|
func RequestFingerprintContainsFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldRequestFingerprint, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEQ applies the EQ predicate on the "status" field.
|
||||||
|
func StatusEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNEQ applies the NEQ predicate on the "status" field.
|
||||||
|
func StatusNEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusIn applies the In predicate on the "status" field.
|
||||||
|
func StatusIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNotIn applies the NotIn predicate on the "status" field.
|
||||||
|
func StatusNotIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGT applies the GT predicate on the "status" field.
|
||||||
|
func StatusGT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGTE applies the GTE predicate on the "status" field.
|
||||||
|
func StatusGTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLT applies the LT predicate on the "status" field.
|
||||||
|
func StatusLT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLTE applies the LTE predicate on the "status" field.
|
||||||
|
func StatusLTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContains applies the Contains predicate on the "status" field.
|
||||||
|
func StatusContains(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContains(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
|
||||||
|
func StatusHasPrefix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
|
||||||
|
func StatusHasSuffix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEqualFold applies the EqualFold predicate on the "status" field.
|
||||||
|
func StatusEqualFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContainsFold applies the ContainsFold predicate on the "status" field.
|
||||||
|
func StatusContainsFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusEQ applies the EQ predicate on the "response_status" field.
|
||||||
|
func ResponseStatusEQ(v int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusNEQ applies the NEQ predicate on the "response_status" field.
|
||||||
|
func ResponseStatusNEQ(v int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldResponseStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusIn applies the In predicate on the "response_status" field.
|
||||||
|
func ResponseStatusIn(vs ...int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldResponseStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusNotIn applies the NotIn predicate on the "response_status" field.
|
||||||
|
func ResponseStatusNotIn(vs ...int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldResponseStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusGT applies the GT predicate on the "response_status" field.
|
||||||
|
func ResponseStatusGT(v int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldResponseStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusGTE applies the GTE predicate on the "response_status" field.
|
||||||
|
func ResponseStatusGTE(v int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldResponseStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusLT applies the LT predicate on the "response_status" field.
|
||||||
|
func ResponseStatusLT(v int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldResponseStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusLTE applies the LTE predicate on the "response_status" field.
|
||||||
|
func ResponseStatusLTE(v int) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldResponseStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusIsNil applies the IsNil predicate on the "response_status" field.
|
||||||
|
func ResponseStatusIsNil() predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIsNull(FieldResponseStatus))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseStatusNotNil applies the NotNil predicate on the "response_status" field.
|
||||||
|
func ResponseStatusNotNil() predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotNull(FieldResponseStatus))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyEQ applies the EQ predicate on the "response_body" field.
|
||||||
|
func ResponseBodyEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyNEQ applies the NEQ predicate on the "response_body" field.
|
||||||
|
func ResponseBodyNEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyIn applies the In predicate on the "response_body" field.
|
||||||
|
func ResponseBodyIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldResponseBody, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyNotIn applies the NotIn predicate on the "response_body" field.
|
||||||
|
func ResponseBodyNotIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldResponseBody, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyGT applies the GT predicate on the "response_body" field.
|
||||||
|
func ResponseBodyGT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyGTE applies the GTE predicate on the "response_body" field.
|
||||||
|
func ResponseBodyGTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyLT applies the LT predicate on the "response_body" field.
|
||||||
|
func ResponseBodyLT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyLTE applies the LTE predicate on the "response_body" field.
|
||||||
|
func ResponseBodyLTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyContains applies the Contains predicate on the "response_body" field.
|
||||||
|
func ResponseBodyContains(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContains(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyHasPrefix applies the HasPrefix predicate on the "response_body" field.
|
||||||
|
func ResponseBodyHasPrefix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyHasSuffix applies the HasSuffix predicate on the "response_body" field.
|
||||||
|
func ResponseBodyHasSuffix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyIsNil applies the IsNil predicate on the "response_body" field.
|
||||||
|
func ResponseBodyIsNil() predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIsNull(FieldResponseBody))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyNotNil applies the NotNil predicate on the "response_body" field.
|
||||||
|
func ResponseBodyNotNil() predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotNull(FieldResponseBody))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyEqualFold applies the EqualFold predicate on the "response_body" field.
|
||||||
|
func ResponseBodyEqualFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseBodyContainsFold applies the ContainsFold predicate on the "response_body" field.
|
||||||
|
func ResponseBodyContainsFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldResponseBody, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonEQ applies the EQ predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonNEQ applies the NEQ predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonNEQ(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonIn applies the In predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldErrorReason, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonNotIn applies the NotIn predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonNotIn(vs ...string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldErrorReason, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonGT applies the GT predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonGT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonGTE applies the GTE predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonGTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonLT applies the LT predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonLT(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonLTE applies the LTE predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonLTE(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonContains applies the Contains predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonContains(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContains(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonHasPrefix applies the HasPrefix predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonHasPrefix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonHasSuffix applies the HasSuffix predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonHasSuffix(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonIsNil applies the IsNil predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonIsNil() predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIsNull(FieldErrorReason))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonNotNil applies the NotNil predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonNotNil() predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotNull(FieldErrorReason))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonEqualFold applies the EqualFold predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonEqualFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorReasonContainsFold applies the ContainsFold predicate on the "error_reason" field.
|
||||||
|
func ErrorReasonContainsFold(v string) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldErrorReason, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilEQ applies the EQ predicate on the "locked_until" field.
|
||||||
|
func LockedUntilEQ(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldLockedUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilNEQ applies the NEQ predicate on the "locked_until" field.
|
||||||
|
func LockedUntilNEQ(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldLockedUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilIn applies the In predicate on the "locked_until" field.
|
||||||
|
func LockedUntilIn(vs ...time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldLockedUntil, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilNotIn applies the NotIn predicate on the "locked_until" field.
|
||||||
|
func LockedUntilNotIn(vs ...time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldLockedUntil, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilGT applies the GT predicate on the "locked_until" field.
|
||||||
|
func LockedUntilGT(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldLockedUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilGTE applies the GTE predicate on the "locked_until" field.
|
||||||
|
func LockedUntilGTE(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldLockedUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilLT applies the LT predicate on the "locked_until" field.
|
||||||
|
func LockedUntilLT(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldLockedUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilLTE applies the LTE predicate on the "locked_until" field.
|
||||||
|
func LockedUntilLTE(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldLockedUntil, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilIsNil applies the IsNil predicate on the "locked_until" field.
|
||||||
|
func LockedUntilIsNil() predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIsNull(FieldLockedUntil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockedUntilNotNil applies the NotNil predicate on the "locked_until" field.
|
||||||
|
func LockedUntilNotNil() predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotNull(FieldLockedUntil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtEQ applies the EQ predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtEQ(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtNEQ(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtIn applies the In predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtIn(vs ...time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldIn(FieldExpiresAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtNotIn(vs ...time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldNotIn(FieldExpiresAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtGT applies the GT predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtGT(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGT(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtGTE(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldGTE(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtLT applies the LT predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtLT(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLT(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtLTE(v time.Time) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.FieldLTE(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.IdempotencyRecord) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.IdempotencyRecord) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.IdempotencyRecord) predicate.IdempotencyRecord {
|
||||||
|
return predicate.IdempotencyRecord(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1132
backend/ent/idempotencyrecord_create.go
Normal file
1132
backend/ent/idempotencyrecord_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/idempotencyrecord_delete.go
Normal file
88
backend/ent/idempotencyrecord_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IdempotencyRecordDelete is the builder for deleting a IdempotencyRecord entity.
|
||||||
|
type IdempotencyRecordDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *IdempotencyRecordMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the IdempotencyRecordDelete builder.
|
||||||
|
func (_d *IdempotencyRecordDelete) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *IdempotencyRecordDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *IdempotencyRecordDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *IdempotencyRecordDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(idempotencyrecord.Table, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyRecordDeleteOne is the builder for deleting a single IdempotencyRecord entity.
|
||||||
|
type IdempotencyRecordDeleteOne struct {
|
||||||
|
_d *IdempotencyRecordDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the IdempotencyRecordDelete builder.
|
||||||
|
func (_d *IdempotencyRecordDeleteOne) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *IdempotencyRecordDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{idempotencyrecord.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *IdempotencyRecordDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
564
backend/ent/idempotencyrecord_query.go
Normal file
564
backend/ent/idempotencyrecord_query.go
Normal file
@@ -0,0 +1,564 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IdempotencyRecordQuery is the builder for querying IdempotencyRecord entities.
|
||||||
|
type IdempotencyRecordQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []idempotencyrecord.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.IdempotencyRecord
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the IdempotencyRecordQuery builder.
|
||||||
|
func (_q *IdempotencyRecordQuery) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *IdempotencyRecordQuery) Limit(limit int) *IdempotencyRecordQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *IdempotencyRecordQuery) Offset(offset int) *IdempotencyRecordQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *IdempotencyRecordQuery) Unique(unique bool) *IdempotencyRecordQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *IdempotencyRecordQuery) Order(o ...idempotencyrecord.OrderOption) *IdempotencyRecordQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first IdempotencyRecord entity from the query.
|
||||||
|
// Returns a *NotFoundError when no IdempotencyRecord was found.
|
||||||
|
func (_q *IdempotencyRecordQuery) First(ctx context.Context) (*IdempotencyRecord, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{idempotencyrecord.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *IdempotencyRecordQuery) FirstX(ctx context.Context) *IdempotencyRecord {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first IdempotencyRecord ID from the query.
|
||||||
|
// Returns a *NotFoundError when no IdempotencyRecord ID was found.
|
||||||
|
func (_q *IdempotencyRecordQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{idempotencyrecord.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *IdempotencyRecordQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single IdempotencyRecord entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one IdempotencyRecord entity is found.
|
||||||
|
// Returns a *NotFoundError when no IdempotencyRecord entities are found.
|
||||||
|
func (_q *IdempotencyRecordQuery) Only(ctx context.Context) (*IdempotencyRecord, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{idempotencyrecord.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{idempotencyrecord.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *IdempotencyRecordQuery) OnlyX(ctx context.Context) *IdempotencyRecord {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only IdempotencyRecord ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one IdempotencyRecord ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *IdempotencyRecordQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{idempotencyrecord.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{idempotencyrecord.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *IdempotencyRecordQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of IdempotencyRecords.
|
||||||
|
func (_q *IdempotencyRecordQuery) All(ctx context.Context) ([]*IdempotencyRecord, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*IdempotencyRecord, *IdempotencyRecordQuery]()
|
||||||
|
return withInterceptors[[]*IdempotencyRecord](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *IdempotencyRecordQuery) AllX(ctx context.Context) []*IdempotencyRecord {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of IdempotencyRecord IDs.
|
||||||
|
func (_q *IdempotencyRecordQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(idempotencyrecord.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *IdempotencyRecordQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *IdempotencyRecordQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*IdempotencyRecordQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *IdempotencyRecordQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *IdempotencyRecordQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *IdempotencyRecordQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the IdempotencyRecordQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *IdempotencyRecordQuery) Clone() *IdempotencyRecordQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &IdempotencyRecordQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]idempotencyrecord.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.IdempotencyRecord{}, _q.predicates...),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.IdempotencyRecord.Query().
|
||||||
|
// GroupBy(idempotencyrecord.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *IdempotencyRecordQuery) GroupBy(field string, fields ...string) *IdempotencyRecordGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &IdempotencyRecordGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = idempotencyrecord.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.IdempotencyRecord.Query().
|
||||||
|
// Select(idempotencyrecord.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *IdempotencyRecordQuery) Select(fields ...string) *IdempotencyRecordSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &IdempotencyRecordSelect{IdempotencyRecordQuery: _q}
|
||||||
|
sbuild.label = idempotencyrecord.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a IdempotencyRecordSelect configured with the given aggregations.
|
||||||
|
func (_q *IdempotencyRecordQuery) Aggregate(fns ...AggregateFunc) *IdempotencyRecordSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *IdempotencyRecordQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !idempotencyrecord.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *IdempotencyRecordQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*IdempotencyRecord, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*IdempotencyRecord{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*IdempotencyRecord).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &IdempotencyRecord{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *IdempotencyRecordQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *IdempotencyRecordQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, idempotencyrecord.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != idempotencyrecord.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *IdempotencyRecordQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(idempotencyrecord.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = idempotencyrecord.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *IdempotencyRecordQuery) ForUpdate(opts ...sql.LockOption) *IdempotencyRecordQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *IdempotencyRecordQuery) ForShare(opts ...sql.LockOption) *IdempotencyRecordQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyRecordGroupBy is the group-by builder for IdempotencyRecord entities.
|
||||||
|
type IdempotencyRecordGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *IdempotencyRecordQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *IdempotencyRecordGroupBy) Aggregate(fns ...AggregateFunc) *IdempotencyRecordGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *IdempotencyRecordGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*IdempotencyRecordQuery, *IdempotencyRecordGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *IdempotencyRecordGroupBy) sqlScan(ctx context.Context, root *IdempotencyRecordQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyRecordSelect is the builder for selecting fields of IdempotencyRecord entities.
|
||||||
|
type IdempotencyRecordSelect struct {
|
||||||
|
*IdempotencyRecordQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *IdempotencyRecordSelect) Aggregate(fns ...AggregateFunc) *IdempotencyRecordSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *IdempotencyRecordSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*IdempotencyRecordQuery, *IdempotencyRecordSelect](ctx, _s.IdempotencyRecordQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *IdempotencyRecordSelect) sqlScan(ctx context.Context, root *IdempotencyRecordQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
676
backend/ent/idempotencyrecord_update.go
Normal file
676
backend/ent/idempotencyrecord_update.go
Normal file
@@ -0,0 +1,676 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IdempotencyRecordUpdate is the builder for updating IdempotencyRecord entities.
|
||||||
|
type IdempotencyRecordUpdate struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *IdempotencyRecordMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the IdempotencyRecordUpdate builder.
|
||||||
|
func (_u *IdempotencyRecordUpdate) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetUpdatedAt(v time.Time) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetScope sets the "scope" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetScope(v string) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetScope(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableScope sets the "scope" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableScope(v *string) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetScope(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdempotencyKeyHash sets the "idempotency_key_hash" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetIdempotencyKeyHash(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableIdempotencyKeyHash sets the "idempotency_key_hash" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableIdempotencyKeyHash(v *string) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetIdempotencyKeyHash(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRequestFingerprint sets the "request_fingerprint" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetRequestFingerprint(v string) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetRequestFingerprint(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRequestFingerprint sets the "request_fingerprint" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableRequestFingerprint(v *string) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetRequestFingerprint(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetStatus(v string) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableStatus(v *string) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetResponseStatus sets the "response_status" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetResponseStatus(v int) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.ResetResponseStatus()
|
||||||
|
_u.mutation.SetResponseStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableResponseStatus sets the "response_status" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableResponseStatus(v *int) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetResponseStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddResponseStatus adds value to the "response_status" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) AddResponseStatus(v int) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.AddResponseStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearResponseStatus clears the value of the "response_status" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) ClearResponseStatus() *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.ClearResponseStatus()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetResponseBody sets the "response_body" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetResponseBody(v string) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetResponseBody(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableResponseBody sets the "response_body" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableResponseBody(v *string) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetResponseBody(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearResponseBody clears the value of the "response_body" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) ClearResponseBody() *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.ClearResponseBody()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetErrorReason sets the "error_reason" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetErrorReason(v string) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetErrorReason(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableErrorReason sets the "error_reason" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableErrorReason(v *string) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetErrorReason(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearErrorReason clears the value of the "error_reason" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) ClearErrorReason() *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.ClearErrorReason()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetLockedUntil sets the "locked_until" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetLockedUntil(v time.Time) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetLockedUntil(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableLockedUntil sets the "locked_until" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableLockedUntil(v *time.Time) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetLockedUntil(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLockedUntil clears the value of the "locked_until" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) ClearLockedUntil() *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.ClearLockedUntil()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetExpiresAt sets the "expires_at" field.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetExpiresAt(v time.Time) *IdempotencyRecordUpdate {
|
||||||
|
_u.mutation.SetExpiresAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SetNillableExpiresAt(v *time.Time) *IdempotencyRecordUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetExpiresAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the IdempotencyRecordMutation object of the builder.
|
||||||
|
func (_u *IdempotencyRecordUpdate) Mutation() *IdempotencyRecordMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *IdempotencyRecordUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *IdempotencyRecordUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *IdempotencyRecordUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *IdempotencyRecordUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *IdempotencyRecordUpdate) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := idempotencyrecord.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *IdempotencyRecordUpdate) check() error {
|
||||||
|
if v, ok := _u.mutation.Scope(); ok {
|
||||||
|
if err := idempotencyrecord.ScopeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "scope", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.scope": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.IdempotencyKeyHash(); ok {
|
||||||
|
if err := idempotencyrecord.IdempotencyKeyHashValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "idempotency_key_hash", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.idempotency_key_hash": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.RequestFingerprint(); ok {
|
||||||
|
if err := idempotencyrecord.RequestFingerprintValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "request_fingerprint", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.request_fingerprint": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := idempotencyrecord.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.ErrorReason(); ok {
|
||||||
|
if err := idempotencyrecord.ErrorReasonValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "error_reason", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.error_reason": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *IdempotencyRecordUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64))
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Scope(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldScope, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.IdempotencyKeyHash(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldIdempotencyKeyHash, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.RequestFingerprint(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldRequestFingerprint, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ResponseStatus(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedResponseStatus(); ok {
|
||||||
|
_spec.AddField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.ResponseStatusCleared() {
|
||||||
|
_spec.ClearField(idempotencyrecord.FieldResponseStatus, field.TypeInt)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ResponseBody(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldResponseBody, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.ResponseBodyCleared() {
|
||||||
|
_spec.ClearField(idempotencyrecord.FieldResponseBody, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ErrorReason(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldErrorReason, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.ErrorReasonCleared() {
|
||||||
|
_spec.ClearField(idempotencyrecord.FieldErrorReason, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.LockedUntil(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldLockedUntil, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.LockedUntilCleared() {
|
||||||
|
_spec.ClearField(idempotencyrecord.FieldLockedUntil, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ExpiresAt(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldExpiresAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{idempotencyrecord.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdempotencyRecordUpdateOne is the builder for updating a single IdempotencyRecord entity.
|
||||||
|
type IdempotencyRecordUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *IdempotencyRecordMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetUpdatedAt(v time.Time) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetScope sets the "scope" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetScope(v string) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetScope(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableScope sets the "scope" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableScope(v *string) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetScope(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdempotencyKeyHash sets the "idempotency_key_hash" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetIdempotencyKeyHash(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableIdempotencyKeyHash sets the "idempotency_key_hash" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableIdempotencyKeyHash(v *string) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetIdempotencyKeyHash(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRequestFingerprint sets the "request_fingerprint" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetRequestFingerprint(v string) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetRequestFingerprint(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableRequestFingerprint sets the "request_fingerprint" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableRequestFingerprint(v *string) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetRequestFingerprint(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetStatus(v string) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableStatus(v *string) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetResponseStatus sets the "response_status" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetResponseStatus(v int) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.ResetResponseStatus()
|
||||||
|
_u.mutation.SetResponseStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableResponseStatus sets the "response_status" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableResponseStatus(v *int) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetResponseStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddResponseStatus adds value to the "response_status" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) AddResponseStatus(v int) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.AddResponseStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearResponseStatus clears the value of the "response_status" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) ClearResponseStatus() *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.ClearResponseStatus()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetResponseBody sets the "response_body" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetResponseBody(v string) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetResponseBody(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableResponseBody sets the "response_body" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableResponseBody(v *string) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetResponseBody(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearResponseBody clears the value of the "response_body" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) ClearResponseBody() *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.ClearResponseBody()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetErrorReason sets the "error_reason" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetErrorReason(v string) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetErrorReason(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableErrorReason sets the "error_reason" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableErrorReason(v *string) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetErrorReason(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearErrorReason clears the value of the "error_reason" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) ClearErrorReason() *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.ClearErrorReason()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetLockedUntil sets the "locked_until" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetLockedUntil(v time.Time) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetLockedUntil(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableLockedUntil sets the "locked_until" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableLockedUntil(v *time.Time) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetLockedUntil(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearLockedUntil clears the value of the "locked_until" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) ClearLockedUntil() *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.ClearLockedUntil()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetExpiresAt sets the "expires_at" field.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetExpiresAt(v time.Time) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.SetExpiresAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SetNillableExpiresAt(v *time.Time) *IdempotencyRecordUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetExpiresAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the IdempotencyRecordMutation object of the builder.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) Mutation() *IdempotencyRecordMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the IdempotencyRecordUpdate builder.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) Select(field string, fields ...string) *IdempotencyRecordUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated IdempotencyRecord entity.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) Save(ctx context.Context) (*IdempotencyRecord, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) SaveX(ctx context.Context) *IdempotencyRecord {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := idempotencyrecord.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) check() error {
|
||||||
|
if v, ok := _u.mutation.Scope(); ok {
|
||||||
|
if err := idempotencyrecord.ScopeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "scope", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.scope": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.IdempotencyKeyHash(); ok {
|
||||||
|
if err := idempotencyrecord.IdempotencyKeyHashValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "idempotency_key_hash", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.idempotency_key_hash": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.RequestFingerprint(); ok {
|
||||||
|
if err := idempotencyrecord.RequestFingerprintValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "request_fingerprint", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.request_fingerprint": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := idempotencyrecord.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.ErrorReason(); ok {
|
||||||
|
if err := idempotencyrecord.ErrorReasonValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "error_reason", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.error_reason": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *IdempotencyRecordUpdateOne) sqlSave(ctx context.Context) (_node *IdempotencyRecord, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "IdempotencyRecord.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, idempotencyrecord.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !idempotencyrecord.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != idempotencyrecord.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Scope(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldScope, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.IdempotencyKeyHash(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldIdempotencyKeyHash, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.RequestFingerprint(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldRequestFingerprint, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ResponseStatus(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedResponseStatus(); ok {
|
||||||
|
_spec.AddField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.ResponseStatusCleared() {
|
||||||
|
_spec.ClearField(idempotencyrecord.FieldResponseStatus, field.TypeInt)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ResponseBody(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldResponseBody, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.ResponseBodyCleared() {
|
||||||
|
_spec.ClearField(idempotencyrecord.FieldResponseBody, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ErrorReason(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldErrorReason, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.ErrorReasonCleared() {
|
||||||
|
_spec.ClearField(idempotencyrecord.FieldErrorReason, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.LockedUntil(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldLockedUntil, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.LockedUntilCleared() {
|
||||||
|
_spec.ClearField(idempotencyrecord.FieldLockedUntil, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ExpiresAt(); ok {
|
||||||
|
_spec.SetField(idempotencyrecord.FieldExpiresAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
_node = &IdempotencyRecord{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{idempotencyrecord.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
@@ -15,11 +15,13 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
@@ -275,6 +277,33 @@ func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) error {
|
|||||||
return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q)
|
return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The IdempotencyRecordFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type IdempotencyRecordFunc func(context.Context, *ent.IdempotencyRecordQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f IdempotencyRecordFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.IdempotencyRecordQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.IdempotencyRecordQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseIdempotencyRecord type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseIdempotencyRecord func(context.Context, *ent.IdempotencyRecordQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseIdempotencyRecord) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseIdempotencyRecord) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.IdempotencyRecordQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.IdempotencyRecordQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
// The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier.
|
// The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error)
|
type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error)
|
||||||
|
|
||||||
@@ -383,6 +412,33 @@ func (f TraverseRedeemCode) Traverse(ctx context.Context, q ent.Query) error {
|
|||||||
return fmt.Errorf("unexpected query type %T. expect *ent.RedeemCodeQuery", q)
|
return fmt.Errorf("unexpected query type %T. expect *ent.RedeemCodeQuery", q)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The SecuritySecretFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type SecuritySecretFunc func(context.Context, *ent.SecuritySecretQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f SecuritySecretFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.SecuritySecretQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.SecuritySecretQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseSecuritySecret type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseSecuritySecret func(context.Context, *ent.SecuritySecretQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseSecuritySecret) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseSecuritySecret) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.SecuritySecretQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.SecuritySecretQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
// The SettingFunc type is an adapter to allow the use of ordinary function as a Querier.
|
// The SettingFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
type SettingFunc func(context.Context, *ent.SettingQuery) (ent.Value, error)
|
type SettingFunc func(context.Context, *ent.SettingQuery) (ent.Value, error)
|
||||||
|
|
||||||
@@ -616,6 +672,8 @@ func NewQuery(q ent.Query) (Query, error) {
|
|||||||
return &query[*ent.ErrorPassthroughRuleQuery, predicate.ErrorPassthroughRule, errorpassthroughrule.OrderOption]{typ: ent.TypeErrorPassthroughRule, tq: q}, nil
|
return &query[*ent.ErrorPassthroughRuleQuery, predicate.ErrorPassthroughRule, errorpassthroughrule.OrderOption]{typ: ent.TypeErrorPassthroughRule, tq: q}, nil
|
||||||
case *ent.GroupQuery:
|
case *ent.GroupQuery:
|
||||||
return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
|
return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
|
||||||
|
case *ent.IdempotencyRecordQuery:
|
||||||
|
return &query[*ent.IdempotencyRecordQuery, predicate.IdempotencyRecord, idempotencyrecord.OrderOption]{typ: ent.TypeIdempotencyRecord, tq: q}, nil
|
||||||
case *ent.PromoCodeQuery:
|
case *ent.PromoCodeQuery:
|
||||||
return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil
|
return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil
|
||||||
case *ent.PromoCodeUsageQuery:
|
case *ent.PromoCodeUsageQuery:
|
||||||
@@ -624,6 +682,8 @@ func NewQuery(q ent.Query) (Query, error) {
|
|||||||
return &query[*ent.ProxyQuery, predicate.Proxy, proxy.OrderOption]{typ: ent.TypeProxy, tq: q}, nil
|
return &query[*ent.ProxyQuery, predicate.Proxy, proxy.OrderOption]{typ: ent.TypeProxy, tq: q}, nil
|
||||||
case *ent.RedeemCodeQuery:
|
case *ent.RedeemCodeQuery:
|
||||||
return &query[*ent.RedeemCodeQuery, predicate.RedeemCode, redeemcode.OrderOption]{typ: ent.TypeRedeemCode, tq: q}, nil
|
return &query[*ent.RedeemCodeQuery, predicate.RedeemCode, redeemcode.OrderOption]{typ: ent.TypeRedeemCode, tq: q}, nil
|
||||||
|
case *ent.SecuritySecretQuery:
|
||||||
|
return &query[*ent.SecuritySecretQuery, predicate.SecuritySecret, securitysecret.OrderOption]{typ: ent.TypeSecuritySecret, tq: q}, nil
|
||||||
case *ent.SettingQuery:
|
case *ent.SettingQuery:
|
||||||
return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil
|
return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil
|
||||||
case *ent.UsageCleanupTaskQuery:
|
case *ent.UsageCleanupTaskQuery:
|
||||||
|
|||||||
@@ -18,11 +18,21 @@ var (
|
|||||||
{Name: "key", Type: field.TypeString, Unique: true, Size: 128},
|
{Name: "key", Type: field.TypeString, Unique: true, Size: 128},
|
||||||
{Name: "name", Type: field.TypeString, Size: 100},
|
{Name: "name", Type: field.TypeString, Size: 100},
|
||||||
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "last_used_at", Type: field.TypeTime, Nullable: true},
|
||||||
{Name: "ip_whitelist", Type: field.TypeJSON, Nullable: true},
|
{Name: "ip_whitelist", Type: field.TypeJSON, Nullable: true},
|
||||||
{Name: "ip_blacklist", Type: field.TypeJSON, Nullable: true},
|
{Name: "ip_blacklist", Type: field.TypeJSON, Nullable: true},
|
||||||
{Name: "quota", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
{Name: "quota", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
{Name: "quota_used", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
{Name: "quota_used", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
{Name: "expires_at", Type: field.TypeTime, Nullable: true},
|
{Name: "expires_at", Type: field.TypeTime, Nullable: true},
|
||||||
|
{Name: "rate_limit_5h", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "rate_limit_1d", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "rate_limit_7d", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "usage_5h", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "usage_1d", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "usage_7d", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "window_5h_start", Type: field.TypeTime, Nullable: true},
|
||||||
|
{Name: "window_1d_start", Type: field.TypeTime, Nullable: true},
|
||||||
|
{Name: "window_7d_start", Type: field.TypeTime, Nullable: true},
|
||||||
{Name: "group_id", Type: field.TypeInt64, Nullable: true},
|
{Name: "group_id", Type: field.TypeInt64, Nullable: true},
|
||||||
{Name: "user_id", Type: field.TypeInt64},
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
}
|
}
|
||||||
@@ -34,13 +44,13 @@ var (
|
|||||||
ForeignKeys: []*schema.ForeignKey{
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
{
|
{
|
||||||
Symbol: "api_keys_groups_api_keys",
|
Symbol: "api_keys_groups_api_keys",
|
||||||
Columns: []*schema.Column{APIKeysColumns[12]},
|
Columns: []*schema.Column{APIKeysColumns[22]},
|
||||||
RefColumns: []*schema.Column{GroupsColumns[0]},
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
OnDelete: schema.SetNull,
|
OnDelete: schema.SetNull,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "api_keys_users_api_keys",
|
Symbol: "api_keys_users_api_keys",
|
||||||
Columns: []*schema.Column{APIKeysColumns[13]},
|
Columns: []*schema.Column{APIKeysColumns[23]},
|
||||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
OnDelete: schema.NoAction,
|
OnDelete: schema.NoAction,
|
||||||
},
|
},
|
||||||
@@ -49,12 +59,12 @@ var (
|
|||||||
{
|
{
|
||||||
Name: "apikey_user_id",
|
Name: "apikey_user_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{APIKeysColumns[13]},
|
Columns: []*schema.Column{APIKeysColumns[23]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "apikey_group_id",
|
Name: "apikey_group_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{APIKeysColumns[12]},
|
Columns: []*schema.Column{APIKeysColumns[22]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "apikey_status",
|
Name: "apikey_status",
|
||||||
@@ -66,15 +76,20 @@ var (
|
|||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{APIKeysColumns[3]},
|
Columns: []*schema.Column{APIKeysColumns[3]},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "apikey_last_used_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{APIKeysColumns[7]},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "apikey_quota_quota_used",
|
Name: "apikey_quota_quota_used",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{APIKeysColumns[9], APIKeysColumns[10]},
|
Columns: []*schema.Column{APIKeysColumns[10], APIKeysColumns[11]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "apikey_expires_at",
|
Name: "apikey_expires_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{APIKeysColumns[11]},
|
Columns: []*schema.Column{APIKeysColumns[12]},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -91,6 +106,7 @@ var (
|
|||||||
{Name: "credentials", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
{Name: "credentials", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||||
{Name: "extra", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
{Name: "extra", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||||
{Name: "concurrency", Type: field.TypeInt, Default: 3},
|
{Name: "concurrency", Type: field.TypeInt, Default: 3},
|
||||||
|
{Name: "load_factor", Type: field.TypeInt, Nullable: true},
|
||||||
{Name: "priority", Type: field.TypeInt, Default: 50},
|
{Name: "priority", Type: field.TypeInt, Default: 50},
|
||||||
{Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
|
{Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
|
||||||
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
@@ -102,6 +118,8 @@ var (
|
|||||||
{Name: "rate_limited_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "rate_limited_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
{Name: "rate_limit_reset_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "rate_limit_reset_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
{Name: "overload_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "overload_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "temp_unschedulable_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "temp_unschedulable_reason", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
{Name: "session_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "session_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
{Name: "session_window_end", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "session_window_end", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
{Name: "session_window_status", Type: field.TypeString, Nullable: true, Size: 20},
|
{Name: "session_window_status", Type: field.TypeString, Nullable: true, Size: 20},
|
||||||
@@ -115,7 +133,7 @@ var (
|
|||||||
ForeignKeys: []*schema.ForeignKey{
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
{
|
{
|
||||||
Symbol: "accounts_proxies_proxy",
|
Symbol: "accounts_proxies_proxy",
|
||||||
Columns: []*schema.Column{AccountsColumns[25]},
|
Columns: []*schema.Column{AccountsColumns[28]},
|
||||||
RefColumns: []*schema.Column{ProxiesColumns[0]},
|
RefColumns: []*schema.Column{ProxiesColumns[0]},
|
||||||
OnDelete: schema.SetNull,
|
OnDelete: schema.SetNull,
|
||||||
},
|
},
|
||||||
@@ -134,42 +152,52 @@ var (
|
|||||||
{
|
{
|
||||||
Name: "account_status",
|
Name: "account_status",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AccountsColumns[13]},
|
Columns: []*schema.Column{AccountsColumns[14]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "account_proxy_id",
|
Name: "account_proxy_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AccountsColumns[25]},
|
Columns: []*schema.Column{AccountsColumns[28]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "account_priority",
|
Name: "account_priority",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AccountsColumns[11]},
|
Columns: []*schema.Column{AccountsColumns[12]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "account_last_used_at",
|
Name: "account_last_used_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AccountsColumns[15]},
|
Columns: []*schema.Column{AccountsColumns[16]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "account_schedulable",
|
Name: "account_schedulable",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AccountsColumns[18]},
|
Columns: []*schema.Column{AccountsColumns[19]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "account_rate_limited_at",
|
Name: "account_rate_limited_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AccountsColumns[19]},
|
Columns: []*schema.Column{AccountsColumns[20]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "account_rate_limit_reset_at",
|
Name: "account_rate_limit_reset_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AccountsColumns[20]},
|
Columns: []*schema.Column{AccountsColumns[21]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "account_overload_until",
|
Name: "account_overload_until",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AccountsColumns[21]},
|
Columns: []*schema.Column{AccountsColumns[22]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_platform_priority",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[6], AccountsColumns[12]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_priority_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[12], AccountsColumns[14]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "account_deleted_at",
|
Name: "account_deleted_at",
|
||||||
@@ -223,6 +251,7 @@ var (
|
|||||||
{Name: "title", Type: field.TypeString, Size: 200},
|
{Name: "title", Type: field.TypeString, Size: 200},
|
||||||
{Name: "content", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
{Name: "content", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
{Name: "status", Type: field.TypeString, Size: 20, Default: "draft"},
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "draft"},
|
||||||
|
{Name: "notify_mode", Type: field.TypeString, Size: 20, Default: "silent"},
|
||||||
{Name: "targeting", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
{Name: "targeting", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||||
{Name: "starts_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "starts_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
{Name: "ends_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "ends_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
@@ -245,17 +274,17 @@ var (
|
|||||||
{
|
{
|
||||||
Name: "announcement_created_at",
|
Name: "announcement_created_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AnnouncementsColumns[9]},
|
Columns: []*schema.Column{AnnouncementsColumns[10]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "announcement_starts_at",
|
Name: "announcement_starts_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AnnouncementsColumns[5]},
|
Columns: []*schema.Column{AnnouncementsColumns[6]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "announcement_ends_at",
|
Name: "announcement_ends_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{AnnouncementsColumns[6]},
|
Columns: []*schema.Column{AnnouncementsColumns[7]},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -325,6 +354,7 @@ var (
|
|||||||
{Name: "response_code", Type: field.TypeInt, Nullable: true},
|
{Name: "response_code", Type: field.TypeInt, Nullable: true},
|
||||||
{Name: "passthrough_body", Type: field.TypeBool, Default: true},
|
{Name: "passthrough_body", Type: field.TypeBool, Default: true},
|
||||||
{Name: "custom_message", Type: field.TypeString, Nullable: true, Size: 2147483647},
|
{Name: "custom_message", Type: field.TypeString, Nullable: true, Size: 2147483647},
|
||||||
|
{Name: "skip_monitoring", Type: field.TypeBool, Default: false},
|
||||||
{Name: "description", Type: field.TypeString, Nullable: true, Size: 2147483647},
|
{Name: "description", Type: field.TypeString, Nullable: true, Size: 2147483647},
|
||||||
}
|
}
|
||||||
// ErrorPassthroughRulesTable holds the schema information for the "error_passthrough_rules" table.
|
// ErrorPassthroughRulesTable holds the schema information for the "error_passthrough_rules" table.
|
||||||
@@ -365,6 +395,11 @@ var (
|
|||||||
{Name: "image_price_1k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
{Name: "image_price_1k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
{Name: "image_price_2k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
{Name: "image_price_2k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
{Name: "image_price_4k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
{Name: "image_price_4k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "sora_image_price_360", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "sora_image_price_540", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "sora_video_price_per_request", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "sora_video_price_per_request_hd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "sora_storage_quota_bytes", Type: field.TypeInt64, Default: 0},
|
||||||
{Name: "claude_code_only", Type: field.TypeBool, Default: false},
|
{Name: "claude_code_only", Type: field.TypeBool, Default: false},
|
||||||
{Name: "fallback_group_id", Type: field.TypeInt64, Nullable: true},
|
{Name: "fallback_group_id", Type: field.TypeInt64, Nullable: true},
|
||||||
{Name: "fallback_group_id_on_invalid_request", Type: field.TypeInt64, Nullable: true},
|
{Name: "fallback_group_id_on_invalid_request", Type: field.TypeInt64, Nullable: true},
|
||||||
@@ -372,6 +407,9 @@ var (
|
|||||||
{Name: "model_routing_enabled", Type: field.TypeBool, Default: false},
|
{Name: "model_routing_enabled", Type: field.TypeBool, Default: false},
|
||||||
{Name: "mcp_xml_inject", Type: field.TypeBool, Default: true},
|
{Name: "mcp_xml_inject", Type: field.TypeBool, Default: true},
|
||||||
{Name: "supported_model_scopes", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
{Name: "supported_model_scopes", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||||
|
{Name: "sort_order", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "allow_messages_dispatch", Type: field.TypeBool, Default: false},
|
||||||
|
{Name: "default_mapped_model", Type: field.TypeString, Size: 100, Default: ""},
|
||||||
}
|
}
|
||||||
// GroupsTable holds the schema information for the "groups" table.
|
// GroupsTable holds the schema information for the "groups" table.
|
||||||
GroupsTable = &schema.Table{
|
GroupsTable = &schema.Table{
|
||||||
@@ -404,6 +442,49 @@ var (
|
|||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{GroupsColumns[3]},
|
Columns: []*schema.Column{GroupsColumns[3]},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "group_sort_order",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{GroupsColumns[30]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// IdempotencyRecordsColumns holds the columns for the "idempotency_records" table.
|
||||||
|
IdempotencyRecordsColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "scope", Type: field.TypeString, Size: 128},
|
||||||
|
{Name: "idempotency_key_hash", Type: field.TypeString, Size: 64},
|
||||||
|
{Name: "request_fingerprint", Type: field.TypeString, Size: 64},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 32},
|
||||||
|
{Name: "response_status", Type: field.TypeInt, Nullable: true},
|
||||||
|
{Name: "response_body", Type: field.TypeString, Nullable: true},
|
||||||
|
{Name: "error_reason", Type: field.TypeString, Nullable: true, Size: 128},
|
||||||
|
{Name: "locked_until", Type: field.TypeTime, Nullable: true},
|
||||||
|
{Name: "expires_at", Type: field.TypeTime},
|
||||||
|
}
|
||||||
|
// IdempotencyRecordsTable holds the schema information for the "idempotency_records" table.
|
||||||
|
IdempotencyRecordsTable = &schema.Table{
|
||||||
|
Name: "idempotency_records",
|
||||||
|
Columns: IdempotencyRecordsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{IdempotencyRecordsColumns[0]},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "idempotencyrecord_scope_idempotency_key_hash",
|
||||||
|
Unique: true,
|
||||||
|
Columns: []*schema.Column{IdempotencyRecordsColumns[3], IdempotencyRecordsColumns[4]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "idempotencyrecord_expires_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{IdempotencyRecordsColumns[11]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "idempotencyrecord_status_locked_until",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{IdempotencyRecordsColumns[6], IdempotencyRecordsColumns[10]},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
// PromoCodesColumns holds the columns for the "promo_codes" table.
|
// PromoCodesColumns holds the columns for the "promo_codes" table.
|
||||||
@@ -565,6 +646,20 @@ var (
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
// SecuritySecretsColumns holds the columns for the "security_secrets" table.
|
||||||
|
SecuritySecretsColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "key", Type: field.TypeString, Unique: true, Size: 100},
|
||||||
|
{Name: "value", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
}
|
||||||
|
// SecuritySecretsTable holds the schema information for the "security_secrets" table.
|
||||||
|
SecuritySecretsTable = &schema.Table{
|
||||||
|
Name: "security_secrets",
|
||||||
|
Columns: SecuritySecretsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{SecuritySecretsColumns[0]},
|
||||||
|
}
|
||||||
// SettingsColumns holds the columns for the "settings" table.
|
// SettingsColumns holds the columns for the "settings" table.
|
||||||
SettingsColumns = []*schema.Column{
|
SettingsColumns = []*schema.Column{
|
||||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
@@ -621,6 +716,7 @@ var (
|
|||||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
{Name: "request_id", Type: field.TypeString, Size: 64},
|
{Name: "request_id", Type: field.TypeString, Size: 64},
|
||||||
{Name: "model", Type: field.TypeString, Size: 100},
|
{Name: "model", Type: field.TypeString, Size: 100},
|
||||||
|
{Name: "upstream_model", Type: field.TypeString, Nullable: true, Size: 100},
|
||||||
{Name: "input_tokens", Type: field.TypeInt, Default: 0},
|
{Name: "input_tokens", Type: field.TypeInt, Default: 0},
|
||||||
{Name: "output_tokens", Type: field.TypeInt, Default: 0},
|
{Name: "output_tokens", Type: field.TypeInt, Default: 0},
|
||||||
{Name: "cache_creation_tokens", Type: field.TypeInt, Default: 0},
|
{Name: "cache_creation_tokens", Type: field.TypeInt, Default: 0},
|
||||||
@@ -643,6 +739,8 @@ var (
|
|||||||
{Name: "ip_address", Type: field.TypeString, Nullable: true, Size: 45},
|
{Name: "ip_address", Type: field.TypeString, Nullable: true, Size: 45},
|
||||||
{Name: "image_count", Type: field.TypeInt, Default: 0},
|
{Name: "image_count", Type: field.TypeInt, Default: 0},
|
||||||
{Name: "image_size", Type: field.TypeString, Nullable: true, Size: 10},
|
{Name: "image_size", Type: field.TypeString, Nullable: true, Size: 10},
|
||||||
|
{Name: "media_type", Type: field.TypeString, Nullable: true, Size: 16},
|
||||||
|
{Name: "cache_ttl_overridden", Type: field.TypeBool, Default: false},
|
||||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
{Name: "api_key_id", Type: field.TypeInt64},
|
{Name: "api_key_id", Type: field.TypeInt64},
|
||||||
{Name: "account_id", Type: field.TypeInt64},
|
{Name: "account_id", Type: field.TypeInt64},
|
||||||
@@ -658,31 +756,31 @@ var (
|
|||||||
ForeignKeys: []*schema.ForeignKey{
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_api_keys_usage_logs",
|
Symbol: "usage_logs_api_keys_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[26]},
|
Columns: []*schema.Column{UsageLogsColumns[29]},
|
||||||
RefColumns: []*schema.Column{APIKeysColumns[0]},
|
RefColumns: []*schema.Column{APIKeysColumns[0]},
|
||||||
OnDelete: schema.NoAction,
|
OnDelete: schema.NoAction,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_accounts_usage_logs",
|
Symbol: "usage_logs_accounts_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[27]},
|
Columns: []*schema.Column{UsageLogsColumns[30]},
|
||||||
RefColumns: []*schema.Column{AccountsColumns[0]},
|
RefColumns: []*schema.Column{AccountsColumns[0]},
|
||||||
OnDelete: schema.NoAction,
|
OnDelete: schema.NoAction,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_groups_usage_logs",
|
Symbol: "usage_logs_groups_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[28]},
|
Columns: []*schema.Column{UsageLogsColumns[31]},
|
||||||
RefColumns: []*schema.Column{GroupsColumns[0]},
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
OnDelete: schema.SetNull,
|
OnDelete: schema.SetNull,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_users_usage_logs",
|
Symbol: "usage_logs_users_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[29]},
|
Columns: []*schema.Column{UsageLogsColumns[32]},
|
||||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
OnDelete: schema.NoAction,
|
OnDelete: schema.NoAction,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_user_subscriptions_usage_logs",
|
Symbol: "usage_logs_user_subscriptions_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[30]},
|
Columns: []*schema.Column{UsageLogsColumns[33]},
|
||||||
RefColumns: []*schema.Column{UserSubscriptionsColumns[0]},
|
RefColumns: []*schema.Column{UserSubscriptionsColumns[0]},
|
||||||
OnDelete: schema.SetNull,
|
OnDelete: schema.SetNull,
|
||||||
},
|
},
|
||||||
@@ -691,32 +789,32 @@ var (
|
|||||||
{
|
{
|
||||||
Name: "usagelog_user_id",
|
Name: "usagelog_user_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[29]},
|
Columns: []*schema.Column{UsageLogsColumns[32]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_api_key_id",
|
Name: "usagelog_api_key_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[26]},
|
Columns: []*schema.Column{UsageLogsColumns[29]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_account_id",
|
Name: "usagelog_account_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[27]},
|
Columns: []*schema.Column{UsageLogsColumns[30]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_group_id",
|
Name: "usagelog_group_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[28]},
|
Columns: []*schema.Column{UsageLogsColumns[31]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_subscription_id",
|
Name: "usagelog_subscription_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[30]},
|
Columns: []*schema.Column{UsageLogsColumns[33]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_created_at",
|
Name: "usagelog_created_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[25]},
|
Columns: []*schema.Column{UsageLogsColumns[28]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_model",
|
Name: "usagelog_model",
|
||||||
@@ -731,12 +829,17 @@ var (
|
|||||||
{
|
{
|
||||||
Name: "usagelog_user_id_created_at",
|
Name: "usagelog_user_id_created_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[29], UsageLogsColumns[25]},
|
Columns: []*schema.Column{UsageLogsColumns[32], UsageLogsColumns[28]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_api_key_id_created_at",
|
Name: "usagelog_api_key_id_created_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[26], UsageLogsColumns[25]},
|
Columns: []*schema.Column{UsageLogsColumns[29], UsageLogsColumns[28]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_group_id_created_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[31], UsageLogsColumns[28]},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -757,6 +860,8 @@ var (
|
|||||||
{Name: "totp_secret_encrypted", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
{Name: "totp_secret_encrypted", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
{Name: "totp_enabled", Type: field.TypeBool, Default: false},
|
{Name: "totp_enabled", Type: field.TypeBool, Default: false},
|
||||||
{Name: "totp_enabled_at", Type: field.TypeTime, Nullable: true},
|
{Name: "totp_enabled_at", Type: field.TypeTime, Nullable: true},
|
||||||
|
{Name: "sora_storage_quota_bytes", Type: field.TypeInt64, Default: 0},
|
||||||
|
{Name: "sora_storage_used_bytes", Type: field.TypeInt64, Default: 0},
|
||||||
}
|
}
|
||||||
// UsersTable holds the schema information for the "users" table.
|
// UsersTable holds the schema information for the "users" table.
|
||||||
UsersTable = &schema.Table{
|
UsersTable = &schema.Table{
|
||||||
@@ -962,6 +1067,11 @@ var (
|
|||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UserSubscriptionsColumns[5]},
|
Columns: []*schema.Column{UserSubscriptionsColumns[5]},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "usersubscription_user_id_status_expires_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[16], UserSubscriptionsColumns[6], UserSubscriptionsColumns[5]},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "usersubscription_assigned_by",
|
Name: "usersubscription_assigned_by",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
@@ -988,10 +1098,12 @@ var (
|
|||||||
AnnouncementReadsTable,
|
AnnouncementReadsTable,
|
||||||
ErrorPassthroughRulesTable,
|
ErrorPassthroughRulesTable,
|
||||||
GroupsTable,
|
GroupsTable,
|
||||||
|
IdempotencyRecordsTable,
|
||||||
PromoCodesTable,
|
PromoCodesTable,
|
||||||
PromoCodeUsagesTable,
|
PromoCodeUsagesTable,
|
||||||
ProxiesTable,
|
ProxiesTable,
|
||||||
RedeemCodesTable,
|
RedeemCodesTable,
|
||||||
|
SecuritySecretsTable,
|
||||||
SettingsTable,
|
SettingsTable,
|
||||||
UsageCleanupTasksTable,
|
UsageCleanupTasksTable,
|
||||||
UsageLogsTable,
|
UsageLogsTable,
|
||||||
@@ -1032,6 +1144,9 @@ func init() {
|
|||||||
GroupsTable.Annotation = &entsql.Annotation{
|
GroupsTable.Annotation = &entsql.Annotation{
|
||||||
Table: "groups",
|
Table: "groups",
|
||||||
}
|
}
|
||||||
|
IdempotencyRecordsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "idempotency_records",
|
||||||
|
}
|
||||||
PromoCodesTable.Annotation = &entsql.Annotation{
|
PromoCodesTable.Annotation = &entsql.Annotation{
|
||||||
Table: "promo_codes",
|
Table: "promo_codes",
|
||||||
}
|
}
|
||||||
@@ -1048,6 +1163,9 @@ func init() {
|
|||||||
RedeemCodesTable.Annotation = &entsql.Annotation{
|
RedeemCodesTable.Annotation = &entsql.Annotation{
|
||||||
Table: "redeem_codes",
|
Table: "redeem_codes",
|
||||||
}
|
}
|
||||||
|
SecuritySecretsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "security_secrets",
|
||||||
|
}
|
||||||
SettingsTable.Annotation = &entsql.Annotation{
|
SettingsTable.Annotation = &entsql.Annotation{
|
||||||
Table: "settings",
|
Table: "settings",
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -27,6 +27,9 @@ type ErrorPassthroughRule func(*sql.Selector)
|
|||||||
// Group is the predicate function for group builders.
|
// Group is the predicate function for group builders.
|
||||||
type Group func(*sql.Selector)
|
type Group func(*sql.Selector)
|
||||||
|
|
||||||
|
// IdempotencyRecord is the predicate function for idempotencyrecord builders.
|
||||||
|
type IdempotencyRecord func(*sql.Selector)
|
||||||
|
|
||||||
// PromoCode is the predicate function for promocode builders.
|
// PromoCode is the predicate function for promocode builders.
|
||||||
type PromoCode func(*sql.Selector)
|
type PromoCode func(*sql.Selector)
|
||||||
|
|
||||||
@@ -39,6 +42,9 @@ type Proxy func(*sql.Selector)
|
|||||||
// RedeemCode is the predicate function for redeemcode builders.
|
// RedeemCode is the predicate function for redeemcode builders.
|
||||||
type RedeemCode func(*sql.Selector)
|
type RedeemCode func(*sql.Selector)
|
||||||
|
|
||||||
|
// SecuritySecret is the predicate function for securitysecret builders.
|
||||||
|
type SecuritySecret func(*sql.Selector)
|
||||||
|
|
||||||
// Setting is the predicate function for setting builders.
|
// Setting is the predicate function for setting builders.
|
||||||
type Setting func(*sql.Selector)
|
type Setting func(*sql.Selector)
|
||||||
|
|
||||||
|
|||||||
@@ -12,11 +12,13 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/schema"
|
"github.com/Wei-Shaw/sub2api/ent/schema"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
@@ -93,13 +95,37 @@ func init() {
|
|||||||
// apikey.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
// apikey.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
apikey.StatusValidator = apikeyDescStatus.Validators[0].(func(string) error)
|
apikey.StatusValidator = apikeyDescStatus.Validators[0].(func(string) error)
|
||||||
// apikeyDescQuota is the schema descriptor for quota field.
|
// apikeyDescQuota is the schema descriptor for quota field.
|
||||||
apikeyDescQuota := apikeyFields[7].Descriptor()
|
apikeyDescQuota := apikeyFields[8].Descriptor()
|
||||||
// apikey.DefaultQuota holds the default value on creation for the quota field.
|
// apikey.DefaultQuota holds the default value on creation for the quota field.
|
||||||
apikey.DefaultQuota = apikeyDescQuota.Default.(float64)
|
apikey.DefaultQuota = apikeyDescQuota.Default.(float64)
|
||||||
// apikeyDescQuotaUsed is the schema descriptor for quota_used field.
|
// apikeyDescQuotaUsed is the schema descriptor for quota_used field.
|
||||||
apikeyDescQuotaUsed := apikeyFields[8].Descriptor()
|
apikeyDescQuotaUsed := apikeyFields[9].Descriptor()
|
||||||
// apikey.DefaultQuotaUsed holds the default value on creation for the quota_used field.
|
// apikey.DefaultQuotaUsed holds the default value on creation for the quota_used field.
|
||||||
apikey.DefaultQuotaUsed = apikeyDescQuotaUsed.Default.(float64)
|
apikey.DefaultQuotaUsed = apikeyDescQuotaUsed.Default.(float64)
|
||||||
|
// apikeyDescRateLimit5h is the schema descriptor for rate_limit_5h field.
|
||||||
|
apikeyDescRateLimit5h := apikeyFields[11].Descriptor()
|
||||||
|
// apikey.DefaultRateLimit5h holds the default value on creation for the rate_limit_5h field.
|
||||||
|
apikey.DefaultRateLimit5h = apikeyDescRateLimit5h.Default.(float64)
|
||||||
|
// apikeyDescRateLimit1d is the schema descriptor for rate_limit_1d field.
|
||||||
|
apikeyDescRateLimit1d := apikeyFields[12].Descriptor()
|
||||||
|
// apikey.DefaultRateLimit1d holds the default value on creation for the rate_limit_1d field.
|
||||||
|
apikey.DefaultRateLimit1d = apikeyDescRateLimit1d.Default.(float64)
|
||||||
|
// apikeyDescRateLimit7d is the schema descriptor for rate_limit_7d field.
|
||||||
|
apikeyDescRateLimit7d := apikeyFields[13].Descriptor()
|
||||||
|
// apikey.DefaultRateLimit7d holds the default value on creation for the rate_limit_7d field.
|
||||||
|
apikey.DefaultRateLimit7d = apikeyDescRateLimit7d.Default.(float64)
|
||||||
|
// apikeyDescUsage5h is the schema descriptor for usage_5h field.
|
||||||
|
apikeyDescUsage5h := apikeyFields[14].Descriptor()
|
||||||
|
// apikey.DefaultUsage5h holds the default value on creation for the usage_5h field.
|
||||||
|
apikey.DefaultUsage5h = apikeyDescUsage5h.Default.(float64)
|
||||||
|
// apikeyDescUsage1d is the schema descriptor for usage_1d field.
|
||||||
|
apikeyDescUsage1d := apikeyFields[15].Descriptor()
|
||||||
|
// apikey.DefaultUsage1d holds the default value on creation for the usage_1d field.
|
||||||
|
apikey.DefaultUsage1d = apikeyDescUsage1d.Default.(float64)
|
||||||
|
// apikeyDescUsage7d is the schema descriptor for usage_7d field.
|
||||||
|
apikeyDescUsage7d := apikeyFields[16].Descriptor()
|
||||||
|
// apikey.DefaultUsage7d holds the default value on creation for the usage_7d field.
|
||||||
|
apikey.DefaultUsage7d = apikeyDescUsage7d.Default.(float64)
|
||||||
accountMixin := schema.Account{}.Mixin()
|
accountMixin := schema.Account{}.Mixin()
|
||||||
accountMixinHooks1 := accountMixin[1].Hooks()
|
accountMixinHooks1 := accountMixin[1].Hooks()
|
||||||
account.Hooks[0] = accountMixinHooks1[0]
|
account.Hooks[0] = accountMixinHooks1[0]
|
||||||
@@ -186,29 +212,29 @@ func init() {
|
|||||||
// account.DefaultConcurrency holds the default value on creation for the concurrency field.
|
// account.DefaultConcurrency holds the default value on creation for the concurrency field.
|
||||||
account.DefaultConcurrency = accountDescConcurrency.Default.(int)
|
account.DefaultConcurrency = accountDescConcurrency.Default.(int)
|
||||||
// accountDescPriority is the schema descriptor for priority field.
|
// accountDescPriority is the schema descriptor for priority field.
|
||||||
accountDescPriority := accountFields[8].Descriptor()
|
accountDescPriority := accountFields[9].Descriptor()
|
||||||
// account.DefaultPriority holds the default value on creation for the priority field.
|
// account.DefaultPriority holds the default value on creation for the priority field.
|
||||||
account.DefaultPriority = accountDescPriority.Default.(int)
|
account.DefaultPriority = accountDescPriority.Default.(int)
|
||||||
// accountDescRateMultiplier is the schema descriptor for rate_multiplier field.
|
// accountDescRateMultiplier is the schema descriptor for rate_multiplier field.
|
||||||
accountDescRateMultiplier := accountFields[9].Descriptor()
|
accountDescRateMultiplier := accountFields[10].Descriptor()
|
||||||
// account.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
|
// account.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
|
||||||
account.DefaultRateMultiplier = accountDescRateMultiplier.Default.(float64)
|
account.DefaultRateMultiplier = accountDescRateMultiplier.Default.(float64)
|
||||||
// accountDescStatus is the schema descriptor for status field.
|
// accountDescStatus is the schema descriptor for status field.
|
||||||
accountDescStatus := accountFields[10].Descriptor()
|
accountDescStatus := accountFields[11].Descriptor()
|
||||||
// account.DefaultStatus holds the default value on creation for the status field.
|
// account.DefaultStatus holds the default value on creation for the status field.
|
||||||
account.DefaultStatus = accountDescStatus.Default.(string)
|
account.DefaultStatus = accountDescStatus.Default.(string)
|
||||||
// account.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
// account.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
account.StatusValidator = accountDescStatus.Validators[0].(func(string) error)
|
account.StatusValidator = accountDescStatus.Validators[0].(func(string) error)
|
||||||
// accountDescAutoPauseOnExpired is the schema descriptor for auto_pause_on_expired field.
|
// accountDescAutoPauseOnExpired is the schema descriptor for auto_pause_on_expired field.
|
||||||
accountDescAutoPauseOnExpired := accountFields[14].Descriptor()
|
accountDescAutoPauseOnExpired := accountFields[15].Descriptor()
|
||||||
// account.DefaultAutoPauseOnExpired holds the default value on creation for the auto_pause_on_expired field.
|
// account.DefaultAutoPauseOnExpired holds the default value on creation for the auto_pause_on_expired field.
|
||||||
account.DefaultAutoPauseOnExpired = accountDescAutoPauseOnExpired.Default.(bool)
|
account.DefaultAutoPauseOnExpired = accountDescAutoPauseOnExpired.Default.(bool)
|
||||||
// accountDescSchedulable is the schema descriptor for schedulable field.
|
// accountDescSchedulable is the schema descriptor for schedulable field.
|
||||||
accountDescSchedulable := accountFields[15].Descriptor()
|
accountDescSchedulable := accountFields[16].Descriptor()
|
||||||
// account.DefaultSchedulable holds the default value on creation for the schedulable field.
|
// account.DefaultSchedulable holds the default value on creation for the schedulable field.
|
||||||
account.DefaultSchedulable = accountDescSchedulable.Default.(bool)
|
account.DefaultSchedulable = accountDescSchedulable.Default.(bool)
|
||||||
// accountDescSessionWindowStatus is the schema descriptor for session_window_status field.
|
// accountDescSessionWindowStatus is the schema descriptor for session_window_status field.
|
||||||
accountDescSessionWindowStatus := accountFields[21].Descriptor()
|
accountDescSessionWindowStatus := accountFields[24].Descriptor()
|
||||||
// account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save.
|
// account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save.
|
||||||
account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error)
|
account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error)
|
||||||
accountgroupFields := schema.AccountGroup{}.Fields()
|
accountgroupFields := schema.AccountGroup{}.Fields()
|
||||||
@@ -251,12 +277,18 @@ func init() {
|
|||||||
announcement.DefaultStatus = announcementDescStatus.Default.(string)
|
announcement.DefaultStatus = announcementDescStatus.Default.(string)
|
||||||
// announcement.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
// announcement.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
announcement.StatusValidator = announcementDescStatus.Validators[0].(func(string) error)
|
announcement.StatusValidator = announcementDescStatus.Validators[0].(func(string) error)
|
||||||
|
// announcementDescNotifyMode is the schema descriptor for notify_mode field.
|
||||||
|
announcementDescNotifyMode := announcementFields[3].Descriptor()
|
||||||
|
// announcement.DefaultNotifyMode holds the default value on creation for the notify_mode field.
|
||||||
|
announcement.DefaultNotifyMode = announcementDescNotifyMode.Default.(string)
|
||||||
|
// announcement.NotifyModeValidator is a validator for the "notify_mode" field. It is called by the builders before save.
|
||||||
|
announcement.NotifyModeValidator = announcementDescNotifyMode.Validators[0].(func(string) error)
|
||||||
// announcementDescCreatedAt is the schema descriptor for created_at field.
|
// announcementDescCreatedAt is the schema descriptor for created_at field.
|
||||||
announcementDescCreatedAt := announcementFields[8].Descriptor()
|
announcementDescCreatedAt := announcementFields[9].Descriptor()
|
||||||
// announcement.DefaultCreatedAt holds the default value on creation for the created_at field.
|
// announcement.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
announcement.DefaultCreatedAt = announcementDescCreatedAt.Default.(func() time.Time)
|
announcement.DefaultCreatedAt = announcementDescCreatedAt.Default.(func() time.Time)
|
||||||
// announcementDescUpdatedAt is the schema descriptor for updated_at field.
|
// announcementDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
announcementDescUpdatedAt := announcementFields[9].Descriptor()
|
announcementDescUpdatedAt := announcementFields[10].Descriptor()
|
||||||
// announcement.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
// announcement.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
announcement.DefaultUpdatedAt = announcementDescUpdatedAt.Default.(func() time.Time)
|
announcement.DefaultUpdatedAt = announcementDescUpdatedAt.Default.(func() time.Time)
|
||||||
// announcement.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
// announcement.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
@@ -326,6 +358,10 @@ func init() {
|
|||||||
errorpassthroughruleDescPassthroughBody := errorpassthroughruleFields[9].Descriptor()
|
errorpassthroughruleDescPassthroughBody := errorpassthroughruleFields[9].Descriptor()
|
||||||
// errorpassthroughrule.DefaultPassthroughBody holds the default value on creation for the passthrough_body field.
|
// errorpassthroughrule.DefaultPassthroughBody holds the default value on creation for the passthrough_body field.
|
||||||
errorpassthroughrule.DefaultPassthroughBody = errorpassthroughruleDescPassthroughBody.Default.(bool)
|
errorpassthroughrule.DefaultPassthroughBody = errorpassthroughruleDescPassthroughBody.Default.(bool)
|
||||||
|
// errorpassthroughruleDescSkipMonitoring is the schema descriptor for skip_monitoring field.
|
||||||
|
errorpassthroughruleDescSkipMonitoring := errorpassthroughruleFields[11].Descriptor()
|
||||||
|
// errorpassthroughrule.DefaultSkipMonitoring holds the default value on creation for the skip_monitoring field.
|
||||||
|
errorpassthroughrule.DefaultSkipMonitoring = errorpassthroughruleDescSkipMonitoring.Default.(bool)
|
||||||
groupMixin := schema.Group{}.Mixin()
|
groupMixin := schema.Group{}.Mixin()
|
||||||
groupMixinHooks1 := groupMixin[1].Hooks()
|
groupMixinHooks1 := groupMixin[1].Hooks()
|
||||||
group.Hooks[0] = groupMixinHooks1[0]
|
group.Hooks[0] = groupMixinHooks1[0]
|
||||||
@@ -393,22 +429,75 @@ func init() {
|
|||||||
groupDescDefaultValidityDays := groupFields[10].Descriptor()
|
groupDescDefaultValidityDays := groupFields[10].Descriptor()
|
||||||
// group.DefaultDefaultValidityDays holds the default value on creation for the default_validity_days field.
|
// group.DefaultDefaultValidityDays holds the default value on creation for the default_validity_days field.
|
||||||
group.DefaultDefaultValidityDays = groupDescDefaultValidityDays.Default.(int)
|
group.DefaultDefaultValidityDays = groupDescDefaultValidityDays.Default.(int)
|
||||||
|
// groupDescSoraStorageQuotaBytes is the schema descriptor for sora_storage_quota_bytes field.
|
||||||
|
groupDescSoraStorageQuotaBytes := groupFields[18].Descriptor()
|
||||||
|
// group.DefaultSoraStorageQuotaBytes holds the default value on creation for the sora_storage_quota_bytes field.
|
||||||
|
group.DefaultSoraStorageQuotaBytes = groupDescSoraStorageQuotaBytes.Default.(int64)
|
||||||
// groupDescClaudeCodeOnly is the schema descriptor for claude_code_only field.
|
// groupDescClaudeCodeOnly is the schema descriptor for claude_code_only field.
|
||||||
groupDescClaudeCodeOnly := groupFields[14].Descriptor()
|
groupDescClaudeCodeOnly := groupFields[19].Descriptor()
|
||||||
// group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field.
|
// group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field.
|
||||||
group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool)
|
group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool)
|
||||||
// groupDescModelRoutingEnabled is the schema descriptor for model_routing_enabled field.
|
// groupDescModelRoutingEnabled is the schema descriptor for model_routing_enabled field.
|
||||||
groupDescModelRoutingEnabled := groupFields[18].Descriptor()
|
groupDescModelRoutingEnabled := groupFields[23].Descriptor()
|
||||||
// group.DefaultModelRoutingEnabled holds the default value on creation for the model_routing_enabled field.
|
// group.DefaultModelRoutingEnabled holds the default value on creation for the model_routing_enabled field.
|
||||||
group.DefaultModelRoutingEnabled = groupDescModelRoutingEnabled.Default.(bool)
|
group.DefaultModelRoutingEnabled = groupDescModelRoutingEnabled.Default.(bool)
|
||||||
// groupDescMcpXMLInject is the schema descriptor for mcp_xml_inject field.
|
// groupDescMcpXMLInject is the schema descriptor for mcp_xml_inject field.
|
||||||
groupDescMcpXMLInject := groupFields[19].Descriptor()
|
groupDescMcpXMLInject := groupFields[24].Descriptor()
|
||||||
// group.DefaultMcpXMLInject holds the default value on creation for the mcp_xml_inject field.
|
// group.DefaultMcpXMLInject holds the default value on creation for the mcp_xml_inject field.
|
||||||
group.DefaultMcpXMLInject = groupDescMcpXMLInject.Default.(bool)
|
group.DefaultMcpXMLInject = groupDescMcpXMLInject.Default.(bool)
|
||||||
// groupDescSupportedModelScopes is the schema descriptor for supported_model_scopes field.
|
// groupDescSupportedModelScopes is the schema descriptor for supported_model_scopes field.
|
||||||
groupDescSupportedModelScopes := groupFields[20].Descriptor()
|
groupDescSupportedModelScopes := groupFields[25].Descriptor()
|
||||||
// group.DefaultSupportedModelScopes holds the default value on creation for the supported_model_scopes field.
|
// group.DefaultSupportedModelScopes holds the default value on creation for the supported_model_scopes field.
|
||||||
group.DefaultSupportedModelScopes = groupDescSupportedModelScopes.Default.([]string)
|
group.DefaultSupportedModelScopes = groupDescSupportedModelScopes.Default.([]string)
|
||||||
|
// groupDescSortOrder is the schema descriptor for sort_order field.
|
||||||
|
groupDescSortOrder := groupFields[26].Descriptor()
|
||||||
|
// group.DefaultSortOrder holds the default value on creation for the sort_order field.
|
||||||
|
group.DefaultSortOrder = groupDescSortOrder.Default.(int)
|
||||||
|
// groupDescAllowMessagesDispatch is the schema descriptor for allow_messages_dispatch field.
|
||||||
|
groupDescAllowMessagesDispatch := groupFields[27].Descriptor()
|
||||||
|
// group.DefaultAllowMessagesDispatch holds the default value on creation for the allow_messages_dispatch field.
|
||||||
|
group.DefaultAllowMessagesDispatch = groupDescAllowMessagesDispatch.Default.(bool)
|
||||||
|
// groupDescDefaultMappedModel is the schema descriptor for default_mapped_model field.
|
||||||
|
groupDescDefaultMappedModel := groupFields[28].Descriptor()
|
||||||
|
// group.DefaultDefaultMappedModel holds the default value on creation for the default_mapped_model field.
|
||||||
|
group.DefaultDefaultMappedModel = groupDescDefaultMappedModel.Default.(string)
|
||||||
|
// group.DefaultMappedModelValidator is a validator for the "default_mapped_model" field. It is called by the builders before save.
|
||||||
|
group.DefaultMappedModelValidator = groupDescDefaultMappedModel.Validators[0].(func(string) error)
|
||||||
|
idempotencyrecordMixin := schema.IdempotencyRecord{}.Mixin()
|
||||||
|
idempotencyrecordMixinFields0 := idempotencyrecordMixin[0].Fields()
|
||||||
|
_ = idempotencyrecordMixinFields0
|
||||||
|
idempotencyrecordFields := schema.IdempotencyRecord{}.Fields()
|
||||||
|
_ = idempotencyrecordFields
|
||||||
|
// idempotencyrecordDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
idempotencyrecordDescCreatedAt := idempotencyrecordMixinFields0[0].Descriptor()
|
||||||
|
// idempotencyrecord.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
idempotencyrecord.DefaultCreatedAt = idempotencyrecordDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// idempotencyrecordDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
idempotencyrecordDescUpdatedAt := idempotencyrecordMixinFields0[1].Descriptor()
|
||||||
|
// idempotencyrecord.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
idempotencyrecord.DefaultUpdatedAt = idempotencyrecordDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// idempotencyrecord.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
idempotencyrecord.UpdateDefaultUpdatedAt = idempotencyrecordDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// idempotencyrecordDescScope is the schema descriptor for scope field.
|
||||||
|
idempotencyrecordDescScope := idempotencyrecordFields[0].Descriptor()
|
||||||
|
// idempotencyrecord.ScopeValidator is a validator for the "scope" field. It is called by the builders before save.
|
||||||
|
idempotencyrecord.ScopeValidator = idempotencyrecordDescScope.Validators[0].(func(string) error)
|
||||||
|
// idempotencyrecordDescIdempotencyKeyHash is the schema descriptor for idempotency_key_hash field.
|
||||||
|
idempotencyrecordDescIdempotencyKeyHash := idempotencyrecordFields[1].Descriptor()
|
||||||
|
// idempotencyrecord.IdempotencyKeyHashValidator is a validator for the "idempotency_key_hash" field. It is called by the builders before save.
|
||||||
|
idempotencyrecord.IdempotencyKeyHashValidator = idempotencyrecordDescIdempotencyKeyHash.Validators[0].(func(string) error)
|
||||||
|
// idempotencyrecordDescRequestFingerprint is the schema descriptor for request_fingerprint field.
|
||||||
|
idempotencyrecordDescRequestFingerprint := idempotencyrecordFields[2].Descriptor()
|
||||||
|
// idempotencyrecord.RequestFingerprintValidator is a validator for the "request_fingerprint" field. It is called by the builders before save.
|
||||||
|
idempotencyrecord.RequestFingerprintValidator = idempotencyrecordDescRequestFingerprint.Validators[0].(func(string) error)
|
||||||
|
// idempotencyrecordDescStatus is the schema descriptor for status field.
|
||||||
|
idempotencyrecordDescStatus := idempotencyrecordFields[3].Descriptor()
|
||||||
|
// idempotencyrecord.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
idempotencyrecord.StatusValidator = idempotencyrecordDescStatus.Validators[0].(func(string) error)
|
||||||
|
// idempotencyrecordDescErrorReason is the schema descriptor for error_reason field.
|
||||||
|
idempotencyrecordDescErrorReason := idempotencyrecordFields[6].Descriptor()
|
||||||
|
// idempotencyrecord.ErrorReasonValidator is a validator for the "error_reason" field. It is called by the builders before save.
|
||||||
|
idempotencyrecord.ErrorReasonValidator = idempotencyrecordDescErrorReason.Validators[0].(func(string) error)
|
||||||
promocodeFields := schema.PromoCode{}.Fields()
|
promocodeFields := schema.PromoCode{}.Fields()
|
||||||
_ = promocodeFields
|
_ = promocodeFields
|
||||||
// promocodeDescCode is the schema descriptor for code field.
|
// promocodeDescCode is the schema descriptor for code field.
|
||||||
@@ -594,6 +683,43 @@ func init() {
|
|||||||
redeemcodeDescValidityDays := redeemcodeFields[9].Descriptor()
|
redeemcodeDescValidityDays := redeemcodeFields[9].Descriptor()
|
||||||
// redeemcode.DefaultValidityDays holds the default value on creation for the validity_days field.
|
// redeemcode.DefaultValidityDays holds the default value on creation for the validity_days field.
|
||||||
redeemcode.DefaultValidityDays = redeemcodeDescValidityDays.Default.(int)
|
redeemcode.DefaultValidityDays = redeemcodeDescValidityDays.Default.(int)
|
||||||
|
securitysecretMixin := schema.SecuritySecret{}.Mixin()
|
||||||
|
securitysecretMixinFields0 := securitysecretMixin[0].Fields()
|
||||||
|
_ = securitysecretMixinFields0
|
||||||
|
securitysecretFields := schema.SecuritySecret{}.Fields()
|
||||||
|
_ = securitysecretFields
|
||||||
|
// securitysecretDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
securitysecretDescCreatedAt := securitysecretMixinFields0[0].Descriptor()
|
||||||
|
// securitysecret.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
securitysecret.DefaultCreatedAt = securitysecretDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// securitysecretDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
securitysecretDescUpdatedAt := securitysecretMixinFields0[1].Descriptor()
|
||||||
|
// securitysecret.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
securitysecret.DefaultUpdatedAt = securitysecretDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// securitysecret.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
securitysecret.UpdateDefaultUpdatedAt = securitysecretDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// securitysecretDescKey is the schema descriptor for key field.
|
||||||
|
securitysecretDescKey := securitysecretFields[0].Descriptor()
|
||||||
|
// securitysecret.KeyValidator is a validator for the "key" field. It is called by the builders before save.
|
||||||
|
securitysecret.KeyValidator = func() func(string) error {
|
||||||
|
validators := securitysecretDescKey.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(key string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(key); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// securitysecretDescValue is the schema descriptor for value field.
|
||||||
|
securitysecretDescValue := securitysecretFields[1].Descriptor()
|
||||||
|
// securitysecret.ValueValidator is a validator for the "value" field. It is called by the builders before save.
|
||||||
|
securitysecret.ValueValidator = securitysecretDescValue.Validators[0].(func(string) error)
|
||||||
settingFields := schema.Setting{}.Fields()
|
settingFields := schema.Setting{}.Fields()
|
||||||
_ = settingFields
|
_ = settingFields
|
||||||
// settingDescKey is the schema descriptor for key field.
|
// settingDescKey is the schema descriptor for key field.
|
||||||
@@ -695,84 +821,96 @@ func init() {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
// usagelogDescUpstreamModel is the schema descriptor for upstream_model field.
|
||||||
|
usagelogDescUpstreamModel := usagelogFields[5].Descriptor()
|
||||||
|
// usagelog.UpstreamModelValidator is a validator for the "upstream_model" field. It is called by the builders before save.
|
||||||
|
usagelog.UpstreamModelValidator = usagelogDescUpstreamModel.Validators[0].(func(string) error)
|
||||||
// usagelogDescInputTokens is the schema descriptor for input_tokens field.
|
// usagelogDescInputTokens is the schema descriptor for input_tokens field.
|
||||||
usagelogDescInputTokens := usagelogFields[7].Descriptor()
|
usagelogDescInputTokens := usagelogFields[8].Descriptor()
|
||||||
// usagelog.DefaultInputTokens holds the default value on creation for the input_tokens field.
|
// usagelog.DefaultInputTokens holds the default value on creation for the input_tokens field.
|
||||||
usagelog.DefaultInputTokens = usagelogDescInputTokens.Default.(int)
|
usagelog.DefaultInputTokens = usagelogDescInputTokens.Default.(int)
|
||||||
// usagelogDescOutputTokens is the schema descriptor for output_tokens field.
|
// usagelogDescOutputTokens is the schema descriptor for output_tokens field.
|
||||||
usagelogDescOutputTokens := usagelogFields[8].Descriptor()
|
usagelogDescOutputTokens := usagelogFields[9].Descriptor()
|
||||||
// usagelog.DefaultOutputTokens holds the default value on creation for the output_tokens field.
|
// usagelog.DefaultOutputTokens holds the default value on creation for the output_tokens field.
|
||||||
usagelog.DefaultOutputTokens = usagelogDescOutputTokens.Default.(int)
|
usagelog.DefaultOutputTokens = usagelogDescOutputTokens.Default.(int)
|
||||||
// usagelogDescCacheCreationTokens is the schema descriptor for cache_creation_tokens field.
|
// usagelogDescCacheCreationTokens is the schema descriptor for cache_creation_tokens field.
|
||||||
usagelogDescCacheCreationTokens := usagelogFields[9].Descriptor()
|
usagelogDescCacheCreationTokens := usagelogFields[10].Descriptor()
|
||||||
// usagelog.DefaultCacheCreationTokens holds the default value on creation for the cache_creation_tokens field.
|
// usagelog.DefaultCacheCreationTokens holds the default value on creation for the cache_creation_tokens field.
|
||||||
usagelog.DefaultCacheCreationTokens = usagelogDescCacheCreationTokens.Default.(int)
|
usagelog.DefaultCacheCreationTokens = usagelogDescCacheCreationTokens.Default.(int)
|
||||||
// usagelogDescCacheReadTokens is the schema descriptor for cache_read_tokens field.
|
// usagelogDescCacheReadTokens is the schema descriptor for cache_read_tokens field.
|
||||||
usagelogDescCacheReadTokens := usagelogFields[10].Descriptor()
|
usagelogDescCacheReadTokens := usagelogFields[11].Descriptor()
|
||||||
// usagelog.DefaultCacheReadTokens holds the default value on creation for the cache_read_tokens field.
|
// usagelog.DefaultCacheReadTokens holds the default value on creation for the cache_read_tokens field.
|
||||||
usagelog.DefaultCacheReadTokens = usagelogDescCacheReadTokens.Default.(int)
|
usagelog.DefaultCacheReadTokens = usagelogDescCacheReadTokens.Default.(int)
|
||||||
// usagelogDescCacheCreation5mTokens is the schema descriptor for cache_creation_5m_tokens field.
|
// usagelogDescCacheCreation5mTokens is the schema descriptor for cache_creation_5m_tokens field.
|
||||||
usagelogDescCacheCreation5mTokens := usagelogFields[11].Descriptor()
|
usagelogDescCacheCreation5mTokens := usagelogFields[12].Descriptor()
|
||||||
// usagelog.DefaultCacheCreation5mTokens holds the default value on creation for the cache_creation_5m_tokens field.
|
// usagelog.DefaultCacheCreation5mTokens holds the default value on creation for the cache_creation_5m_tokens field.
|
||||||
usagelog.DefaultCacheCreation5mTokens = usagelogDescCacheCreation5mTokens.Default.(int)
|
usagelog.DefaultCacheCreation5mTokens = usagelogDescCacheCreation5mTokens.Default.(int)
|
||||||
// usagelogDescCacheCreation1hTokens is the schema descriptor for cache_creation_1h_tokens field.
|
// usagelogDescCacheCreation1hTokens is the schema descriptor for cache_creation_1h_tokens field.
|
||||||
usagelogDescCacheCreation1hTokens := usagelogFields[12].Descriptor()
|
usagelogDescCacheCreation1hTokens := usagelogFields[13].Descriptor()
|
||||||
// usagelog.DefaultCacheCreation1hTokens holds the default value on creation for the cache_creation_1h_tokens field.
|
// usagelog.DefaultCacheCreation1hTokens holds the default value on creation for the cache_creation_1h_tokens field.
|
||||||
usagelog.DefaultCacheCreation1hTokens = usagelogDescCacheCreation1hTokens.Default.(int)
|
usagelog.DefaultCacheCreation1hTokens = usagelogDescCacheCreation1hTokens.Default.(int)
|
||||||
// usagelogDescInputCost is the schema descriptor for input_cost field.
|
// usagelogDescInputCost is the schema descriptor for input_cost field.
|
||||||
usagelogDescInputCost := usagelogFields[13].Descriptor()
|
usagelogDescInputCost := usagelogFields[14].Descriptor()
|
||||||
// usagelog.DefaultInputCost holds the default value on creation for the input_cost field.
|
// usagelog.DefaultInputCost holds the default value on creation for the input_cost field.
|
||||||
usagelog.DefaultInputCost = usagelogDescInputCost.Default.(float64)
|
usagelog.DefaultInputCost = usagelogDescInputCost.Default.(float64)
|
||||||
// usagelogDescOutputCost is the schema descriptor for output_cost field.
|
// usagelogDescOutputCost is the schema descriptor for output_cost field.
|
||||||
usagelogDescOutputCost := usagelogFields[14].Descriptor()
|
usagelogDescOutputCost := usagelogFields[15].Descriptor()
|
||||||
// usagelog.DefaultOutputCost holds the default value on creation for the output_cost field.
|
// usagelog.DefaultOutputCost holds the default value on creation for the output_cost field.
|
||||||
usagelog.DefaultOutputCost = usagelogDescOutputCost.Default.(float64)
|
usagelog.DefaultOutputCost = usagelogDescOutputCost.Default.(float64)
|
||||||
// usagelogDescCacheCreationCost is the schema descriptor for cache_creation_cost field.
|
// usagelogDescCacheCreationCost is the schema descriptor for cache_creation_cost field.
|
||||||
usagelogDescCacheCreationCost := usagelogFields[15].Descriptor()
|
usagelogDescCacheCreationCost := usagelogFields[16].Descriptor()
|
||||||
// usagelog.DefaultCacheCreationCost holds the default value on creation for the cache_creation_cost field.
|
// usagelog.DefaultCacheCreationCost holds the default value on creation for the cache_creation_cost field.
|
||||||
usagelog.DefaultCacheCreationCost = usagelogDescCacheCreationCost.Default.(float64)
|
usagelog.DefaultCacheCreationCost = usagelogDescCacheCreationCost.Default.(float64)
|
||||||
// usagelogDescCacheReadCost is the schema descriptor for cache_read_cost field.
|
// usagelogDescCacheReadCost is the schema descriptor for cache_read_cost field.
|
||||||
usagelogDescCacheReadCost := usagelogFields[16].Descriptor()
|
usagelogDescCacheReadCost := usagelogFields[17].Descriptor()
|
||||||
// usagelog.DefaultCacheReadCost holds the default value on creation for the cache_read_cost field.
|
// usagelog.DefaultCacheReadCost holds the default value on creation for the cache_read_cost field.
|
||||||
usagelog.DefaultCacheReadCost = usagelogDescCacheReadCost.Default.(float64)
|
usagelog.DefaultCacheReadCost = usagelogDescCacheReadCost.Default.(float64)
|
||||||
// usagelogDescTotalCost is the schema descriptor for total_cost field.
|
// usagelogDescTotalCost is the schema descriptor for total_cost field.
|
||||||
usagelogDescTotalCost := usagelogFields[17].Descriptor()
|
usagelogDescTotalCost := usagelogFields[18].Descriptor()
|
||||||
// usagelog.DefaultTotalCost holds the default value on creation for the total_cost field.
|
// usagelog.DefaultTotalCost holds the default value on creation for the total_cost field.
|
||||||
usagelog.DefaultTotalCost = usagelogDescTotalCost.Default.(float64)
|
usagelog.DefaultTotalCost = usagelogDescTotalCost.Default.(float64)
|
||||||
// usagelogDescActualCost is the schema descriptor for actual_cost field.
|
// usagelogDescActualCost is the schema descriptor for actual_cost field.
|
||||||
usagelogDescActualCost := usagelogFields[18].Descriptor()
|
usagelogDescActualCost := usagelogFields[19].Descriptor()
|
||||||
// usagelog.DefaultActualCost holds the default value on creation for the actual_cost field.
|
// usagelog.DefaultActualCost holds the default value on creation for the actual_cost field.
|
||||||
usagelog.DefaultActualCost = usagelogDescActualCost.Default.(float64)
|
usagelog.DefaultActualCost = usagelogDescActualCost.Default.(float64)
|
||||||
// usagelogDescRateMultiplier is the schema descriptor for rate_multiplier field.
|
// usagelogDescRateMultiplier is the schema descriptor for rate_multiplier field.
|
||||||
usagelogDescRateMultiplier := usagelogFields[19].Descriptor()
|
usagelogDescRateMultiplier := usagelogFields[20].Descriptor()
|
||||||
// usagelog.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
|
// usagelog.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
|
||||||
usagelog.DefaultRateMultiplier = usagelogDescRateMultiplier.Default.(float64)
|
usagelog.DefaultRateMultiplier = usagelogDescRateMultiplier.Default.(float64)
|
||||||
// usagelogDescBillingType is the schema descriptor for billing_type field.
|
// usagelogDescBillingType is the schema descriptor for billing_type field.
|
||||||
usagelogDescBillingType := usagelogFields[21].Descriptor()
|
usagelogDescBillingType := usagelogFields[22].Descriptor()
|
||||||
// usagelog.DefaultBillingType holds the default value on creation for the billing_type field.
|
// usagelog.DefaultBillingType holds the default value on creation for the billing_type field.
|
||||||
usagelog.DefaultBillingType = usagelogDescBillingType.Default.(int8)
|
usagelog.DefaultBillingType = usagelogDescBillingType.Default.(int8)
|
||||||
// usagelogDescStream is the schema descriptor for stream field.
|
// usagelogDescStream is the schema descriptor for stream field.
|
||||||
usagelogDescStream := usagelogFields[22].Descriptor()
|
usagelogDescStream := usagelogFields[23].Descriptor()
|
||||||
// usagelog.DefaultStream holds the default value on creation for the stream field.
|
// usagelog.DefaultStream holds the default value on creation for the stream field.
|
||||||
usagelog.DefaultStream = usagelogDescStream.Default.(bool)
|
usagelog.DefaultStream = usagelogDescStream.Default.(bool)
|
||||||
// usagelogDescUserAgent is the schema descriptor for user_agent field.
|
// usagelogDescUserAgent is the schema descriptor for user_agent field.
|
||||||
usagelogDescUserAgent := usagelogFields[25].Descriptor()
|
usagelogDescUserAgent := usagelogFields[26].Descriptor()
|
||||||
// usagelog.UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save.
|
// usagelog.UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save.
|
||||||
usagelog.UserAgentValidator = usagelogDescUserAgent.Validators[0].(func(string) error)
|
usagelog.UserAgentValidator = usagelogDescUserAgent.Validators[0].(func(string) error)
|
||||||
// usagelogDescIPAddress is the schema descriptor for ip_address field.
|
// usagelogDescIPAddress is the schema descriptor for ip_address field.
|
||||||
usagelogDescIPAddress := usagelogFields[26].Descriptor()
|
usagelogDescIPAddress := usagelogFields[27].Descriptor()
|
||||||
// usagelog.IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save.
|
// usagelog.IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save.
|
||||||
usagelog.IPAddressValidator = usagelogDescIPAddress.Validators[0].(func(string) error)
|
usagelog.IPAddressValidator = usagelogDescIPAddress.Validators[0].(func(string) error)
|
||||||
// usagelogDescImageCount is the schema descriptor for image_count field.
|
// usagelogDescImageCount is the schema descriptor for image_count field.
|
||||||
usagelogDescImageCount := usagelogFields[27].Descriptor()
|
usagelogDescImageCount := usagelogFields[28].Descriptor()
|
||||||
// usagelog.DefaultImageCount holds the default value on creation for the image_count field.
|
// usagelog.DefaultImageCount holds the default value on creation for the image_count field.
|
||||||
usagelog.DefaultImageCount = usagelogDescImageCount.Default.(int)
|
usagelog.DefaultImageCount = usagelogDescImageCount.Default.(int)
|
||||||
// usagelogDescImageSize is the schema descriptor for image_size field.
|
// usagelogDescImageSize is the schema descriptor for image_size field.
|
||||||
usagelogDescImageSize := usagelogFields[28].Descriptor()
|
usagelogDescImageSize := usagelogFields[29].Descriptor()
|
||||||
// usagelog.ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
// usagelog.ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
||||||
usagelog.ImageSizeValidator = usagelogDescImageSize.Validators[0].(func(string) error)
|
usagelog.ImageSizeValidator = usagelogDescImageSize.Validators[0].(func(string) error)
|
||||||
|
// usagelogDescMediaType is the schema descriptor for media_type field.
|
||||||
|
usagelogDescMediaType := usagelogFields[30].Descriptor()
|
||||||
|
// usagelog.MediaTypeValidator is a validator for the "media_type" field. It is called by the builders before save.
|
||||||
|
usagelog.MediaTypeValidator = usagelogDescMediaType.Validators[0].(func(string) error)
|
||||||
|
// usagelogDescCacheTTLOverridden is the schema descriptor for cache_ttl_overridden field.
|
||||||
|
usagelogDescCacheTTLOverridden := usagelogFields[31].Descriptor()
|
||||||
|
// usagelog.DefaultCacheTTLOverridden holds the default value on creation for the cache_ttl_overridden field.
|
||||||
|
usagelog.DefaultCacheTTLOverridden = usagelogDescCacheTTLOverridden.Default.(bool)
|
||||||
// usagelogDescCreatedAt is the schema descriptor for created_at field.
|
// usagelogDescCreatedAt is the schema descriptor for created_at field.
|
||||||
usagelogDescCreatedAt := usagelogFields[29].Descriptor()
|
usagelogDescCreatedAt := usagelogFields[32].Descriptor()
|
||||||
// usagelog.DefaultCreatedAt holds the default value on creation for the created_at field.
|
// usagelog.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
usagelog.DefaultCreatedAt = usagelogDescCreatedAt.Default.(func() time.Time)
|
usagelog.DefaultCreatedAt = usagelogDescCreatedAt.Default.(func() time.Time)
|
||||||
userMixin := schema.User{}.Mixin()
|
userMixin := schema.User{}.Mixin()
|
||||||
@@ -864,6 +1002,14 @@ func init() {
|
|||||||
userDescTotpEnabled := userFields[9].Descriptor()
|
userDescTotpEnabled := userFields[9].Descriptor()
|
||||||
// user.DefaultTotpEnabled holds the default value on creation for the totp_enabled field.
|
// user.DefaultTotpEnabled holds the default value on creation for the totp_enabled field.
|
||||||
user.DefaultTotpEnabled = userDescTotpEnabled.Default.(bool)
|
user.DefaultTotpEnabled = userDescTotpEnabled.Default.(bool)
|
||||||
|
// userDescSoraStorageQuotaBytes is the schema descriptor for sora_storage_quota_bytes field.
|
||||||
|
userDescSoraStorageQuotaBytes := userFields[11].Descriptor()
|
||||||
|
// user.DefaultSoraStorageQuotaBytes holds the default value on creation for the sora_storage_quota_bytes field.
|
||||||
|
user.DefaultSoraStorageQuotaBytes = userDescSoraStorageQuotaBytes.Default.(int64)
|
||||||
|
// userDescSoraStorageUsedBytes is the schema descriptor for sora_storage_used_bytes field.
|
||||||
|
userDescSoraStorageUsedBytes := userFields[12].Descriptor()
|
||||||
|
// user.DefaultSoraStorageUsedBytes holds the default value on creation for the sora_storage_used_bytes field.
|
||||||
|
user.DefaultSoraStorageUsedBytes = userDescSoraStorageUsedBytes.Default.(int64)
|
||||||
userallowedgroupFields := schema.UserAllowedGroup{}.Fields()
|
userallowedgroupFields := schema.UserAllowedGroup{}.Fields()
|
||||||
_ = userallowedgroupFields
|
_ = userallowedgroupFields
|
||||||
// userallowedgroupDescCreatedAt is the schema descriptor for created_at field.
|
// userallowedgroupDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
|||||||
@@ -97,6 +97,8 @@ func (Account) Fields() []ent.Field {
|
|||||||
field.Int("concurrency").
|
field.Int("concurrency").
|
||||||
Default(3),
|
Default(3),
|
||||||
|
|
||||||
|
field.Int("load_factor").Optional().Nillable(),
|
||||||
|
|
||||||
// priority: 账户优先级,数值越小优先级越高
|
// priority: 账户优先级,数值越小优先级越高
|
||||||
// 调度器会优先使用高优先级的账户
|
// 调度器会优先使用高优先级的账户
|
||||||
field.Int("priority").
|
field.Int("priority").
|
||||||
@@ -164,6 +166,19 @@ func (Account) Fields() []ent.Field {
|
|||||||
Nillable().
|
Nillable().
|
||||||
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
|
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
|
||||||
|
|
||||||
|
// temp_unschedulable_until: 临时不可调度状态解除时间
|
||||||
|
// 当命中临时不可调度规则时设置,在此时间前调度器应跳过该账号
|
||||||
|
field.Time("temp_unschedulable_until").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
|
||||||
|
|
||||||
|
// temp_unschedulable_reason: 临时不可调度原因,便于排障审计
|
||||||
|
field.String("temp_unschedulable_reason").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "text"}),
|
||||||
|
|
||||||
// session_window_*: 会话窗口相关字段
|
// session_window_*: 会话窗口相关字段
|
||||||
// 用于管理某些需要会话时间窗口的 API(如 Claude Pro)
|
// 用于管理某些需要会话时间窗口的 API(如 Claude Pro)
|
||||||
field.Time("session_window_start").
|
field.Time("session_window_start").
|
||||||
@@ -213,6 +228,9 @@ func (Account) Indexes() []ent.Index {
|
|||||||
index.Fields("rate_limited_at"), // 筛选速率限制账户
|
index.Fields("rate_limited_at"), // 筛选速率限制账户
|
||||||
index.Fields("rate_limit_reset_at"), // 筛选速率限制解除时间
|
index.Fields("rate_limit_reset_at"), // 筛选速率限制解除时间
|
||||||
index.Fields("overload_until"), // 筛选过载账户
|
index.Fields("overload_until"), // 筛选过载账户
|
||||||
index.Fields("deleted_at"), // 软删除查询优化
|
// 调度热路径复合索引(线上由 SQL 迁移创建部分索引,schema 仅用于模型可读性对齐)
|
||||||
|
index.Fields("platform", "priority"),
|
||||||
|
index.Fields("priority", "status"),
|
||||||
|
index.Fields("deleted_at"), // 软删除查询优化
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -41,6 +41,10 @@ func (Announcement) Fields() []ent.Field {
|
|||||||
MaxLen(20).
|
MaxLen(20).
|
||||||
Default(domain.AnnouncementStatusDraft).
|
Default(domain.AnnouncementStatusDraft).
|
||||||
Comment("状态: draft, active, archived"),
|
Comment("状态: draft, active, archived"),
|
||||||
|
field.String("notify_mode").
|
||||||
|
MaxLen(20).
|
||||||
|
Default(domain.AnnouncementNotifyModeSilent).
|
||||||
|
Comment("通知模式: silent(仅铃铛), popup(弹窗提醒)"),
|
||||||
field.JSON("targeting", domain.AnnouncementTargeting{}).
|
field.JSON("targeting", domain.AnnouncementTargeting{}).
|
||||||
Optional().
|
Optional().
|
||||||
SchemaType(map[string]string{dialect.Postgres: "jsonb"}).
|
SchemaType(map[string]string{dialect.Postgres: "jsonb"}).
|
||||||
|
|||||||
@@ -47,6 +47,10 @@ func (APIKey) Fields() []ent.Field {
|
|||||||
field.String("status").
|
field.String("status").
|
||||||
MaxLen(20).
|
MaxLen(20).
|
||||||
Default(domain.StatusActive),
|
Default(domain.StatusActive),
|
||||||
|
field.Time("last_used_at").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
Comment("Last usage time of this API key"),
|
||||||
field.JSON("ip_whitelist", []string{}).
|
field.JSON("ip_whitelist", []string{}).
|
||||||
Optional().
|
Optional().
|
||||||
Comment("Allowed IPs/CIDRs, e.g. [\"192.168.1.100\", \"10.0.0.0/8\"]"),
|
Comment("Allowed IPs/CIDRs, e.g. [\"192.168.1.100\", \"10.0.0.0/8\"]"),
|
||||||
@@ -70,6 +74,47 @@ func (APIKey) Fields() []ent.Field {
|
|||||||
Optional().
|
Optional().
|
||||||
Nillable().
|
Nillable().
|
||||||
Comment("Expiration time for this API key (null = never expires)"),
|
Comment("Expiration time for this API key (null = never expires)"),
|
||||||
|
|
||||||
|
// ========== Rate limit fields ==========
|
||||||
|
// Rate limit configuration (0 = unlimited)
|
||||||
|
field.Float("rate_limit_5h").
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||||
|
Default(0).
|
||||||
|
Comment("Rate limit in USD per 5 hours (0 = unlimited)"),
|
||||||
|
field.Float("rate_limit_1d").
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||||
|
Default(0).
|
||||||
|
Comment("Rate limit in USD per day (0 = unlimited)"),
|
||||||
|
field.Float("rate_limit_7d").
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||||
|
Default(0).
|
||||||
|
Comment("Rate limit in USD per 7 days (0 = unlimited)"),
|
||||||
|
// Rate limit usage tracking
|
||||||
|
field.Float("usage_5h").
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||||
|
Default(0).
|
||||||
|
Comment("Used amount in USD for the current 5h window"),
|
||||||
|
field.Float("usage_1d").
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||||
|
Default(0).
|
||||||
|
Comment("Used amount in USD for the current 1d window"),
|
||||||
|
field.Float("usage_7d").
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||||
|
Default(0).
|
||||||
|
Comment("Used amount in USD for the current 7d window"),
|
||||||
|
// Window start times
|
||||||
|
field.Time("window_5h_start").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
Comment("Start time of the current 5h rate limit window"),
|
||||||
|
field.Time("window_1d_start").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
Comment("Start time of the current 1d rate limit window"),
|
||||||
|
field.Time("window_7d_start").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
Comment("Start time of the current 7d rate limit window"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -95,6 +140,7 @@ func (APIKey) Indexes() []ent.Index {
|
|||||||
index.Fields("group_id"),
|
index.Fields("group_id"),
|
||||||
index.Fields("status"),
|
index.Fields("status"),
|
||||||
index.Fields("deleted_at"),
|
index.Fields("deleted_at"),
|
||||||
|
index.Fields("last_used_at"),
|
||||||
// Index for quota queries
|
// Index for quota queries
|
||||||
index.Fields("quota", "quota_used"),
|
index.Fields("quota", "quota_used"),
|
||||||
index.Fields("expires_at"),
|
index.Fields("expires_at"),
|
||||||
|
|||||||
@@ -105,6 +105,12 @@ func (ErrorPassthroughRule) Fields() []ent.Field {
|
|||||||
Optional().
|
Optional().
|
||||||
Nillable(),
|
Nillable(),
|
||||||
|
|
||||||
|
// skip_monitoring: 是否跳过运维监控记录
|
||||||
|
// true: 匹配此规则的错误不会被记录到 ops_error_logs
|
||||||
|
// false: 正常记录到运维监控(默认行为)
|
||||||
|
field.Bool("skip_monitoring").
|
||||||
|
Default(false),
|
||||||
|
|
||||||
// description: 规则描述,用于说明规则的用途
|
// description: 规则描述,用于说明规则的用途
|
||||||
field.Text("description").
|
field.Text("description").
|
||||||
Optional().
|
Optional().
|
||||||
|
|||||||
@@ -87,6 +87,28 @@ func (Group) Fields() []ent.Field {
|
|||||||
Nillable().
|
Nillable().
|
||||||
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
|
||||||
|
|
||||||
|
// Sora 按次计费配置(阶段 1)
|
||||||
|
field.Float("sora_image_price_360").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
|
||||||
|
field.Float("sora_image_price_540").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
|
||||||
|
field.Float("sora_video_price_per_request").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
|
||||||
|
field.Float("sora_video_price_per_request_hd").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
|
||||||
|
|
||||||
|
// Sora 存储配额
|
||||||
|
field.Int64("sora_storage_quota_bytes").
|
||||||
|
Default(0),
|
||||||
|
|
||||||
// Claude Code 客户端限制 (added by migration 029)
|
// Claude Code 客户端限制 (added by migration 029)
|
||||||
field.Bool("claude_code_only").
|
field.Bool("claude_code_only").
|
||||||
Default(false).
|
Default(false).
|
||||||
@@ -121,6 +143,20 @@ func (Group) Fields() []ent.Field {
|
|||||||
Default([]string{"claude", "gemini_text", "gemini_image"}).
|
Default([]string{"claude", "gemini_text", "gemini_image"}).
|
||||||
SchemaType(map[string]string{dialect.Postgres: "jsonb"}).
|
SchemaType(map[string]string{dialect.Postgres: "jsonb"}).
|
||||||
Comment("支持的模型系列:claude, gemini_text, gemini_image"),
|
Comment("支持的模型系列:claude, gemini_text, gemini_image"),
|
||||||
|
|
||||||
|
// 分组排序 (added by migration 052)
|
||||||
|
field.Int("sort_order").
|
||||||
|
Default(0).
|
||||||
|
Comment("分组显示排序,数值越小越靠前"),
|
||||||
|
|
||||||
|
// OpenAI Messages 调度配置 (added by migration 069)
|
||||||
|
field.Bool("allow_messages_dispatch").
|
||||||
|
Default(false).
|
||||||
|
Comment("是否允许 /v1/messages 调度到此 OpenAI 分组"),
|
||||||
|
field.String("default_mapped_model").
|
||||||
|
MaxLen(100).
|
||||||
|
Default("").
|
||||||
|
Comment("默认映射模型 ID,当账号级映射找不到时使用此值"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -149,5 +185,6 @@ func (Group) Indexes() []ent.Index {
|
|||||||
index.Fields("subscription_type"),
|
index.Fields("subscription_type"),
|
||||||
index.Fields("is_exclusive"),
|
index.Fields("is_exclusive"),
|
||||||
index.Fields("deleted_at"),
|
index.Fields("deleted_at"),
|
||||||
|
index.Fields("sort_order"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
50
backend/ent/schema/idempotency_record.go
Normal file
50
backend/ent/schema/idempotency_record.go
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/schema"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/index"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IdempotencyRecord 幂等请求记录表。
|
||||||
|
type IdempotencyRecord struct {
|
||||||
|
ent.Schema
|
||||||
|
}
|
||||||
|
|
||||||
|
func (IdempotencyRecord) Annotations() []schema.Annotation {
|
||||||
|
return []schema.Annotation{
|
||||||
|
entsql.Annotation{Table: "idempotency_records"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (IdempotencyRecord) Mixin() []ent.Mixin {
|
||||||
|
return []ent.Mixin{
|
||||||
|
mixins.TimeMixin{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (IdempotencyRecord) Fields() []ent.Field {
|
||||||
|
return []ent.Field{
|
||||||
|
field.String("scope").MaxLen(128),
|
||||||
|
field.String("idempotency_key_hash").MaxLen(64),
|
||||||
|
field.String("request_fingerprint").MaxLen(64),
|
||||||
|
field.String("status").MaxLen(32),
|
||||||
|
field.Int("response_status").Optional().Nillable(),
|
||||||
|
field.String("response_body").Optional().Nillable(),
|
||||||
|
field.String("error_reason").MaxLen(128).Optional().Nillable(),
|
||||||
|
field.Time("locked_until").Optional().Nillable(),
|
||||||
|
field.Time("expires_at"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (IdempotencyRecord) Indexes() []ent.Index {
|
||||||
|
return []ent.Index{
|
||||||
|
index.Fields("scope", "idempotency_key_hash").Unique(),
|
||||||
|
index.Fields("expires_at"),
|
||||||
|
index.Fields("status", "locked_until"),
|
||||||
|
}
|
||||||
|
}
|
||||||
42
backend/ent/schema/security_secret.go
Normal file
42
backend/ent/schema/security_secret.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/schema"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SecuritySecret 存储系统级安全密钥(如 JWT 签名密钥、TOTP 加密密钥)。
|
||||||
|
type SecuritySecret struct {
|
||||||
|
ent.Schema
|
||||||
|
}
|
||||||
|
|
||||||
|
func (SecuritySecret) Annotations() []schema.Annotation {
|
||||||
|
return []schema.Annotation{
|
||||||
|
entsql.Annotation{Table: "security_secrets"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (SecuritySecret) Mixin() []ent.Mixin {
|
||||||
|
return []ent.Mixin{
|
||||||
|
mixins.TimeMixin{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (SecuritySecret) Fields() []ent.Field {
|
||||||
|
return []ent.Field{
|
||||||
|
field.String("key").
|
||||||
|
MaxLen(100).
|
||||||
|
NotEmpty().
|
||||||
|
Unique(),
|
||||||
|
field.String("value").
|
||||||
|
NotEmpty().
|
||||||
|
SchemaType(map[string]string{
|
||||||
|
dialect.Postgres: "text",
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -41,6 +41,12 @@ func (UsageLog) Fields() []ent.Field {
|
|||||||
field.String("model").
|
field.String("model").
|
||||||
MaxLen(100).
|
MaxLen(100).
|
||||||
NotEmpty(),
|
NotEmpty(),
|
||||||
|
// UpstreamModel stores the actual upstream model name when model mapping
|
||||||
|
// is applied. NULL means no mapping — the requested model was used as-is.
|
||||||
|
field.String("upstream_model").
|
||||||
|
MaxLen(100).
|
||||||
|
Optional().
|
||||||
|
Nillable(),
|
||||||
field.Int64("group_id").
|
field.Int64("group_id").
|
||||||
Optional().
|
Optional().
|
||||||
Nillable(),
|
Nillable(),
|
||||||
@@ -118,6 +124,15 @@ func (UsageLog) Fields() []ent.Field {
|
|||||||
MaxLen(10).
|
MaxLen(10).
|
||||||
Optional().
|
Optional().
|
||||||
Nillable(),
|
Nillable(),
|
||||||
|
// 媒体类型字段(sora 使用)
|
||||||
|
field.String("media_type").
|
||||||
|
MaxLen(16).
|
||||||
|
Optional().
|
||||||
|
Nillable(),
|
||||||
|
|
||||||
|
// Cache TTL Override 标记(管理员强制替换了缓存 TTL 计费)
|
||||||
|
field.Bool("cache_ttl_overridden").
|
||||||
|
Default(false),
|
||||||
|
|
||||||
// 时间戳(只有 created_at,日志不可修改)
|
// 时间戳(只有 created_at,日志不可修改)
|
||||||
field.Time("created_at").
|
field.Time("created_at").
|
||||||
@@ -170,5 +185,7 @@ func (UsageLog) Indexes() []ent.Index {
|
|||||||
// 复合索引用于时间范围查询
|
// 复合索引用于时间范围查询
|
||||||
index.Fields("user_id", "created_at"),
|
index.Fields("user_id", "created_at"),
|
||||||
index.Fields("api_key_id", "created_at"),
|
index.Fields("api_key_id", "created_at"),
|
||||||
|
// 分组维度时间范围查询(线上由 SQL 迁移创建 group_id IS NOT NULL 的部分索引)
|
||||||
|
index.Fields("group_id", "created_at"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -72,6 +72,12 @@ func (User) Fields() []ent.Field {
|
|||||||
field.Time("totp_enabled_at").
|
field.Time("totp_enabled_at").
|
||||||
Optional().
|
Optional().
|
||||||
Nillable(),
|
Nillable(),
|
||||||
|
|
||||||
|
// Sora 存储配额
|
||||||
|
field.Int64("sora_storage_quota_bytes").
|
||||||
|
Default(0),
|
||||||
|
field.Int64("sora_storage_used_bytes").
|
||||||
|
Default(0),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -108,6 +108,8 @@ func (UserSubscription) Indexes() []ent.Index {
|
|||||||
index.Fields("group_id"),
|
index.Fields("group_id"),
|
||||||
index.Fields("status"),
|
index.Fields("status"),
|
||||||
index.Fields("expires_at"),
|
index.Fields("expires_at"),
|
||||||
|
// 活跃订阅查询复合索引(线上由 SQL 迁移创建部分索引,schema 仅用于模型可读性对齐)
|
||||||
|
index.Fields("user_id", "status", "expires_at"),
|
||||||
index.Fields("assigned_by"),
|
index.Fields("assigned_by"),
|
||||||
// 唯一约束通过部分索引实现(WHERE deleted_at IS NULL),支持软删除后重新订阅
|
// 唯一约束通过部分索引实现(WHERE deleted_at IS NULL),支持软删除后重新订阅
|
||||||
// 见迁移文件 016_soft_delete_partial_unique_indexes.sql
|
// 见迁移文件 016_soft_delete_partial_unique_indexes.sql
|
||||||
|
|||||||
139
backend/ent/securitysecret.go
Normal file
139
backend/ent/securitysecret.go
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SecuritySecret is the model entity for the SecuritySecret schema.
|
||||||
|
type SecuritySecret struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// Key holds the value of the "key" field.
|
||||||
|
Key string `json:"key,omitempty"`
|
||||||
|
// Value holds the value of the "value" field.
|
||||||
|
Value string `json:"value,omitempty"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*SecuritySecret) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case securitysecret.FieldID:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case securitysecret.FieldKey, securitysecret.FieldValue:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case securitysecret.FieldCreatedAt, securitysecret.FieldUpdatedAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the SecuritySecret fields.
|
||||||
|
func (_m *SecuritySecret) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case securitysecret.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case securitysecret.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case securitysecret.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
case securitysecret.FieldKey:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field key", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Key = value.String
|
||||||
|
}
|
||||||
|
case securitysecret.FieldValue:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field value", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Value = value.String
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetValue returns the ent.Value that was dynamically selected and assigned to the SecuritySecret.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *SecuritySecret) GetValue(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this SecuritySecret.
|
||||||
|
// Note that you need to call SecuritySecret.Unwrap() before calling this method if this SecuritySecret
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *SecuritySecret) Update() *SecuritySecretUpdateOne {
|
||||||
|
return NewSecuritySecretClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the SecuritySecret entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *SecuritySecret) Unwrap() *SecuritySecret {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: SecuritySecret is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *SecuritySecret) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("SecuritySecret(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("key=")
|
||||||
|
builder.WriteString(_m.Key)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("value=")
|
||||||
|
builder.WriteString(_m.Value)
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecuritySecrets is a parsable slice of SecuritySecret.
|
||||||
|
type SecuritySecrets []*SecuritySecret
|
||||||
86
backend/ent/securitysecret/securitysecret.go
Normal file
86
backend/ent/securitysecret/securitysecret.go
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package securitysecret
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the securitysecret type in the database.
	Label = "security_secret"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldKey holds the string denoting the key field in the database.
	FieldKey = "key"
	// FieldValue holds the string denoting the value field in the database.
	FieldValue = "value"
	// Table holds the table name of the securitysecret in the database.
	Table = "security_secrets"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for securitysecret fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldUpdatedAt,
|
||||||
|
FieldKey,
|
||||||
|
FieldValue,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// KeyValidator is a validator for the "key" field. It is called by the builders before save.
	KeyValidator func(string) error
	// ValueValidator is a validator for the "value" field. It is called by the builders before save.
	ValueValidator func(string) error
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the SecuritySecret queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByKey orders the results by the key field.
|
||||||
|
func ByKey(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldKey, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByValue orders the results by the value field.
|
||||||
|
func ByValue(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldValue, opts...).ToFunc()
|
||||||
|
}
|
||||||
300
backend/ent/securitysecret/where.go
Normal file
300
backend/ent/securitysecret/where.go
Normal file
@@ -0,0 +1,300 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package securitysecret
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||||
|
func UpdatedAt(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key applies equality check predicate on the "key" field. It's identical to KeyEQ.
|
||||||
|
func Key(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value applies equality check predicate on the "value" field. It's identical to ValueEQ.
|
||||||
|
func Value(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyEQ applies the EQ predicate on the "key" field.
|
||||||
|
func KeyEQ(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyNEQ applies the NEQ predicate on the "key" field.
|
||||||
|
func KeyNEQ(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNEQ(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyIn applies the In predicate on the "key" field.
|
||||||
|
func KeyIn(vs ...string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldIn(FieldKey, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyNotIn applies the NotIn predicate on the "key" field.
|
||||||
|
func KeyNotIn(vs ...string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNotIn(FieldKey, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyGT applies the GT predicate on the "key" field.
|
||||||
|
func KeyGT(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGT(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyGTE applies the GTE predicate on the "key" field.
|
||||||
|
func KeyGTE(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGTE(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyLT applies the LT predicate on the "key" field.
|
||||||
|
func KeyLT(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLT(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyLTE applies the LTE predicate on the "key" field.
|
||||||
|
func KeyLTE(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLTE(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyContains applies the Contains predicate on the "key" field.
|
||||||
|
func KeyContains(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldContains(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyHasPrefix applies the HasPrefix predicate on the "key" field.
|
||||||
|
func KeyHasPrefix(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldHasPrefix(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyHasSuffix applies the HasSuffix predicate on the "key" field.
|
||||||
|
func KeyHasSuffix(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldHasSuffix(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyEqualFold applies the EqualFold predicate on the "key" field.
|
||||||
|
func KeyEqualFold(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEqualFold(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyContainsFold applies the ContainsFold predicate on the "key" field.
|
||||||
|
func KeyContainsFold(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldContainsFold(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueEQ applies the EQ predicate on the "value" field.
|
||||||
|
func ValueEQ(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEQ(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueNEQ applies the NEQ predicate on the "value" field.
|
||||||
|
func ValueNEQ(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNEQ(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueIn applies the In predicate on the "value" field.
|
||||||
|
func ValueIn(vs ...string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldIn(FieldValue, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueNotIn applies the NotIn predicate on the "value" field.
|
||||||
|
func ValueNotIn(vs ...string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldNotIn(FieldValue, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueGT applies the GT predicate on the "value" field.
|
||||||
|
func ValueGT(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGT(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueGTE applies the GTE predicate on the "value" field.
|
||||||
|
func ValueGTE(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldGTE(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueLT applies the LT predicate on the "value" field.
|
||||||
|
func ValueLT(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLT(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueLTE applies the LTE predicate on the "value" field.
|
||||||
|
func ValueLTE(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldLTE(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueContains applies the Contains predicate on the "value" field.
|
||||||
|
func ValueContains(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldContains(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueHasPrefix applies the HasPrefix predicate on the "value" field.
|
||||||
|
func ValueHasPrefix(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldHasPrefix(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueHasSuffix applies the HasSuffix predicate on the "value" field.
|
||||||
|
func ValueHasSuffix(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldHasSuffix(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueEqualFold applies the EqualFold predicate on the "value" field.
|
||||||
|
func ValueEqualFold(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldEqualFold(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueContainsFold applies the ContainsFold predicate on the "value" field.
|
||||||
|
func ValueContainsFold(v string) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.FieldContainsFold(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.SecuritySecret) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.SecuritySecret) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.SecuritySecret) predicate.SecuritySecret {
|
||||||
|
return predicate.SecuritySecret(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
626
backend/ent/securitysecret_create.go
Normal file
626
backend/ent/securitysecret_create.go
Normal file
@@ -0,0 +1,626 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SecuritySecretCreate is the builder for creating a SecuritySecret entity.
|
||||||
|
type SecuritySecretCreate struct {
|
||||||
|
config
|
||||||
|
mutation *SecuritySecretMutation
|
||||||
|
hooks []Hook
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCreatedAt sets the "created_at" field.
|
||||||
|
func (_c *SecuritySecretCreate) SetCreatedAt(v time.Time) *SecuritySecretCreate {
|
||||||
|
_c.mutation.SetCreatedAt(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||||
|
func (_c *SecuritySecretCreate) SetNillableCreatedAt(v *time.Time) *SecuritySecretCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetCreatedAt(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_c *SecuritySecretCreate) SetUpdatedAt(v time.Time) *SecuritySecretCreate {
|
||||||
|
_c.mutation.SetUpdatedAt(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||||
|
func (_c *SecuritySecretCreate) SetNillableUpdatedAt(v *time.Time) *SecuritySecretCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetUpdatedAt(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKey sets the "key" field.
|
||||||
|
func (_c *SecuritySecretCreate) SetKey(v string) *SecuritySecretCreate {
|
||||||
|
_c.mutation.SetKey(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue sets the "value" field.
|
||||||
|
func (_c *SecuritySecretCreate) SetValue(v string) *SecuritySecretCreate {
|
||||||
|
_c.mutation.SetValue(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the SecuritySecretMutation object of the builder.
|
||||||
|
func (_c *SecuritySecretCreate) Mutation() *SecuritySecretMutation {
|
||||||
|
return _c.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the SecuritySecret in the database.
|
||||||
|
func (_c *SecuritySecretCreate) Save(ctx context.Context) (*SecuritySecret, error) {
|
||||||
|
_c.defaults()
|
||||||
|
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX calls Save and panics if Save returns an error.
|
||||||
|
func (_c *SecuritySecretCreate) SaveX(ctx context.Context) *SecuritySecret {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *SecuritySecretCreate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *SecuritySecretCreate) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_c *SecuritySecretCreate) defaults() {
|
||||||
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
|
v := securitysecret.DefaultCreatedAt()
|
||||||
|
_c.mutation.SetCreatedAt(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||||
|
v := securitysecret.DefaultUpdatedAt()
|
||||||
|
_c.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_c *SecuritySecretCreate) check() error {
|
||||||
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
|
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "SecuritySecret.created_at"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||||
|
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "SecuritySecret.updated_at"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Key(); !ok {
|
||||||
|
return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "SecuritySecret.key"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.Key(); ok {
|
||||||
|
if err := securitysecret.KeyValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "SecuritySecret.key": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Value(); !ok {
|
||||||
|
return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "SecuritySecret.value"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.Value(); ok {
|
||||||
|
if err := securitysecret.ValueValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "SecuritySecret.value": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_c *SecuritySecretCreate) sqlSave(ctx context.Context) (*SecuritySecret, error) {
|
||||||
|
if err := _c.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_node, _spec := _c.createSpec()
|
||||||
|
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
id := _spec.ID.Value.(int64)
|
||||||
|
_node.ID = int64(id)
|
||||||
|
_c.mutation.id = &_node.ID
|
||||||
|
_c.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_c *SecuritySecretCreate) createSpec() (*SecuritySecret, *sqlgraph.CreateSpec) {
|
||||||
|
var (
|
||||||
|
_node = &SecuritySecret{config: _c.config}
|
||||||
|
_spec = sqlgraph.NewCreateSpec(securitysecret.Table, sqlgraph.NewFieldSpec(securitysecret.FieldID, field.TypeInt64))
|
||||||
|
)
|
||||||
|
_spec.OnConflict = _c.conflict
|
||||||
|
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldCreatedAt, field.TypeTime, value)
|
||||||
|
_node.CreatedAt = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
_node.UpdatedAt = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Key(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldKey, field.TypeString, value)
|
||||||
|
_node.Key = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Value(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldValue, field.TypeString, value)
|
||||||
|
_node.Value = value
|
||||||
|
}
|
||||||
|
return _node, _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Create().
|
||||||
|
// SetCreatedAt(v).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.SecuritySecretUpsert) {
|
||||||
|
// SetCreatedAt(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *SecuritySecretCreate) OnConflict(opts ...sql.ConflictOption) *SecuritySecretUpsertOne {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &SecuritySecretUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *SecuritySecretCreate) OnConflictColumns(columns ...string) *SecuritySecretUpsertOne {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &SecuritySecretUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// SecuritySecretUpsertOne is the builder for "upsert"-ing
|
||||||
|
// one SecuritySecret node.
|
||||||
|
SecuritySecretUpsertOne struct {
|
||||||
|
create *SecuritySecretCreate
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecuritySecretUpsert is the "OnConflict" setter.
|
||||||
|
SecuritySecretUpsert struct {
|
||||||
|
*sql.UpdateSet
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (u *SecuritySecretUpsert) SetUpdatedAt(v time.Time) *SecuritySecretUpsert {
|
||||||
|
u.Set(securitysecret.FieldUpdatedAt, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsert) UpdateUpdatedAt() *SecuritySecretUpsert {
|
||||||
|
u.SetExcluded(securitysecret.FieldUpdatedAt)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKey sets the "key" field.
|
||||||
|
func (u *SecuritySecretUpsert) SetKey(v string) *SecuritySecretUpsert {
|
||||||
|
u.Set(securitysecret.FieldKey, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateKey sets the "key" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsert) UpdateKey() *SecuritySecretUpsert {
|
||||||
|
u.SetExcluded(securitysecret.FieldKey)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue sets the "value" field.
|
||||||
|
func (u *SecuritySecretUpsert) SetValue(v string) *SecuritySecretUpsert {
|
||||||
|
u.Set(securitysecret.FieldValue, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateValue sets the "value" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsert) UpdateValue() *SecuritySecretUpsert {
|
||||||
|
u.SetExcluded(securitysecret.FieldValue)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *SecuritySecretUpsertOne) UpdateNewValues() *SecuritySecretUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||||
|
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||||
|
s.SetIgnore(securitysecret.FieldCreatedAt)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *SecuritySecretUpsertOne) Ignore() *SecuritySecretUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *SecuritySecretUpsertOne) DoNothing() *SecuritySecretUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the SecuritySecretCreate.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *SecuritySecretUpsertOne) Update(set func(*SecuritySecretUpsert)) *SecuritySecretUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&SecuritySecretUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (u *SecuritySecretUpsertOne) SetUpdatedAt(v time.Time) *SecuritySecretUpsertOne {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.SetUpdatedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsertOne) UpdateUpdatedAt() *SecuritySecretUpsertOne {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.UpdateUpdatedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKey sets the "key" field.
|
||||||
|
func (u *SecuritySecretUpsertOne) SetKey(v string) *SecuritySecretUpsertOne {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.SetKey(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateKey sets the "key" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsertOne) UpdateKey() *SecuritySecretUpsertOne {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.UpdateKey()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue sets the "value" field.
|
||||||
|
func (u *SecuritySecretUpsertOne) SetValue(v string) *SecuritySecretUpsertOne {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.SetValue(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateValue sets the "value" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsertOne) UpdateValue() *SecuritySecretUpsertOne {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.UpdateValue()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *SecuritySecretUpsertOne) Exec(ctx context.Context) error {
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for SecuritySecretCreate.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *SecuritySecretUpsertOne) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the UPSERT query and returns the inserted/updated ID.
|
||||||
|
func (u *SecuritySecretUpsertOne) ID(ctx context.Context) (id int64, err error) {
|
||||||
|
node, err := u.create.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return id, err
|
||||||
|
}
|
||||||
|
return node.ID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDX is like ID, but panics if an error occurs.
|
||||||
|
func (u *SecuritySecretUpsertOne) IDX(ctx context.Context) int64 {
|
||||||
|
id, err := u.ID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecuritySecretCreateBulk is the builder for creating many SecuritySecret entities in bulk.
|
||||||
|
type SecuritySecretCreateBulk struct {
|
||||||
|
config
|
||||||
|
err error
|
||||||
|
builders []*SecuritySecretCreate
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the SecuritySecret entities in the database.
|
||||||
|
func (_c *SecuritySecretCreateBulk) Save(ctx context.Context) ([]*SecuritySecret, error) {
|
||||||
|
if _c.err != nil {
|
||||||
|
return nil, _c.err
|
||||||
|
}
|
||||||
|
specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
|
||||||
|
nodes := make([]*SecuritySecret, len(_c.builders))
|
||||||
|
mutators := make([]Mutator, len(_c.builders))
|
||||||
|
for i := range _c.builders {
|
||||||
|
func(i int, root context.Context) {
|
||||||
|
builder := _c.builders[i]
|
||||||
|
builder.defaults()
|
||||||
|
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||||
|
mutation, ok := m.(*SecuritySecretMutation)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||||
|
}
|
||||||
|
if err := builder.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
builder.mutation = mutation
|
||||||
|
var err error
|
||||||
|
nodes[i], specs[i] = builder.createSpec()
|
||||||
|
if i < len(mutators)-1 {
|
||||||
|
_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
|
||||||
|
} else {
|
||||||
|
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||||
|
spec.OnConflict = _c.conflict
|
||||||
|
// Invoke the actual operation on the latest mutation in the chain.
|
||||||
|
if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mutation.id = &nodes[i].ID
|
||||||
|
if specs[i].ID.Value != nil {
|
||||||
|
id := specs[i].ID.Value.(int64)
|
||||||
|
nodes[i].ID = int64(id)
|
||||||
|
}
|
||||||
|
mutation.done = true
|
||||||
|
return nodes[i], nil
|
||||||
|
})
|
||||||
|
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||||
|
mut = builder.hooks[i](mut)
|
||||||
|
}
|
||||||
|
mutators[i] = mut
|
||||||
|
}(i, ctx)
|
||||||
|
}
|
||||||
|
if len(mutators) > 0 {
|
||||||
|
if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_c *SecuritySecretCreateBulk) SaveX(ctx context.Context) []*SecuritySecret {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *SecuritySecretCreateBulk) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *SecuritySecretCreateBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.CreateBulk(builders...).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.SecuritySecretUpsert) {
|
||||||
|
// SetCreatedAt(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *SecuritySecretCreateBulk) OnConflict(opts ...sql.ConflictOption) *SecuritySecretUpsertBulk {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &SecuritySecretUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *SecuritySecretCreateBulk) OnConflictColumns(columns ...string) *SecuritySecretUpsertBulk {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &SecuritySecretUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecuritySecretUpsertBulk is the builder for "upsert"-ing
|
||||||
|
// a bulk of SecuritySecret nodes.
|
||||||
|
type SecuritySecretUpsertBulk struct {
|
||||||
|
create *SecuritySecretCreateBulk
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that
|
||||||
|
// were set on create. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *SecuritySecretUpsertBulk) UpdateNewValues() *SecuritySecretUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||||
|
for _, b := range u.create.builders {
|
||||||
|
if _, exists := b.mutation.CreatedAt(); exists {
|
||||||
|
s.SetIgnore(securitysecret.FieldCreatedAt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *SecuritySecretUpsertBulk) Ignore() *SecuritySecretUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *SecuritySecretUpsertBulk) DoNothing() *SecuritySecretUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the SecuritySecretCreateBulk.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *SecuritySecretUpsertBulk) Update(set func(*SecuritySecretUpsert)) *SecuritySecretUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&SecuritySecretUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (u *SecuritySecretUpsertBulk) SetUpdatedAt(v time.Time) *SecuritySecretUpsertBulk {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.SetUpdatedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsertBulk) UpdateUpdatedAt() *SecuritySecretUpsertBulk {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.UpdateUpdatedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKey sets the "key" field.
|
||||||
|
func (u *SecuritySecretUpsertBulk) SetKey(v string) *SecuritySecretUpsertBulk {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.SetKey(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateKey sets the "key" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsertBulk) UpdateKey() *SecuritySecretUpsertBulk {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.UpdateKey()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue sets the "value" field.
|
||||||
|
func (u *SecuritySecretUpsertBulk) SetValue(v string) *SecuritySecretUpsertBulk {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.SetValue(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateValue sets the "value" field to the value that was provided on create.
|
||||||
|
func (u *SecuritySecretUpsertBulk) UpdateValue() *SecuritySecretUpsertBulk {
|
||||||
|
return u.Update(func(s *SecuritySecretUpsert) {
|
||||||
|
s.UpdateValue()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *SecuritySecretUpsertBulk) Exec(ctx context.Context) error {
|
||||||
|
if u.create.err != nil {
|
||||||
|
return u.create.err
|
||||||
|
}
|
||||||
|
for i, b := range u.create.builders {
|
||||||
|
if len(b.conflict) != 0 {
|
||||||
|
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the SecuritySecretCreateBulk instead", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for SecuritySecretCreateBulk.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *SecuritySecretUpsertBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
88
backend/ent/securitysecret_delete.go
Normal file
88
backend/ent/securitysecret_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SecuritySecretDelete is the builder for deleting a SecuritySecret entity.
|
||||||
|
type SecuritySecretDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *SecuritySecretMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the SecuritySecretDelete builder.
|
||||||
|
func (_d *SecuritySecretDelete) Where(ps ...predicate.SecuritySecret) *SecuritySecretDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *SecuritySecretDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *SecuritySecretDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *SecuritySecretDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(securitysecret.Table, sqlgraph.NewFieldSpec(securitysecret.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecuritySecretDeleteOne is the builder for deleting a single SecuritySecret entity.
|
||||||
|
type SecuritySecretDeleteOne struct {
|
||||||
|
_d *SecuritySecretDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the SecuritySecretDelete builder.
|
||||||
|
func (_d *SecuritySecretDeleteOne) Where(ps ...predicate.SecuritySecret) *SecuritySecretDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *SecuritySecretDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{securitysecret.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *SecuritySecretDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
564
backend/ent/securitysecret_query.go
Normal file
564
backend/ent/securitysecret_query.go
Normal file
@@ -0,0 +1,564 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SecuritySecretQuery is the builder for querying SecuritySecret entities.
|
||||||
|
type SecuritySecretQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []securitysecret.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.SecuritySecret
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the SecuritySecretQuery builder.
|
||||||
|
func (_q *SecuritySecretQuery) Where(ps ...predicate.SecuritySecret) *SecuritySecretQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *SecuritySecretQuery) Limit(limit int) *SecuritySecretQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *SecuritySecretQuery) Offset(offset int) *SecuritySecretQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *SecuritySecretQuery) Unique(unique bool) *SecuritySecretQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *SecuritySecretQuery) Order(o ...securitysecret.OrderOption) *SecuritySecretQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first SecuritySecret entity from the query.
|
||||||
|
// Returns a *NotFoundError when no SecuritySecret was found.
|
||||||
|
func (_q *SecuritySecretQuery) First(ctx context.Context) (*SecuritySecret, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{securitysecret.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *SecuritySecretQuery) FirstX(ctx context.Context) *SecuritySecret {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first SecuritySecret ID from the query.
|
||||||
|
// Returns a *NotFoundError when no SecuritySecret ID was found.
|
||||||
|
func (_q *SecuritySecretQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{securitysecret.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *SecuritySecretQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single SecuritySecret entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one SecuritySecret entity is found.
|
||||||
|
// Returns a *NotFoundError when no SecuritySecret entities are found.
|
||||||
|
func (_q *SecuritySecretQuery) Only(ctx context.Context) (*SecuritySecret, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{securitysecret.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{securitysecret.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *SecuritySecretQuery) OnlyX(ctx context.Context) *SecuritySecret {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only SecuritySecret ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one SecuritySecret ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *SecuritySecretQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{securitysecret.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{securitysecret.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *SecuritySecretQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of SecuritySecrets.
|
||||||
|
func (_q *SecuritySecretQuery) All(ctx context.Context) ([]*SecuritySecret, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*SecuritySecret, *SecuritySecretQuery]()
|
||||||
|
return withInterceptors[[]*SecuritySecret](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *SecuritySecretQuery) AllX(ctx context.Context) []*SecuritySecret {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of SecuritySecret IDs.
|
||||||
|
func (_q *SecuritySecretQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(securitysecret.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *SecuritySecretQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *SecuritySecretQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*SecuritySecretQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *SecuritySecretQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *SecuritySecretQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *SecuritySecretQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the SecuritySecretQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *SecuritySecretQuery) Clone() *SecuritySecretQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &SecuritySecretQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]securitysecret.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.SecuritySecret{}, _q.predicates...),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Query().
|
||||||
|
// GroupBy(securitysecret.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *SecuritySecretQuery) GroupBy(field string, fields ...string) *SecuritySecretGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &SecuritySecretGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = securitysecret.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.SecuritySecret.Query().
|
||||||
|
// Select(securitysecret.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *SecuritySecretQuery) Select(fields ...string) *SecuritySecretSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &SecuritySecretSelect{SecuritySecretQuery: _q}
|
||||||
|
sbuild.label = securitysecret.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a SecuritySecretSelect configured with the given aggregations.
|
||||||
|
func (_q *SecuritySecretQuery) Aggregate(fns ...AggregateFunc) *SecuritySecretSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *SecuritySecretQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !securitysecret.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *SecuritySecretQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*SecuritySecret, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*SecuritySecret{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*SecuritySecret).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &SecuritySecret{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *SecuritySecretQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *SecuritySecretQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(securitysecret.Table, securitysecret.Columns, sqlgraph.NewFieldSpec(securitysecret.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, securitysecret.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != securitysecret.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *SecuritySecretQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(securitysecret.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = securitysecret.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *SecuritySecretQuery) ForUpdate(opts ...sql.LockOption) *SecuritySecretQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *SecuritySecretQuery) ForShare(opts ...sql.LockOption) *SecuritySecretQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecuritySecretGroupBy is the group-by builder for SecuritySecret entities.
|
||||||
|
type SecuritySecretGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *SecuritySecretQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *SecuritySecretGroupBy) Aggregate(fns ...AggregateFunc) *SecuritySecretGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *SecuritySecretGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*SecuritySecretQuery, *SecuritySecretGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *SecuritySecretGroupBy) sqlScan(ctx context.Context, root *SecuritySecretQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecuritySecretSelect is the builder for selecting fields of SecuritySecret entities.
|
||||||
|
type SecuritySecretSelect struct {
|
||||||
|
*SecuritySecretQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *SecuritySecretSelect) Aggregate(fns ...AggregateFunc) *SecuritySecretSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *SecuritySecretSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*SecuritySecretQuery, *SecuritySecretSelect](ctx, _s.SecuritySecretQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *SecuritySecretSelect) sqlScan(ctx context.Context, root *SecuritySecretQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
316
backend/ent/securitysecret_update.go
Normal file
316
backend/ent/securitysecret_update.go
Normal file
@@ -0,0 +1,316 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SecuritySecretUpdate is the builder for updating SecuritySecret entities.
|
||||||
|
type SecuritySecretUpdate struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *SecuritySecretMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the SecuritySecretUpdate builder.
|
||||||
|
func (_u *SecuritySecretUpdate) Where(ps ...predicate.SecuritySecret) *SecuritySecretUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *SecuritySecretUpdate) SetUpdatedAt(v time.Time) *SecuritySecretUpdate {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKey sets the "key" field.
|
||||||
|
func (_u *SecuritySecretUpdate) SetKey(v string) *SecuritySecretUpdate {
|
||||||
|
_u.mutation.SetKey(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableKey sets the "key" field if the given value is not nil.
|
||||||
|
func (_u *SecuritySecretUpdate) SetNillableKey(v *string) *SecuritySecretUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetKey(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue sets the "value" field.
|
||||||
|
func (_u *SecuritySecretUpdate) SetValue(v string) *SecuritySecretUpdate {
|
||||||
|
_u.mutation.SetValue(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableValue sets the "value" field if the given value is not nil.
|
||||||
|
func (_u *SecuritySecretUpdate) SetNillableValue(v *string) *SecuritySecretUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetValue(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the SecuritySecretMutation object of the builder.
|
||||||
|
func (_u *SecuritySecretUpdate) Mutation() *SecuritySecretMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *SecuritySecretUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *SecuritySecretUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *SecuritySecretUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *SecuritySecretUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *SecuritySecretUpdate) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := securitysecret.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *SecuritySecretUpdate) check() error {
|
||||||
|
if v, ok := _u.mutation.Key(); ok {
|
||||||
|
if err := securitysecret.KeyValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "SecuritySecret.key": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Value(); ok {
|
||||||
|
if err := securitysecret.ValueValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "SecuritySecret.value": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *SecuritySecretUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(securitysecret.Table, securitysecret.Columns, sqlgraph.NewFieldSpec(securitysecret.FieldID, field.TypeInt64))
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Key(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldKey, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Value(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldValue, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{securitysecret.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecuritySecretUpdateOne is the builder for updating a single SecuritySecret entity.
|
||||||
|
type SecuritySecretUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *SecuritySecretMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *SecuritySecretUpdateOne) SetUpdatedAt(v time.Time) *SecuritySecretUpdateOne {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKey sets the "key" field.
|
||||||
|
func (_u *SecuritySecretUpdateOne) SetKey(v string) *SecuritySecretUpdateOne {
|
||||||
|
_u.mutation.SetKey(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableKey sets the "key" field if the given value is not nil.
|
||||||
|
func (_u *SecuritySecretUpdateOne) SetNillableKey(v *string) *SecuritySecretUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetKey(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue sets the "value" field.
|
||||||
|
func (_u *SecuritySecretUpdateOne) SetValue(v string) *SecuritySecretUpdateOne {
|
||||||
|
_u.mutation.SetValue(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableValue sets the "value" field if the given value is not nil.
|
||||||
|
func (_u *SecuritySecretUpdateOne) SetNillableValue(v *string) *SecuritySecretUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetValue(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the SecuritySecretMutation object of the builder.
|
||||||
|
func (_u *SecuritySecretUpdateOne) Mutation() *SecuritySecretMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the SecuritySecretUpdate builder.
|
||||||
|
func (_u *SecuritySecretUpdateOne) Where(ps ...predicate.SecuritySecret) *SecuritySecretUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *SecuritySecretUpdateOne) Select(field string, fields ...string) *SecuritySecretUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated SecuritySecret entity.
|
||||||
|
func (_u *SecuritySecretUpdateOne) Save(ctx context.Context) (*SecuritySecret, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *SecuritySecretUpdateOne) SaveX(ctx context.Context) *SecuritySecret {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *SecuritySecretUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *SecuritySecretUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *SecuritySecretUpdateOne) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := securitysecret.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *SecuritySecretUpdateOne) check() error {
|
||||||
|
if v, ok := _u.mutation.Key(); ok {
|
||||||
|
if err := securitysecret.KeyValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "SecuritySecret.key": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Value(); ok {
|
||||||
|
if err := securitysecret.ValueValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "SecuritySecret.value": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *SecuritySecretUpdateOne) sqlSave(ctx context.Context) (_node *SecuritySecret, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(securitysecret.Table, securitysecret.Columns, sqlgraph.NewFieldSpec(securitysecret.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "SecuritySecret.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, securitysecret.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !securitysecret.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != securitysecret.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Key(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldKey, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Value(); ok {
|
||||||
|
_spec.SetField(securitysecret.FieldValue, field.TypeString, value)
|
||||||
|
}
|
||||||
|
_node = &SecuritySecret{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{securitysecret.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
@@ -28,6 +28,8 @@ type Tx struct {
|
|||||||
ErrorPassthroughRule *ErrorPassthroughRuleClient
|
ErrorPassthroughRule *ErrorPassthroughRuleClient
|
||||||
// Group is the client for interacting with the Group builders.
|
// Group is the client for interacting with the Group builders.
|
||||||
Group *GroupClient
|
Group *GroupClient
|
||||||
|
// IdempotencyRecord is the client for interacting with the IdempotencyRecord builders.
|
||||||
|
IdempotencyRecord *IdempotencyRecordClient
|
||||||
// PromoCode is the client for interacting with the PromoCode builders.
|
// PromoCode is the client for interacting with the PromoCode builders.
|
||||||
PromoCode *PromoCodeClient
|
PromoCode *PromoCodeClient
|
||||||
// PromoCodeUsage is the client for interacting with the PromoCodeUsage builders.
|
// PromoCodeUsage is the client for interacting with the PromoCodeUsage builders.
|
||||||
@@ -36,6 +38,8 @@ type Tx struct {
|
|||||||
Proxy *ProxyClient
|
Proxy *ProxyClient
|
||||||
// RedeemCode is the client for interacting with the RedeemCode builders.
|
// RedeemCode is the client for interacting with the RedeemCode builders.
|
||||||
RedeemCode *RedeemCodeClient
|
RedeemCode *RedeemCodeClient
|
||||||
|
// SecuritySecret is the client for interacting with the SecuritySecret builders.
|
||||||
|
SecuritySecret *SecuritySecretClient
|
||||||
// Setting is the client for interacting with the Setting builders.
|
// Setting is the client for interacting with the Setting builders.
|
||||||
Setting *SettingClient
|
Setting *SettingClient
|
||||||
// UsageCleanupTask is the client for interacting with the UsageCleanupTask builders.
|
// UsageCleanupTask is the client for interacting with the UsageCleanupTask builders.
|
||||||
@@ -190,10 +194,12 @@ func (tx *Tx) init() {
|
|||||||
tx.AnnouncementRead = NewAnnouncementReadClient(tx.config)
|
tx.AnnouncementRead = NewAnnouncementReadClient(tx.config)
|
||||||
tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config)
|
tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config)
|
||||||
tx.Group = NewGroupClient(tx.config)
|
tx.Group = NewGroupClient(tx.config)
|
||||||
|
tx.IdempotencyRecord = NewIdempotencyRecordClient(tx.config)
|
||||||
tx.PromoCode = NewPromoCodeClient(tx.config)
|
tx.PromoCode = NewPromoCodeClient(tx.config)
|
||||||
tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config)
|
tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config)
|
||||||
tx.Proxy = NewProxyClient(tx.config)
|
tx.Proxy = NewProxyClient(tx.config)
|
||||||
tx.RedeemCode = NewRedeemCodeClient(tx.config)
|
tx.RedeemCode = NewRedeemCodeClient(tx.config)
|
||||||
|
tx.SecuritySecret = NewSecuritySecretClient(tx.config)
|
||||||
tx.Setting = NewSettingClient(tx.config)
|
tx.Setting = NewSettingClient(tx.config)
|
||||||
tx.UsageCleanupTask = NewUsageCleanupTaskClient(tx.config)
|
tx.UsageCleanupTask = NewUsageCleanupTaskClient(tx.config)
|
||||||
tx.UsageLog = NewUsageLogClient(tx.config)
|
tx.UsageLog = NewUsageLogClient(tx.config)
|
||||||
|
|||||||
@@ -32,6 +32,8 @@ type UsageLog struct {
|
|||||||
RequestID string `json:"request_id,omitempty"`
|
RequestID string `json:"request_id,omitempty"`
|
||||||
// Model holds the value of the "model" field.
|
// Model holds the value of the "model" field.
|
||||||
Model string `json:"model,omitempty"`
|
Model string `json:"model,omitempty"`
|
||||||
|
// UpstreamModel holds the value of the "upstream_model" field.
|
||||||
|
UpstreamModel *string `json:"upstream_model,omitempty"`
|
||||||
// GroupID holds the value of the "group_id" field.
|
// GroupID holds the value of the "group_id" field.
|
||||||
GroupID *int64 `json:"group_id,omitempty"`
|
GroupID *int64 `json:"group_id,omitempty"`
|
||||||
// SubscriptionID holds the value of the "subscription_id" field.
|
// SubscriptionID holds the value of the "subscription_id" field.
|
||||||
@@ -80,6 +82,10 @@ type UsageLog struct {
|
|||||||
ImageCount int `json:"image_count,omitempty"`
|
ImageCount int `json:"image_count,omitempty"`
|
||||||
// ImageSize holds the value of the "image_size" field.
|
// ImageSize holds the value of the "image_size" field.
|
||||||
ImageSize *string `json:"image_size,omitempty"`
|
ImageSize *string `json:"image_size,omitempty"`
|
||||||
|
// MediaType holds the value of the "media_type" field.
|
||||||
|
MediaType *string `json:"media_type,omitempty"`
|
||||||
|
// CacheTTLOverridden holds the value of the "cache_ttl_overridden" field.
|
||||||
|
CacheTTLOverridden bool `json:"cache_ttl_overridden,omitempty"`
|
||||||
// CreatedAt holds the value of the "created_at" field.
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
// Edges holds the relations/edges for other nodes in the graph.
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
@@ -165,13 +171,13 @@ func (*UsageLog) scanValues(columns []string) ([]any, error) {
|
|||||||
values := make([]any, len(columns))
|
values := make([]any, len(columns))
|
||||||
for i := range columns {
|
for i := range columns {
|
||||||
switch columns[i] {
|
switch columns[i] {
|
||||||
case usagelog.FieldStream:
|
case usagelog.FieldStream, usagelog.FieldCacheTTLOverridden:
|
||||||
values[i] = new(sql.NullBool)
|
values[i] = new(sql.NullBool)
|
||||||
case usagelog.FieldInputCost, usagelog.FieldOutputCost, usagelog.FieldCacheCreationCost, usagelog.FieldCacheReadCost, usagelog.FieldTotalCost, usagelog.FieldActualCost, usagelog.FieldRateMultiplier, usagelog.FieldAccountRateMultiplier:
|
case usagelog.FieldInputCost, usagelog.FieldOutputCost, usagelog.FieldCacheCreationCost, usagelog.FieldCacheReadCost, usagelog.FieldTotalCost, usagelog.FieldActualCost, usagelog.FieldRateMultiplier, usagelog.FieldAccountRateMultiplier:
|
||||||
values[i] = new(sql.NullFloat64)
|
values[i] = new(sql.NullFloat64)
|
||||||
case usagelog.FieldID, usagelog.FieldUserID, usagelog.FieldAPIKeyID, usagelog.FieldAccountID, usagelog.FieldGroupID, usagelog.FieldSubscriptionID, usagelog.FieldInputTokens, usagelog.FieldOutputTokens, usagelog.FieldCacheCreationTokens, usagelog.FieldCacheReadTokens, usagelog.FieldCacheCreation5mTokens, usagelog.FieldCacheCreation1hTokens, usagelog.FieldBillingType, usagelog.FieldDurationMs, usagelog.FieldFirstTokenMs, usagelog.FieldImageCount:
|
case usagelog.FieldID, usagelog.FieldUserID, usagelog.FieldAPIKeyID, usagelog.FieldAccountID, usagelog.FieldGroupID, usagelog.FieldSubscriptionID, usagelog.FieldInputTokens, usagelog.FieldOutputTokens, usagelog.FieldCacheCreationTokens, usagelog.FieldCacheReadTokens, usagelog.FieldCacheCreation5mTokens, usagelog.FieldCacheCreation1hTokens, usagelog.FieldBillingType, usagelog.FieldDurationMs, usagelog.FieldFirstTokenMs, usagelog.FieldImageCount:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
case usagelog.FieldRequestID, usagelog.FieldModel, usagelog.FieldUserAgent, usagelog.FieldIPAddress, usagelog.FieldImageSize:
|
case usagelog.FieldRequestID, usagelog.FieldModel, usagelog.FieldUpstreamModel, usagelog.FieldUserAgent, usagelog.FieldIPAddress, usagelog.FieldImageSize, usagelog.FieldMediaType:
|
||||||
values[i] = new(sql.NullString)
|
values[i] = new(sql.NullString)
|
||||||
case usagelog.FieldCreatedAt:
|
case usagelog.FieldCreatedAt:
|
||||||
values[i] = new(sql.NullTime)
|
values[i] = new(sql.NullTime)
|
||||||
@@ -226,6 +232,13 @@ func (_m *UsageLog) assignValues(columns []string, values []any) error {
|
|||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Model = value.String
|
_m.Model = value.String
|
||||||
}
|
}
|
||||||
|
case usagelog.FieldUpstreamModel:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field upstream_model", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpstreamModel = new(string)
|
||||||
|
*_m.UpstreamModel = value.String
|
||||||
|
}
|
||||||
case usagelog.FieldGroupID:
|
case usagelog.FieldGroupID:
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field group_id", values[i])
|
return fmt.Errorf("unexpected type %T for field group_id", values[i])
|
||||||
@@ -378,6 +391,19 @@ func (_m *UsageLog) assignValues(columns []string, values []any) error {
|
|||||||
_m.ImageSize = new(string)
|
_m.ImageSize = new(string)
|
||||||
*_m.ImageSize = value.String
|
*_m.ImageSize = value.String
|
||||||
}
|
}
|
||||||
|
case usagelog.FieldMediaType:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field media_type", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.MediaType = new(string)
|
||||||
|
*_m.MediaType = value.String
|
||||||
|
}
|
||||||
|
case usagelog.FieldCacheTTLOverridden:
|
||||||
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field cache_ttl_overridden", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CacheTTLOverridden = value.Bool
|
||||||
|
}
|
||||||
case usagelog.FieldCreatedAt:
|
case usagelog.FieldCreatedAt:
|
||||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
@@ -460,6 +486,11 @@ func (_m *UsageLog) String() string {
|
|||||||
builder.WriteString("model=")
|
builder.WriteString("model=")
|
||||||
builder.WriteString(_m.Model)
|
builder.WriteString(_m.Model)
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
if v := _m.UpstreamModel; v != nil {
|
||||||
|
builder.WriteString("upstream_model=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
if v := _m.GroupID; v != nil {
|
if v := _m.GroupID; v != nil {
|
||||||
builder.WriteString("group_id=")
|
builder.WriteString("group_id=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
@@ -548,6 +579,14 @@ func (_m *UsageLog) String() string {
|
|||||||
builder.WriteString(*v)
|
builder.WriteString(*v)
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
if v := _m.MediaType; v != nil {
|
||||||
|
builder.WriteString("media_type=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("cache_ttl_overridden=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.CacheTTLOverridden))
|
||||||
|
builder.WriteString(", ")
|
||||||
builder.WriteString("created_at=")
|
builder.WriteString("created_at=")
|
||||||
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
builder.WriteByte(')')
|
builder.WriteByte(')')
|
||||||
|
|||||||
@@ -24,6 +24,8 @@ const (
|
|||||||
FieldRequestID = "request_id"
|
FieldRequestID = "request_id"
|
||||||
// FieldModel holds the string denoting the model field in the database.
|
// FieldModel holds the string denoting the model field in the database.
|
||||||
FieldModel = "model"
|
FieldModel = "model"
|
||||||
|
// FieldUpstreamModel holds the string denoting the upstream_model field in the database.
|
||||||
|
FieldUpstreamModel = "upstream_model"
|
||||||
// FieldGroupID holds the string denoting the group_id field in the database.
|
// FieldGroupID holds the string denoting the group_id field in the database.
|
||||||
FieldGroupID = "group_id"
|
FieldGroupID = "group_id"
|
||||||
// FieldSubscriptionID holds the string denoting the subscription_id field in the database.
|
// FieldSubscriptionID holds the string denoting the subscription_id field in the database.
|
||||||
@@ -72,6 +74,10 @@ const (
|
|||||||
FieldImageCount = "image_count"
|
FieldImageCount = "image_count"
|
||||||
// FieldImageSize holds the string denoting the image_size field in the database.
|
// FieldImageSize holds the string denoting the image_size field in the database.
|
||||||
FieldImageSize = "image_size"
|
FieldImageSize = "image_size"
|
||||||
|
// FieldMediaType holds the string denoting the media_type field in the database.
|
||||||
|
FieldMediaType = "media_type"
|
||||||
|
// FieldCacheTTLOverridden holds the string denoting the cache_ttl_overridden field in the database.
|
||||||
|
FieldCacheTTLOverridden = "cache_ttl_overridden"
|
||||||
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
FieldCreatedAt = "created_at"
|
FieldCreatedAt = "created_at"
|
||||||
// EdgeUser holds the string denoting the user edge name in mutations.
|
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||||
@@ -131,6 +137,7 @@ var Columns = []string{
|
|||||||
FieldAccountID,
|
FieldAccountID,
|
||||||
FieldRequestID,
|
FieldRequestID,
|
||||||
FieldModel,
|
FieldModel,
|
||||||
|
FieldUpstreamModel,
|
||||||
FieldGroupID,
|
FieldGroupID,
|
||||||
FieldSubscriptionID,
|
FieldSubscriptionID,
|
||||||
FieldInputTokens,
|
FieldInputTokens,
|
||||||
@@ -155,6 +162,8 @@ var Columns = []string{
|
|||||||
FieldIPAddress,
|
FieldIPAddress,
|
||||||
FieldImageCount,
|
FieldImageCount,
|
||||||
FieldImageSize,
|
FieldImageSize,
|
||||||
|
FieldMediaType,
|
||||||
|
FieldCacheTTLOverridden,
|
||||||
FieldCreatedAt,
|
FieldCreatedAt,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -173,6 +182,8 @@ var (
|
|||||||
RequestIDValidator func(string) error
|
RequestIDValidator func(string) error
|
||||||
// ModelValidator is a validator for the "model" field. It is called by the builders before save.
|
// ModelValidator is a validator for the "model" field. It is called by the builders before save.
|
||||||
ModelValidator func(string) error
|
ModelValidator func(string) error
|
||||||
|
// UpstreamModelValidator is a validator for the "upstream_model" field. It is called by the builders before save.
|
||||||
|
UpstreamModelValidator func(string) error
|
||||||
// DefaultInputTokens holds the default value on creation for the "input_tokens" field.
|
// DefaultInputTokens holds the default value on creation for the "input_tokens" field.
|
||||||
DefaultInputTokens int
|
DefaultInputTokens int
|
||||||
// DefaultOutputTokens holds the default value on creation for the "output_tokens" field.
|
// DefaultOutputTokens holds the default value on creation for the "output_tokens" field.
|
||||||
@@ -211,6 +222,10 @@ var (
|
|||||||
DefaultImageCount int
|
DefaultImageCount int
|
||||||
// ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
// ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
||||||
ImageSizeValidator func(string) error
|
ImageSizeValidator func(string) error
|
||||||
|
// MediaTypeValidator is a validator for the "media_type" field. It is called by the builders before save.
|
||||||
|
MediaTypeValidator func(string) error
|
||||||
|
// DefaultCacheTTLOverridden holds the default value on creation for the "cache_ttl_overridden" field.
|
||||||
|
DefaultCacheTTLOverridden bool
|
||||||
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
DefaultCreatedAt func() time.Time
|
DefaultCreatedAt func() time.Time
|
||||||
)
|
)
|
||||||
@@ -248,6 +263,11 @@ func ByModel(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldModel, opts...).ToFunc()
|
return sql.OrderByField(FieldModel, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByUpstreamModel orders the results by the upstream_model field.
|
||||||
|
func ByUpstreamModel(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpstreamModel, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByGroupID orders the results by the group_id field.
|
// ByGroupID orders the results by the group_id field.
|
||||||
func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
|
func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldGroupID, opts...).ToFunc()
|
return sql.OrderByField(FieldGroupID, opts...).ToFunc()
|
||||||
@@ -368,6 +388,16 @@ func ByImageSize(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldImageSize, opts...).ToFunc()
|
return sql.OrderByField(FieldImageSize, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByMediaType orders the results by the media_type field.
|
||||||
|
func ByMediaType(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldMediaType, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCacheTTLOverridden orders the results by the cache_ttl_overridden field.
|
||||||
|
func ByCacheTTLOverridden(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCacheTTLOverridden, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByCreatedAt orders the results by the created_at field.
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
|||||||
@@ -80,6 +80,11 @@ func Model(v string) predicate.UsageLog {
|
|||||||
return predicate.UsageLog(sql.FieldEQ(FieldModel, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldModel, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpstreamModel applies equality check predicate on the "upstream_model" field. It's identical to UpstreamModelEQ.
|
||||||
|
func UpstreamModel(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEQ(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
|
// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
|
||||||
func GroupID(v int64) predicate.UsageLog {
|
func GroupID(v int64) predicate.UsageLog {
|
||||||
return predicate.UsageLog(sql.FieldEQ(FieldGroupID, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldGroupID, v))
|
||||||
@@ -200,6 +205,16 @@ func ImageSize(v string) predicate.UsageLog {
|
|||||||
return predicate.UsageLog(sql.FieldEQ(FieldImageSize, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldImageSize, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MediaType applies equality check predicate on the "media_type" field. It's identical to MediaTypeEQ.
|
||||||
|
func MediaType(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEQ(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CacheTTLOverridden applies equality check predicate on the "cache_ttl_overridden" field. It's identical to CacheTTLOverriddenEQ.
|
||||||
|
func CacheTTLOverridden(v bool) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEQ(FieldCacheTTLOverridden, v))
|
||||||
|
}
|
||||||
|
|
||||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
func CreatedAt(v time.Time) predicate.UsageLog {
|
func CreatedAt(v time.Time) predicate.UsageLog {
|
||||||
return predicate.UsageLog(sql.FieldEQ(FieldCreatedAt, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
@@ -395,6 +410,81 @@ func ModelContainsFold(v string) predicate.UsageLog {
|
|||||||
return predicate.UsageLog(sql.FieldContainsFold(FieldModel, v))
|
return predicate.UsageLog(sql.FieldContainsFold(FieldModel, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpstreamModelEQ applies the EQ predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelEQ(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEQ(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelNEQ applies the NEQ predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelNEQ(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNEQ(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelIn applies the In predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelIn(vs ...string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldIn(FieldUpstreamModel, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelNotIn applies the NotIn predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelNotIn(vs ...string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNotIn(FieldUpstreamModel, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelGT applies the GT predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelGT(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldGT(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelGTE applies the GTE predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelGTE(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldGTE(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelLT applies the LT predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelLT(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldLT(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelLTE applies the LTE predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelLTE(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldLTE(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelContains applies the Contains predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelContains(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldContains(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelHasPrefix applies the HasPrefix predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelHasPrefix(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldHasPrefix(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelHasSuffix applies the HasSuffix predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelHasSuffix(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldHasSuffix(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelIsNil applies the IsNil predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelIsNil() predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldIsNull(FieldUpstreamModel))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelNotNil applies the NotNil predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelNotNil() predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNotNull(FieldUpstreamModel))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelEqualFold applies the EqualFold predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelEqualFold(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEqualFold(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamModelContainsFold applies the ContainsFold predicate on the "upstream_model" field.
|
||||||
|
func UpstreamModelContainsFold(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldContainsFold(FieldUpstreamModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
// GroupIDEQ applies the EQ predicate on the "group_id" field.
|
// GroupIDEQ applies the EQ predicate on the "group_id" field.
|
||||||
func GroupIDEQ(v int64) predicate.UsageLog {
|
func GroupIDEQ(v int64) predicate.UsageLog {
|
||||||
return predicate.UsageLog(sql.FieldEQ(FieldGroupID, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldGroupID, v))
|
||||||
@@ -1440,6 +1530,91 @@ func ImageSizeContainsFold(v string) predicate.UsageLog {
|
|||||||
return predicate.UsageLog(sql.FieldContainsFold(FieldImageSize, v))
|
return predicate.UsageLog(sql.FieldContainsFold(FieldImageSize, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MediaTypeEQ applies the EQ predicate on the "media_type" field.
|
||||||
|
func MediaTypeEQ(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEQ(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeNEQ applies the NEQ predicate on the "media_type" field.
|
||||||
|
func MediaTypeNEQ(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNEQ(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeIn applies the In predicate on the "media_type" field.
|
||||||
|
func MediaTypeIn(vs ...string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldIn(FieldMediaType, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeNotIn applies the NotIn predicate on the "media_type" field.
|
||||||
|
func MediaTypeNotIn(vs ...string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNotIn(FieldMediaType, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeGT applies the GT predicate on the "media_type" field.
|
||||||
|
func MediaTypeGT(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldGT(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeGTE applies the GTE predicate on the "media_type" field.
|
||||||
|
func MediaTypeGTE(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldGTE(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeLT applies the LT predicate on the "media_type" field.
|
||||||
|
func MediaTypeLT(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldLT(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeLTE applies the LTE predicate on the "media_type" field.
|
||||||
|
func MediaTypeLTE(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldLTE(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeContains applies the Contains predicate on the "media_type" field.
|
||||||
|
func MediaTypeContains(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldContains(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeHasPrefix applies the HasPrefix predicate on the "media_type" field.
|
||||||
|
func MediaTypeHasPrefix(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldHasPrefix(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeHasSuffix applies the HasSuffix predicate on the "media_type" field.
|
||||||
|
func MediaTypeHasSuffix(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldHasSuffix(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeIsNil applies the IsNil predicate on the "media_type" field.
|
||||||
|
func MediaTypeIsNil() predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldIsNull(FieldMediaType))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeNotNil applies the NotNil predicate on the "media_type" field.
|
||||||
|
func MediaTypeNotNil() predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNotNull(FieldMediaType))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeEqualFold applies the EqualFold predicate on the "media_type" field.
|
||||||
|
func MediaTypeEqualFold(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEqualFold(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeContainsFold applies the ContainsFold predicate on the "media_type" field.
|
||||||
|
func MediaTypeContainsFold(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldContainsFold(FieldMediaType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CacheTTLOverriddenEQ applies the EQ predicate on the "cache_ttl_overridden" field.
|
||||||
|
func CacheTTLOverriddenEQ(v bool) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEQ(FieldCacheTTLOverridden, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CacheTTLOverriddenNEQ applies the NEQ predicate on the "cache_ttl_overridden" field.
|
||||||
|
func CacheTTLOverriddenNEQ(v bool) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNEQ(FieldCacheTTLOverridden, v))
|
||||||
|
}
|
||||||
|
|
||||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
func CreatedAtEQ(v time.Time) predicate.UsageLog {
|
func CreatedAtEQ(v time.Time) predicate.UsageLog {
|
||||||
return predicate.UsageLog(sql.FieldEQ(FieldCreatedAt, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
|||||||
@@ -57,6 +57,20 @@ func (_c *UsageLogCreate) SetModel(v string) *UsageLogCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetUpstreamModel sets the "upstream_model" field.
|
||||||
|
func (_c *UsageLogCreate) SetUpstreamModel(v string) *UsageLogCreate {
|
||||||
|
_c.mutation.SetUpstreamModel(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUpstreamModel sets the "upstream_model" field if the given value is not nil.
|
||||||
|
func (_c *UsageLogCreate) SetNillableUpstreamModel(v *string) *UsageLogCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetUpstreamModel(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetGroupID sets the "group_id" field.
|
// SetGroupID sets the "group_id" field.
|
||||||
func (_c *UsageLogCreate) SetGroupID(v int64) *UsageLogCreate {
|
func (_c *UsageLogCreate) SetGroupID(v int64) *UsageLogCreate {
|
||||||
_c.mutation.SetGroupID(v)
|
_c.mutation.SetGroupID(v)
|
||||||
@@ -393,6 +407,34 @@ func (_c *UsageLogCreate) SetNillableImageSize(v *string) *UsageLogCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetMediaType sets the "media_type" field.
|
||||||
|
func (_c *UsageLogCreate) SetMediaType(v string) *UsageLogCreate {
|
||||||
|
_c.mutation.SetMediaType(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableMediaType sets the "media_type" field if the given value is not nil.
|
||||||
|
func (_c *UsageLogCreate) SetNillableMediaType(v *string) *UsageLogCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetMediaType(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCacheTTLOverridden sets the "cache_ttl_overridden" field.
|
||||||
|
func (_c *UsageLogCreate) SetCacheTTLOverridden(v bool) *UsageLogCreate {
|
||||||
|
_c.mutation.SetCacheTTLOverridden(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCacheTTLOverridden sets the "cache_ttl_overridden" field if the given value is not nil.
|
||||||
|
func (_c *UsageLogCreate) SetNillableCacheTTLOverridden(v *bool) *UsageLogCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetCacheTTLOverridden(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetCreatedAt sets the "created_at" field.
|
// SetCreatedAt sets the "created_at" field.
|
||||||
func (_c *UsageLogCreate) SetCreatedAt(v time.Time) *UsageLogCreate {
|
func (_c *UsageLogCreate) SetCreatedAt(v time.Time) *UsageLogCreate {
|
||||||
_c.mutation.SetCreatedAt(v)
|
_c.mutation.SetCreatedAt(v)
|
||||||
@@ -531,6 +573,10 @@ func (_c *UsageLogCreate) defaults() {
|
|||||||
v := usagelog.DefaultImageCount
|
v := usagelog.DefaultImageCount
|
||||||
_c.mutation.SetImageCount(v)
|
_c.mutation.SetImageCount(v)
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.CacheTTLOverridden(); !ok {
|
||||||
|
v := usagelog.DefaultCacheTTLOverridden
|
||||||
|
_c.mutation.SetCacheTTLOverridden(v)
|
||||||
|
}
|
||||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
v := usagelog.DefaultCreatedAt()
|
v := usagelog.DefaultCreatedAt()
|
||||||
_c.mutation.SetCreatedAt(v)
|
_c.mutation.SetCreatedAt(v)
|
||||||
@@ -564,6 +610,11 @@ func (_c *UsageLogCreate) check() error {
|
|||||||
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)}
|
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _c.mutation.UpstreamModel(); ok {
|
||||||
|
if err := usagelog.UpstreamModelValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "upstream_model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.upstream_model": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if _, ok := _c.mutation.InputTokens(); !ok {
|
if _, ok := _c.mutation.InputTokens(); !ok {
|
||||||
return &ValidationError{Name: "input_tokens", err: errors.New(`ent: missing required field "UsageLog.input_tokens"`)}
|
return &ValidationError{Name: "input_tokens", err: errors.New(`ent: missing required field "UsageLog.input_tokens"`)}
|
||||||
}
|
}
|
||||||
@@ -627,6 +678,14 @@ func (_c *UsageLogCreate) check() error {
|
|||||||
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _c.mutation.MediaType(); ok {
|
||||||
|
if err := usagelog.MediaTypeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "media_type", err: fmt.Errorf(`ent: validator failed for field "UsageLog.media_type": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.CacheTTLOverridden(); !ok {
|
||||||
|
return &ValidationError{Name: "cache_ttl_overridden", err: errors.New(`ent: missing required field "UsageLog.cache_ttl_overridden"`)}
|
||||||
|
}
|
||||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UsageLog.created_at"`)}
|
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UsageLog.created_at"`)}
|
||||||
}
|
}
|
||||||
@@ -674,6 +733,10 @@ func (_c *UsageLogCreate) createSpec() (*UsageLog, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(usagelog.FieldModel, field.TypeString, value)
|
_spec.SetField(usagelog.FieldModel, field.TypeString, value)
|
||||||
_node.Model = value
|
_node.Model = value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.UpstreamModel(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldUpstreamModel, field.TypeString, value)
|
||||||
|
_node.UpstreamModel = &value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.InputTokens(); ok {
|
if value, ok := _c.mutation.InputTokens(); ok {
|
||||||
_spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value)
|
_spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value)
|
||||||
_node.InputTokens = value
|
_node.InputTokens = value
|
||||||
@@ -762,6 +825,14 @@ func (_c *UsageLogCreate) createSpec() (*UsageLog, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(usagelog.FieldImageSize, field.TypeString, value)
|
_spec.SetField(usagelog.FieldImageSize, field.TypeString, value)
|
||||||
_node.ImageSize = &value
|
_node.ImageSize = &value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.MediaType(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldMediaType, field.TypeString, value)
|
||||||
|
_node.MediaType = &value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.CacheTTLOverridden(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldCacheTTLOverridden, field.TypeBool, value)
|
||||||
|
_node.CacheTTLOverridden = value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.CreatedAt(); ok {
|
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||||
_spec.SetField(usagelog.FieldCreatedAt, field.TypeTime, value)
|
_spec.SetField(usagelog.FieldCreatedAt, field.TypeTime, value)
|
||||||
_node.CreatedAt = value
|
_node.CreatedAt = value
|
||||||
@@ -963,6 +1034,24 @@ func (u *UsageLogUpsert) UpdateModel() *UsageLogUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetUpstreamModel sets the "upstream_model" field.
|
||||||
|
func (u *UsageLogUpsert) SetUpstreamModel(v string) *UsageLogUpsert {
|
||||||
|
u.Set(usagelog.FieldUpstreamModel, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpstreamModel sets the "upstream_model" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsert) UpdateUpstreamModel() *UsageLogUpsert {
|
||||||
|
u.SetExcluded(usagelog.FieldUpstreamModel)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUpstreamModel clears the value of the "upstream_model" field.
|
||||||
|
func (u *UsageLogUpsert) ClearUpstreamModel() *UsageLogUpsert {
|
||||||
|
u.SetNull(usagelog.FieldUpstreamModel)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// SetGroupID sets the "group_id" field.
|
// SetGroupID sets the "group_id" field.
|
||||||
func (u *UsageLogUpsert) SetGroupID(v int64) *UsageLogUpsert {
|
func (u *UsageLogUpsert) SetGroupID(v int64) *UsageLogUpsert {
|
||||||
u.Set(usagelog.FieldGroupID, v)
|
u.Set(usagelog.FieldGroupID, v)
|
||||||
@@ -1407,6 +1496,36 @@ func (u *UsageLogUpsert) ClearImageSize() *UsageLogUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetMediaType sets the "media_type" field.
|
||||||
|
func (u *UsageLogUpsert) SetMediaType(v string) *UsageLogUpsert {
|
||||||
|
u.Set(usagelog.FieldMediaType, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMediaType sets the "media_type" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsert) UpdateMediaType() *UsageLogUpsert {
|
||||||
|
u.SetExcluded(usagelog.FieldMediaType)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearMediaType clears the value of the "media_type" field.
|
||||||
|
func (u *UsageLogUpsert) ClearMediaType() *UsageLogUpsert {
|
||||||
|
u.SetNull(usagelog.FieldMediaType)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCacheTTLOverridden sets the "cache_ttl_overridden" field.
|
||||||
|
func (u *UsageLogUpsert) SetCacheTTLOverridden(v bool) *UsageLogUpsert {
|
||||||
|
u.Set(usagelog.FieldCacheTTLOverridden, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateCacheTTLOverridden sets the "cache_ttl_overridden" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsert) UpdateCacheTTLOverridden() *UsageLogUpsert {
|
||||||
|
u.SetExcluded(usagelog.FieldCacheTTLOverridden)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
// Using this option is equivalent to using:
|
// Using this option is equivalent to using:
|
||||||
//
|
//
|
||||||
@@ -1522,6 +1641,27 @@ func (u *UsageLogUpsertOne) UpdateModel() *UsageLogUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetUpstreamModel sets the "upstream_model" field.
|
||||||
|
func (u *UsageLogUpsertOne) SetUpstreamModel(v string) *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.SetUpstreamModel(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpstreamModel sets the "upstream_model" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsertOne) UpdateUpstreamModel() *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.UpdateUpstreamModel()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUpstreamModel clears the value of the "upstream_model" field.
|
||||||
|
func (u *UsageLogUpsertOne) ClearUpstreamModel() *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.ClearUpstreamModel()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetGroupID sets the "group_id" field.
|
// SetGroupID sets the "group_id" field.
|
||||||
func (u *UsageLogUpsertOne) SetGroupID(v int64) *UsageLogUpsertOne {
|
func (u *UsageLogUpsertOne) SetGroupID(v int64) *UsageLogUpsertOne {
|
||||||
return u.Update(func(s *UsageLogUpsert) {
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
@@ -2040,6 +2180,41 @@ func (u *UsageLogUpsertOne) ClearImageSize() *UsageLogUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetMediaType sets the "media_type" field.
|
||||||
|
func (u *UsageLogUpsertOne) SetMediaType(v string) *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.SetMediaType(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMediaType sets the "media_type" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsertOne) UpdateMediaType() *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.UpdateMediaType()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearMediaType clears the value of the "media_type" field.
|
||||||
|
func (u *UsageLogUpsertOne) ClearMediaType() *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.ClearMediaType()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCacheTTLOverridden sets the "cache_ttl_overridden" field.
|
||||||
|
func (u *UsageLogUpsertOne) SetCacheTTLOverridden(v bool) *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.SetCacheTTLOverridden(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateCacheTTLOverridden sets the "cache_ttl_overridden" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsertOne) UpdateCacheTTLOverridden() *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.UpdateCacheTTLOverridden()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *UsageLogUpsertOne) Exec(ctx context.Context) error {
|
func (u *UsageLogUpsertOne) Exec(ctx context.Context) error {
|
||||||
if len(u.create.conflict) == 0 {
|
if len(u.create.conflict) == 0 {
|
||||||
@@ -2321,6 +2496,27 @@ func (u *UsageLogUpsertBulk) UpdateModel() *UsageLogUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetUpstreamModel sets the "upstream_model" field.
|
||||||
|
func (u *UsageLogUpsertBulk) SetUpstreamModel(v string) *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.SetUpstreamModel(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpstreamModel sets the "upstream_model" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsertBulk) UpdateUpstreamModel() *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.UpdateUpstreamModel()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUpstreamModel clears the value of the "upstream_model" field.
|
||||||
|
func (u *UsageLogUpsertBulk) ClearUpstreamModel() *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.ClearUpstreamModel()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetGroupID sets the "group_id" field.
|
// SetGroupID sets the "group_id" field.
|
||||||
func (u *UsageLogUpsertBulk) SetGroupID(v int64) *UsageLogUpsertBulk {
|
func (u *UsageLogUpsertBulk) SetGroupID(v int64) *UsageLogUpsertBulk {
|
||||||
return u.Update(func(s *UsageLogUpsert) {
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
@@ -2839,6 +3035,41 @@ func (u *UsageLogUpsertBulk) ClearImageSize() *UsageLogUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetMediaType sets the "media_type" field.
|
||||||
|
func (u *UsageLogUpsertBulk) SetMediaType(v string) *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.SetMediaType(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMediaType sets the "media_type" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsertBulk) UpdateMediaType() *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.UpdateMediaType()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearMediaType clears the value of the "media_type" field.
|
||||||
|
func (u *UsageLogUpsertBulk) ClearMediaType() *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.ClearMediaType()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCacheTTLOverridden sets the "cache_ttl_overridden" field.
|
||||||
|
func (u *UsageLogUpsertBulk) SetCacheTTLOverridden(v bool) *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.SetCacheTTLOverridden(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateCacheTTLOverridden sets the "cache_ttl_overridden" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsertBulk) UpdateCacheTTLOverridden() *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.UpdateCacheTTLOverridden()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *UsageLogUpsertBulk) Exec(ctx context.Context) error {
|
func (u *UsageLogUpsertBulk) Exec(ctx context.Context) error {
|
||||||
if u.create.err != nil {
|
if u.create.err != nil {
|
||||||
|
|||||||
@@ -102,6 +102,26 @@ func (_u *UsageLogUpdate) SetNillableModel(v *string) *UsageLogUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetUpstreamModel sets the "upstream_model" field.
|
||||||
|
func (_u *UsageLogUpdate) SetUpstreamModel(v string) *UsageLogUpdate {
|
||||||
|
_u.mutation.SetUpstreamModel(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUpstreamModel sets the "upstream_model" field if the given value is not nil.
|
||||||
|
func (_u *UsageLogUpdate) SetNillableUpstreamModel(v *string) *UsageLogUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUpstreamModel(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUpstreamModel clears the value of the "upstream_model" field.
|
||||||
|
func (_u *UsageLogUpdate) ClearUpstreamModel() *UsageLogUpdate {
|
||||||
|
_u.mutation.ClearUpstreamModel()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetGroupID sets the "group_id" field.
|
// SetGroupID sets the "group_id" field.
|
||||||
func (_u *UsageLogUpdate) SetGroupID(v int64) *UsageLogUpdate {
|
func (_u *UsageLogUpdate) SetGroupID(v int64) *UsageLogUpdate {
|
||||||
_u.mutation.SetGroupID(v)
|
_u.mutation.SetGroupID(v)
|
||||||
@@ -612,6 +632,40 @@ func (_u *UsageLogUpdate) ClearImageSize() *UsageLogUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetMediaType sets the "media_type" field.
|
||||||
|
func (_u *UsageLogUpdate) SetMediaType(v string) *UsageLogUpdate {
|
||||||
|
_u.mutation.SetMediaType(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableMediaType sets the "media_type" field if the given value is not nil.
|
||||||
|
func (_u *UsageLogUpdate) SetNillableMediaType(v *string) *UsageLogUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetMediaType(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearMediaType clears the value of the "media_type" field.
|
||||||
|
func (_u *UsageLogUpdate) ClearMediaType() *UsageLogUpdate {
|
||||||
|
_u.mutation.ClearMediaType()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCacheTTLOverridden sets the "cache_ttl_overridden" field.
|
||||||
|
func (_u *UsageLogUpdate) SetCacheTTLOverridden(v bool) *UsageLogUpdate {
|
||||||
|
_u.mutation.SetCacheTTLOverridden(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCacheTTLOverridden sets the "cache_ttl_overridden" field if the given value is not nil.
|
||||||
|
func (_u *UsageLogUpdate) SetNillableCacheTTLOverridden(v *bool) *UsageLogUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetCacheTTLOverridden(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetUser sets the "user" edge to the User entity.
|
// SetUser sets the "user" edge to the User entity.
|
||||||
func (_u *UsageLogUpdate) SetUser(v *User) *UsageLogUpdate {
|
func (_u *UsageLogUpdate) SetUser(v *User) *UsageLogUpdate {
|
||||||
return _u.SetUserID(v.ID)
|
return _u.SetUserID(v.ID)
|
||||||
@@ -711,6 +765,11 @@ func (_u *UsageLogUpdate) check() error {
|
|||||||
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)}
|
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.UpstreamModel(); ok {
|
||||||
|
if err := usagelog.UpstreamModelValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "upstream_model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.upstream_model": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if v, ok := _u.mutation.UserAgent(); ok {
|
if v, ok := _u.mutation.UserAgent(); ok {
|
||||||
if err := usagelog.UserAgentValidator(v); err != nil {
|
if err := usagelog.UserAgentValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
||||||
@@ -726,6 +785,11 @@ func (_u *UsageLogUpdate) check() error {
|
|||||||
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.MediaType(); ok {
|
||||||
|
if err := usagelog.MediaTypeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "media_type", err: fmt.Errorf(`ent: validator failed for field "UsageLog.media_type": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
||||||
return errors.New(`ent: clearing a required unique edge "UsageLog.user"`)
|
return errors.New(`ent: clearing a required unique edge "UsageLog.user"`)
|
||||||
}
|
}
|
||||||
@@ -756,6 +820,12 @@ func (_u *UsageLogUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if value, ok := _u.mutation.Model(); ok {
|
if value, ok := _u.mutation.Model(); ok {
|
||||||
_spec.SetField(usagelog.FieldModel, field.TypeString, value)
|
_spec.SetField(usagelog.FieldModel, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.UpstreamModel(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldUpstreamModel, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UpstreamModelCleared() {
|
||||||
|
_spec.ClearField(usagelog.FieldUpstreamModel, field.TypeString)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.InputTokens(); ok {
|
if value, ok := _u.mutation.InputTokens(); ok {
|
||||||
_spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value)
|
_spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value)
|
||||||
}
|
}
|
||||||
@@ -894,6 +964,15 @@ func (_u *UsageLogUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if _u.mutation.ImageSizeCleared() {
|
if _u.mutation.ImageSizeCleared() {
|
||||||
_spec.ClearField(usagelog.FieldImageSize, field.TypeString)
|
_spec.ClearField(usagelog.FieldImageSize, field.TypeString)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.MediaType(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldMediaType, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.MediaTypeCleared() {
|
||||||
|
_spec.ClearField(usagelog.FieldMediaType, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.CacheTTLOverridden(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldCacheTTLOverridden, field.TypeBool, value)
|
||||||
|
}
|
||||||
if _u.mutation.UserCleared() {
|
if _u.mutation.UserCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.M2O,
|
Rel: sqlgraph.M2O,
|
||||||
@@ -1129,6 +1208,26 @@ func (_u *UsageLogUpdateOne) SetNillableModel(v *string) *UsageLogUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetUpstreamModel sets the "upstream_model" field.
|
||||||
|
func (_u *UsageLogUpdateOne) SetUpstreamModel(v string) *UsageLogUpdateOne {
|
||||||
|
_u.mutation.SetUpstreamModel(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUpstreamModel sets the "upstream_model" field if the given value is not nil.
|
||||||
|
func (_u *UsageLogUpdateOne) SetNillableUpstreamModel(v *string) *UsageLogUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUpstreamModel(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUpstreamModel clears the value of the "upstream_model" field.
|
||||||
|
func (_u *UsageLogUpdateOne) ClearUpstreamModel() *UsageLogUpdateOne {
|
||||||
|
_u.mutation.ClearUpstreamModel()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetGroupID sets the "group_id" field.
|
// SetGroupID sets the "group_id" field.
|
||||||
func (_u *UsageLogUpdateOne) SetGroupID(v int64) *UsageLogUpdateOne {
|
func (_u *UsageLogUpdateOne) SetGroupID(v int64) *UsageLogUpdateOne {
|
||||||
_u.mutation.SetGroupID(v)
|
_u.mutation.SetGroupID(v)
|
||||||
@@ -1639,6 +1738,40 @@ func (_u *UsageLogUpdateOne) ClearImageSize() *UsageLogUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetMediaType sets the "media_type" field.
|
||||||
|
func (_u *UsageLogUpdateOne) SetMediaType(v string) *UsageLogUpdateOne {
|
||||||
|
_u.mutation.SetMediaType(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableMediaType sets the "media_type" field if the given value is not nil.
|
||||||
|
func (_u *UsageLogUpdateOne) SetNillableMediaType(v *string) *UsageLogUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetMediaType(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearMediaType clears the value of the "media_type" field.
|
||||||
|
func (_u *UsageLogUpdateOne) ClearMediaType() *UsageLogUpdateOne {
|
||||||
|
_u.mutation.ClearMediaType()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCacheTTLOverridden sets the "cache_ttl_overridden" field.
|
||||||
|
func (_u *UsageLogUpdateOne) SetCacheTTLOverridden(v bool) *UsageLogUpdateOne {
|
||||||
|
_u.mutation.SetCacheTTLOverridden(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCacheTTLOverridden sets the "cache_ttl_overridden" field if the given value is not nil.
|
||||||
|
func (_u *UsageLogUpdateOne) SetNillableCacheTTLOverridden(v *bool) *UsageLogUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetCacheTTLOverridden(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetUser sets the "user" edge to the User entity.
|
// SetUser sets the "user" edge to the User entity.
|
||||||
func (_u *UsageLogUpdateOne) SetUser(v *User) *UsageLogUpdateOne {
|
func (_u *UsageLogUpdateOne) SetUser(v *User) *UsageLogUpdateOne {
|
||||||
return _u.SetUserID(v.ID)
|
return _u.SetUserID(v.ID)
|
||||||
@@ -1751,6 +1884,11 @@ func (_u *UsageLogUpdateOne) check() error {
|
|||||||
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)}
|
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.UpstreamModel(); ok {
|
||||||
|
if err := usagelog.UpstreamModelValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "upstream_model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.upstream_model": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if v, ok := _u.mutation.UserAgent(); ok {
|
if v, ok := _u.mutation.UserAgent(); ok {
|
||||||
if err := usagelog.UserAgentValidator(v); err != nil {
|
if err := usagelog.UserAgentValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
||||||
@@ -1766,6 +1904,11 @@ func (_u *UsageLogUpdateOne) check() error {
|
|||||||
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.MediaType(); ok {
|
||||||
|
if err := usagelog.MediaTypeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "media_type", err: fmt.Errorf(`ent: validator failed for field "UsageLog.media_type": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
||||||
return errors.New(`ent: clearing a required unique edge "UsageLog.user"`)
|
return errors.New(`ent: clearing a required unique edge "UsageLog.user"`)
|
||||||
}
|
}
|
||||||
@@ -1813,6 +1956,12 @@ func (_u *UsageLogUpdateOne) sqlSave(ctx context.Context) (_node *UsageLog, err
|
|||||||
if value, ok := _u.mutation.Model(); ok {
|
if value, ok := _u.mutation.Model(); ok {
|
||||||
_spec.SetField(usagelog.FieldModel, field.TypeString, value)
|
_spec.SetField(usagelog.FieldModel, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.UpstreamModel(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldUpstreamModel, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UpstreamModelCleared() {
|
||||||
|
_spec.ClearField(usagelog.FieldUpstreamModel, field.TypeString)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.InputTokens(); ok {
|
if value, ok := _u.mutation.InputTokens(); ok {
|
||||||
_spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value)
|
_spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value)
|
||||||
}
|
}
|
||||||
@@ -1951,6 +2100,15 @@ func (_u *UsageLogUpdateOne) sqlSave(ctx context.Context) (_node *UsageLog, err
|
|||||||
if _u.mutation.ImageSizeCleared() {
|
if _u.mutation.ImageSizeCleared() {
|
||||||
_spec.ClearField(usagelog.FieldImageSize, field.TypeString)
|
_spec.ClearField(usagelog.FieldImageSize, field.TypeString)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.MediaType(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldMediaType, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.MediaTypeCleared() {
|
||||||
|
_spec.ClearField(usagelog.FieldMediaType, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.CacheTTLOverridden(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldCacheTTLOverridden, field.TypeBool, value)
|
||||||
|
}
|
||||||
if _u.mutation.UserCleared() {
|
if _u.mutation.UserCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.M2O,
|
Rel: sqlgraph.M2O,
|
||||||
|
|||||||
@@ -45,6 +45,10 @@ type User struct {
|
|||||||
TotpEnabled bool `json:"totp_enabled,omitempty"`
|
TotpEnabled bool `json:"totp_enabled,omitempty"`
|
||||||
// TotpEnabledAt holds the value of the "totp_enabled_at" field.
|
// TotpEnabledAt holds the value of the "totp_enabled_at" field.
|
||||||
TotpEnabledAt *time.Time `json:"totp_enabled_at,omitempty"`
|
TotpEnabledAt *time.Time `json:"totp_enabled_at,omitempty"`
|
||||||
|
// SoraStorageQuotaBytes holds the value of the "sora_storage_quota_bytes" field.
|
||||||
|
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes,omitempty"`
|
||||||
|
// SoraStorageUsedBytes holds the value of the "sora_storage_used_bytes" field.
|
||||||
|
SoraStorageUsedBytes int64 `json:"sora_storage_used_bytes,omitempty"`
|
||||||
// Edges holds the relations/edges for other nodes in the graph.
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
// The values are being populated by the UserQuery when eager-loading is set.
|
// The values are being populated by the UserQuery when eager-loading is set.
|
||||||
Edges UserEdges `json:"edges"`
|
Edges UserEdges `json:"edges"`
|
||||||
@@ -177,7 +181,7 @@ func (*User) scanValues(columns []string) ([]any, error) {
|
|||||||
values[i] = new(sql.NullBool)
|
values[i] = new(sql.NullBool)
|
||||||
case user.FieldBalance:
|
case user.FieldBalance:
|
||||||
values[i] = new(sql.NullFloat64)
|
values[i] = new(sql.NullFloat64)
|
||||||
case user.FieldID, user.FieldConcurrency:
|
case user.FieldID, user.FieldConcurrency, user.FieldSoraStorageQuotaBytes, user.FieldSoraStorageUsedBytes:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldNotes, user.FieldTotpSecretEncrypted:
|
case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldNotes, user.FieldTotpSecretEncrypted:
|
||||||
values[i] = new(sql.NullString)
|
values[i] = new(sql.NullString)
|
||||||
@@ -291,6 +295,18 @@ func (_m *User) assignValues(columns []string, values []any) error {
|
|||||||
_m.TotpEnabledAt = new(time.Time)
|
_m.TotpEnabledAt = new(time.Time)
|
||||||
*_m.TotpEnabledAt = value.Time
|
*_m.TotpEnabledAt = value.Time
|
||||||
}
|
}
|
||||||
|
case user.FieldSoraStorageQuotaBytes:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field sora_storage_quota_bytes", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SoraStorageQuotaBytes = value.Int64
|
||||||
|
}
|
||||||
|
case user.FieldSoraStorageUsedBytes:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field sora_storage_used_bytes", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SoraStorageUsedBytes = value.Int64
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
_m.selectValues.Set(columns[i], values[i])
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
}
|
}
|
||||||
@@ -424,6 +440,12 @@ func (_m *User) String() string {
|
|||||||
builder.WriteString("totp_enabled_at=")
|
builder.WriteString("totp_enabled_at=")
|
||||||
builder.WriteString(v.Format(time.ANSIC))
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
}
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("sora_storage_quota_bytes=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.SoraStorageQuotaBytes))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("sora_storage_used_bytes=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.SoraStorageUsedBytes))
|
||||||
builder.WriteByte(')')
|
builder.WriteByte(')')
|
||||||
return builder.String()
|
return builder.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -43,6 +43,10 @@ const (
|
|||||||
FieldTotpEnabled = "totp_enabled"
|
FieldTotpEnabled = "totp_enabled"
|
||||||
// FieldTotpEnabledAt holds the string denoting the totp_enabled_at field in the database.
|
// FieldTotpEnabledAt holds the string denoting the totp_enabled_at field in the database.
|
||||||
FieldTotpEnabledAt = "totp_enabled_at"
|
FieldTotpEnabledAt = "totp_enabled_at"
|
||||||
|
// FieldSoraStorageQuotaBytes holds the string denoting the sora_storage_quota_bytes field in the database.
|
||||||
|
FieldSoraStorageQuotaBytes = "sora_storage_quota_bytes"
|
||||||
|
// FieldSoraStorageUsedBytes holds the string denoting the sora_storage_used_bytes field in the database.
|
||||||
|
FieldSoraStorageUsedBytes = "sora_storage_used_bytes"
|
||||||
// EdgeAPIKeys holds the string denoting the api_keys edge name in mutations.
|
// EdgeAPIKeys holds the string denoting the api_keys edge name in mutations.
|
||||||
EdgeAPIKeys = "api_keys"
|
EdgeAPIKeys = "api_keys"
|
||||||
// EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations.
|
// EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations.
|
||||||
@@ -152,6 +156,8 @@ var Columns = []string{
|
|||||||
FieldTotpSecretEncrypted,
|
FieldTotpSecretEncrypted,
|
||||||
FieldTotpEnabled,
|
FieldTotpEnabled,
|
||||||
FieldTotpEnabledAt,
|
FieldTotpEnabledAt,
|
||||||
|
FieldSoraStorageQuotaBytes,
|
||||||
|
FieldSoraStorageUsedBytes,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -208,6 +214,10 @@ var (
|
|||||||
DefaultNotes string
|
DefaultNotes string
|
||||||
// DefaultTotpEnabled holds the default value on creation for the "totp_enabled" field.
|
// DefaultTotpEnabled holds the default value on creation for the "totp_enabled" field.
|
||||||
DefaultTotpEnabled bool
|
DefaultTotpEnabled bool
|
||||||
|
// DefaultSoraStorageQuotaBytes holds the default value on creation for the "sora_storage_quota_bytes" field.
|
||||||
|
DefaultSoraStorageQuotaBytes int64
|
||||||
|
// DefaultSoraStorageUsedBytes holds the default value on creation for the "sora_storage_used_bytes" field.
|
||||||
|
DefaultSoraStorageUsedBytes int64
|
||||||
)
|
)
|
||||||
|
|
||||||
// OrderOption defines the ordering options for the User queries.
|
// OrderOption defines the ordering options for the User queries.
|
||||||
@@ -288,6 +298,16 @@ func ByTotpEnabledAt(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldTotpEnabledAt, opts...).ToFunc()
|
return sql.OrderByField(FieldTotpEnabledAt, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BySoraStorageQuotaBytes orders the results by the sora_storage_quota_bytes field.
|
||||||
|
func BySoraStorageQuotaBytes(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSoraStorageQuotaBytes, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySoraStorageUsedBytes orders the results by the sora_storage_used_bytes field.
|
||||||
|
func BySoraStorageUsedBytes(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSoraStorageUsedBytes, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByAPIKeysCount orders the results by api_keys count.
|
// ByAPIKeysCount orders the results by api_keys count.
|
||||||
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption {
|
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return func(s *sql.Selector) {
|
return func(s *sql.Selector) {
|
||||||
|
|||||||
@@ -125,6 +125,16 @@ func TotpEnabledAt(v time.Time) predicate.User {
|
|||||||
return predicate.User(sql.FieldEQ(FieldTotpEnabledAt, v))
|
return predicate.User(sql.FieldEQ(FieldTotpEnabledAt, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytes applies equality check predicate on the "sora_storage_quota_bytes" field. It's identical to SoraStorageQuotaBytesEQ.
|
||||||
|
func SoraStorageQuotaBytes(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytes applies equality check predicate on the "sora_storage_used_bytes" field. It's identical to SoraStorageUsedBytesEQ.
|
||||||
|
func SoraStorageUsedBytes(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldEQ(FieldSoraStorageUsedBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
func CreatedAtEQ(v time.Time) predicate.User {
|
func CreatedAtEQ(v time.Time) predicate.User {
|
||||||
return predicate.User(sql.FieldEQ(FieldCreatedAt, v))
|
return predicate.User(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
@@ -860,6 +870,86 @@ func TotpEnabledAtNotNil() predicate.User {
|
|||||||
return predicate.User(sql.FieldNotNull(FieldTotpEnabledAt))
|
return predicate.User(sql.FieldNotNull(FieldTotpEnabledAt))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesEQ applies the EQ predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesEQ(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesNEQ applies the NEQ predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesNEQ(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldNEQ(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesIn applies the In predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesIn(vs ...int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldIn(FieldSoraStorageQuotaBytes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesNotIn applies the NotIn predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesNotIn(vs ...int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldNotIn(FieldSoraStorageQuotaBytes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesGT applies the GT predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesGT(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldGT(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesGTE applies the GTE predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesGTE(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldGTE(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesLT applies the LT predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesLT(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldLT(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageQuotaBytesLTE applies the LTE predicate on the "sora_storage_quota_bytes" field.
|
||||||
|
func SoraStorageQuotaBytesLTE(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldLTE(FieldSoraStorageQuotaBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytesEQ applies the EQ predicate on the "sora_storage_used_bytes" field.
|
||||||
|
func SoraStorageUsedBytesEQ(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldEQ(FieldSoraStorageUsedBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytesNEQ applies the NEQ predicate on the "sora_storage_used_bytes" field.
|
||||||
|
func SoraStorageUsedBytesNEQ(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldNEQ(FieldSoraStorageUsedBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytesIn applies the In predicate on the "sora_storage_used_bytes" field.
|
||||||
|
func SoraStorageUsedBytesIn(vs ...int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldIn(FieldSoraStorageUsedBytes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytesNotIn applies the NotIn predicate on the "sora_storage_used_bytes" field.
|
||||||
|
func SoraStorageUsedBytesNotIn(vs ...int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldNotIn(FieldSoraStorageUsedBytes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytesGT applies the GT predicate on the "sora_storage_used_bytes" field.
|
||||||
|
func SoraStorageUsedBytesGT(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldGT(FieldSoraStorageUsedBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytesGTE applies the GTE predicate on the "sora_storage_used_bytes" field.
|
||||||
|
func SoraStorageUsedBytesGTE(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldGTE(FieldSoraStorageUsedBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytesLT applies the LT predicate on the "sora_storage_used_bytes" field.
|
||||||
|
func SoraStorageUsedBytesLT(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldLT(FieldSoraStorageUsedBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoraStorageUsedBytesLTE applies the LTE predicate on the "sora_storage_used_bytes" field.
|
||||||
|
func SoraStorageUsedBytesLTE(v int64) predicate.User {
|
||||||
|
return predicate.User(sql.FieldLTE(FieldSoraStorageUsedBytes, v))
|
||||||
|
}
|
||||||
|
|
||||||
// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge.
|
// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge.
|
||||||
func HasAPIKeys() predicate.User {
|
func HasAPIKeys() predicate.User {
|
||||||
return predicate.User(func(s *sql.Selector) {
|
return predicate.User(func(s *sql.Selector) {
|
||||||
|
|||||||
@@ -210,6 +210,34 @@ func (_c *UserCreate) SetNillableTotpEnabledAt(v *time.Time) *UserCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (_c *UserCreate) SetSoraStorageQuotaBytes(v int64) *UserCreate {
|
||||||
|
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||||
|
func (_c *UserCreate) SetNillableSoraStorageQuotaBytes(v *int64) *UserCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSoraStorageQuotaBytes(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||||
|
func (_c *UserCreate) SetSoraStorageUsedBytes(v int64) *UserCreate {
|
||||||
|
_c.mutation.SetSoraStorageUsedBytes(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageUsedBytes sets the "sora_storage_used_bytes" field if the given value is not nil.
|
||||||
|
func (_c *UserCreate) SetNillableSoraStorageUsedBytes(v *int64) *UserCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetSoraStorageUsedBytes(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||||
func (_c *UserCreate) AddAPIKeyIDs(ids ...int64) *UserCreate {
|
func (_c *UserCreate) AddAPIKeyIDs(ids ...int64) *UserCreate {
|
||||||
_c.mutation.AddAPIKeyIDs(ids...)
|
_c.mutation.AddAPIKeyIDs(ids...)
|
||||||
@@ -424,6 +452,14 @@ func (_c *UserCreate) defaults() error {
|
|||||||
v := user.DefaultTotpEnabled
|
v := user.DefaultTotpEnabled
|
||||||
_c.mutation.SetTotpEnabled(v)
|
_c.mutation.SetTotpEnabled(v)
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||||
|
v := user.DefaultSoraStorageQuotaBytes
|
||||||
|
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.SoraStorageUsedBytes(); !ok {
|
||||||
|
v := user.DefaultSoraStorageUsedBytes
|
||||||
|
_c.mutation.SetSoraStorageUsedBytes(v)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -487,6 +523,12 @@ func (_c *UserCreate) check() error {
|
|||||||
if _, ok := _c.mutation.TotpEnabled(); !ok {
|
if _, ok := _c.mutation.TotpEnabled(); !ok {
|
||||||
return &ValidationError{Name: "totp_enabled", err: errors.New(`ent: missing required field "User.totp_enabled"`)}
|
return &ValidationError{Name: "totp_enabled", err: errors.New(`ent: missing required field "User.totp_enabled"`)}
|
||||||
}
|
}
|
||||||
|
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||||
|
return &ValidationError{Name: "sora_storage_quota_bytes", err: errors.New(`ent: missing required field "User.sora_storage_quota_bytes"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.SoraStorageUsedBytes(); !ok {
|
||||||
|
return &ValidationError{Name: "sora_storage_used_bytes", err: errors.New(`ent: missing required field "User.sora_storage_used_bytes"`)}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -570,6 +612,14 @@ func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value)
|
_spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value)
|
||||||
_node.TotpEnabledAt = &value
|
_node.TotpEnabledAt = &value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.SoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.SetField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
_node.SoraStorageQuotaBytes = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.SoraStorageUsedBytes(); ok {
|
||||||
|
_spec.SetField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||||
|
_node.SoraStorageUsedBytes = value
|
||||||
|
}
|
||||||
if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 {
|
if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.O2M,
|
Rel: sqlgraph.O2M,
|
||||||
@@ -956,6 +1006,42 @@ func (u *UserUpsert) ClearTotpEnabledAt() *UserUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *UserUpsert) SetSoraStorageQuotaBytes(v int64) *UserUpsert {
|
||||||
|
u.Set(user.FieldSoraStorageQuotaBytes, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||||
|
func (u *UserUpsert) UpdateSoraStorageQuotaBytes() *UserUpsert {
|
||||||
|
u.SetExcluded(user.FieldSoraStorageQuotaBytes)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *UserUpsert) AddSoraStorageQuotaBytes(v int64) *UserUpsert {
|
||||||
|
u.Add(user.FieldSoraStorageQuotaBytes, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||||
|
func (u *UserUpsert) SetSoraStorageUsedBytes(v int64) *UserUpsert {
|
||||||
|
u.Set(user.FieldSoraStorageUsedBytes, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageUsedBytes sets the "sora_storage_used_bytes" field to the value that was provided on create.
|
||||||
|
func (u *UserUpsert) UpdateSoraStorageUsedBytes() *UserUpsert {
|
||||||
|
u.SetExcluded(user.FieldSoraStorageUsedBytes)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageUsedBytes adds v to the "sora_storage_used_bytes" field.
|
||||||
|
func (u *UserUpsert) AddSoraStorageUsedBytes(v int64) *UserUpsert {
|
||||||
|
u.Add(user.FieldSoraStorageUsedBytes, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
// Using this option is equivalent to using:
|
// Using this option is equivalent to using:
|
||||||
//
|
//
|
||||||
@@ -1218,6 +1304,48 @@ func (u *UserUpsertOne) ClearTotpEnabledAt() *UserUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *UserUpsertOne) SetSoraStorageQuotaBytes(v int64) *UserUpsertOne {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.SetSoraStorageQuotaBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *UserUpsertOne) AddSoraStorageQuotaBytes(v int64) *UserUpsertOne {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.AddSoraStorageQuotaBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||||
|
func (u *UserUpsertOne) UpdateSoraStorageQuotaBytes() *UserUpsertOne {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.UpdateSoraStorageQuotaBytes()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||||
|
func (u *UserUpsertOne) SetSoraStorageUsedBytes(v int64) *UserUpsertOne {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.SetSoraStorageUsedBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageUsedBytes adds v to the "sora_storage_used_bytes" field.
|
||||||
|
func (u *UserUpsertOne) AddSoraStorageUsedBytes(v int64) *UserUpsertOne {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.AddSoraStorageUsedBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageUsedBytes sets the "sora_storage_used_bytes" field to the value that was provided on create.
|
||||||
|
func (u *UserUpsertOne) UpdateSoraStorageUsedBytes() *UserUpsertOne {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.UpdateSoraStorageUsedBytes()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *UserUpsertOne) Exec(ctx context.Context) error {
|
func (u *UserUpsertOne) Exec(ctx context.Context) error {
|
||||||
if len(u.create.conflict) == 0 {
|
if len(u.create.conflict) == 0 {
|
||||||
@@ -1646,6 +1774,48 @@ func (u *UserUpsertBulk) ClearTotpEnabledAt() *UserUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *UserUpsertBulk) SetSoraStorageQuotaBytes(v int64) *UserUpsertBulk {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.SetSoraStorageQuotaBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||||
|
func (u *UserUpsertBulk) AddSoraStorageQuotaBytes(v int64) *UserUpsertBulk {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.AddSoraStorageQuotaBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||||
|
func (u *UserUpsertBulk) UpdateSoraStorageQuotaBytes() *UserUpsertBulk {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.UpdateSoraStorageQuotaBytes()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||||
|
func (u *UserUpsertBulk) SetSoraStorageUsedBytes(v int64) *UserUpsertBulk {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.SetSoraStorageUsedBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageUsedBytes adds v to the "sora_storage_used_bytes" field.
|
||||||
|
func (u *UserUpsertBulk) AddSoraStorageUsedBytes(v int64) *UserUpsertBulk {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.AddSoraStorageUsedBytes(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateSoraStorageUsedBytes sets the "sora_storage_used_bytes" field to the value that was provided on create.
|
||||||
|
func (u *UserUpsertBulk) UpdateSoraStorageUsedBytes() *UserUpsertBulk {
|
||||||
|
return u.Update(func(s *UserUpsert) {
|
||||||
|
s.UpdateSoraStorageUsedBytes()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *UserUpsertBulk) Exec(ctx context.Context) error {
|
func (u *UserUpsertBulk) Exec(ctx context.Context) error {
|
||||||
if u.create.err != nil {
|
if u.create.err != nil {
|
||||||
|
|||||||
@@ -242,6 +242,48 @@ func (_u *UserUpdate) ClearTotpEnabledAt() *UserUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (_u *UserUpdate) SetSoraStorageQuotaBytes(v int64) *UserUpdate {
|
||||||
|
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||||
|
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||||
|
func (_u *UserUpdate) SetNillableSoraStorageQuotaBytes(v *int64) *UserUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraStorageQuotaBytes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||||
|
func (_u *UserUpdate) AddSoraStorageQuotaBytes(v int64) *UserUpdate {
|
||||||
|
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||||
|
func (_u *UserUpdate) SetSoraStorageUsedBytes(v int64) *UserUpdate {
|
||||||
|
_u.mutation.ResetSoraStorageUsedBytes()
|
||||||
|
_u.mutation.SetSoraStorageUsedBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageUsedBytes sets the "sora_storage_used_bytes" field if the given value is not nil.
|
||||||
|
func (_u *UserUpdate) SetNillableSoraStorageUsedBytes(v *int64) *UserUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraStorageUsedBytes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageUsedBytes adds value to the "sora_storage_used_bytes" field.
|
||||||
|
func (_u *UserUpdate) AddSoraStorageUsedBytes(v int64) *UserUpdate {
|
||||||
|
_u.mutation.AddSoraStorageUsedBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||||
func (_u *UserUpdate) AddAPIKeyIDs(ids ...int64) *UserUpdate {
|
func (_u *UserUpdate) AddAPIKeyIDs(ids ...int64) *UserUpdate {
|
||||||
_u.mutation.AddAPIKeyIDs(ids...)
|
_u.mutation.AddAPIKeyIDs(ids...)
|
||||||
@@ -709,6 +751,18 @@ func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if _u.mutation.TotpEnabledAtCleared() {
|
if _u.mutation.TotpEnabledAtCleared() {
|
||||||
_spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime)
|
_spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.SetField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.AddField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraStorageUsedBytes(); ok {
|
||||||
|
_spec.SetField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraStorageUsedBytes(); ok {
|
||||||
|
_spec.AddField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
if _u.mutation.APIKeysCleared() {
|
if _u.mutation.APIKeysCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.O2M,
|
Rel: sqlgraph.O2M,
|
||||||
@@ -1352,6 +1406,48 @@ func (_u *UserUpdateOne) ClearTotpEnabledAt() *UserUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||||
|
func (_u *UserUpdateOne) SetSoraStorageQuotaBytes(v int64) *UserUpdateOne {
|
||||||
|
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||||
|
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||||
|
func (_u *UserUpdateOne) SetNillableSoraStorageQuotaBytes(v *int64) *UserUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraStorageQuotaBytes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||||
|
func (_u *UserUpdateOne) AddSoraStorageQuotaBytes(v int64) *UserUpdateOne {
|
||||||
|
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||||
|
func (_u *UserUpdateOne) SetSoraStorageUsedBytes(v int64) *UserUpdateOne {
|
||||||
|
_u.mutation.ResetSoraStorageUsedBytes()
|
||||||
|
_u.mutation.SetSoraStorageUsedBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableSoraStorageUsedBytes sets the "sora_storage_used_bytes" field if the given value is not nil.
|
||||||
|
func (_u *UserUpdateOne) SetNillableSoraStorageUsedBytes(v *int64) *UserUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetSoraStorageUsedBytes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSoraStorageUsedBytes adds value to the "sora_storage_used_bytes" field.
|
||||||
|
func (_u *UserUpdateOne) AddSoraStorageUsedBytes(v int64) *UserUpdateOne {
|
||||||
|
_u.mutation.AddSoraStorageUsedBytes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||||
func (_u *UserUpdateOne) AddAPIKeyIDs(ids ...int64) *UserUpdateOne {
|
func (_u *UserUpdateOne) AddAPIKeyIDs(ids ...int64) *UserUpdateOne {
|
||||||
_u.mutation.AddAPIKeyIDs(ids...)
|
_u.mutation.AddAPIKeyIDs(ids...)
|
||||||
@@ -1849,6 +1945,18 @@ func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) {
|
|||||||
if _u.mutation.TotpEnabledAtCleared() {
|
if _u.mutation.TotpEnabledAtCleared() {
|
||||||
_spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime)
|
_spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.SetField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||||
|
_spec.AddField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.SoraStorageUsedBytes(); ok {
|
||||||
|
_spec.SetField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedSoraStorageUsedBytes(); ok {
|
||||||
|
_spec.AddField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||||
|
}
|
||||||
if _u.mutation.APIKeysCleared() {
|
if _u.mutation.APIKeysCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.O2M,
|
Rel: sqlgraph.O2M,
|
||||||
|
|||||||
@@ -1,10 +1,18 @@
|
|||||||
module github.com/Wei-Shaw/sub2api
|
module github.com/Wei-Shaw/sub2api
|
||||||
|
|
||||||
go 1.25.6
|
go 1.26.1
|
||||||
|
|
||||||
require (
|
require (
|
||||||
entgo.io/ent v0.14.5
|
entgo.io/ent v0.14.5
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.2
|
github.com/DATA-DOG/go-sqlmock v1.5.2
|
||||||
|
github.com/DouDOU-start/go-sora2api v1.1.0
|
||||||
|
github.com/alitto/pond/v2 v2.6.2
|
||||||
|
github.com/aws/aws-sdk-go-v2 v1.41.3
|
||||||
|
github.com/aws/aws-sdk-go-v2/config v1.32.10
|
||||||
|
github.com/aws/aws-sdk-go-v2/credentials v1.19.10
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.2
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0
|
||||||
|
github.com/coder/websocket v1.8.14
|
||||||
github.com/dgraph-io/ristretto v0.2.0
|
github.com/dgraph-io/ristretto v0.2.0
|
||||||
github.com/gin-gonic/gin v1.9.1
|
github.com/gin-gonic/gin v1.9.1
|
||||||
github.com/golang-jwt/jwt/v5 v5.2.2
|
github.com/golang-jwt/jwt/v5 v5.2.2
|
||||||
@@ -13,9 +21,10 @@ require (
|
|||||||
github.com/gorilla/websocket v1.5.3
|
github.com/gorilla/websocket v1.5.3
|
||||||
github.com/imroc/req/v3 v3.57.0
|
github.com/imroc/req/v3 v3.57.0
|
||||||
github.com/lib/pq v1.10.9
|
github.com/lib/pq v1.10.9
|
||||||
|
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||||
github.com/pquerna/otp v1.5.0
|
github.com/pquerna/otp v1.5.0
|
||||||
github.com/redis/go-redis/v9 v9.17.2
|
github.com/redis/go-redis/v9 v9.17.2
|
||||||
github.com/refraction-networking/utls v1.8.1
|
github.com/refraction-networking/utls v1.8.2
|
||||||
github.com/robfig/cron/v3 v3.0.1
|
github.com/robfig/cron/v3 v3.0.1
|
||||||
github.com/shirou/gopsutil/v4 v4.25.6
|
github.com/shirou/gopsutil/v4 v4.25.6
|
||||||
github.com/spf13/viper v1.18.2
|
github.com/spf13/viper v1.18.2
|
||||||
@@ -25,10 +34,12 @@ require (
|
|||||||
github.com/tidwall/gjson v1.18.0
|
github.com/tidwall/gjson v1.18.0
|
||||||
github.com/tidwall/sjson v1.2.5
|
github.com/tidwall/sjson v1.2.5
|
||||||
github.com/zeromicro/go-zero v1.9.4
|
github.com/zeromicro/go-zero v1.9.4
|
||||||
golang.org/x/crypto v0.47.0
|
go.uber.org/zap v1.24.0
|
||||||
|
golang.org/x/crypto v0.48.0
|
||||||
golang.org/x/net v0.49.0
|
golang.org/x/net v0.49.0
|
||||||
golang.org/x/sync v0.19.0
|
golang.org/x/sync v0.19.0
|
||||||
golang.org/x/term v0.39.0
|
golang.org/x/term v0.40.0
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
modernc.org/sqlite v1.44.3
|
modernc.org/sqlite v1.44.3
|
||||||
)
|
)
|
||||||
@@ -41,11 +52,32 @@ require (
|
|||||||
github.com/agext/levenshtein v1.2.3 // indirect
|
github.com/agext/levenshtein v1.2.3 // indirect
|
||||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.10 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sts v1.41.7 // indirect
|
||||||
|
github.com/aws/smithy-go v1.24.2 // indirect
|
||||||
|
github.com/bdandy/go-errors v1.2.2 // indirect
|
||||||
|
github.com/bdandy/go-socks4 v1.2.3 // indirect
|
||||||
github.com/bmatcuk/doublestar v1.3.4 // indirect
|
github.com/bmatcuk/doublestar v1.3.4 // indirect
|
||||||
|
github.com/bogdanfinn/fhttp v0.6.8 // indirect
|
||||||
|
github.com/bogdanfinn/quic-go-utls v1.0.9-utls // indirect
|
||||||
|
github.com/bogdanfinn/tls-client v1.14.0 // indirect
|
||||||
|
github.com/bogdanfinn/utls v1.7.7-barnius // indirect
|
||||||
|
github.com/bogdanfinn/websocket v1.5.5-barnius // indirect
|
||||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
|
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
|
||||||
github.com/bytedance/sonic v1.9.1 // indirect
|
github.com/bytedance/sonic v1.9.1 // indirect
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
|
||||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||||
github.com/containerd/errdefs v1.0.0 // indirect
|
github.com/containerd/errdefs v1.0.0 // indirect
|
||||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||||
@@ -119,6 +151,7 @@ require (
|
|||||||
github.com/spf13/cast v1.6.0 // indirect
|
github.com/spf13/cast v1.6.0 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
github.com/subosito/gotenv v1.6.0 // indirect
|
github.com/subosito/gotenv v1.6.0 // indirect
|
||||||
|
github.com/tam7t/hpkp v0.0.0-20160821193359-2b70b4024ed5 // indirect
|
||||||
github.com/testcontainers/testcontainers-go v0.40.0 // indirect
|
github.com/testcontainers/testcontainers-go v0.40.0 // indirect
|
||||||
github.com/tidwall/match v1.1.1 // indirect
|
github.com/tidwall/match v1.1.1 // indirect
|
||||||
github.com/tidwall/pretty v1.2.0 // indirect
|
github.com/tidwall/pretty v1.2.0 // indirect
|
||||||
@@ -140,9 +173,9 @@ require (
|
|||||||
go.uber.org/multierr v1.9.0 // indirect
|
go.uber.org/multierr v1.9.0 // indirect
|
||||||
golang.org/x/arch v0.3.0 // indirect
|
golang.org/x/arch v0.3.0 // indirect
|
||||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
|
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
|
||||||
golang.org/x/mod v0.31.0 // indirect
|
golang.org/x/mod v0.32.0 // indirect
|
||||||
golang.org/x/sys v0.40.0 // indirect
|
golang.org/x/sys v0.41.0 // indirect
|
||||||
golang.org/x/text v0.33.0 // indirect
|
golang.org/x/text v0.34.0 // indirect
|
||||||
google.golang.org/grpc v1.75.1 // indirect
|
google.golang.org/grpc v1.75.1 // indirect
|
||||||
google.golang.org/protobuf v1.36.10 // indirect
|
google.golang.org/protobuf v1.36.10 // indirect
|
||||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||||
|
|||||||
112
backend/go.sum
112
backend/go.sum
@@ -10,16 +10,74 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl
|
|||||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||||
|
github.com/DouDOU-start/go-sora2api v1.1.0 h1:PxWiukK77StiHxEngOFwT1rKUn9oTAJJTl07wQUXwiU=
|
||||||
|
github.com/DouDOU-start/go-sora2api v1.1.0/go.mod h1:dcwpethoKfAsMWskDD9iGgc/3yox2tkthPLSMVGnhkE=
|
||||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||||
|
github.com/alitto/pond/v2 v2.6.2 h1:Sphe40g0ILeM1pA2c2K+Th0DGU+pt0A/Kprr+WB24Pw=
|
||||||
|
github.com/alitto/pond/v2 v2.6.2/go.mod h1:xkjYEgQ05RSpWdfSd1nM3OVv7TBhLdy7rMp3+2Nq+yE=
|
||||||
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
|
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
|
||||||
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
||||||
|
github.com/aws/aws-sdk-go-v2 v1.41.3 h1:4kQ/fa22KjDt13QCy1+bYADvdgcxpfH18f0zP542kZA=
|
||||||
|
github.com/aws/aws-sdk-go-v2 v1.41.3/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
|
||||||
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5 h1:zWFmPmgw4sveAYi1mRqG+E/g0461cJ5M4bJ8/nc6d3Q=
|
||||||
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5/go.mod h1:nVUlMLVV8ycXSb7mSkcNu9e3v/1TJq2RTlrPwhYWr5c=
|
||||||
|
github.com/aws/aws-sdk-go-v2/config v1.32.10 h1:9DMthfO6XWZYLfzZglAgW5Fyou2nRI5CuV44sTedKBI=
|
||||||
|
github.com/aws/aws-sdk-go-v2/config v1.32.10/go.mod h1:2rUIOnA2JaiqYmSKYmRJlcMWy6qTj1vuRFscppSBMcw=
|
||||||
|
github.com/aws/aws-sdk-go-v2/credentials v1.19.10 h1:EEhmEUFCE1Yhl7vDhNOI5OCL/iKMdkkYFTRpZXNw7m8=
|
||||||
|
github.com/aws/aws-sdk-go-v2/credentials v1.19.10/go.mod h1:RnnlFCAlxQCkN2Q379B67USkBMu1PipEEiibzYN5UTE=
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 h1:Ii4s+Sq3yDfaMLpjrJsqD6SmG/Wq/P5L/hw2qa78UAY=
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18/go.mod h1:6x81qnY++ovptLE6nWQeWrpXxbnlIex+4H4eYYGcqfc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 h1:F43zk1vemYIqPAwhjTjYIz0irU2EY7sOb/F5eJ3HuyM=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18/go.mod h1:w1jdlZXrGKaJcNoL+Nnrj+k5wlpGXqnNrKoP22HvAug=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 h1:xCeWVjj0ki0l3nruoyP2slHsGArMxeiiaoPN5QZH6YQ=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18/go.mod h1:r/eLGuGCBw6l36ZRWiw6PaZwPXb6YOj+i/7MizNl5/k=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18 h1:eZioDaZGJ0tMM4gzmkNIO2aAoQd+je7Ug7TkvAzlmkU=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18/go.mod h1:CCXwUKAJdoWr6/NcxZ+zsiPr6oH/Q5aTooRGYieAyj4=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 h1:CeY9LUdur+Dxoeldqoun6y4WtJ3RQtzk0JMP2gfUay0=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5/go.mod h1:AZLZf2fMaahW5s/wMRciu1sYbdsikT/UHwbUjOdEVTc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.10 h1:fJvQ5mIBVfKtiyx0AHY6HeWcRX5LGANLpq8SVR+Uazs=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.10/go.mod h1:Kzm5e6OmNH8VMkgK9t+ry5jEih4Y8whqs+1hrkxim1I=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 h1:LTRCYFlnnKFlKsyIQxKhJuDuA3ZkrDQMRYm6rXiHlLY=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18/go.mod h1:XhwkgGG6bHSd00nO/mexWTcTjgd6PjuvWQMqSn2UaEk=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18 h1:/A/xDuZAVD2BpsS2fftFRo/NoEKQJ8YTnJDEHBy2Gtg=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18/go.mod h1:hWe9b4f+djUQGmyiGEeOnZv69dtMSgpDRIvNMvuvzvY=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.2 h1:M1A9AjcFwlxTLuf0Faj88L8Iqw0n/AJHjpZTQzMMsSc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.2/go.mod h1:KsdTV6Q9WKUZm2mNJnUFmIoXfZux91M3sr/a4REX8e0=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 h1:MzORe+J94I+hYu2a6XmV5yC9huoTv8NRcCrUNedDypQ=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/signin v1.0.6/go.mod h1:hXzcHLARD7GeWnifd8j9RWqtfIgxj4/cAtIVIK7hg8g=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 h1:7oGD8KPfBOJGXiCoRKrrrQkbvCp8N++u36hrLMPey6o=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sso v1.30.11/go.mod h1:0DO9B5EUJQlIDif+XJRWCljZRKsAFKh3gpFz7UnDtOo=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 h1:edCcNp9eGIUDUCrzoCu1jWAXLGFIizeqkdkKgRlJwWc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15/go.mod h1:lyRQKED9xWfgkYC/wmmYfv7iVIM68Z5OQ88ZdcV1QbU=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sts v1.41.7 h1:NITQpgo9A5NrDZ57uOWj+abvXSb83BbyggcUBVksN7c=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sts v1.41.7/go.mod h1:sks5UWBhEuWYDPdwlnRFn1w7xWdH29Jcpe+/PJQefEs=
|
||||||
|
github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=
|
||||||
|
github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=
|
||||||
|
github.com/bdandy/go-errors v1.2.2 h1:WdFv/oukjTJCLa79UfkGmwX7ZxONAihKu4V0mLIs11Q=
|
||||||
|
github.com/bdandy/go-errors v1.2.2/go.mod h1:NkYHl4Fey9oRRdbB1CoC6e84tuqQHiqrOcZpqFEkBxM=
|
||||||
|
github.com/bdandy/go-socks4 v1.2.3 h1:Q6Y2heY1GRjCtHbmlKfnwrKVU/k81LS8mRGLRlmDlic=
|
||||||
|
github.com/bdandy/go-socks4 v1.2.3/go.mod h1:98kiVFgpdogR8aIGLWLvjDVZ8XcKPsSI/ypGrO+bqHI=
|
||||||
|
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||||
|
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||||
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
|
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
|
||||||
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
|
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
|
||||||
|
github.com/bogdanfinn/fhttp v0.6.8 h1:LiQyHOY3i0QoxxNB7nq27/nGNNbtPj0fuBPozhR7Ws4=
|
||||||
|
github.com/bogdanfinn/fhttp v0.6.8/go.mod h1:A+EKDzMx2hb4IUbMx4TlkoHnaJEiLl8r/1Ss1Y+5e5M=
|
||||||
|
github.com/bogdanfinn/quic-go-utls v1.0.9-utls h1:tV6eDEiRbRCcepALSzxR94JUVD3N3ACIiRLgyc2Ep8s=
|
||||||
|
github.com/bogdanfinn/quic-go-utls v1.0.9-utls/go.mod h1:aHph9B9H9yPOt5xnhWKSOum27DJAqpiHzwX+gjvaXcg=
|
||||||
|
github.com/bogdanfinn/tls-client v1.14.0 h1:vyk7Cn4BIvLAGVuMfb0tP22OqogfO1lYamquQNEZU1A=
|
||||||
|
github.com/bogdanfinn/tls-client v1.14.0/go.mod h1:LsU6mXVn8MOFDwTkyRfI7V1BZM1p0wf2ZfZsICW/1fM=
|
||||||
|
github.com/bogdanfinn/utls v1.7.7-barnius h1:OuJ497cc7F3yKNVHRsYPQdGggmk5x6+V5ZlrCR7fOLU=
|
||||||
|
github.com/bogdanfinn/utls v1.7.7-barnius/go.mod h1:aAK1VZQlpKZClF1WEQeq6kyclbkPq4hz6xTbB5xSlmg=
|
||||||
|
github.com/bogdanfinn/websocket v1.5.5-barnius h1:bY+qnxpai1qe7Jmjx+Sds/cmOSpuuLoR8x61rWltjOI=
|
||||||
|
github.com/bogdanfinn/websocket v1.5.5-barnius/go.mod h1:gvvEw6pTKHb7yOiFvIfAFTStQWyrm25BMVCTj5wRSsI=
|
||||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
|
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
|
||||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||||
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
||||||
@@ -36,6 +94,8 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
|
|||||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
||||||
|
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
|
||||||
|
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
|
||||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||||
@@ -170,8 +230,6 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
|
|||||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
|
||||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
|
||||||
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
|
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
|
||||||
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||||
github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI=
|
github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI=
|
||||||
@@ -205,12 +263,12 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
|||||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
|
||||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
|
||||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||||
|
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||||
|
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
@@ -230,12 +288,10 @@ github.com/quic-go/quic-go v0.57.1 h1:25KAAR9QR8KZrCZRThWMKVAwGoiHIrNbT72ULHTuI1
|
|||||||
github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s=
|
github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s=
|
||||||
github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4ViluI=
|
github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4ViluI=
|
||||||
github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=
|
github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=
|
||||||
github.com/refraction-networking/utls v1.8.1 h1:yNY1kapmQU8JeM1sSw2H2asfTIwWxIkrMJI0pRUOCAo=
|
github.com/refraction-networking/utls v1.8.2 h1:j4Q1gJj0xngdeH+Ox/qND11aEfhpgoEvV+S9iJ2IdQo=
|
||||||
github.com/refraction-networking/utls v1.8.1/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM=
|
github.com/refraction-networking/utls v1.8.2/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
|
||||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||||
@@ -258,8 +314,6 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
|||||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||||
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||||
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
|
||||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
|
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
|
||||||
@@ -281,6 +335,8 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu
|
|||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||||
|
github.com/tam7t/hpkp v0.0.0-20160821193359-2b70b4024ed5 h1:YqAladjX7xpA6BM04leXMWAEjS0mTZ5kUU9KRBriQJc=
|
||||||
|
github.com/tam7t/hpkp v0.0.0-20160821193359-2b70b4024ed5/go.mod h1:2JjD2zLQYH5HO74y5+aE3remJQvl6q4Sn6aWA2wD1Ng=
|
||||||
github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU=
|
github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU=
|
||||||
github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY=
|
github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY=
|
||||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0 h1:s2bIayFXlbDFexo96y+htn7FzuhpXLYJNnIuglNKqOk=
|
github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0 h1:s2bIayFXlbDFexo96y+htn7FzuhpXLYJNnIuglNKqOk=
|
||||||
@@ -336,25 +392,32 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
|||||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||||
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
|
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
|
||||||
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
|
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
|
||||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||||
|
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||||
|
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
|
||||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||||
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
|
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
|
||||||
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
|
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
|
||||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
|
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
|
||||||
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
|
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
||||||
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
|
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||||
|
golang.org/x/net v0.0.0-20211104170005-ce137452f963/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
@@ -362,16 +425,19 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
||||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
||||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||||
|
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||||
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
|
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||||
|
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
|
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
|
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
|
||||||
@@ -387,6 +453,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
|
|||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -9,5 +9,5 @@ var ProviderSet = wire.NewSet(
|
|||||||
|
|
||||||
// ProvideConfig 提供应用配置
|
// ProvideConfig 提供应用配置
|
||||||
func ProvideConfig() (*Config, error) {
|
func ProvideConfig() (*Config, error) {
|
||||||
return Load()
|
return LoadForBootstrap()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,6 +13,11 @@ const (
|
|||||||
AnnouncementStatusArchived = "archived"
|
AnnouncementStatusArchived = "archived"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
AnnouncementNotifyModeSilent = "silent"
|
||||||
|
AnnouncementNotifyModePopup = "popup"
|
||||||
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
AnnouncementConditionTypeSubscription = "subscription"
|
AnnouncementConditionTypeSubscription = "subscription"
|
||||||
AnnouncementConditionTypeBalance = "balance"
|
AnnouncementConditionTypeBalance = "balance"
|
||||||
@@ -195,17 +200,18 @@ func (c AnnouncementCondition) validate() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Announcement struct {
|
type Announcement struct {
|
||||||
ID int64
|
ID int64
|
||||||
Title string
|
Title string
|
||||||
Content string
|
Content string
|
||||||
Status string
|
Status string
|
||||||
Targeting AnnouncementTargeting
|
NotifyMode string
|
||||||
StartsAt *time.Time
|
Targeting AnnouncementTargeting
|
||||||
EndsAt *time.Time
|
StartsAt *time.Time
|
||||||
CreatedBy *int64
|
EndsAt *time.Time
|
||||||
UpdatedBy *int64
|
CreatedBy *int64
|
||||||
CreatedAt time.Time
|
UpdatedBy *int64
|
||||||
UpdatedAt time.Time
|
CreatedAt time.Time
|
||||||
|
UpdatedAt time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Announcement) IsActiveAt(now time.Time) bool {
|
func (a *Announcement) IsActiveAt(now time.Time) bool {
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ const (
|
|||||||
PlatformOpenAI = "openai"
|
PlatformOpenAI = "openai"
|
||||||
PlatformGemini = "gemini"
|
PlatformGemini = "gemini"
|
||||||
PlatformAntigravity = "antigravity"
|
PlatformAntigravity = "antigravity"
|
||||||
|
PlatformSora = "sora"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Account type constants
|
// Account type constants
|
||||||
@@ -30,6 +31,7 @@ const (
|
|||||||
AccountTypeSetupToken = "setup-token" // Setup Token类型账号(inference only scope)
|
AccountTypeSetupToken = "setup-token" // Setup Token类型账号(inference only scope)
|
||||||
AccountTypeAPIKey = "apikey" // API Key类型账号
|
AccountTypeAPIKey = "apikey" // API Key类型账号
|
||||||
AccountTypeUpstream = "upstream" // 上游透传类型账号(通过 Base URL + API Key 连接上游)
|
AccountTypeUpstream = "upstream" // 上游透传类型账号(通过 Base URL + API Key 连接上游)
|
||||||
|
AccountTypeBedrock = "bedrock" // AWS Bedrock 类型账号(通过 SigV4 签名或 API Key 连接 Bedrock,由 credentials.auth_mode 区分)
|
||||||
)
|
)
|
||||||
|
|
||||||
// Redeem type constants
|
// Redeem type constants
|
||||||
@@ -64,3 +66,75 @@ const (
|
|||||||
SubscriptionStatusExpired = "expired"
|
SubscriptionStatusExpired = "expired"
|
||||||
SubscriptionStatusSuspended = "suspended"
|
SubscriptionStatusSuspended = "suspended"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// DefaultAntigravityModelMapping 是 Antigravity 平台的默认模型映射
|
||||||
|
// 当账号未配置 model_mapping 时使用此默认值
|
||||||
|
// 与前端 useModelWhitelist.ts 中的 antigravityDefaultMappings 保持一致
|
||||||
|
var DefaultAntigravityModelMapping = map[string]string{
|
||||||
|
// Claude 白名单
|
||||||
|
"claude-opus-4-6-thinking": "claude-opus-4-6-thinking", // 官方模型
|
||||||
|
"claude-opus-4-6": "claude-opus-4-6-thinking", // 简称映射
|
||||||
|
"claude-opus-4-5-thinking": "claude-opus-4-6-thinking", // 迁移旧模型
|
||||||
|
"claude-sonnet-4-6": "claude-sonnet-4-6",
|
||||||
|
"claude-sonnet-4-5": "claude-sonnet-4-5",
|
||||||
|
"claude-sonnet-4-5-thinking": "claude-sonnet-4-5-thinking",
|
||||||
|
// Claude 详细版本 ID 映射
|
||||||
|
"claude-opus-4-5-20251101": "claude-opus-4-6-thinking", // 迁移旧模型
|
||||||
|
"claude-sonnet-4-5-20250929": "claude-sonnet-4-5",
|
||||||
|
// Claude Haiku → Sonnet(无 Haiku 支持)
|
||||||
|
"claude-haiku-4-5": "claude-sonnet-4-6",
|
||||||
|
"claude-haiku-4-5-20251001": "claude-sonnet-4-6",
|
||||||
|
// Gemini 2.5 白名单
|
||||||
|
"gemini-2.5-flash": "gemini-2.5-flash",
|
||||||
|
"gemini-2.5-flash-image": "gemini-2.5-flash-image",
|
||||||
|
"gemini-2.5-flash-image-preview": "gemini-2.5-flash-image",
|
||||||
|
"gemini-2.5-flash-lite": "gemini-2.5-flash-lite",
|
||||||
|
"gemini-2.5-flash-thinking": "gemini-2.5-flash-thinking",
|
||||||
|
"gemini-2.5-pro": "gemini-2.5-pro",
|
||||||
|
// Gemini 3 白名单
|
||||||
|
"gemini-3-flash": "gemini-3-flash",
|
||||||
|
"gemini-3-pro-high": "gemini-3-pro-high",
|
||||||
|
"gemini-3-pro-low": "gemini-3-pro-low",
|
||||||
|
// Gemini 3 preview 映射
|
||||||
|
"gemini-3-flash-preview": "gemini-3-flash",
|
||||||
|
"gemini-3-pro-preview": "gemini-3-pro-high",
|
||||||
|
// Gemini 3.1 白名单
|
||||||
|
"gemini-3.1-pro-high": "gemini-3.1-pro-high",
|
||||||
|
"gemini-3.1-pro-low": "gemini-3.1-pro-low",
|
||||||
|
// Gemini 3.1 preview 映射
|
||||||
|
"gemini-3.1-pro-preview": "gemini-3.1-pro-high",
|
||||||
|
// Gemini 3.1 image 白名单
|
||||||
|
"gemini-3.1-flash-image": "gemini-3.1-flash-image",
|
||||||
|
// Gemini 3.1 image preview 映射
|
||||||
|
"gemini-3.1-flash-image-preview": "gemini-3.1-flash-image",
|
||||||
|
// Gemini 3 image 兼容映射(向 3.1 image 迁移)
|
||||||
|
"gemini-3-pro-image": "gemini-3.1-flash-image",
|
||||||
|
"gemini-3-pro-image-preview": "gemini-3.1-flash-image",
|
||||||
|
// 其他官方模型
|
||||||
|
"gpt-oss-120b-medium": "gpt-oss-120b-medium",
|
||||||
|
"tab_flash_lite_preview": "tab_flash_lite_preview",
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultBedrockModelMapping 是 AWS Bedrock 平台的默认模型映射
|
||||||
|
// 将 Anthropic 标准模型名映射到 Bedrock 模型 ID
|
||||||
|
// 注意:此处的 "us." 前缀仅为默认值,ResolveBedrockModelID 会根据账号配置的
|
||||||
|
// aws_region 自动调整为匹配的区域前缀(如 eu.、apac.、jp. 等)
|
||||||
|
var DefaultBedrockModelMapping = map[string]string{
|
||||||
|
// Claude Opus
|
||||||
|
"claude-opus-4-6-thinking": "us.anthropic.claude-opus-4-6-v1",
|
||||||
|
"claude-opus-4-6": "us.anthropic.claude-opus-4-6-v1",
|
||||||
|
"claude-opus-4-5-thinking": "us.anthropic.claude-opus-4-5-20251101-v1:0",
|
||||||
|
"claude-opus-4-5-20251101": "us.anthropic.claude-opus-4-5-20251101-v1:0",
|
||||||
|
"claude-opus-4-1": "us.anthropic.claude-opus-4-1-20250805-v1:0",
|
||||||
|
"claude-opus-4-20250514": "us.anthropic.claude-opus-4-20250514-v1:0",
|
||||||
|
// Claude Sonnet
|
||||||
|
"claude-sonnet-4-6-thinking": "us.anthropic.claude-sonnet-4-6",
|
||||||
|
"claude-sonnet-4-6": "us.anthropic.claude-sonnet-4-6",
|
||||||
|
"claude-sonnet-4-5": "us.anthropic.claude-sonnet-4-5-20250929-v1:0",
|
||||||
|
"claude-sonnet-4-5-thinking": "us.anthropic.claude-sonnet-4-5-20250929-v1:0",
|
||||||
|
"claude-sonnet-4-5-20250929": "us.anthropic.claude-sonnet-4-5-20250929-v1:0",
|
||||||
|
"claude-sonnet-4-20250514": "us.anthropic.claude-sonnet-4-20250514-v1:0",
|
||||||
|
// Claude Haiku
|
||||||
|
"claude-haiku-4-5": "us.anthropic.claude-haiku-4-5-20251001-v1:0",
|
||||||
|
"claude-haiku-4-5-20251001": "us.anthropic.claude-haiku-4-5-20251001-v1:0",
|
||||||
|
}
|
||||||
|
|||||||
26
backend/internal/domain/constants_test.go
Normal file
26
backend/internal/domain/constants_test.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
package domain
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestDefaultAntigravityModelMapping_ImageCompatibilityAliases(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
cases := map[string]string{
|
||||||
|
"gemini-2.5-flash-image": "gemini-2.5-flash-image",
|
||||||
|
"gemini-2.5-flash-image-preview": "gemini-2.5-flash-image",
|
||||||
|
"gemini-3.1-flash-image": "gemini-3.1-flash-image",
|
||||||
|
"gemini-3.1-flash-image-preview": "gemini-3.1-flash-image",
|
||||||
|
"gemini-3-pro-image": "gemini-3.1-flash-image",
|
||||||
|
"gemini-3-pro-image-preview": "gemini-3.1-flash-image",
|
||||||
|
}
|
||||||
|
|
||||||
|
for from, want := range cases {
|
||||||
|
got, ok := DefaultAntigravityModelMapping[from]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected mapping for %q to exist", from)
|
||||||
|
}
|
||||||
|
if got != want {
|
||||||
|
t.Fatalf("unexpected mapping for %q: got %q want %q", from, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
606
backend/internal/handler/admin/account_data.go
Normal file
606
backend/internal/handler/admin/account_data.go
Normal file
@@ -0,0 +1,606 @@
|
|||||||
|
package admin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"log/slog"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/openai"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
dataType = "sub2api-data"
|
||||||
|
legacyDataType = "sub2api-bundle"
|
||||||
|
dataVersion = 1
|
||||||
|
dataPageCap = 1000
|
||||||
|
)
|
||||||
|
|
||||||
|
type DataPayload struct {
|
||||||
|
Type string `json:"type,omitempty"`
|
||||||
|
Version int `json:"version,omitempty"`
|
||||||
|
ExportedAt string `json:"exported_at"`
|
||||||
|
Proxies []DataProxy `json:"proxies"`
|
||||||
|
Accounts []DataAccount `json:"accounts"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DataProxy struct {
|
||||||
|
ProxyKey string `json:"proxy_key"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Protocol string `json:"protocol"`
|
||||||
|
Host string `json:"host"`
|
||||||
|
Port int `json:"port"`
|
||||||
|
Username string `json:"username,omitempty"`
|
||||||
|
Password string `json:"password,omitempty"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DataAccount struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Notes *string `json:"notes,omitempty"`
|
||||||
|
Platform string `json:"platform"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Credentials map[string]any `json:"credentials"`
|
||||||
|
Extra map[string]any `json:"extra,omitempty"`
|
||||||
|
ProxyKey *string `json:"proxy_key,omitempty"`
|
||||||
|
Concurrency int `json:"concurrency"`
|
||||||
|
Priority int `json:"priority"`
|
||||||
|
RateMultiplier *float64 `json:"rate_multiplier,omitempty"`
|
||||||
|
ExpiresAt *int64 `json:"expires_at,omitempty"`
|
||||||
|
AutoPauseOnExpired *bool `json:"auto_pause_on_expired,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DataImportRequest struct {
|
||||||
|
Data DataPayload `json:"data"`
|
||||||
|
SkipDefaultGroupBind *bool `json:"skip_default_group_bind"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DataImportResult struct {
|
||||||
|
ProxyCreated int `json:"proxy_created"`
|
||||||
|
ProxyReused int `json:"proxy_reused"`
|
||||||
|
ProxyFailed int `json:"proxy_failed"`
|
||||||
|
AccountCreated int `json:"account_created"`
|
||||||
|
AccountFailed int `json:"account_failed"`
|
||||||
|
Errors []DataImportError `json:"errors,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DataImportError describes a single failed item during import.
type DataImportError struct {
	// Kind is "proxy" or "account" (see importData).
	Kind string `json:"kind"`

	// Name is the failed item's name, when available.
	Name string `json:"name,omitempty"`

	// ProxyKey is the related proxy key, when relevant.
	ProxyKey string `json:"proxy_key,omitempty"`

	// Message is the human-readable failure reason.
	Message string `json:"message"`
}
|
||||||
|
|
||||||
|
// buildProxyKey derives a stable identity key for a proxy from its
// connection parameters. String fields are trimmed of surrounding
// whitespace and joined with "|" so that equivalent proxies map to the
// same key on both export and import.
func buildProxyKey(protocol, host string, port int, username, password string) string {
	trim := strings.TrimSpace
	return fmt.Sprintf("%s|%s|%d|%s|%s",
		trim(protocol), trim(host), port, trim(username), trim(password))
}
|
||||||
|
|
||||||
|
func (h *AccountHandler) ExportData(c *gin.Context) {
|
||||||
|
ctx := c.Request.Context()
|
||||||
|
|
||||||
|
selectedIDs, err := parseAccountIDs(c)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
accounts, err := h.resolveExportAccounts(ctx, selectedIDs, c)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
includeProxies, err := parseIncludeProxies(c)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var proxies []service.Proxy
|
||||||
|
if includeProxies {
|
||||||
|
proxies, err = h.resolveExportProxies(ctx, accounts)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
proxies = []service.Proxy{}
|
||||||
|
}
|
||||||
|
|
||||||
|
proxyKeyByID := make(map[int64]string, len(proxies))
|
||||||
|
dataProxies := make([]DataProxy, 0, len(proxies))
|
||||||
|
for i := range proxies {
|
||||||
|
p := proxies[i]
|
||||||
|
key := buildProxyKey(p.Protocol, p.Host, p.Port, p.Username, p.Password)
|
||||||
|
proxyKeyByID[p.ID] = key
|
||||||
|
dataProxies = append(dataProxies, DataProxy{
|
||||||
|
ProxyKey: key,
|
||||||
|
Name: p.Name,
|
||||||
|
Protocol: p.Protocol,
|
||||||
|
Host: p.Host,
|
||||||
|
Port: p.Port,
|
||||||
|
Username: p.Username,
|
||||||
|
Password: p.Password,
|
||||||
|
Status: p.Status,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
dataAccounts := make([]DataAccount, 0, len(accounts))
|
||||||
|
for i := range accounts {
|
||||||
|
acc := accounts[i]
|
||||||
|
var proxyKey *string
|
||||||
|
if acc.ProxyID != nil {
|
||||||
|
if key, ok := proxyKeyByID[*acc.ProxyID]; ok {
|
||||||
|
proxyKey = &key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var expiresAt *int64
|
||||||
|
if acc.ExpiresAt != nil {
|
||||||
|
v := acc.ExpiresAt.Unix()
|
||||||
|
expiresAt = &v
|
||||||
|
}
|
||||||
|
dataAccounts = append(dataAccounts, DataAccount{
|
||||||
|
Name: acc.Name,
|
||||||
|
Notes: acc.Notes,
|
||||||
|
Platform: acc.Platform,
|
||||||
|
Type: acc.Type,
|
||||||
|
Credentials: acc.Credentials,
|
||||||
|
Extra: acc.Extra,
|
||||||
|
ProxyKey: proxyKey,
|
||||||
|
Concurrency: acc.Concurrency,
|
||||||
|
Priority: acc.Priority,
|
||||||
|
RateMultiplier: acc.RateMultiplier,
|
||||||
|
ExpiresAt: expiresAt,
|
||||||
|
AutoPauseOnExpired: &acc.AutoPauseOnExpired,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
payload := DataPayload{
|
||||||
|
ExportedAt: time.Now().UTC().Format(time.RFC3339),
|
||||||
|
Proxies: dataProxies,
|
||||||
|
Accounts: dataAccounts,
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *AccountHandler) ImportData(c *gin.Context) {
|
||||||
|
var req DataImportRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := validateDataHeader(req.Data); err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
executeAdminIdempotentJSON(c, "admin.accounts.import_data", req, service.DefaultWriteIdempotencyTTL(), func(ctx context.Context) (any, error) {
|
||||||
|
return h.importData(ctx, req)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// importData applies a validated import payload in two phases: proxies
// first (deduplicated against existing proxies by their proxy key), then
// accounts (resolving each proxy_key reference to a concrete proxy ID).
// Per-item failures are recorded in the result and do not stop the import;
// only infrastructure errors (e.g. listing existing proxies) abort early.
func (h *AccountHandler) importData(ctx context.Context, req DataImportRequest) (DataImportResult, error) {
	// Default-group binding is skipped unless the caller explicitly opts in.
	skipDefaultGroupBind := true
	if req.SkipDefaultGroupBind != nil {
		skipDefaultGroupBind = *req.SkipDefaultGroupBind
	}

	dataPayload := req.Data
	result := DataImportResult{}

	existingProxies, err := h.listAllProxies(ctx)
	if err != nil {
		return result, err
	}

	// Index existing proxies by their connection key for deduplication.
	proxyKeyToID := make(map[string]int64, len(existingProxies))
	for i := range existingProxies {
		p := existingProxies[i]
		key := buildProxyKey(p.Protocol, p.Host, p.Port, p.Username, p.Password)
		proxyKeyToID[key] = p.ID
	}

	// Phase 1: create or reuse proxies from the payload.
	for i := range dataPayload.Proxies {
		item := dataPayload.Proxies[i]
		key := item.ProxyKey
		if key == "" {
			// Payloads may omit proxy_key; derive it from the fields.
			key = buildProxyKey(item.Protocol, item.Host, item.Port, item.Username, item.Password)
		}
		if err := validateDataProxy(item); err != nil {
			result.ProxyFailed++
			result.Errors = append(result.Errors, DataImportError{
				Kind:     "proxy",
				Name:     item.Name,
				ProxyKey: key,
				Message:  err.Error(),
			})
			continue
		}
		normalizedStatus := normalizeProxyStatus(item.Status)
		if existingID, ok := proxyKeyToID[key]; ok {
			// NOTE(review): re-storing the same value is a no-op.
			proxyKeyToID[key] = existingID
			result.ProxyReused++
			// Best-effort status sync for the reused proxy; errors ignored.
			if normalizedStatus != "" {
				if proxy, getErr := h.adminService.GetProxy(ctx, existingID); getErr == nil && proxy != nil && proxy.Status != normalizedStatus {
					_, _ = h.adminService.UpdateProxy(ctx, existingID, &service.UpdateProxyInput{
						Status: normalizedStatus,
					})
				}
			}
			continue
		}

		created, createErr := h.adminService.CreateProxy(ctx, &service.CreateProxyInput{
			Name:     defaultProxyName(item.Name),
			Protocol: item.Protocol,
			Host:     item.Host,
			Port:     item.Port,
			Username: item.Username,
			Password: item.Password,
		})
		if createErr != nil {
			result.ProxyFailed++
			result.Errors = append(result.Errors, DataImportError{
				Kind:     "proxy",
				Name:     item.Name,
				ProxyKey: key,
				Message:  createErr.Error(),
			})
			continue
		}
		proxyKeyToID[key] = created.ID
		result.ProxyCreated++

		// Best-effort status sync for the new proxy; errors ignored.
		if normalizedStatus != "" && normalizedStatus != created.Status {
			_, _ = h.adminService.UpdateProxy(ctx, created.ID, &service.UpdateProxyInput{
				Status: normalizedStatus,
			})
		}
	}

	// Phase 2: create accounts, resolving proxy_key references against the
	// map built (and extended) in phase 1.
	for i := range dataPayload.Accounts {
		item := dataPayload.Accounts[i]
		if err := validateDataAccount(item); err != nil {
			result.AccountFailed++
			result.Errors = append(result.Errors, DataImportError{
				Kind:    "account",
				Name:    item.Name,
				Message: err.Error(),
			})
			continue
		}

		var proxyID *int64
		if item.ProxyKey != nil && *item.ProxyKey != "" {
			if id, ok := proxyKeyToID[*item.ProxyKey]; ok {
				proxyID = &id
			} else {
				// Referenced proxy neither existed nor was created above.
				result.AccountFailed++
				result.Errors = append(result.Errors, DataImportError{
					Kind:     "account",
					Name:     item.Name,
					ProxyKey: *item.ProxyKey,
					Message:  "proxy_key not found",
				})
				continue
			}
		}

		// Best-effort fill of user-info credential fields from id_token.
		enrichCredentialsFromIDToken(&item)

		accountInput := &service.CreateAccountInput{
			Name:                 item.Name,
			Notes:                item.Notes,
			Platform:             item.Platform,
			Type:                 item.Type,
			Credentials:          item.Credentials,
			Extra:                item.Extra,
			ProxyID:              proxyID,
			Concurrency:          item.Concurrency,
			Priority:             item.Priority,
			RateMultiplier:       item.RateMultiplier,
			GroupIDs:             nil,
			ExpiresAt:            item.ExpiresAt,
			AutoPauseOnExpired:   item.AutoPauseOnExpired,
			SkipDefaultGroupBind: skipDefaultGroupBind,
		}

		if _, err := h.adminService.CreateAccount(ctx, accountInput); err != nil {
			result.AccountFailed++
			result.Errors = append(result.Errors, DataImportError{
				Kind:    "account",
				Name:    item.Name,
				Message: err.Error(),
			})
			continue
		}
		result.AccountCreated++
	}

	return result, nil
}
|
||||||
|
|
||||||
|
func (h *AccountHandler) listAllProxies(ctx context.Context) ([]service.Proxy, error) {
|
||||||
|
page := 1
|
||||||
|
pageSize := dataPageCap
|
||||||
|
var out []service.Proxy
|
||||||
|
for {
|
||||||
|
items, total, err := h.adminService.ListProxies(ctx, page, pageSize, "", "", "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out = append(out, items...)
|
||||||
|
if len(out) >= int(total) || len(items) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
page++
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *AccountHandler) listAccountsFiltered(ctx context.Context, platform, accountType, status, search string) ([]service.Account, error) {
|
||||||
|
page := 1
|
||||||
|
pageSize := dataPageCap
|
||||||
|
var out []service.Account
|
||||||
|
for {
|
||||||
|
items, total, err := h.adminService.ListAccounts(ctx, page, pageSize, platform, accountType, status, search, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out = append(out, items...)
|
||||||
|
if len(out) >= int(total) || len(items) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
page++
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *AccountHandler) resolveExportAccounts(ctx context.Context, ids []int64, c *gin.Context) ([]service.Account, error) {
|
||||||
|
if len(ids) > 0 {
|
||||||
|
accounts, err := h.adminService.GetAccountsByIDs(ctx, ids)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out := make([]service.Account, 0, len(accounts))
|
||||||
|
for _, acc := range accounts {
|
||||||
|
if acc == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, *acc)
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
platform := c.Query("platform")
|
||||||
|
accountType := c.Query("type")
|
||||||
|
status := c.Query("status")
|
||||||
|
search := strings.TrimSpace(c.Query("search"))
|
||||||
|
if len(search) > 100 {
|
||||||
|
search = search[:100]
|
||||||
|
}
|
||||||
|
return h.listAccountsFiltered(ctx, platform, accountType, status, search)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *AccountHandler) resolveExportProxies(ctx context.Context, accounts []service.Account) ([]service.Proxy, error) {
|
||||||
|
if len(accounts) == 0 {
|
||||||
|
return []service.Proxy{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
seen := make(map[int64]struct{})
|
||||||
|
ids := make([]int64, 0)
|
||||||
|
for i := range accounts {
|
||||||
|
if accounts[i].ProxyID == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
id := *accounts[i].ProxyID
|
||||||
|
if id <= 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := seen[id]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seen[id] = struct{}{}
|
||||||
|
ids = append(ids, id)
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return []service.Proxy{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return h.adminService.GetProxiesByIDs(ctx, ids)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAccountIDs(c *gin.Context) ([]int64, error) {
|
||||||
|
values := c.QueryArray("ids")
|
||||||
|
if len(values) == 0 {
|
||||||
|
raw := strings.TrimSpace(c.Query("ids"))
|
||||||
|
if raw != "" {
|
||||||
|
values = []string{raw}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(values) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ids := make([]int64, 0, len(values))
|
||||||
|
for _, item := range values {
|
||||||
|
for _, part := range strings.Split(item, ",") {
|
||||||
|
part = strings.TrimSpace(part)
|
||||||
|
if part == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
id, err := strconv.ParseInt(part, 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
return nil, fmt.Errorf("invalid account id: %s", part)
|
||||||
|
}
|
||||||
|
ids = append(ids, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseIncludeProxies(c *gin.Context) (bool, error) {
|
||||||
|
raw := strings.TrimSpace(strings.ToLower(c.Query("include_proxies")))
|
||||||
|
if raw == "" {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
switch raw {
|
||||||
|
case "1", "true", "yes", "on":
|
||||||
|
return true, nil
|
||||||
|
case "0", "false", "no", "off":
|
||||||
|
return false, nil
|
||||||
|
default:
|
||||||
|
return true, fmt.Errorf("invalid include_proxies value: %s", raw)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateDataHeader(payload DataPayload) error {
|
||||||
|
if payload.Type != "" && payload.Type != dataType && payload.Type != legacyDataType {
|
||||||
|
return fmt.Errorf("unsupported data type: %s", payload.Type)
|
||||||
|
}
|
||||||
|
if payload.Version != 0 && payload.Version != dataVersion {
|
||||||
|
return fmt.Errorf("unsupported data version: %d", payload.Version)
|
||||||
|
}
|
||||||
|
if payload.Proxies == nil {
|
||||||
|
return errors.New("proxies is required")
|
||||||
|
}
|
||||||
|
if payload.Accounts == nil {
|
||||||
|
return errors.New("accounts is required")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateDataProxy(item DataProxy) error {
|
||||||
|
if strings.TrimSpace(item.Protocol) == "" {
|
||||||
|
return errors.New("proxy protocol is required")
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(item.Host) == "" {
|
||||||
|
return errors.New("proxy host is required")
|
||||||
|
}
|
||||||
|
if item.Port <= 0 || item.Port > 65535 {
|
||||||
|
return errors.New("proxy port is invalid")
|
||||||
|
}
|
||||||
|
switch item.Protocol {
|
||||||
|
case "http", "https", "socks5", "socks5h":
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("proxy protocol is invalid: %s", item.Protocol)
|
||||||
|
}
|
||||||
|
if item.Status != "" {
|
||||||
|
normalizedStatus := normalizeProxyStatus(item.Status)
|
||||||
|
if normalizedStatus != service.StatusActive && normalizedStatus != "inactive" {
|
||||||
|
return fmt.Errorf("proxy status is invalid: %s", item.Status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateDataAccount(item DataAccount) error {
|
||||||
|
if strings.TrimSpace(item.Name) == "" {
|
||||||
|
return errors.New("account name is required")
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(item.Platform) == "" {
|
||||||
|
return errors.New("account platform is required")
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(item.Type) == "" {
|
||||||
|
return errors.New("account type is required")
|
||||||
|
}
|
||||||
|
if len(item.Credentials) == 0 {
|
||||||
|
return errors.New("account credentials is required")
|
||||||
|
}
|
||||||
|
switch item.Type {
|
||||||
|
case service.AccountTypeOAuth, service.AccountTypeSetupToken, service.AccountTypeAPIKey, service.AccountTypeUpstream:
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("account type is invalid: %s", item.Type)
|
||||||
|
}
|
||||||
|
if item.RateMultiplier != nil && *item.RateMultiplier < 0 {
|
||||||
|
return errors.New("rate_multiplier must be >= 0")
|
||||||
|
}
|
||||||
|
if item.Concurrency < 0 {
|
||||||
|
return errors.New("concurrency must be >= 0")
|
||||||
|
}
|
||||||
|
if item.Priority < 0 {
|
||||||
|
return errors.New("priority must be >= 0")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultProxyName substitutes a placeholder for blank proxy names so
// imported proxies always carry a non-empty display name. Non-blank names
// are returned verbatim (including surrounding whitespace).
func defaultProxyName(name string) string {
	if strings.TrimSpace(name) != "" {
		return name
	}
	return "imported-proxy"
}
|
||||||
|
|
||||||
|
// enrichCredentialsFromIDToken performs best-effort extraction of user info fields
|
||||||
|
// (email, plan_type, chatgpt_account_id, etc.) from id_token in credentials.
|
||||||
|
// Only applies to OpenAI/Sora OAuth accounts. Skips expired token errors silently.
|
||||||
|
// Existing credential values are never overwritten — only missing fields are filled.
|
||||||
|
func enrichCredentialsFromIDToken(item *DataAccount) {
|
||||||
|
if item.Credentials == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Only enrich OpenAI/Sora OAuth accounts
|
||||||
|
platform := strings.ToLower(strings.TrimSpace(item.Platform))
|
||||||
|
if platform != service.PlatformOpenAI && platform != service.PlatformSora {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if strings.ToLower(strings.TrimSpace(item.Type)) != service.AccountTypeOAuth {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
idToken, _ := item.Credentials["id_token"].(string)
|
||||||
|
if strings.TrimSpace(idToken) == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeIDToken skips expiry validation — safe for imported data
|
||||||
|
claims, err := openai.DecodeIDToken(idToken)
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("import_enrich_id_token_decode_failed", "account", item.Name, "error", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
userInfo := claims.GetUserInfo()
|
||||||
|
if userInfo == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fill missing fields only (never overwrite existing values)
|
||||||
|
setIfMissing := func(key, value string) {
|
||||||
|
if value == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if existing, _ := item.Credentials[key].(string); existing == "" {
|
||||||
|
item.Credentials[key] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
setIfMissing("email", userInfo.Email)
|
||||||
|
setIfMissing("plan_type", userInfo.PlanType)
|
||||||
|
setIfMissing("chatgpt_account_id", userInfo.ChatGPTAccountID)
|
||||||
|
setIfMissing("chatgpt_user_id", userInfo.ChatGPTUserID)
|
||||||
|
setIfMissing("organization_id", userInfo.OrganizationID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func normalizeProxyStatus(status string) string {
|
||||||
|
normalized := strings.TrimSpace(strings.ToLower(status))
|
||||||
|
switch normalized {
|
||||||
|
case "":
|
||||||
|
return ""
|
||||||
|
case service.StatusActive:
|
||||||
|
return service.StatusActive
|
||||||
|
case "inactive", service.StatusDisabled:
|
||||||
|
return "inactive"
|
||||||
|
default:
|
||||||
|
return normalized
|
||||||
|
}
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user